Repository: ucb-art/BAG_framework
Branch: master
Commit: daf4b0aaa72f
Files: 162
Total size: 1.4 MB
Directory structure:
gitextract_e9aig5le/
├── .gitignore
├── .gitmodules
├── LICENSE
├── README.md
├── bag/
│ ├── LICENSE
│ ├── __init__.py
│ ├── concurrent/
│ │ ├── LICENSE
│ │ ├── __init__.py
│ │ └── core.py
│ ├── core.py
│ ├── data/
│ │ ├── LICENSE
│ │ ├── __init__.py
│ │ ├── core.py
│ │ ├── dc.py
│ │ ├── digital.py
│ │ ├── lti.py
│ │ ├── ltv.py
│ │ ├── mos.py
│ │ └── plot.py
│ ├── design/
│ │ ├── LICENSE
│ │ ├── __init__.py
│ │ └── module.py
│ ├── interface/
│ │ ├── LICENSE
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── database.py
│ │ ├── ocean.py
│ │ ├── server.py
│ │ ├── simulator.py
│ │ ├── skill.py
│ │ ├── templates/
│ │ │ ├── LICENSE
│ │ │ ├── Module.pyi
│ │ │ ├── PrimModule.pyi
│ │ │ ├── calibreview_setup.txt
│ │ │ ├── load_results.ocn
│ │ │ └── run_simulation.ocn
│ │ └── zmqwrapper.py
│ ├── io/
│ │ ├── LICENSE
│ │ ├── __init__.py
│ │ ├── common.py
│ │ ├── file.py
│ │ ├── gui.py
│ │ ├── process.py
│ │ ├── sim_data.py
│ │ └── template.py
│ ├── layout/
│ │ ├── LICENSE
│ │ ├── __init__.py
│ │ ├── core.py
│ │ ├── digital.py
│ │ ├── objects.py
│ │ ├── routing/
│ │ │ ├── LICENSE
│ │ │ ├── __init__.py
│ │ │ ├── base.py
│ │ │ ├── fill.py
│ │ │ └── grid.py
│ │ ├── tech.py
│ │ ├── template.py
│ │ └── util.py
│ ├── math/
│ │ ├── LICENSE
│ │ ├── __init__.py
│ │ ├── dfun.py
│ │ └── interpolate.py
│ ├── mdao/
│ │ ├── LICENSE
│ │ ├── __init__.py
│ │ ├── components.py
│ │ └── core.py
│ ├── simulation/
│ │ ├── LICENSE
│ │ ├── __init__.py
│ │ ├── core.py
│ │ └── core_v2.py
│ ├── tech/
│ │ ├── LICENSE
│ │ ├── __init__.py
│ │ ├── core.py
│ │ └── mos.py
│ ├── util/
│ │ ├── LICENSE
│ │ ├── __init__.py
│ │ ├── cache.py
│ │ ├── immutable.py
│ │ ├── interval.py
│ │ ├── parse.py
│ │ └── search.py
│ ├── verification/
│ │ ├── LICENSE
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── calibre.py
│ │ ├── icv.py
│ │ ├── pvs.py
│ │ ├── templates/
│ │ │ ├── LICENSE
│ │ │ ├── layout_export_config.txt
│ │ │ └── si_env.txt
│ │ └── virtuoso.py
│ └── virtuoso.py
├── docs/
│ ├── .gitignore
│ ├── LICENSE
│ ├── Makefile
│ ├── README
│ ├── refresh_api.sh
│ └── source/
│ ├── LICENSE
│ ├── api/
│ │ ├── LICENSE
│ │ ├── bag.data.rst
│ │ ├── bag.design.rst
│ │ ├── bag.interface.rst
│ │ ├── bag.io.rst
│ │ ├── bag.layout.routing.rst
│ │ ├── bag.layout.rst
│ │ ├── bag.math.rst
│ │ ├── bag.mdao.rst
│ │ ├── bag.rst
│ │ ├── bag.tech.rst
│ │ ├── bag.util.rst
│ │ ├── bag.verification.rst
│ │ └── modules.rst
│ ├── conf.py
│ ├── developer/
│ │ ├── LICENSE
│ │ └── developer.rst
│ ├── index.rst
│ ├── overview/
│ │ ├── LICENSE
│ │ ├── design.rst
│ │ ├── overview.rst
│ │ ├── schematic.rst
│ │ └── testbench.rst
│ ├── setup/
│ │ ├── LICENSE
│ │ ├── bag_config/
│ │ │ ├── LICENSE
│ │ │ ├── bag_config.rst
│ │ │ ├── database/
│ │ │ │ └── database.rst
│ │ │ ├── misc.rst
│ │ │ ├── simulation/
│ │ │ │ └── simulation.rst
│ │ │ └── socket/
│ │ │ └── socket.rst
│ │ ├── config_summary.rst
│ │ ├── install_python.rst
│ │ ├── new_pdk.rst
│ │ ├── pyoptsparse.rst
│ │ ├── setup.rst
│ │ └── tech_config/
│ │ ├── LICENSE
│ │ ├── layout/
│ │ │ └── layout.rst
│ │ ├── misc.rst
│ │ ├── mos/
│ │ │ └── mos.rst
│ │ └── tech_config.rst
│ └── tutorial/
│ ├── LICENSE
│ ├── figures/
│ │ └── LICENSE
│ └── tutorial.rst
├── run_scripts/
│ ├── LICENSE
│ ├── clean_cds_lib.py
│ ├── compile_verilog.il
│ ├── gen_cell.py
│ ├── generate_verilog.py
│ ├── meas_cell.py
│ ├── run_bag.sh
│ ├── setup_submodules.py
│ ├── sim_cell.py
│ ├── start_bag.il
│ ├── start_bag.sh
│ ├── start_bag_ICADV12d3.il
│ └── virt_server.sh
├── setup.py
└── tests/
├── LICENSE
├── __init__.py
└── layout/
├── LICENSE
├── __init__.py
└── routing/
├── LICENSE
├── __init__.py
└── test_fill.py
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitignore
================================================
*~
*.pyc
.idea
build
dist
bag.egg-info
__pycache__
*.swp
================================================
FILE: .gitmodules
================================================
[submodule "cybag_oa"]
path = cybag_oa
url = https://github.com/ucb-art/cybag_oa.git
================================================
FILE: LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2018, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: README.md
================================================
Berkeley Analog Generator (BAG) version 2.0 and later.
BAG 2.0 is a complete rewrite of BAG 1.x (which was in pre-alpha stage and
was never released publicly).
(Very outdated) Documentation and install instructions can be found at
A tutorial setup is available at
================================================
FILE: bag/LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2018, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: bag/__init__.py
================================================
# -*- coding: utf-8 -*-
"""This is the bag root package.
"""
import signal
from . import math
from .math import float_to_si_string, si_string_to_float
from . import interface
from . import design
from . import data
from . import tech
from . import layout
from .core import BagProject, create_tech_info
__all__ = ['interface', 'design', 'data', 'math', 'tech', 'layout', 'BagProject',
'float_to_si_string', 'si_string_to_float', 'create_tech_info']
# make sure that SIGINT will always be caught by python.
signal.signal(signal.SIGINT, signal.default_int_handler)
================================================
FILE: bag/concurrent/LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2018, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: bag/concurrent/__init__.py
================================================
# -*- coding: utf-8 -*-
"""This package defines helper classes used to perform concurrent operations.
"""
================================================
FILE: bag/concurrent/core.py
================================================
# -*- coding: utf-8 -*-
"""This module defines utility classes for performing concurrent operations.
"""
from typing import Optional, Sequence, Dict, Union, Tuple, Callable, Any
import os
import asyncio
# noinspection PyProtectedMember
from asyncio.subprocess import Process
import subprocess
import multiprocessing
from concurrent.futures import CancelledError
def batch_async_task(coro_list):
    """Execute a list of coroutines or futures concurrently.

    User may press Ctrl-C to cancel all given tasks.

    Parameters
    ----------
    coro_list :
        a list of coroutines or futures to run concurrently.

    Returns
    -------
    results :
        a list of return values or raised exceptions of given tasks.
        None if the tasks were cancelled by the user.
    """
    top_future = asyncio.gather(*coro_list, return_exceptions=True)
    loop = asyncio.get_event_loop()
    try:
        print('Running tasks, Press Ctrl-C to cancel.')
        results = loop.run_until_complete(top_future)
    except KeyboardInterrupt:
        print('Ctrl-C detected, Cancelling tasks.')
        top_future.cancel()
        # Drain the cancelled gather so the tasks actually finish cancelling
        # and the loop shuts down cleanly.  The previous implementation called
        # loop.run_forever() here, which hangs forever because nothing ever
        # calls loop.stop() once the tasks are cancelled.
        # asyncio.CancelledError is used (not concurrent.futures.CancelledError)
        # because they are distinct classes on Python 3.8+.
        try:
            loop.run_until_complete(top_future)
        except asyncio.CancelledError:
            pass
        results = None
    return results
# (args, log, env, cwd): description of a single subprocess to launch.
ProcInfo = Tuple[Union[str, Sequence[str]], str, Optional[Dict[str, str]], Optional[str]]
# ProcInfo plus a validation callable (retcode, log_path) -> Any that decides
# whether the next process in a flow is allowed to run.
FlowInfo = Tuple[Union[str, Sequence[str]], str, Optional[Dict[str, str]], Optional[str],
                 Callable[[Optional[int], str], Any]]
class SubProcessManager(object):
    """A class that provides convenient methods to run multiple subprocesses in parallel using asyncio.

    Parameters
    ----------
    max_workers : Optional[int]
        number of maximum allowed subprocesses.  If None, defaults to system
        CPU count.
    cancel_timeout : Optional[float]
        Number of seconds to wait for a process to terminate once SIGTERM or
        SIGKILL is issued.  Defaults to 10 seconds.
    """

    def __init__(self, max_workers=None, cancel_timeout=10.0):
        # type: (Optional[int], Optional[float]) -> None
        if max_workers is None:
            max_workers = multiprocessing.cpu_count()
        if cancel_timeout is None:
            # guard against an explicit cancel_timeout=None argument
            cancel_timeout = 10.0

        self._cancel_timeout = cancel_timeout
        # limits the number of subprocesses/flows running at any given time
        self._semaphore = asyncio.Semaphore(max_workers)

    async def _kill_subprocess(self, proc: Optional[Process]) -> None:
        """Helper method; send SIGTERM/SIGKILL to a subprocess.

        This method first sends SIGTERM to the subprocess.  If the process hasn't terminated
        after a given timeout, it sends SIGKILL.

        Parameters
        ----------
        proc : Optional[Process]
            the process to attempt to terminate.  If None, this method does nothing.
        """
        if proc is not None:
            if proc.returncode is None:
                try:
                    proc.terminate()
                    try:
                        # shield so that cancellation of this coroutine does not
                        # abort the wait for the child process to exit
                        await asyncio.shield(asyncio.wait_for(proc.wait(), self._cancel_timeout))
                    except CancelledError:
                        pass

                    if proc.returncode is None:
                        # SIGTERM did not work within the timeout; escalate to SIGKILL
                        proc.kill()
                        try:
                            await asyncio.shield(
                                asyncio.wait_for(proc.wait(), self._cancel_timeout))
                        except CancelledError:
                            pass
                except ProcessLookupError:
                    # the process already exited; nothing to do
                    pass
        # NOTE(review): asyncio.TimeoutError raised by wait_for is not caught here and
        # would propagate to the caller — confirm this is intended.

    async def async_new_subprocess(self,
                                   args: Union[str, Sequence[str]],
                                   log: str,
                                   env: Optional[Dict[str, str]] = None,
                                   cwd: Optional[str] = None) -> Optional[int]:
        """A coroutine which starts a subprocess.

        If this coroutine is cancelled, it will shut down the subprocess gracefully using
        SIGTERM/SIGKILL, then raise CancelledError.

        Parameters
        ----------
        args : Union[str, Sequence[str]]
            command to run, as string or sequence of strings.
        log : str
            the log file name.
        env : Optional[Dict[str, str]]
            an optional dictionary of environment variables.  None to inherit from parent.
        cwd : Optional[str]
            the working directory.  None to inherit from parent.

        Returns
        -------
        retcode : Optional[int]
            the return code of the subprocess.
        """
        if isinstance(args, str):
            args = [args]

        # get log file name, make directory if necessary
        log = os.path.abspath(log)
        if os.path.isdir(log):
            raise ValueError('log file %s is a directory.' % log)
        os.makedirs(os.path.dirname(log), exist_ok=True)

        # the semaphore bounds how many subprocesses run concurrently
        async with self._semaphore:
            proc = None
            with open(log, 'w') as logf:
                # record the launched command at the top of the log file
                logf.write('command: %s\n' % (' '.join(args)))
                logf.flush()
                try:
                    # stdout and stderr are both redirected into the log file
                    proc = await asyncio.create_subprocess_exec(*args, stdout=logf,
                                                                stderr=subprocess.STDOUT,
                                                                env=env, cwd=cwd)
                    retcode = await proc.wait()
                    return retcode
                except CancelledError as err:
                    # cancellation: terminate the child before re-raising
                    await self._kill_subprocess(proc)
                    raise err

    async def async_new_subprocess_flow(self,
                                        proc_info_list: Sequence[FlowInfo]) -> Any:
        """A coroutine which runs a series of subprocesses.

        If this coroutine is cancelled, it will shut down the current subprocess gracefully using
        SIGTERM/SIGKILL, then raise CancelledError.

        Parameters
        ----------
        proc_info_list : Sequence[FlowInfo]
            a list of processes to execute in series.  Each element is a tuple of:

            args : Union[str, Sequence[str]]
                command to run, as string or list of string arguments.
            log : str
                log file name.
            env : Optional[Dict[str, str]]
                environment variable dictionary.  None to inherit from parent.
            cwd : Optional[str]
                working directory path.  None to inherit from parent.
            vfun : Sequence[Callable[[Optional[int], str], Any]]
                a function to validate if it is ok to execute the next process.  The output of the
                last function is returned.  The first argument is the return code, the second
                argument is the log file name.

        Returns
        -------
        result : Any
            the return value of the last validate function.  None if validate function
            returns False.
        """
        num_proc = len(proc_info_list)
        if num_proc == 0:
            return None

        # hold the semaphore for the whole flow so the series counts as one worker
        async with self._semaphore:
            for idx, (args, log, env, cwd, vfun) in enumerate(proc_info_list):
                if isinstance(args, str):
                    args = [args]

                # get log file name, make directory if necessary
                log = os.path.abspath(log)
                if os.path.isdir(log):
                    raise ValueError('log file %s is a directory.' % log)
                os.makedirs(os.path.dirname(log), exist_ok=True)

                proc, retcode = None, None
                with open(log, 'w') as logf:
                    logf.write('command: %s\n' % (' '.join(args)))
                    logf.flush()
                    try:
                        proc = await asyncio.create_subprocess_exec(*args, stdout=logf,
                                                                    stderr=subprocess.STDOUT,
                                                                    env=env, cwd=cwd)
                        retcode = await proc.wait()
                    except CancelledError as err:
                        # cancellation: terminate the current child before re-raising
                        await self._kill_subprocess(proc)
                        raise err

                # let the validator decide whether to continue the flow
                fun_output = vfun(retcode, log)
                if idx == num_proc - 1:
                    return fun_output
                elif not fun_output:
                    # validator vetoed the next step; abort the flow
                    return None

    def batch_subprocess(self, proc_info_list):
        # type: (Sequence[ProcInfo]) -> Optional[Sequence[Union[int, Exception]]]
        """Run all given subprocesses in parallel.

        Parameters
        ----------
        proc_info_list : Sequence[ProcInfo]
            a list of process information.  Each element is a tuple of:

            args : Union[str, Sequence[str]]
                command to run, as string or list of string arguments.
            log : str
                log file name.
            env : Optional[Dict[str, str]]
                environment variable dictionary.  None to inherit from parent.
            cwd : Optional[str]
                working directory path.  None to inherit from parent.

        Returns
        -------
        results : Optional[Sequence[Union[int, Exception]]]
            if user cancelled the subprocesses, None is returned.  Otherwise, a list of
            subprocess return codes or exceptions are returned.
        """
        num_proc = len(proc_info_list)
        if num_proc == 0:
            return []

        coro_list = [self.async_new_subprocess(args, log, env, cwd) for args, log, env, cwd in
                     proc_info_list]

        return batch_async_task(coro_list)

    def batch_subprocess_flow(self, proc_info_list):
        # type: (Sequence[Sequence[FlowInfo]]) -> Optional[Sequence[Any]]
        """Run all given subprocess flows in parallel.

        Parameters
        ----------
        proc_info_list : Sequence[Sequence[FlowInfo]]
            a list of process flow information.  Each element is a sequence of tuples of:

            args : Union[str, Sequence[str]]
                command to run, as string or list of string arguments.
            log : str
                log file name.
            env : Optional[Dict[str, str]]
                environment variable dictionary.  None to inherit from parent.
            cwd : Optional[str]
                working directory path.  None to inherit from parent.
            vfun : Sequence[Callable[[Optional[int], str], Any]]
                a function to validate if it is ok to execute the next process.  The output of the
                last function is returned.  The first argument is the return code, the second
                argument is the log file name.

        Returns
        -------
        results : Optional[Sequence[Any]]
            if user cancelled the subprocess flows, None is returned.  Otherwise, a list of
            flow return values or exceptions are returned.
        """
        num_proc = len(proc_info_list)
        if num_proc == 0:
            return []

        coro_list = [self.async_new_subprocess_flow(flow_info) for flow_info in proc_info_list]

        return batch_async_task(coro_list)
================================================
FILE: bag/core.py
================================================
# -*- coding: utf-8 -*-
"""This is the core bag module.
"""
from typing import TYPE_CHECKING, Dict, Any, Tuple, Optional, Union, Type, Sequence, TypeVar
import os
import importlib
import cProfile
import pstats
from pathlib import Path
# noinspection PyPackageRequirements
from .interface import ZMQDealer
from .interface.database import DbAccess
from .design import ModuleDB, SchInstance
from .layout.routing import RoutingGrid
from .layout.template import TemplateDB
from .layout.core import DummyTechInfo
from .io import read_file, sim_data, read_yaml_env
from .concurrent.core import batch_async_task
if TYPE_CHECKING:
from .interface.simulator import SimAccess
from .layout.template import TemplateBase
from .layout.core import TechInfo
from .design.module import Module
from .simulation.core_v2 import TestbenchManager, MeasurementManager
ModuleType = TypeVar('ModuleType', bound=Module)
TemplateType = TypeVar('TemplateType', bound=TemplateBase)
def _get_config_file_abspath(fname):
"""Get absolute path of configuration file using BAG_WORK_DIR environment variable."""
fname = os.path.basename(fname)
if 'BAG_WORK_DIR' not in os.environ:
raise ValueError('Environment variable BAG_WORK_DIR not defined')
work_dir = os.environ['BAG_WORK_DIR']
if not os.path.isdir(work_dir):
raise ValueError('$BAG_WORK_DIR = %s is not a directory' % work_dir)
# read port number
fname = os.path.join(work_dir, fname)
if not os.path.isfile(fname):
raise ValueError('Cannot find file: %s' % fname)
return fname
def _get_port_number(port_file):
    # type: (str) -> Tuple[Optional[int], str]
    """Read the BAG server port number from the given port file.

    Parameters
    ----------
    port_file : str
        a file containing the communication port number.

    Returns
    -------
    port : Optional[int]
        the port number if reading is successful, None otherwise.
    msg : str
        Empty string on success, the error message on failure.
    """
    try:
        abs_path = _get_config_file_abspath(port_file)
    except ValueError as err:
        # cannot locate the port file; report the reason to the caller
        return None, str(err)

    return int(read_file(abs_path)), ''
def _import_class_from_str(class_str):
# type: (str) -> Type
"""Given a Python class string, convert it to the Python class.
Parameters
----------
class_str : str
a Python class string/
Returns
-------
py_class : class
a Python class.
"""
sections = class_str.split('.')
module_str = '.'.join(sections[:-1])
class_str = sections[-1]
modul = importlib.import_module(module_str)
return getattr(modul, class_str)
class Testbench(object):
    """A class that represents a testbench instance.

    Parameters
    ----------
    sim : :class:`bag.interface.simulator.SimAccess`
        The SimAccess instance used to issue simulation commands.
    db : :class:`bag.interface.database.DbAccess`
        The DbAccess instance used to update testbench schematic.
    lib : str
        testbench library.
    cell : str
        testbench cell.
    parameters : Dict[str, str]
        the simulation parameter dictionary.  The values are string representation
        of actual parameter values.
    env_list : Sequence[str]
        list of defined simulation environments.
    default_envs : Sequence[str]
        the selected simulation environments.
    outputs : Dict[str, str]
        default output expressions

    Attributes
    ----------
    lib : str
        testbench library.
    cell : str
        testbench cell.
    save_dir : str
        directory containing the last simulation data.
    """

    def __init__(self,  # type: Testbench
                 sim,  # type: SimAccess
                 db,  # type: DbAccess
                 lib,  # type: str
                 cell,  # type: str
                 parameters,  # type: Dict[str, str]
                 env_list,  # type: Sequence[str]
                 default_envs,  # type: Sequence[str]
                 outputs,  # type: Dict[str, str]
                 ):
        # type: (...) -> None
        """Create a new testbench instance.
        """
        self.sim = sim
        self.db = db
        self.lib = lib
        self.cell = cell
        self.parameters = parameters
        # per-environment overrides: env name -> {param name: formatted value}
        self.env_parameters = {}
        self.env_list = env_list
        self.sim_envs = default_envs
        # simulation view rules: '<lib>__<cell>' -> view name
        self.config_rules = {}
        self.outputs = outputs
        self.save_dir = None

    def get_defined_simulation_environments(self):
        # type: () -> Sequence[str]
        """Return a list of defined simulation environments."""
        return self.env_list

    def get_current_simulation_environments(self):
        # type: () -> Sequence[str]
        """Return a list of simulation environments this testbench will simulate."""
        return self.sim_envs

    def add_output(self, var, expr):
        # type: (str, str) -> None
        """Add an output expression to be recorded and exported back to python.

        Parameters
        ----------
        var : str
            output variable name.
        expr : str
            the output expression.

        Raises
        ------
        ValueError
            if var is a reserved output variable name.
        """
        if var in sim_data.illegal_var_name:
            raise ValueError('Variable name %s is illegal.' % var)
        self.outputs[var] = expr

    def set_parameter(self, name, val, precision=6):
        # type: (str, Union[int, float], int) -> None
        """Sets the value of the given simulation parameter.

        Parameters
        ----------
        name : str
            parameter name.
        val : Union[int, float]
            parameter value
        precision : int
            the parameter value will be rounded to this precision.
        """
        if isinstance(val, str):
            # string values (e.g. expressions) are stored verbatim
            self.parameters[name] = val
        else:
            param_config = dict(type='single', value=val)
            self.parameters[name] = self.sim.format_parameter_value(param_config, precision)

    def set_env_parameter(self, name, val_list, precision=6):
        # type: (str, Sequence[float], int) -> None
        """Configure the given parameter to have different value across simulation environments.

        Parameters
        ----------
        name : str
            the parameter name.
        val_list : Sequence[float]
            the parameter values for each simulation environment.  the order of the simulation
            environments can be found in self.sim_envs
        precision : int
            the parameter value will be rounded to this precision.

        Raises
        ------
        ValueError
            if the number of values does not match the number of enabled
            simulation environments.
        """
        if len(self.sim_envs) != len(val_list):
            raise ValueError('env parameter must have %d values.' % len(self.sim_envs))

        default_val = None
        for env, val in zip(self.sim_envs, val_list):
            cur_dict = self.env_parameters.setdefault(env, {})
            param_config = dict(type='single', value=val)
            cur_val = self.sim.format_parameter_value(param_config, precision)
            if default_val is None:
                # the first environment's value doubles as the default parameter value
                default_val = cur_val
            # reuse the already-formatted value; the original formatted the
            # same config a second time here for no reason.
            cur_dict[name] = cur_val
        self.parameters[name] = default_val

    def set_sweep_parameter(self, name, precision=6, **kwargs):
        # type: (str, int, **Any) -> None
        """Set to sweep the given parameter.

        To set the sweep values directly:

        tb.set_sweep_parameter('var', values=[1.0, 5.0, 10.0])

        To set a linear sweep with start/stop/step (inclusive start and stop):

        tb.set_sweep_parameter('var', start=1.0, stop=9.0, step=4.0)

        To set a logarithmic sweep with points per decade (inclusive start and stop):

        tb.set_sweep_parameter('var', start=1.0, stop=10.0, num_decade=3)

        Parameters
        ----------
        name : str
            parameter name.
        precision : int
            the parameter value will be rounded to this precision.
        **kwargs : Any
            the sweep parameters.  Refer to the above for example calls.

        Raises
        ------
        Exception
            if the keyword arguments do not match any supported sweep form.
        """
        if 'values' in kwargs:
            param_config = dict(type='list', values=kwargs['values'])
        elif 'start' in kwargs and 'stop' in kwargs:
            start = kwargs['start']
            stop = kwargs['stop']
            if 'step' in kwargs:
                step = kwargs['step']
                param_config = dict(type='linstep', start=start, stop=stop, step=step)
            elif 'num_decade' in kwargs:
                num = kwargs['num_decade']
                param_config = dict(type='decade', start=start, stop=stop, num=num)
            else:
                raise Exception('Unsupported sweep arguments: %s' % kwargs)
        else:
            raise Exception('Unsupported sweep arguments: %s' % kwargs)
        self.parameters[name] = self.sim.format_parameter_value(param_config, precision)

    def set_simulation_environments(self, env_list):
        # type: (Sequence[str]) -> None
        """Enable the given list of simulation environments.

        If more than one simulation environment is specified, then a sweep
        will be performed.

        Parameters
        ----------
        env_list : Sequence[str]
            the simulation environments to enable.
        """
        self.sim_envs = env_list

    def set_simulation_view(self, lib_name, cell_name, sim_view):
        # type: (str, str, str) -> None
        """Set the simulation view of the given design.

        For simulation, each design may have multiple views, such as schematic,
        veriloga, extracted, etc.  This method lets you choose which view to
        use for netlisting.  the given design can be the top level design or
        an intermediate instance.

        Parameters
        ----------
        lib_name : str
            design library name.
        cell_name : str
            design cell name.
        sim_view : str
            the view to simulate with.
        """
        key = '%s__%s' % (lib_name, cell_name)
        self.config_rules[key] = sim_view

    def update_testbench(self):
        # type: () -> None
        """Commit the testbench changes to the CAD database.
        """
        # convert '<lib>__<cell>' -> view rules back into [lib, cell, view] triples
        config_list = []
        for key, view in self.config_rules.items():
            lib, cell = key.split('__')
            config_list.append([lib, cell, view])

        # per-environment parameter overrides, in sim_envs order
        env_params = []
        for env in self.sim_envs:
            if env in self.env_parameters:
                val_table = self.env_parameters[env]
                env_params.append(list(val_table.items()))

        self.db.update_testbench(self.lib, self.cell, self.parameters, self.sim_envs, config_list,
                                 env_params)

    def run_simulation(self, precision=6, sim_tag=None):
        # type: (int, Optional[str]) -> Optional[str]
        """Run simulation.

        Parameters
        ----------
        precision : int
            the floating point number precision.
        sim_tag : Optional[str]
            optional description for this simulation run.

        Returns
        -------
        value : Optional[str]
            the save directory path.  If simulation is cancelled, return None.
        """
        coro = self.async_run_simulation(precision=precision, sim_tag=sim_tag)
        batch_async_task([coro])
        return self.save_dir

    def load_sim_results(self, hist_name, precision=6):
        # type: (str, int) -> Optional[str]
        """Load previous simulation data.

        Parameters
        ----------
        hist_name : str
            the simulation history name.
        precision : int
            the floating point number precision.

        Returns
        -------
        value : Optional[str]
            the save directory path.  If result loading is cancelled, return None.
        """
        coro = self.async_load_results(hist_name, precision=precision)
        batch_async_task([coro])
        return self.save_dir

    async def async_run_simulation(self,
                                   precision: int = 6,
                                   sim_tag: Optional[str] = None) -> str:
        """A coroutine that runs the simulation.

        Parameters
        ----------
        precision : int
            the floating point number precision.
        sim_tag : Optional[str]
            optional description for this simulation run.

        Returns
        -------
        value : str
            the save directory path.
        """
        # clear first so a cancelled run leaves save_dir as None
        self.save_dir = None
        self.save_dir = await self.sim.async_run_simulation(self.lib, self.cell, self.outputs,
                                                            precision=precision, sim_tag=sim_tag)
        return self.save_dir

    async def async_load_results(self, hist_name: str, precision: int = 6) -> str:
        """A coroutine that loads previous simulation data.

        Parameters
        ----------
        hist_name : str
            the simulation history name.
        precision : int
            the floating point number precision.

        Returns
        -------
        value : str
            the save directory path.
        """
        # clear first so a cancelled load leaves save_dir as None
        self.save_dir = None
        self.save_dir = await self.sim.async_load_results(self.lib, self.cell, hist_name,
                                                          self.outputs, precision=precision)
        return self.save_dir
def create_tech_info(bag_config_path=None):
    # type: (Optional[str]) -> TechInfo
    """Create and return the process TechInfo object.

    Parameters
    ----------
    bag_config_path : Optional[str]
        path to the BAG configuration file.  If None, it is read from the
        BAG_CONFIG_PATH environment variable.

    Returns
    -------
    tech_info : TechInfo
        an instance of the TechInfo class named in the technology configuration,
        or a DummyTechInfo placeholder if no class is specified.
    """
    if bag_config_path is None:
        bag_config_path = os.environ.get('BAG_CONFIG_PATH')
        if bag_config_path is None:
            raise Exception('BAG_CONFIG_PATH not defined.')

    bag_config = read_yaml_env(bag_config_path)
    tech_params = read_yaml_env(bag_config['tech_config_path'])

    if 'class' not in tech_params:
        # just make a default tech_info object as place holder.
        print('*WARNING*: No TechInfo class defined. Using a dummy version.')
        return DummyTechInfo(tech_params)

    tech_cls = _import_class_from_str(tech_params['class'])
    return tech_cls(tech_params)
class BagProject(object):
"""The main bag controller class.
This class mainly stores all the user configurations, and issue
high level bag commands.
Parameters
----------
bag_config_path : Optional[str]
the bag configuration file path. If None, will attempt to read from
environment variable BAG_CONFIG_PATH.
port : Optional[int]
the BAG server process port number. If not given, will read from port file.
Attributes
----------
bag_config : Dict[str, Any]
the BAG configuration parameters dictionary.
tech_info : bag.layout.core.TechInfo
the BAG process technology class.
"""
def __init__(self, bag_config_path=None, port=None):
# type: (Optional[str], Optional[int]) -> None
if bag_config_path is None:
if 'BAG_CONFIG_PATH' not in os.environ:
raise Exception('BAG_CONFIG_PATH not defined.')
bag_config_path = os.environ['BAG_CONFIG_PATH']
self.bag_config = read_yaml_env(bag_config_path)
bag_tmp_dir = os.environ.get('BAG_TEMP_DIR', None)
# get port files
if port is None:
socket_config = self.bag_config['socket']
if 'port_file' in socket_config:
port, msg = _get_port_number(socket_config['port_file'])
if msg:
print('*WARNING* %s' % msg)
# create ZMQDealer object
dealer_kwargs = {}
dealer_kwargs.update(self.bag_config['socket'])
del dealer_kwargs['port_file']
# create TechInfo instance
self.tech_info = create_tech_info(bag_config_path=bag_config_path)
# create design module database.
try:
lib_defs_file = _get_config_file_abspath(self.bag_config['lib_defs'])
except ValueError:
lib_defs_file = ''
sch_exc_libs = self.bag_config['database']['schematic']['exclude_libraries']
self.dsn_db = ModuleDB(lib_defs_file, self.tech_info, sch_exc_libs, prj=self)
if port is not None:
# make DbAccess instance.
dealer = ZMQDealer(port, **dealer_kwargs)
db_cls = _import_class_from_str(self.bag_config['database']['class'])
self.impl_db = db_cls(dealer, bag_tmp_dir, self.bag_config['database'])
self._default_lib_path = self.impl_db.default_lib_path
else:
self.impl_db = None # type: Optional[DbAccess]
self._default_lib_path = DbAccess.get_default_lib_path(self.bag_config['database'])
# make SimAccess instance.
sim_cls = _import_class_from_str(self.bag_config['simulation']['class'])
self.sim = sim_cls(bag_tmp_dir, self.bag_config['simulation']) # type: SimAccess
@property
def default_lib_path(self):
    # type: () -> str
    """The default directory in which new libraries are created."""
    path = self._default_lib_path
    return path
def close_bag_server(self):
    # type: () -> None
    """Shut down the connection to the BAG database server, if one is open."""
    server = self.impl_db
    if server is not None:
        server.close()
        self.impl_db = None
def close_sim_server(self):
    # type: () -> None
    """Shut down the connection to the BAG simulation server, if one is open."""
    server = self.sim
    if server is not None:
        server.close()
        self.sim = None
def import_design_library(self, lib_name):
    # type: (str) -> None
    """Import every design template in the given CAD library into Python.

    Parameters
    ----------
    lib_name : str
        name of the library to import.
    """
    server = self.impl_db
    if server is None:
        raise Exception('BAG Server is not set up.')
    server.import_design_library(lib_name, self.dsn_db, self.bag_config['new_lib_path'])
def import_sch_cellview(self, lib_name: str, cell_name: str) -> None:
    """Recursively import the given schematic and symbol template into Python.

    Parameters
    ----------
    lib_name : str
        library name.
    cell_name : str
        cell name.
    """
    server = self.impl_db
    if server is None:
        raise Exception('BAG Server is not set up.')
    server.import_sch_cellview(lib_name, cell_name, self.dsn_db,
                               self.bag_config['new_lib_path'])
def get_cells_in_library(self, lib_name):
    # type: (str) -> Sequence[str]
    """Return the names of all cells in the given library.

    An empty sequence is returned when the library does not exist.

    Parameters
    ----------
    lib_name : str
        the library name.

    Returns
    -------
    cell_list : Sequence[str]
        the cells in the library.
    """
    server = self.impl_db
    if server is None:
        raise Exception('BAG Server is not set up.')
    return server.get_cells_in_library(lib_name)
def make_template_db(self, impl_lib, grid_specs, use_cybagoa=True, gds_lay_file='',
                     cache_dir=''):
    # type: (str, Dict[str, Any], bool, str, str) -> TemplateDB
    """Build and return a new TemplateDB from a routing-grid specification.

    Parameters
    ----------
    impl_lib : str
        the library name to put generated layouts in.
    grid_specs : Dict[str, Any]
        the routing grid specification dictionary.
    use_cybagoa : bool
        True to enable cybagoa acceleration if available.
    gds_lay_file : str
        the GDS layout information file.
    cache_dir : str
        the cache directory name.
    """
    grid = RoutingGrid(self.tech_info,
                       grid_specs['layers'],
                       grid_specs['spaces'],
                       grid_specs['widths'],
                       grid_specs['bot_dir'],
                       width_override=grid_specs.get('width_override', None))
    return TemplateDB('template_libs.def', grid, impl_lib, use_cybagoa=use_cybagoa,
                      gds_lay_file=gds_lay_file, cache_dir=cache_dir, prj=self)
def generate_cell(self,  # type: BagProject
                  specs,  # type: Dict[str, Any]
                  temp_cls=None,  # type: Optional[Type[TemplateType]]
                  gen_lay=True,  # type: bool
                  gen_sch=False,  # type: bool
                  run_lvs=False,  # type: bool
                  run_rcx=False,  # type: bool
                  use_cybagoa=True,  # type: bool
                  debug=False,  # type: bool
                  profile_fname='',  # type: str
                  use_cache=False,  # type: bool
                  save_cache=False,  # type: bool
                  **kwargs,
                  ):
    # type: (...) -> Optional[Union[pstats.Stats, Dict[str, Any]]]
    """Generate layout/schematic of a given cell from a specification dictionary.

    Runs the requested subset of the layout -> schematic -> LVS -> RCX flow.
    Both LVS and RCX raise ValueError on failure rather than returning.

    Parameters
    ----------
    specs : Dict[str, Any]
        the specification dictionary.
    temp_cls : Optional[Type[TemplateType]]
        the TemplateBase subclass to instantiate.
        If not provided, it will be imported from the lay_class entry in specs.
    gen_lay : bool
        True to generate layout.
    gen_sch : bool
        True to generate schematics.
    run_lvs : bool
        True to run LVS.
    run_rcx : bool
        True to run RCX.
    use_cybagoa : bool
        True to enable cybagoa acceleration if available.
    debug : bool
        True to print debug messages.
    profile_fname : str
        If not empty, profile layout generation and save statistics to this file.
    use_cache : bool
        True to use cached layouts.
    save_cache : bool
        True to save instances in this template to cache.
    **kwargs :
        Additional optional arguments ('prefix'/'suffix' for schematic naming).

    Returns
    -------
    result : Optional[Union[pstats.Stats, Dict[str, Any]]]
        If profiling is enabled, the statistics object.
        If the last step run is layout/schematic, the schematic parameters.
        If the last step run is LVS or RCX, a dict with an (empty) 'log' entry.
    """
    prefix = kwargs.get('prefix', '')
    suffix = kwargs.get('suffix', '')
    # unpack the specification dictionary.
    grid_specs = specs['routing_grid']
    impl_lib = specs['impl_lib']
    impl_cell = specs['impl_cell']
    lay_str = specs.get('lay_class', '')
    sch_lib = specs.get('sch_lib', '')
    sch_cell = specs.get('sch_cell', '')
    params = specs['params']
    gds_lay_file = specs.get('gds_lay_file', '')
    cache_dir = specs.get('cache_dir', '')
    # resolve the layout generator class; an explicit temp_cls wins over lay_class.
    if temp_cls is None and lay_str:
        temp_cls = _import_class_from_str(lay_str)
    has_lay = temp_cls is not None
    if gen_lay and not has_lay:
        raise ValueError('layout_class is not specified')
    # only pass the cache directory to the template DB when reading is enabled.
    if use_cache:
        db_cache_dir = specs.get('cache_dir', '')
    else:
        db_cache_dir = ''
    result_pstat = None
    if has_lay:
        temp_db = self.make_template_db(impl_lib, grid_specs, use_cybagoa=use_cybagoa,
                                        gds_lay_file=gds_lay_file, cache_dir=db_cache_dir)
        name_list = [impl_cell]
        print('computing layout...')
        if profile_fname:
            # profile one extra template computation and dump the stats;
            # the real template is still computed below.
            profiler = cProfile.Profile()
            profiler.runcall(temp_db.new_template, params=params, temp_cls=temp_cls,
                             debug=False)
            profiler.dump_stats(profile_fname)
            result_pstat = pstats.Stats(profile_fname).strip_dirs()
        temp = temp_db.new_template(params=params, temp_cls=temp_cls, debug=debug)
        print('computation done.')
        temp_list = [temp]
        if save_cache and cache_dir:
            # persist all instance masters of this template to the cache.
            master_list = [inst.master for inst in temp.instance_iter()]
            print('saving layouts to cache...')
            temp_db.save_to_cache(master_list, cache_dir, debug=debug)
            print('saving done.')
        if gen_lay:
            print('creating layout...')
            temp_db.batch_layout(self, temp_list, name_list, debug=debug)
            print('layout done.')
        # schematic parameters come from the computed layout template.
        sch_params = temp.sch_params
    else:
        # no layout generator; use the raw parameters for the schematic.
        sch_params = params
    if gen_sch:
        dsn = self.create_design_module(lib_name=sch_lib, cell_name=sch_cell)
        print('computing schematic...')
        dsn.design(**sch_params)
        print('creating schematic...')
        dsn.implement_design(impl_lib, top_cell_name=impl_cell, prefix=prefix,
                             suffix=suffix)
        print('schematic done.')
    result = sch_params
    lvs_passed = False
    if run_lvs:
        print('running lvs...')
        lvs_passed, lvs_log = self.run_lvs(impl_lib, impl_cell, gds_lay_file=gds_lay_file)
        if lvs_passed:
            print('LVS passed!')
            result = dict(log='')
        else:
            raise ValueError(f'LVS failed, lvs_log: {lvs_log}')
    # run RCX only when LVS passed, or when LVS was skipped entirely.
    if run_rcx and ((run_lvs and lvs_passed) or not run_lvs):
        print('running rcx...')
        rcx_passed, rcx_log = self.run_rcx(impl_lib, impl_cell)
        if rcx_passed:
            print('RCX passed!')
            result = dict(log='')
        else:
            raise ValueError(f'RCX failed, rcx_log: {rcx_log}')
    if result_pstat:
        return result_pstat
    return result
def replace_dut_in_wrapper(self, params: Dict[str, Any], dut_lib: str,
                           dut_cell: str) -> None:
    """Set ``dut_lib``/``dut_cell`` on the innermost wrapper parameter dict.

    Follows the chain of nested 'dut_params' entries and writes the DUT
    library and cell names into the deepest dictionary (the one without a
    'dut_params' entry of its own).
    """
    node = params
    while node.get('dut_params', None) is not None:
        node = node['dut_params']
    node['dut_lib'] = dut_lib
    node['dut_cell'] = dut_cell
def simulate_cell(self,
                  specs: Dict[str, Any],
                  gen_cell: bool = True,
                  gen_wrapper: bool = True,
                  gen_tb: bool = True,
                  load_results: bool = False,
                  extract: bool = False,
                  run_sim: bool = True) -> Optional[Dict[str, Any]]:
    """
    Runs a minimum executable part of the Testbench Manager flow selectively
    according to a spec dictionary.

    For example you can set the flags to generate a new cell, but since wrapper
    and test bench exist, maybe you want to skip those, and run the simulation
    in the end.  Maybe you already created the cell all the way up to test bench
    level, and now you only need to run simulation.

    This function only works with Testbench Managers written in format of
    simulation.core_v2.TestbenchManager

    Parameters
    ----------
    specs:
        Dictionary of specifications
        Some non-obvious conventions:
        - if it contains a tbm_specs keyword, simulation is run through testbench
          manager v2, otherwise there should be a sim_params entry that specifies
          the simulation.
        - Wrapper is assumed to be in the specs dictionary; if it is generated
          outside of this function, gen_wrapper should be False.
    gen_cell:
        True to call generate_cell on specs
    gen_wrapper:
        True to generate Wrapper. Currently only one top-level wrapper is supported.
    gen_tb:
        True to generate test bench. If test bench is created, this flag can be
        set to False.
    load_results:
        True to skip simulation and load the results.
    extract:
        False to skip layout generation and only simulate schematic
    run_sim:
        True to run simulation. If the purpose of calling this function is just
        to generate some part of simulation flow to debug, this flag can be set
        to False.

    Returns
    -------
    results: Optional[Dict[str, Any]]
        if run_sim/load_results = True, contains simulation results, otherwise None.
    """
    impl_lib = specs['impl_lib']
    impl_cell = specs['impl_cell']
    root_dir = Path(specs['root_dir'])
    if gen_cell and not load_results:
        # layout/LVS/RCX are only run when an extracted view is requested.
        print('generating cell ...')
        self.generate_cell(specs,
                           gen_lay=extract,
                           gen_sch=True,
                           run_lvs=extract,
                           run_rcx=extract,
                           use_cybagoa=True)
        print('cell generated.')
    # if testbench manager v2 found, use that instead of interpreting
    # the simulation specification directly.
    tbm_specs = specs.get('tbm_specs', None)
    if tbm_specs:
        tbm_cls_str = tbm_specs['tbm_cls']
        tbm_cls = _import_class_from_str(tbm_cls_str)
        tbm: TestbenchManager = tbm_cls(root_dir)
        sim_view_list = tbm_specs.get('sim_view_list', [])
        if not sim_view_list:
            # default view: extracted netlist or plain schematic.
            view_name = 'netlist' if extract else 'schematic'
            sim_view_list.append((impl_cell, view_name))
        sim_envs = tbm_specs['sim_envs']
        if load_results:
            return tbm.load_results(impl_cell, tbm_specs)
        results = tbm.simulate(bprj=self,
                               impl_lib=impl_lib,
                               impl_cell=impl_cell,
                               sim_view_list=sim_view_list,
                               env_list=sim_envs,
                               tb_dict=tbm_specs,
                               wrapper_dict=None,
                               gen_tb=gen_tb,
                               gen_wrapper=gen_wrapper,
                               run_sim=run_sim)
        return results
    # legacy path: interpret sim_params directly.
    # NOTE(review): sim_params is fetched with .get(), so a missing
    # 'sim_params' entry raises AttributeError on the next line.
    sim_params = specs.get('sim_params', None)
    wrapper = sim_params.get('wrapper', None)
    has_wrapper = wrapper is not None
    if gen_wrapper and not has_wrapper:
        raise ValueError('must provide a wrapper in sim_params')
    wrapper_lib = wrapper_cell = wrapped_cell = wrapper_params = None
    if has_wrapper:
        wrapper_lib = wrapper['wrapper_lib']
        wrapper_cell = wrapper['wrapper_cell']
        wrapper_params = wrapper.get('params', {})
        wrapper_suffix = wrapper.get('wrapper_suffix', '')
        if not wrapper_suffix:
            # default suffix is the wrapper cell name itself.
            wrapper_suffix = f'{wrapper_cell}'
        wrapped_cell = f'{impl_cell}_{wrapper_suffix}'
        if gen_wrapper and not gen_tb:
            # a fresh wrapper changes the DUT seen by the testbench.
            raise ValueError('generated a new wrapper, therefore gen_tb should also be true')
    tb_lib = sim_params['tb_lib']
    tb_cell = sim_params['tb_cell']
    tb_params = sim_params.get('tb_params', {})
    tb_suffix = sim_params.get('tb_suffix', '')
    if not tb_suffix:
        tb_suffix = f'{tb_cell}'
    tb_name = f'{impl_cell}_{tb_suffix}'
    # results are cached per-testbench as an hdf5 file under root_dir.
    tb_fname = root_dir / Path(tb_name, f'{tb_name}.hdf5')
    if load_results:
        print("loading results ...")
        if tb_fname.exists():
            return sim_data.load_sim_file(tb_fname)
        raise ValueError(f'simulation results does not exist in {str(tb_fname)}')
    if gen_wrapper and has_wrapper:
        print('generating wrapper ...')
        master = self.create_design_module(lib_name=wrapper_lib, cell_name=wrapper_cell)
        # point the (possibly nested) wrapper at the generated DUT cell.
        self.replace_dut_in_wrapper(wrapper_params, impl_lib, impl_cell)
        master.design(**wrapper_params)
        master.implement_design(impl_lib, wrapped_cell)
        print('wrapper generated.')
    if gen_tb:
        print('generating testbench ...')
        tb_master = self.create_design_module(tb_lib, tb_cell)
        # testbench instantiates the wrapper when one exists, else the DUT.
        dut_cell = wrapped_cell if has_wrapper else impl_cell
        tb_master.design(dut_lib=impl_lib, dut_cell=dut_cell, **tb_params)
        tb_master.implement_design(impl_lib, tb_name)
        print('testbench generated.')
    if run_sim:
        print('setting up ADEXL ...')
        sim_view_list = sim_params.get('sim_view_list', [])
        if not sim_view_list:
            view_name = 'netlist' if extract else 'schematic'
            sim_view_list.append((impl_cell, view_name))
        sim_envs = sim_params['sim_envs']
        sim_swp_params = sim_params.get('sim_swp_params', {})
        sim_vars = sim_params.get('sim_vars', {})
        sim_outputs = sim_params.get('sim_outputs', {})
        tb = self.configure_testbench(impl_lib, tb_name)
        # set simulation variables
        for key, val in sim_vars.items():
            tb.set_parameter(key, val)
        # set sweep parameters
        for key, val in sim_swp_params.items():
            tb.set_sweep_parameter(key, **val)
        # set the simulation outputs
        for key, val in sim_outputs.items():
            tb.add_output(key, val)
        # change the view_name (netlist or schematic)
        for cell, view in sim_view_list:
            tb.set_simulation_view(impl_lib, cell, view)
        tb.set_simulation_environments(sim_envs)
        tb.update_testbench()
        print('setup completed.')
        print('running simulation ...')
        tb.run_simulation()
        print('simulation done.')
        print('loading results ...')
        results = sim_data.load_sim_results(tb.save_dir)
        if not results.get('sweep_params', {}):
            raise ValueError(f'results are empty, either you forgot to specify outputs, or '
                             f'simulation failed. check sim_log: {tb.save_dir}/ocn_output.log')
        print('results loaded.')
        print('saving results into hdf5')
        sim_data.save_sim_results(results, tb_fname)
        print('results saved.')
        return results
def measure_cell(self,
                 specs: Dict[str, Any],
                 gen_cell: bool = True,
                 gen_wrapper: bool = True,
                 gen_tb: bool = True,
                 load_results: bool = False,
                 extract: bool = False,
                 run_sims: bool = True) -> Optional[Dict[str, Any]]:
    """Run a selectable subset of the Measurement Manager flow.

    Optionally regenerates the cell, then hands control to the
    MeasurementManager class named in ``specs['mm_specs']['mm_cls']``.
    Only works with managers written in the format of
    simulation.core_v2.MeasurementManager.

    Parameters
    ----------
    specs:
        specification dictionary; must contain an 'mm_specs' entry with the
        measurement manager class and its arguments.
    gen_cell:
        True to call generate_cell on specs.
    gen_wrapper:
        True to generate the wrapper (one top-level wrapper supported).
    gen_tb:
        True to generate the test bench.
    load_results:
        True to skip simulation and load previously saved results.
    extract:
        False to skip layout generation and only simulate the schematic.
    run_sims:
        True to actually run the simulations.

    Returns
    -------
    results: Optional[Dict[str, Any]]
        measurement results when run_sims/load_results is True, else None.
    """
    impl_lib = specs['impl_lib']
    impl_cell = specs['impl_cell']
    root_dir = Path(specs['root_dir'])
    if gen_cell and not load_results:
        print('generating cell ...')
        self.generate_cell(specs, gen_lay=extract, gen_sch=True, run_lvs=extract,
                           run_rcx=extract, use_cybagoa=True)
        print('cell generated.')
    mm_specs = specs['mm_specs']
    mm_cls = _import_class_from_str(mm_specs['mm_cls'])
    mm: MeasurementManager = mm_cls(root_dir, mm_specs)
    return mm.measure(self, impl_lib, impl_cell, load_results=load_results,
                      gen_wrapper=gen_wrapper, gen_tb=gen_tb, run_sims=run_sims,
                      extract=extract)
def create_library(self, lib_name, lib_path=''):
    # type: (str, str) -> None
    """Create the given library in the CAD database if it does not exist yet.

    Parameters
    ----------
    lib_name : str
        the library name.
    lib_path : str
        directory to create the library in.  If empty, use the default location.
    """
    server = self.impl_db
    if server is None:
        raise Exception('BAG Server is not set up.')
    return server.create_library(lib_name, lib_path=lib_path)
# noinspection PyUnusedLocal
def create_design_module(self, lib_name, cell_name, **kwargs):
    # type: (str, str, **Any) -> SchInstance
    """Return a new top-level design module for the given schematic template.

    Parameters
    ----------
    lib_name : str
        the library name.
    cell_name : str
        the cell name.
    **kwargs : Any
        optional parameters (currently unused).

    Returns
    -------
    dsn : SchInstance
        a configurable schematic instance of the given schematic generator.
    """
    dsn = SchInstance(self.dsn_db, lib_name, cell_name, 'XTOP', static=False)
    return dsn
def new_schematic_instance(self, lib_name='', cell_name='', params=None, sch_cls=None,
                           debug=False, **kwargs):
    # type: (str, str, Dict[str, Any], Type[ModuleType], bool, **Any) -> SchInstance
    """Create a new schematic instance.

    The schematic equivalent of TemplateDB's new_template() method.  By
    default the generator's design() function sets the schematic parameters;
    pass design_fun in kwargs to use a different entry point (for example
    design_specs()).

    Parameters
    ----------
    lib_name : str
        schematic library name.
    cell_name : str
        schematic name.
    params : Dict[str, Any]
        the parameter dictionary.
    sch_cls : Type[TemplateType]
        the schematic generator class to instantiate.
    debug : bool
        True to print debug messages.
    **kwargs : Any
        optional parameters ('design_fun').

    Returns
    -------
    dsn : SchInstance
        a schematic instance of the given schematic generator.
    """
    fun_name = kwargs.get('design_fun', 'design')
    master = self.dsn_db.new_master(lib_name, cell_name, gen_cls=sch_cls, params=params,
                                    debug=debug, design_args=None, design_fun=fun_name)
    return SchInstance(self.dsn_db, lib_name, cell_name, 'XTOP', static=False,
                       master=master)
def clear_schematic_database(self):
    # type: () -> None
    """Discard all cached masters in the schematic database."""
    db = self.dsn_db
    db.clear()
def instantiate_schematic(self, lib_name, content_list, lib_path=''):
    # type: (str, Sequence[Any], str) -> None
    """Create the given schematic contents in the CAD database.

    NOTE: this is BAG's internal method.  To create schematics, call
    batch_schematic() instead.

    Parameters
    ----------
    lib_name : str
        name of the new library to put the schematic instances.
    content_list : Sequence[Any]
        list of schematics to create.
    lib_path : str
        the path to create the library in.  If empty, use the default location.
    """
    server = self.impl_db
    if server is None:
        raise Exception('BAG Server is not set up.')
    server.instantiate_schematic(lib_name, content_list, lib_path=lib_path)
def batch_schematic(self,  # type: BagProject
                    lib_name,  # type: str
                    sch_inst_list,  # type: Sequence[SchInstance]
                    name_list=None,  # type: Optional[Sequence[Optional[str]]]
                    prefix='',  # type: str
                    suffix='',  # type: str
                    debug=False,  # type: bool
                    rename_dict=None,  # type: Optional[Dict[str, str]]
                    ):
    # type: (...) -> None
    """Create all the given schematics in the CAD database.

    Parameters
    ----------
    lib_name : str
        name of the new library to put the schematic instances.
    sch_inst_list : Sequence[SchInstance]
        list of SchInstance objects.
    name_list : Optional[Sequence[Optional[str]]]
        list of master cell names.  If not given, default names are used.
    prefix : str
        prefix to add to cell names.
    suffix : str
        suffix to add to cell names.
    debug : bool
        True to print debugging messages.
    rename_dict : Optional[Dict[str, str]]
        optional master cell renaming dictionary.
    """
    db = self.dsn_db
    db.cell_prefix = prefix
    db.cell_suffix = suffix
    masters = [inst.master for inst in sch_inst_list]
    db.instantiate_masters(masters, name_list=name_list, lib_name=lib_name,
                           debug=debug, rename_dict=rename_dict)
def configure_testbench(self, tb_lib, tb_cell):
    # type: (str, str) -> Testbench
    """Fill in process-specific state for the given testbench.

    Returns a Testbench object which can be used to control simulation.

    Parameters
    ----------
    tb_lib : str
        testbench library name.
    tb_cell : str
        testbench cell name.

    Returns
    -------
    tb : :class:`bag.core.Testbench`
        the :class:`~bag.core.Testbench` instance.
    """
    if self.impl_db is None:
        raise Exception('BAG Server is not set up.')
    if self.sim is None:
        raise Exception('SimAccess is not set up.')
    cur_env, env_list, params, outputs = self.impl_db.configure_testbench(tb_lib, tb_cell)
    return Testbench(self.sim, self.impl_db, tb_lib, tb_cell, params, env_list,
                     [cur_env], outputs)
def load_testbench(self, tb_lib, tb_cell):
    # type: (str, str) -> Testbench
    """Load an existing testbench from the database.

    Parameters
    ----------
    tb_lib : str
        testbench library name.
    tb_cell : str
        testbench cell name.

    Returns
    -------
    tb : :class:`bag.core.Testbench`
        the :class:`~bag.core.Testbench` instance.
    """
    if self.impl_db is None:
        raise Exception('BAG Server is not set up.')
    if self.sim is None:
        raise Exception('SimAccess is not set up.')
    cur_envs, all_envs, params, outputs = self.impl_db.get_testbench_info(tb_lib, tb_cell)
    return Testbench(self.sim, self.impl_db, tb_lib, tb_cell, params, all_envs,
                     cur_envs, outputs)
def instantiate_layout_pcell(self, lib_name, cell_name, inst_lib, inst_cell, params,
                             pin_mapping=None, view_name='layout'):
    # type: (str, str, str, str, Dict[str, Any], Optional[Dict[str, str]], str) -> None
    """Create a layout cell containing a single pcell instance.

    Parameters
    ----------
    lib_name : str
        layout library name.
    cell_name : str
        layout cell name.
    inst_lib : str
        pcell library name.
    inst_cell : str
        pcell cell name.
    params : Dict[str, Any]
        the parameter dictionary.
    pin_mapping : Optional[Dict[str, str]]
        the pin renaming dictionary.
    view_name : str
        layout view name; defaults to "layout".
    """
    server = self.impl_db
    if server is None:
        raise Exception('BAG Server is not set up.')
    server.instantiate_layout_pcell(lib_name, cell_name, view_name,
                                    inst_lib, inst_cell, params, pin_mapping or {})
def instantiate_layout(self, lib_name, view_name, via_tech, layout_list):
    # type: (str, str, str, Sequence[Any]) -> None
    """Create a batch of layouts in the CAD database.

    Parameters
    ----------
    lib_name : str
        layout library name.
    view_name : str
        layout view name.
    via_tech : str
        via technology name.
    layout_list : Sequence[Any]
        the layouts to create.
    """
    server = self.impl_db
    if server is None:
        raise Exception('BAG Server is not set up.')
    server.instantiate_layout(lib_name, view_name, via_tech, layout_list)
def release_write_locks(self, lib_name, cell_view_list):
    # type: (str, Sequence[Tuple[str, str]]) -> None
    """Release write locks on all the given cells.

    Parameters
    ----------
    lib_name : str
        the library name.
    cell_view_list : Sequence[Tuple[str, str]]
        list of (cell, view) name tuples.
    """
    server = self.impl_db
    if server is None:
        raise Exception('BAG Server is not set up.')
    server.release_write_locks(lib_name, cell_view_list)
def run_lvs(self,  # type: BagProject
            lib_name,  # type: str
            cell_name,  # type: str
            **kwargs
            ):
    # type: (...) -> Tuple[bool, str]
    """Run LVS on the given cell, blocking until it finishes.

    Parameters
    ----------
    lib_name : str
        library name.
    cell_name : str
        cell name.
    **kwargs :
        optional keyword arguments.  See DbAccess class for details.

    Returns
    -------
    value : bool
        True if LVS succeeds.
    log_fname : str
        name of the LVS log file ('' if the task was cancelled or errored).
    """
    if self.impl_db is None:
        raise Exception('BAG Server is not set up.')
    batch = batch_async_task([self.impl_db.async_run_lvs(lib_name, cell_name, **kwargs)])
    if batch is None:
        return False, ''
    ans = batch[0]
    if isinstance(ans, Exception):
        return False, ''
    return ans
def run_rcx(self,  # type: BagProject
            lib_name,  # type: str
            cell_name,  # type: str
            **kwargs
            ):
    # type: (...) -> Tuple[Union[bool, Optional[str]], str]
    """Run RCX on the given cell, blocking until it finishes.

    The first return value depends on the create_schematic keyword argument
    (default True); the second is always the RCX log file name.

    If create_schematic is True, RCX runs and, on success, a schematic of the
    extracted netlist is created in the database; the first return value is a
    boolean success flag.

    If create_schematic is False, the first return value is the extracted
    netlist filename, or None when RCX failed.

    Parameters
    ----------
    lib_name : str
        library name.
    cell_name : str
        cell name.
    **kwargs :
        optional keyword arguments.  See DbAccess class for details.

    Returns
    -------
    value : Union[bool, str]
        the return value, as described above.
    log_fname : str
        name of the RCX log file.
    """
    if self.impl_db is None:
        raise Exception('BAG Server is not set up.')
    create_schematic = kwargs.get('create_schematic', True)
    batch = batch_async_task([self.impl_db.async_run_rcx(lib_name, cell_name, **kwargs)])
    failed = batch is None or isinstance(batch[0], Exception)
    if failed:
        # failure sentinel depends on the requested mode.
        return (False, '') if create_schematic else (None, '')
    return batch[0]
def export_layout(self, lib_name, cell_name, out_file, **kwargs):
    # type: (str, str, str, **Any) -> str
    """Export a layout, blocking until the export finishes.

    Parameters
    ----------
    lib_name : str
        library name.
    cell_name : str
        cell name.
    out_file : str
        output file name.
    **kwargs : Any
        optional keyword arguments.  See Checker class for details.

    Returns
    -------
    log_fname : str
        log file name.  Empty if the task was cancelled or errored.
    """
    if self.impl_db is None:
        raise Exception('BAG Server is not set up.')
    batch = batch_async_task(
        [self.impl_db.async_export_layout(lib_name, cell_name, out_file, **kwargs)])
    if batch is None:
        return ''
    ans = batch[0]
    return '' if isinstance(ans, Exception) else ans
def batch_export_layout(self, info_list):
    # type: (Sequence[Tuple[Any, ...]]) -> Optional[Sequence[str]]
    """Export the layouts of all given cells in one batch.

    Parameters
    ----------
    info_list:
        list of cell information.  Each element is a tuple of:

        lib_name : str
            library name.
        cell_name : str
            cell name.
        out_file : str
            layout output file name.
        view_name : str
            layout view name (optional).
        params : Optional[Dict[str, Any]]
            optional export parameter values.

    Returns
    -------
    results : Optional[Sequence[str]]
        None if the task was cancelled; otherwise a list of log file names
        ('' for any export that raised an error).
    """
    if self.impl_db is None:
        raise Exception('BAG Server is not set up.')
    coro_list = [self.impl_db.async_export_layout(*info) for info in info_list]
    batch = batch_async_task(coro_list)
    if batch is None:
        return None
    logs = []
    for val in batch:
        logs.append('' if isinstance(val, Exception) else val)
    return logs
async def async_run_lvs(self, lib_name: str, cell_name: str, **kwargs: Any) -> Tuple[bool, str]:
    """Coroutine that runs LVS on the given cell.

    Parameters
    ----------
    lib_name : str
        library name.
    cell_name : str
        cell name.
    **kwargs : Any
        optional keyword arguments.  See Checker class for details.
        LVS parameters should be specified as lvs_params.

    Returns
    -------
    value : bool
        True if LVS succeeds.
    log_fname : str
        name of the LVS log file.
    """
    server = self.impl_db
    if server is None:
        raise Exception('BAG Server is not set up.')
    return await server.async_run_lvs(lib_name, cell_name, **kwargs)
async def async_run_rcx(self,  # type: BagProject
                        lib_name: str,
                        cell_name: str,
                        **kwargs
                        ) -> Tuple[Union[bool, Optional[str]], str]:
    """Coroutine that runs RCX on the given cell.

    The first return value depends on the create_schematic keyword argument
    (see DbAccess); the second is always the RCX log file name.

    If create_schematic is True, RCX runs and, on success, a schematic of the
    extracted netlist is created in the database; the first return value is a
    boolean success flag.

    If create_schematic is False, the first return value is the extracted
    netlist filename, or None when RCX failed.

    Parameters
    ----------
    lib_name : str
        library name.
    cell_name : str
        cell name.
    **kwargs :
        optional keyword arguments.  See DbAccess class for details.

    Returns
    -------
    value : Union[bool, str]
        the return value, as described above.
    log_fname : str
        name of the RCX log file.
    """
    server = self.impl_db
    if server is None:
        raise Exception('BAG Server is not set up.')
    return await server.async_run_rcx(lib_name, cell_name, **kwargs)
def create_schematic_from_netlist(self, netlist, lib_name, cell_name,
                                  sch_view=None, **kwargs):
    # type: (str, str, str, Optional[str], **Any) -> None
    """Create a schematic from a netlist.

    Mainly used to create an extracted schematic from an extracted netlist.

    Parameters
    ----------
    netlist : str
        the netlist file name.
    lib_name : str
        library name.
    cell_name : str
        cell name.
    sch_view : Optional[str]
        schematic view name.  The default value is implementation dependent.
    **kwargs : Any
        additional implementation-dependent arguments.
    """
    server = self.impl_db
    if server is None:
        raise Exception('BAG Server is not set up.')
    return server.create_schematic_from_netlist(netlist, lib_name, cell_name,
                                                sch_view=sch_view, **kwargs)
def create_verilog_view(self, verilog_file, lib_name, cell_name, **kwargs):
    # type: (str, str, str, **Any) -> None
    """Create a verilog view for mixed-signal simulation.

    Parameters
    ----------
    verilog_file : str
        the verilog file name; must be an existing file.
    lib_name : str
        library name.
    cell_name : str
        cell name.
    **kwargs : Any
        additional implementation-dependent arguments.
    """
    if self.impl_db is None:
        raise Exception('BAG Server is not set up.')
    vpath = os.path.abspath(verilog_file)
    if not os.path.isfile(vpath):
        raise ValueError('%s is not a file.' % vpath)
    return self.impl_db.create_verilog_view(vpath, lib_name, cell_name, **kwargs)
================================================
FILE: bag/data/LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2018, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: bag/data/__init__.py
================================================
# -*- coding: utf-8 -*-
"""This package defines methods and classes useful for data post-processing.
"""
# compatibility import.
from ..io import load_sim_results, save_sim_results, load_sim_file
from .core import Waveform
__all__ = ['load_sim_results', 'save_sim_results', 'load_sim_file',
'Waveform', ]
================================================
FILE: bag/data/core.py
================================================
# -*- coding: utf-8 -*-
"""This module defines core data post-processing classes.
"""
import numpy as np
import scipy.interpolate as interp
import scipy.cluster.vq as svq
import scipy.optimize as sciopt
class Waveform(object):
    """A (usually transient) waveform.

    This class provides interpolation and other convenience functions.

    Parameters
    ----------
    xvec : np.multiarray.ndarray
        the X vector.
    yvec : np.multiarray.ndarray
        the Y vector.
    xtol : float
        the X value tolerance.
    order : int
        the interpolation order.  1 for nearest, 2 for linear, 3 for spline.
    ext : int or str
        interpolation extension mode.  See documentation for InterpolatedUnivariateSpline.
    """

    def __init__(self, xvec, yvec, xtol, order=3, ext=3):
        self._xvec = xvec
        self._yvec = yvec
        self._xtol = xtol
        self._order = order
        self._ext = ext
        # spline used for all evaluations between the given sample points.
        self._fun = interp.InterpolatedUnivariateSpline(xvec, yvec, k=order, ext=ext)

    @property
    def xvec(self):
        """the X vector"""
        return self._xvec

    @property
    def yvec(self):
        """the Y vector"""
        return self._yvec

    @property
    def order(self):
        """the interpolation order.  1 for nearest, 2 for linear, 3 for spline."""
        return self._order

    @property
    def xtol(self):
        """the X value tolerance."""
        return self._xtol

    @property
    def ext(self):
        """interpolation extension mode.  See documentation for InterpolatedUnivariateSpline."""
        return self._ext

    def __call__(self, *arg, **kwargs):
        """Evaluate the waveform at the given points."""
        return self._fun(*arg, **kwargs)

    def get_xrange(self):
        """Returns the X vector range.

        Returns
        -------
        xmin : float
            minimum X value.
        xmax : float
            maximum X value.
        """
        return self.xvec[0], self.xvec[-1]

    def shift_by(self, xshift):
        """Returns a shifted copy of this waveform.

        Parameters
        ----------
        xshift : float
            the amount to shift by.

        Returns
        -------
        wvfm : bag.data.core.Waveform
            a new Waveform with the X vector shifted by the given amount.
        """
        return Waveform(self.xvec + xshift, self.yvec, self.xtol, order=self.order, ext=self.ext)

    def get_all_crossings(self, threshold, start=None, stop=None, edge='both'):
        """Returns all X values at which this waveform crosses the given threshold.

        Parameters
        ----------
        threshold : float
            the threshold value.
        start : float or None
            if given, search for crossings starting at this X value.
        stop : float or None
            if given, search only for crossings before this X value.
        edge : string
            crossing type.  Valid values are 'rising', 'falling', or 'both'.

        Returns
        -------
        xval_list : list[float]
            all X values at which crossing occurs.
        """
        # determine start and stop indices
        sidx = 0 if start is None else np.searchsorted(self.xvec, [start])[0]
        if stop is None:
            eidx = len(self.xvec)
        else:
            eidx = np.searchsorted(self.xvec, [stop])[0]
            # include the sample exactly at stop (within tolerance).
            if eidx < len(self.xvec) and abs(self.xvec[eidx] - stop) < self.xtol:
                eidx += 1
        # quantize waveform values, then detect edge.
        bool_vec = self.yvec[sidx:eidx] >= threshold  # type: np.ndarray
        qvec = bool_vec.astype(int)
        dvec = np.diff(qvec)
        # eliminate unwanted edge types.
        if edge == 'rising':
            dvec = np.maximum(dvec, 0)
        elif edge == 'falling':
            dvec = np.minimum(dvec, 0)
        # get crossing indices
        idx_list = dvec.nonzero()[0]

        # convert indices to X value using brentq interpolation.
        def crossing_fun(x):
            return self._fun(x) - threshold

        xval_list = []
        for idx in idx_list:
            t0, t1 = self.xvec[sidx + idx], self.xvec[sidx + idx + 1]
            try:
                tcross = sciopt.brentq(crossing_fun, t0, t1, xtol=self.xtol)
            except ValueError:
                # no solution, this happens only if we have numerical error
                # around the threshold.  In this case just pick the endpoint
                # closest to threshold.
                va = crossing_fun(t0)
                vb = crossing_fun(t1)
                tcross = t0 if abs(va) < abs(vb) else t1
            xval_list.append(tcross)
        return xval_list

    def get_crossing(self, threshold, start=None, stop=None, n=1, edge='both'):
        """Returns the X value at which this waveform crosses the given threshold.

        Parameters
        ----------
        threshold : float
            the threshold value.
        start : float or None
            if given, search for the crossing starting at this X value.
        stop : float or None
            if given, search only for crossings before this X value.
        n : int
            returns the nth crossing.
        edge : str
            crossing type.  Valid values are 'rising', 'falling', or 'both'.

        Returns
        -------
        xval : float or None
            the X value at which the crossing occurs.  None if fewer than n
            crossings are detected.
        """
        xval_list = self.get_all_crossings(threshold, start=start, stop=stop, edge=edge)
        if len(xval_list) < n:
            return None
        return xval_list[n - 1]

    def to_arrays(self, xmin=None, xmax=None):
        """Returns the X and Y arrays representing this waveform.

        Parameters
        ----------
        xmin : float or None
            If given, will start from this value.
        xmax : float or None
            If given, will end at this value.

        Returns
        -------
        xvec : np.multiarray.ndarray
            the X array
        yvec : np.multiarray.ndarray
            the Y array
        """
        sidx = 0 if xmin is None else np.searchsorted(self.xvec, [xmin])[0]
        eidx = len(self.xvec) if xmax is None else np.searchsorted(self.xvec, [xmax])[0]
        if eidx < len(self.xvec) and self.xvec[eidx] == xmax:
            eidx += 1
        xtemp = self.xvec[sidx:eidx]
        # BUG FIX: np.insert/np.append return new arrays instead of modifying
        # in place; previously the results were discarded, so the xmin/xmax
        # endpoints were silently dropped from the output.
        if xmin is not None and (len(xtemp) == 0 or xtemp[0] != xmin):
            xtemp = np.insert(xtemp, 0, xmin)
        if xmax is not None and (len(xtemp) == 0 or xtemp[-1] != xmax):
            xtemp = np.append(xtemp, xmax)
        return xtemp, self(xtemp)

    def get_eye_specs(self, tbit, tsample, thres=0.0, nlev=2):
        """Compute the eye diagram spec of this waveform.

        This algorithm uses the following steps.

        1. set t_off to 0
        2. sample the waveform at tbit interval, starting at t0 + t_off.
        3. sort the sampled values, get gap between adjacent values.
        4. record G, the length of the gap covering thres.
        5. increment t_off by tsample, go to step 2 and repeat until
           t_off >= tbit.
        6. find t_off with maximum G.  This is the eye center.
        7. at the eye center, compute eye height and eye opening using kmeans
           clustering algorithm.
        8. return result.

        Parameters
        ----------
        tbit : float
            eye period.
        tsample : float
            the resolution to sample the eye.  Used to find optimal
            time shift and maximum eye opening.
        thres : float
            the eye vertical threshold.
        nlev : int
            number of expected levels.  2 for NRZ, 4 for PAM4.

        Returns
        -------
        result : dict
            A dictionary from specification to value.
        """
        tstart, tend = self.get_xrange()
        toff_vec = np.arange(0, tbit, tsample)
        best_idx = 0
        best_gap = 0.0
        best_values = None
        mid_lev = nlev // 2
        # sweep sampling offset; keep the offset with the largest vertical
        # gap straddling the threshold (the widest-open sampling point).
        for idx, t_off in enumerate(toff_vec):
            # noinspection PyTypeChecker
            values = self(np.arange(tstart + t_off, tend, tbit))
            values.sort()
            up_idx = np.searchsorted(values, [thres])[0]
            if up_idx == 0 or up_idx == len(values):
                # all samples on one side of the threshold; not a valid eye.
                continue
            cur_gap = values[up_idx] - values[up_idx - 1]
            if cur_gap > best_gap:
                best_idx = idx
                best_gap = cur_gap
                best_values = values
        if best_values is None:
            raise ValueError("waveform never cross threshold=%.4g" % thres)
        # normalize before clustering so kmeans distortion is scale-free.
        vstd = np.std(best_values)
        vtemp = best_values / vstd
        tmp_arr = np.linspace(vtemp[0], vtemp[-1], nlev)  # type: np.ndarray
        clusters = svq.kmeans(vtemp, tmp_arr)[0]
        # clusters = svq.kmeans(vtemp, 4, iter=50)[0]
        clusters *= vstd
        clusters.sort()
        vcenter = (clusters[mid_lev] + clusters[mid_lev - 1]) / 2.0
        # compute eye opening/margin by walking the sorted samples and
        # detecting where membership switches to the next cluster.
        openings = []
        tr_widths = []
        last_val = best_values[0]
        bot_val = last_val
        cur_cidx = 0
        for cur_val in best_values:
            cur_cluster = clusters[cur_cidx]
            next_cluster = clusters[cur_cidx + 1]
            if abs(cur_val - cur_cluster) > abs(cur_val - next_cluster):
                openings.append(cur_val - last_val)
                tr_widths.append(last_val - bot_val)
                cur_cidx += 1
                if cur_cidx == len(clusters) - 1:
                    tr_widths.append(best_values[-1] - cur_val)
                    break
                bot_val = cur_val
            last_val = cur_val
        return {'center': (float(toff_vec[best_idx]), vcenter),
                'levels': clusters,
                'heights': clusters[1:] - clusters[:-1],
                'openings': np.array(openings),
                'trace_widths': np.array(tr_widths)
                }

    def _add_xy(self, other):
        """Return the merged X grid and summed Y values of self and other."""
        if not isinstance(other, Waveform):
            raise ValueError("Trying to add non-Waveform object.")
        # merge X grids, de-duplicating points that agree to within xtol.
        xnew = np.concatenate((self.xvec, other.xvec))
        xnew = np.unique(np.around(xnew / self.xtol)) * self.xtol
        # noinspection PyTypeChecker
        y1 = self(xnew)
        y2 = other(xnew)
        return xnew, y1 + y2

    def __add__(self, other):
        if np.isscalar(other):
            return Waveform(np.array(self.xvec), self.yvec + other, self.xtol, order=self.order, ext=self.ext)
        elif isinstance(other, Waveform):
            new_order = max(self.order, other.order)
            xvec, yvec = self._add_xy(other)
            return Waveform(xvec, yvec, self.xtol, order=new_order, ext=self.ext)
        else:
            raise Exception('type %s not supported' % type(other))

    def __neg__(self):
        return Waveform(np.array(self.xvec), -self.yvec, self.xtol, order=self.order, ext=self.ext)

    def __mul__(self, scale):
        if not np.isscalar(scale):
            raise ValueError("Can only multiply by scalar.")
        return Waveform(np.array(self.xvec), scale * self.yvec, self.xtol, order=self.order, ext=self.ext)

    def __rmul__(self, scale):
        return self.__mul__(scale)
================================================
FILE: bag/data/dc.py
================================================
# -*- coding: utf-8 -*-
"""This module defines classes for computing DC operating point.
"""
from typing import Union, Dict
import scipy.sparse
import scipy.optimize
import numpy as np
from bag.tech.mos import MosCharDB
class DCCircuit(object):
    """A class that solves DC operating point of a circuit.

    Nodes are registered lazily by name; node 0 is ground ('gnd'/'vss'/'VSS').
    Transistors are grouped by (mos_type, intent, lch) so each group shares one
    characterization-database current function.

    Parameters
    ----------
    ndb : MosCharDB
        nmos characterization database.
    pdb : MosCharDB
        pmos characterization database.
    """
    def __init__(self, ndb, pdb):
        # type: (MosCharDB, MosCharDB) -> None
        # next free node index; index 0 is reserved for ground.
        self._n = 1
        self._ndb = ndb
        self._pdb = pdb
        # (mos_type, intent, lch) -> (arow, acol, bdata, fg_list, ds_list)
        # sparse-matrix construction data for each transistor group.
        self._transistors = {}
        # net name -> node index; all ground aliases share node 0.
        self._node_id = {'gnd': 0, 'vss': 0, 'VSS': 0}
        # node index -> canonical net name (for reporting results).
        self._node_name_lookup = {0: 'gnd'}
        # node index -> fixed voltage; ground is pinned to 0.
        self._node_voltage = {0: 0}
    def _get_node_id(self, name):
        # type: (str) -> int
        """Return the node index for the given net, registering it if new."""
        if name not in self._node_id:
            ans = self._n
            self._node_id[name] = ans
            self._node_name_lookup[ans] = name
            self._n += 1
            return ans
        else:
            return self._node_id[name]
    def set_voltage_source(self, node_name, voltage):
        # type: (str, float) -> None
        """
        Specify the voltage of a node.

        Parameters
        ----------
        node_name : str
            the net name.
        voltage : float
            voltage of the given net.
        """
        node_id = self._get_node_id(node_name)
        self._node_voltage[node_id] = voltage
    def add_transistor(self, d_name, g_name, s_name, b_name, mos_type, intent, w, lch, fg=1):
        # type: (str, str, str, str, str, str, Union[float, int], float, int) -> None
        """Adds a small signal transistor model to the circuit.

        Parameters
        ----------
        d_name : str
            drain net name.
        g_name : str
            gate net name.
        s_name : str
            source net name.
        b_name : str
            body net name.
        mos_type : str
            transistor type.  Either 'nch' or 'pch'.
        intent : str
            transistor threshold flavor.
        w : Union[float, int]
            transistor width.
        lch : float
            transistor channel length.
        fg : int
            transistor number of fingers.
        """
        node_d = self._get_node_id(d_name)
        node_g = self._get_node_id(g_name)
        node_s = self._get_node_id(s_name)
        node_b = self._get_node_id(b_name)
        # get existing current function data.  Initialize if not found.
        ids_key = (mos_type, intent, lch)
        if ids_key in self._transistors:
            arow, acol, bdata, fg_list, ds_list = self._transistors[ids_key]
        else:
            arow, acol, bdata, fg_list, ds_list = [], [], [], [], []
            self._transistors[ids_key] = (arow, acol, bdata, fg_list, ds_list)
        # record Ai and bi data.  Each transistor occupies 4 rows of Ai:
        # row 0 carries the constant width w (via bdata), and rows 1-3 carry
        # the +1/-1 entries that compute vb-vs, vd-vs, vg-vs from the node
        # voltage vector (the +1/-1 values are filled in solve()).
        offset = len(fg_list) * 4
        arow.extend([offset + 1, offset + 1, offset + 2, offset + 2, offset + 3, offset + 3])
        acol.extend([node_b, node_s, node_d, node_s, node_g, node_s])
        bdata.append(w)
        fg_list.append(fg)
        ds_list.append((node_d, node_s))
    def solve(self, env, guess_dict, itol=1e-10, inorm=1e-6):
        # type: (str, Dict[str, float], float, float) -> Dict[str, float]
        """Solve DC operating point.

        Builds KCL equations for all nodes without a fixed voltage and solves
        them with scipy.optimize.root using an analytic Jacobian.

        Parameters
        ----------
        env : str
            the simulation environment.
        guess_dict : Dict[str, float]
            initial guess dictionary.
        itol : float
            current error tolerance.
        inorm : float
            current normalization factor.

        Returns
        -------
        op_dict : Dict[str, float]
            DC operating point dictionary.

        Raises
        ------
        ValueError
            if the root finder fails to converge.
        """
        # step 1: get list of nodes to solve (those without a voltage source)
        node_list = [idx for idx in range(self._n) if idx not in self._node_voltage]
        reverse_dict = {nid: idx for idx, nid in enumerate(node_list)}
        ndim = len(node_list)
        # step 2: get Av and bv such that the full node-voltage vector is
        # Av * (unknown voltages) + bv (bv holds the fixed source voltages).
        amatv = scipy.sparse.csr_matrix(([1] * ndim, (node_list, np.arange(ndim))), shape=(self._n, ndim))
        bmatv = np.zeros(self._n)
        for nid, val in self._node_voltage.items():
            bmatv[nid] = val
        # step 3: gather current functions, and output matrix entries
        ifun_list = []
        out_data = []
        out_row = []
        out_col = []
        out_col_cnt = 0
        for (mos_type, intent, lch), (arow, acol, bdata, fg_list, ds_list) in self._transistors.items():
            db = self._ndb if mos_type == 'nch' else self._pdb
            ifun = db.get_function('ids', env=env, intent=intent, l=lch)
            # step 3A: compute Ai and bi; Ai maps node voltages to the
            # per-transistor argument rows (w, vbs, vds, vgs).
            num_tran = len(fg_list)
            adata = [1, -1] * (3 * num_tran)
            amati = scipy.sparse.csr_matrix((adata, (arow, acol)), shape=(4 * num_tran, self._n))
            bmati = np.zeros(4 * num_tran)
            bmati[0::4] = bdata
            # step 3B: compute A = Ai * Av, b = Ai * bv + bi
            amat = amati.dot(amatv)
            bmat = amati.dot(bmatv) + bmati
            # record scale matrix and function.  Currents are scaled by the
            # finger count and normalized by inorm for better conditioning.
            scale_mat = scipy.sparse.diags(fg_list) / inorm
            ifun_list.append((ifun, scale_mat, amat, bmat))
            # each transistor's drain current leaves node_d and enters node_s.
            for node_d, node_s in ds_list:
                if node_d in reverse_dict:
                    out_row.append(reverse_dict[node_d])
                    out_data.append(-1)
                    out_col.append(out_col_cnt)
                if node_s in reverse_dict:
                    out_row.append(reverse_dict[node_s])
                    out_data.append(1)
                    out_col.append(out_col_cnt)
                out_col_cnt += 1
        # construct output matrix (maps device currents to node KCL sums)
        out_mat = scipy.sparse.csr_matrix((out_data, (out_row, out_col)), shape=(ndim, out_col_cnt))
        # step 4: define zero function (KCL residual at each unknown node)
        def zero_fun(varr):
            iarr = np.empty(out_col_cnt)
            offset = 0
            for idsf, smat, ai, bi in ifun_list:
                num_out = smat.shape[0]
                # reshape going row first instead of column
                arg = (ai.dot(varr) + bi).reshape(4, -1, order='F').T
                if idsf.ndim == 3:
                    # handle case where transistor source and body are shorted:
                    # the database function takes only (w, vds, vgs).
                    tmpval = idsf(arg[:, [0, 2, 3]])
                else:
                    tmpval = idsf(arg)
                iarr[offset:offset + num_out] = smat.dot(tmpval)
                offset += num_out
            return out_mat.dot(iarr)
        # step 5: define jacobian function
        def jac_fun(varr):
            jarr = np.empty((out_col_cnt, ndim))
            offset = 0
            for idsf, smat, ai, bi in ifun_list:
                num_out = smat.shape[0]
                # reshape going row first instead of column
                arg = (ai.dot(varr) + bi).reshape(4, -1, order='F').T
                if idsf.ndim == 3:
                    # handle case where transistor source and body are shorted;
                    # re-insert a zero column for the missing vbs derivative.
                    tmpval = idsf.jacobian(arg[:, [0, 2, 3]])
                    # noinspection PyTypeChecker
                    tmpval = np.insert(tmpval, 1, 0.0, axis=len(tmpval.shape) - 1)
                else:
                    tmpval = idsf.jacobian(arg)
                jcur = smat.dot(tmpval)
                # chain rule: multiply by the rows of Ai belonging to this
                # transistor to get derivatives w.r.t. unknown node voltages.
                for idx in range(num_out):
                    # ai is sparse matrix; multiplication is matrix
                    jarr[offset + idx, :] = jcur[idx, :] @ ai[4 * idx:4 * idx + 4, :]
                offset += num_out
            return out_mat.dot(jarr)
        xguess = np.empty(ndim)
        for name, guess_val in guess_dict.items():
            xguess[reverse_dict[self._node_id[name]]] = guess_val
        result = scipy.optimize.root(zero_fun, xguess, jac=jac_fun, tol=itol / inorm, method='hybr')
        if not result.success:
            raise ValueError('solution failed.')
        op_dict = {self._node_name_lookup[nid]: result.x[idx] for idx, nid in enumerate(node_list)}
        return op_dict
================================================
FILE: bag/data/digital.py
================================================
# -*- coding: utf-8 -*-
"""This module defines functions useful for digital verification/postprocessing.
"""
from typing import Optional, List, Tuple
import numpy as np
from .core import Waveform
def de_bruijn(n, symbols=None):
    # type: (int, Optional[List[float]]) -> List[float]
    """Returns a De Bruijn sequence with subsequence of length n.

    A De Bruijn sequence with subsequence length n contains every possible
    length-n subsequence exactly once (viewed cyclically).  This is useful
    for simulating the worst case eye diagram given a finite impulse
    response.

    Parameters
    ----------
    n : int
        length of the subsequence.
    symbols : Optional[List[float]] or None
        the list of symbols.  If None, defaults to [0.0, 1.0].

    Returns
    -------
    seq : List[float]
        the de bruijn sequence.
    """
    alphabet = symbols or [0.0, 1.0]
    num_sym = len(alphabet)
    # working buffer of symbol indices plus the output index sequence,
    # per the standard recursive De Bruijn construction.
    buf = [0] * (num_sym * n)
    idx_seq = []

    def visit(t, p):
        if t > n:
            # emit the current Lyndon word when its length divides n.
            if n % p == 0:
                idx_seq.extend(buf[1:p + 1])
        else:
            buf[t] = buf[t - p]
            visit(t + 1, p)
            for sym in range(buf[t - p] + 1, num_sym):
                buf[t] = sym
                visit(t + 1, t)

    visit(1, 1)
    return [alphabet[i] for i in idx_seq]
def dig_to_pwl(values, tper, trf, td=0):
    # type: (List[float], float, float, float) -> Tuple[List[float], List[float]]
    """Convert a list of digital bits to PWL waveform.

    Negative delay is supported; time/value pairs that would occur at
    negative times are truncated so the waveform always starts at time 0.

    Parameters
    ----------
    values : List[float]
        list of values for each bit.
    tper : float
        the period in seconds.
    trf : float
        the rise/fall time in seconds.
    td : float
        the delay

    Returns
    -------
    tvec : List[float]
        the time vector.
    yvec : List[float]
        the value vector.
    """
    first_val = values[0]
    t, cur = td, first_val
    times, sigs = [], []
    for bit in values:
        if bit == cur:
            # no transition in this bit; emit a point only when the bit
            # interval straddles t=0 (truncation of negative delay).
            if t <= 0 < t + tper:
                times.append(0)
                sigs.append(cur)
        else:
            # transition starts at t and completes at t + trf.
            if t >= 0:
                times.append(t)
                sigs.append(cur)
            elif t < 0 < t + trf:
                # transition in progress at t=0; emit the interpolated value.
                times.append(0)
                sigs.append(cur - (bit - cur) / trf * t)
            cur = bit
            if t + trf >= 0:
                times.append(t + trf)
                sigs.append(cur)
            elif t + trf < 0 < t + tper:
                # transition finished before t=0 but the bit extends past it.
                times.append(0)
                sigs.append(cur)
        t += tper
    if not times:
        # only here if input is constant.
        times = [0, tper]
        sigs = [first_val, first_val]
    elif times[0] > 0:
        # prepend the initial value so the waveform starts at t=0.
        times.insert(0, 0)
        sigs.insert(0, first_val)
    return times, sigs
def get_crossing_index(yvec, threshold, n=0, rising=True):
    # type: (np.array, float, int, bool) -> int
    """Returns the index of the nth threshold crossing in the given array.

    Parameters
    ----------
    yvec : np.array
        the numpy array.
    threshold : float
        the crossing threshold.
    n : int
        returns the nth edge index, with n=0 being the first index.
    rising : bool
        True to return rising edge index.  False to return falling edge index.

    Returns
    -------
    idx : int
        the crossing edge index.
    """
    # quantize to 0/1 around the threshold, then differentiate:
    # +1 entries mark rising edges, -1 entries mark falling edges.
    edges = np.diff((yvec >= threshold).astype(int))
    if rising:
        edges = np.maximum(edges, 0)
    else:
        edges = np.minimum(edges, 0)
    return edges.nonzero()[0][n]
def get_flop_timing(tvec, d, q, clk, ttol, data_thres=0.5,
                    clk_thres=0.5, tstart=0.0, clk_edge='rising', tag=None, invert=False):
    """Calculate flop timing parameters given the associated waveforms.

    This function performs the following steps:

    1. find all valid clock edges.  Compute period of the clock (clock waveform
       must be periodic).

    2. For each valid clock edge:

        A. Check if the input changes in the previous cycle.  If so, compute tsetup.
           Otherwise, tsetup = tperiod.

        B. Check if input changes in the current cycle.  If so, compute thold.
           Otherwise, thold = tperiod.

        C. Check that output transition at most once and that output = input.
           Otherwise, record an error.

        D. record the output data polarity.

    3. For each output data polarity, compute the minimum tsetup and thold and any
       errors.  Return summary as a dictionary.

    The output is a dictionary with keys 'setup', 'hold', 'delay', and 'errors'.
    the setup/hold/delay entries contains 2-element tuples describing the worst
    setup/hold/delay time.  The first element is the setup/hold/delay time, and
    the second element is the clock edge time at which it occurs.  The errors field
    stores all clock edge times at which an error occurs.

    Parameters
    ----------
    tvec : np.ndarray
        the time data.
    d : np.ndarray
        the input data.
    q : np.ndarray
        the output data.
    clk : np.ndarray
        the clock data.
    ttol : float
        time resolution.
    data_thres : float
        the data threshold.
    clk_thres : float
        the clock threshold.
    tstart : float
        ignore data points before tstart.
    clk_edge : str
        the clock edge type.  Valid values are "rising", "falling", or "both".
    tag : obj
        an identifier tag to append to results.
    invert : bool
        if True, the flop output is inverted from the data.

    Returns
    -------
    data : dict[str, any]
        A dictionary describing the worst setup/hold/delay and errors, if any.
    """
    # wrap the raw arrays in interpolating waveforms for edge detection.
    d_wv = Waveform(tvec, d, ttol)
    clk_wv = Waveform(tvec, clk, ttol)
    q_wv = Waveform(tvec, q, ttol)
    tend = tvec[-1]
    # get all clock sampling times and clock period
    samp_times = clk_wv.get_all_crossings(clk_thres, start=tstart, edge=clk_edge)
    # average spacing of the detected edges (clock assumed periodic).
    tper = (samp_times[-1] - samp_times[0]) / (len(samp_times) - 1)
    # ignore last clock cycle if it's not a full cycle.
    if samp_times[-1] + tper > tend:
        samp_times = samp_times[:-1]
    # compute setup/hold/error for each clock period
    # defaults: setup/hold of a full period (no transition), zero delay;
    # the -1 marks "no clock edge recorded yet".
    data = {'setup': (tper, -1), 'hold': (tper, -1), 'delay': (0.0, -1), 'errors': []}
    for t in samp_times:
        # input transitions in the previous/current cycle, output transitions
        # in the current cycle.
        d_prev = d_wv.get_all_crossings(data_thres, start=t - tper, stop=t, edge='both')
        d_cur = d_wv.get_all_crossings(data_thres, start=t, stop=t + tper, edge='both')
        q_cur = q_wv.get_all_crossings(data_thres, start=t, stop=t + tper, edge='both')
        # logic level of input at the edge and of output one period later.
        d_val = d_wv(t) > data_thres
        q_val = q_wv(t + tper) > data_thres
        # calculate setup/hold/delay
        tsetup = t - d_prev[-1] if d_prev else tper
        thold = d_cur[0] - t if d_cur else tper
        tdelay = q_cur[0] - t if q_cur else 0.0
        # check if flop has error: output polarity disagrees with input
        # (accounting for invert), or the output toggled more than once.
        error = (invert != (q_val != d_val)) or (len(q_cur) > 1)
        # record results: keep the smallest setup/hold and largest delay.
        if tsetup < data['setup'][0]:
            data['setup'] = (tsetup, t)
        if thold < data['hold'][0]:
            data['hold'] = (thold, t)
        if tdelay > data['delay'][0]:
            data['delay'] = (tdelay, t)
        if error:
            data['errors'].append(t)
    if tag is not None:
        # append the identifier tag to every result entry.
        data['setup'] += (tag, )
        data['hold'] += (tag, )
        data['delay'] += (tag, )
        data['errors'] = [(t, tag) for t in data['errors']]
    return data
================================================
FILE: bag/data/lti.py
================================================
# -*- coding: utf-8 -*-
"""This module defines functions and classes useful for characterizing linear time-invariant circuits.
"""
from typing import Dict, List, Tuple, Union, Optional
import numpy as np
import scipy.signal
import scipy.sparse
import scipy.sparse.linalg
# noinspection PyProtectedMember
from scipy.signal.ltisys import StateSpaceContinuous, TransferFunctionContinuous
class LTICircuit(object):
"""A class that models a linear-time-invariant circuit.
This class computes AC transfer functions for linear-time-invariant circuits.
Note: Since this class work with AC transfer functions, 'gnd' in this circuit is AC ground.
Parameters
----------
udot_tol : float
tolerance to determine if dependency on input derivatives is zero.
"""
_float_min = np.finfo(np.float64).eps
def __init__(self, udot_tol=1e-12):
# type: (float) -> None
self._num_n = 0
self._gmat_data = {} # type: Dict[Tuple[int, int], float]
self._cmat_data = {} # type: Dict[Tuple[int, int], float]
self._vcvs_list = [] # type: List[Tuple[int, int, int, int, float]]
self._ind_data = {} # type: Dict[Tuple[int, int], float]
self._node_id = {'gnd': -1}
self._udot_tol = udot_tol
def _get_node_id(self, name):
# type: (str) -> int
if name not in self._node_id:
ans = self._num_n
self._node_id[name] = ans
self._num_n += 1
return ans
else:
return self._node_id[name]
@staticmethod
def _add(mat, key, val):
# type: (Dict[Tuple[int, int], float], Tuple[int, int], float) -> None
if key in mat:
mat[key] += val
else:
mat[key] = val
def add_res(self, res, p_name, n_name):
# type: (float, str, str) -> None
"""Adds a resistor to the circuit.
Parameters
----------
res : float
the resistance value, in Ohms.
p_name : str
the positive terminal net name.
n_name : str
the negative terminal net name.
"""
# avoid 0 resistance.
res_sgn = 1 if res >= 0 else -1
g = res_sgn / max(abs(res), self._float_min)
self.add_conductance(g, p_name, n_name)
def add_conductance(self, g, p_name, n_name):
# type: (float, str, str) -> None
"""Adds a resistor to the circuit given conductance value.
Parameters
----------
g : float
the conductance value, in inverse Ohms.
p_name : str
the positive terminal net name.
n_name : str
the negative terminal net name.
"""
node_p = self._get_node_id(p_name)
node_n = self._get_node_id(n_name)
if node_p == node_n:
return
if node_p < node_n:
node_p, node_n = node_n, node_p
self._add(self._gmat_data, (node_p, node_p), g)
if node_n >= 0:
self._add(self._gmat_data, (node_p, node_n), -g)
self._add(self._gmat_data, (node_n, node_p), -g)
self._add(self._gmat_data, (node_n, node_n), g)
def add_vccs(self, gm, p_name, n_name, cp_name, cn_name='gnd'):
# type: (float, str, str, str, str) -> None
"""Adds a voltage controlled current source to the circuit.
Parameters
----------
gm : float
the gain of the voltage controlled current source, in Siemens.
p_name : str
the terminal that the current flows out of.
n_name : str
the terminal that the current flows in to.
cp_name : str
the positive voltage control terminal.
cn_name : str
the negative voltage control terminal. Defaults to 'gnd'.
"""
node_p = self._get_node_id(p_name)
node_n = self._get_node_id(n_name)
node_cp = self._get_node_id(cp_name)
node_cn = self._get_node_id(cn_name)
if node_p == node_n or node_cp == node_cn:
return
if node_cp >= 0:
if node_p >= 0:
self._add(self._gmat_data, (node_p, node_cp), gm)
if node_n >= 0:
self._add(self._gmat_data, (node_n, node_cp), -gm)
if node_cn >= 0:
if node_p >= 0:
self._add(self._gmat_data, (node_p, node_cn), -gm)
if node_n >= 0:
self._add(self._gmat_data, (node_n, node_cn), gm)
def add_vcvs(self, gain, p_name, n_name, cp_name, cn_name='gnd'):
# type: (float, str, str, str, str) -> None
"""Adds a voltage controlled voltage source to the circuit.
Parameters
----------
gain : float
the gain of the voltage controlled voltage source.
p_name : str
the positive terminal of the output voltage source.
n_name : str
the negative terminal of the output voltage source.
cp_name : str
the positive voltage control terminal.
cn_name : str
the negative voltage control terminal. Defaults to 'gnd'.
"""
node_p = self._get_node_id(p_name)
node_n = self._get_node_id(n_name)
node_cp = self._get_node_id(cp_name)
node_cn = self._get_node_id(cn_name)
if node_p == node_n:
raise ValueError('positive and negative terminal of a vcvs cannot be the same.')
if node_cp == node_cn:
raise ValueError('positive and negative control terminal of a vcvs cannot be the same.')
if node_p < node_n:
# flip nodes so we always have node_p > node_n, to guarantee node_p >= 0
node_p, node_n, node_cp, node_cn = node_n, node_p, node_cn, node_cp
self._vcvs_list.append((node_p, node_n, node_cp, node_cn, gain))
def add_cap(self, cap, p_name, n_name):
# type: (float, str, str) -> None
"""Adds a capacitor to the circuit.
Parameters
----------
cap : float
the capacitance value, in Farads.
p_name : str
the positive terminal net name.
n_name : str
the negative terminal net name.
"""
node_p = self._get_node_id(p_name)
node_n = self._get_node_id(n_name)
if node_p == node_n:
return
if node_p < node_n:
node_p, node_n = node_n, node_p
self._add(self._cmat_data, (node_p, node_p), cap)
if node_n >= 0:
self._add(self._cmat_data, (node_p, node_n), -cap)
self._add(self._cmat_data, (node_n, node_p), -cap)
self._add(self._cmat_data, (node_n, node_n), cap)
def add_ind(self, ind, p_name, n_name):
# type: (float, str, str) -> None
"""Adds an inductor to the circuit.
Parameters
----------
ind : float
the inductance value, in Henries.
p_name : str
the positive terminal net name.
n_name : str
the negative terminal net name.
"""
node_p = self._get_node_id(p_name)
node_n = self._get_node_id(n_name)
if node_p == node_n:
return
if node_p < node_n:
key = node_n, node_p
else:
key = node_p, node_n
if key not in self._ind_data:
self._ind_data[key] = ind
else:
self._ind_data[key] = 1.0 / (1.0 / ind + 1.0 / self._ind_data[key])
def add_transistor(self, tran_info, d_name, g_name, s_name, b_name='gnd', fg=1, neg_cap=True):
# type: (Dict[str, float], str, str, str, str, Union[float, int], bool) -> None
"""Adds a small signal transistor model to the circuit.
Parameters
----------
tran_info : Dict[str, float]
a dictionary of 1-finger transistor small signal parameters. Should contain gm, gds, gb,
cgd, cgs, cgb, cds, cdb, and csb.
d_name : str
drain net name.
g_name : str
gate net name.
s_name : str
source net name.
b_name : str
body net name. Defaults to 'gnd'.
fg : Union[float, int]
number of transistor fingers.
neg_cap : bool
True to allow negative capacitance (which is there due to model fitting).
"""
gm = tran_info['gm'] * fg
gds = tran_info['gds'] * fg
cgd = tran_info['cgd'] * fg
cgs = tran_info['cgs'] * fg
cds = tran_info['cds'] * fg
cgb = tran_info.get('cgb', 0) * fg
cdb = tran_info.get('cdb', 0) * fg
csb = tran_info.get('csb', 0) * fg
if not neg_cap:
cgd = max(cgd, 0)
cgs = max(cgs, 0)
cds = max(cds, 0)
cgb = max(cgb, 0)
cdb = max(cdb, 0)
csb = max(csb, 0)
self.add_vccs(gm, d_name, s_name, g_name, s_name)
self.add_conductance(gds, d_name, s_name)
self.add_cap(cgd, g_name, d_name)
self.add_cap(cgs, g_name, s_name)
self.add_cap(cds, d_name, s_name)
self.add_cap(cgb, g_name, b_name)
self.add_cap(cdb, d_name, b_name)
self.add_cap(csb, s_name, b_name)
if 'gb' in tran_info:
# only add these if source is not shorted to body.
gb = tran_info['gb'] * fg
self.add_vccs(gb, d_name, s_name, b_name, s_name)
@classmethod
def _count_rank(cls, diag):
# type: (np.ndarray) -> int
diag_abs = np.abs(diag)
float_min = cls._float_min
rank_tol = diag_abs[0] * diag.size * float_min
rank_cnt = diag_abs > rank_tol # type: np.ndarray
return np.count_nonzero(rank_cnt)
    @classmethod
    def _solve_gx_bw(cls, g, b):
        # type: (np.ndarray, np.ndarray) -> Tuple[np.ndarray, np.ndarray]
        """Solve the equation G*x + B*[w, w', ...].T = 0 for x.

        Finds matrix Ka, Kw such that x = Ka * a + Kw * [w, w', ...].T solves
        the given equation for any value of a.

        Parameters
        ----------
        g : np.ndarray
            the G matrix, with shape (M, N) and M < N.
        b : np.ndarray
            the B matrix.

        Returns
        -------
        ka : np.ndarray
            the Ka matrix.
        kw : np.ndarray
            the Kw matrix.
        """
        # G = U*S*Vh
        u, s, vh = scipy.linalg.svd(g, full_matrices=True, overwrite_a=True)
        # let B=Uh*B, so now S*Vh*x + B*w = 0
        b = u.T.dot(b)
        # let y = Vh*x, or x = V*y, so now S*y + B*w = 0
        v = vh.T
        # truncate the bottom 0 part of S, now S_top*y_top + B_top*w = 0
        rank = cls._count_rank(s)
        # check bottom part of B.  If not 0, there's no solution
        b_abs = np.abs(b)
        zero_tol = np.amax(b_abs) * cls._float_min
        if np.count_nonzero(b_abs[rank:, :] > zero_tol) > 0:
            raise ValueError('B matrix bottom is not zero. This circuit has no solution.')
        b_top = b[:rank, :]
        s_top_inv = 1 / s[:rank]  # type: np.ndarray
        s_top_inv = np.diag(s_top_inv)
        # solving, we get y_top = -S_top^-1*B_top*w = Kw*w
        kw = s_top_inv.dot(-b_top)
        # now x = V*y = Vl*y_top + Vr*y_bot; y_bot is free (= a), so
        # x = Vr*a + Vl*Kw*w, i.e. Ka = Vr and the returned Kw = Vl*Kw.
        vl = v[:, :rank]
        vr = v[:, rank:]
        kw = vl.dot(kw)
        return vr, kw
@classmethod
def _transform_c_qr(cls, g, c, b, d):
"""Reveal redundant variables by transforming C matrix using QR decomposition"""
q, r, p = scipy.linalg.qr(c, pivoting=True)
rank = cls._count_rank(np.diag(r))
qh = q.T
return rank, qh.dot(g[:, p]), r, qh.dot(b), d[:, p]
# @classmethod
# def _transform_c_svd(cls, g, c, b, d):
# """Reveal redundant variables by transforming C matrix using SVD decomposition"""
# u, s, vh = scipy.linalg.svd(c, full_matrices=True, overwrite_a=True)
# uh = u.T
# v = vh.T
# rank = cls._count_rank(s)
# return rank, uh.dot(g).dot(v), np.diag(s), uh.dot(b), d.dot(v)
    @classmethod
    def _reduce_state_space(cls, g, c, b, d, e, ndim_w):
        """Reduce state space variables.

        Given the state equation G*x + C*x' + B*[w, w', w'', ...].T = 0, and
        y = D*x + E*[w, w', w'', ...].T, check if C is full rank.  If not,
        we compute new G, C, and B matrices with reduced dimensions.
        """
        # step 0: transform C and obtain rank
        rank, g, c, b, d = cls._transform_c_qr(g, c, b, d)
        # rank, g, c, b, d = cls._transform_c_svd(g, c, b, d)
        while rank < c.shape[0]:
            # step 1: eliminate x' term by looking at bottom part of matrices
            # (rows below the rank have no x' contribution after the transform).
            ctop = c[:rank, :]
            gtop = g[:rank, :]
            gbot = g[rank:, :]
            btop = b[:rank, :]
            bbot = b[rank:, :]
            # step 2: find ka and kw from bottom
            ka, kw = cls._solve_gx_bw(gbot, bbot)
            # step 3: substitute x = ka * a + kw * [w, w', w'', ...].T
            g = gtop.dot(ka)
            c = ctop.dot(ka)
            # the C*(kw*[w, ...])' term differentiates the input vector, so it
            # lands in the derivative-shifted (ndim_w offset) columns of B.
            b = np.zeros((btop.shape[0], btop.shape[1] + ndim_w))
            b[:, :btop.shape[1]] = btop + gtop.dot(kw)
            b[:, ndim_w:] += ctop.dot(kw)
            # output equation picks up D*kw in E, and D acts on the new state a.
            enew = np.zeros((e.shape[0], e.shape[1] + ndim_w))
            enew[:, :-ndim_w] = e + d.dot(kw)
            e = enew
            d = d.dot(ka)
            # step 4: transform C to prepare for next iteration
            rank, g, c, b, d = cls._transform_c_qr(g, c, b, d)
            # rank, g, c, b, d = cls._transform_c_svd(g, c, b, d)
        # finally, remove any remaining input-derivative dependence.
        g, c, b, d, e = cls._simplify(g, c, b, d, e, ndim_w)
        return g, c, b, d, e
    @classmethod
    def _simplify(cls, g, c, b, d, e, ndim_w):
        """Eliminate input derivatives by re-defining state variables.

        NOTE(review): solve_triangular is used here, so this assumes c is
        (upper) triangular — consistent with it being the R factor produced
        by the QR transform in _reduce_state_space; confirm if reused elsewhere.
        """
        # while B still multiplies derivative columns (beyond the first
        # ndim_w), substitute state variables to absorb them; each pass
        # removes ndim_w columns from B, so the loop terminates.
        while b.shape[1] > ndim_w:
            kw = scipy.linalg.solve_triangular(c, b[:, ndim_w:])
            bnew = np.dot(g, -kw)
            bnew[:, :ndim_w] += b[:, :ndim_w]
            b = bnew
            # the substitution also shifts D*kw into the feedthrough matrix E.
            e[:, :kw.shape[1]] -= d.dot(kw)
        return g, c, b, d, e
def _build_mna_matrices(self, inputs, outputs, in_type='v'):
# type: (Union[str, List[str]], Union[str, List[str]], str) -> Tuple[np.ndarray, ...]
"""Create and return MNA matrices representing this circuit.
Parameters
----------
inputs : Union[str, List[str]]
the input voltage/current node name(s).
outputs : Union[str, List[str]]
the output voltage node name(s).
in_type : str
set to 'v' for input voltage sources. Otherwise, current sources.
Returns
-------
g : np.ndarray
the conductance matrix
c : np.ndarray
the capacitance/inductance matrix.
b : np.ndarray
the input-to-state matrix.
d : np.ndarray
the state-to-output matrix.
e : np.ndarray
the input-to-output matrix.
"""
if isinstance(inputs, list) or isinstance(inputs, tuple):
node_ins = [self._node_id[name] for name in inputs]
else:
node_ins = [self._node_id[inputs]]
if isinstance(outputs, list) or isinstance(outputs, tuple):
node_outs = [self._node_id[name] for name in outputs]
else:
node_outs = [self._node_id[outputs]]
is_voltage = (in_type == 'v')
# step 1: construct matrices
gdata, grows, gcols = [], [], []
cdata, crows, ccols = [], [], []
# step 1A: gather conductors/vccs
for (ridx, cidx), gval in self._gmat_data.items():
gdata.append(gval)
grows.append(ridx)
gcols.append(cidx)
# step 1B: gather capacitors
for (ridx, cidx), cval in self._cmat_data.items():
cdata.append(cval)
crows.append(ridx)
ccols.append(cidx)
# step 1C: gather inductors
num_states = self._num_n
for (node_p, node_n), lval in self._ind_data.items():
gdata.append(1)
grows.append(node_p)
gcols.append(num_states)
gdata.append(1)
grows.append(num_states)
gcols.append(node_p)
if node_n >= 0:
gdata.append(-1)
grows.append(node_n)
gcols.append(num_states)
gdata.append(-1)
grows.append(num_states)
gcols.append(node_n)
cdata.append(-lval)
crows.append(num_states)
ccols.append(num_states)
num_states += 1
# step 1D: add currents from vcvs
for node_p, node_n, node_cp, node_cn, gain in self._vcvs_list:
# vcvs means vp - vn - A*vcp + A*vcn = 0, and current flows from p to n
# current flowing out of p
gdata.append(1)
grows.append(node_p)
gcols.append(num_states)
# voltage of p
gdata.append(1)
grows.append(num_states)
gcols.append(node_p)
if node_n >= 0:
# current flowing into n
gdata.append(-1)
grows.append(node_n)
gcols.append(num_states)
# voltage of n
gdata.append(-1)
grows.append(num_states)
gcols.append(node_n)
if node_cp >= 0:
# voltage of cp
gdata.append(-gain)
grows.append(num_states)
gcols.append(node_cp)
if node_cn >= 0:
# voltage of cn
gdata.append(gain)
grows.append(num_states)
gcols.append(node_cn)
num_states += 1
ndim_in = len(node_ins)
if is_voltage:
# step 1E: add current/voltage from input voltage source
b = np.zeros((num_states + ndim_in, ndim_in))
for in_idx, node_in in enumerate(node_ins):
gdata.append(1)
grows.append(node_in)
gcols.append(num_states)
gdata.append(-1)
grows.append(num_states)
gcols.append(node_in)
b[num_states + in_idx, in_idx] = 1
num_states += ndim_in
else:
# inject current to node_in
b = np.zeros((num_states, ndim_in))
for in_idx, node_in in enumerate(node_ins):
b[node_in, in_idx] = -1
# step 2: create matrices
shape = (num_states, num_states)
g = scipy.sparse.csc_matrix((gdata, (grows, gcols)), shape=shape).todense().A
c = scipy.sparse.csc_matrix((cdata, (crows, ccols)), shape=shape).todense().A
ndim_out = len(node_outs)
d = scipy.sparse.csc_matrix((np.ones(ndim_out), (np.arange(ndim_out), node_outs)),
shape=(ndim_out, num_states)).todense().A
e = np.zeros((ndim_out, ndim_in))
return g, c, b, d, e
def get_state_space(self, inputs, outputs, in_type='v'):
    # type: (Union[str, List[str]], Union[str, List[str]], str) -> StateSpaceContinuous
    """Compute the state space model from the given inputs to outputs.

    Parameters
    ----------
    inputs : Union[str, List[str]]
        the input voltage/current node name(s).
    outputs : Union[str, List[str]]
        the output voltage node name(s).
    in_type : str
        set to 'v' for input voltage sources. Otherwise, current sources.

    Returns
    -------
    system : StateSpaceContinuous
        the scipy state space object. See scipy.signal package on how to use this object.
    """
    gmat, cmat, bmat, dmat, emat = self._build_mna_matrices(inputs, outputs, in_type)
    num_in = emat.shape[1]
    gmat, cmat, bmat, dmat, emat = self._reduce_state_space(gmat, cmat, bmat, dmat, emat, num_in)
    # the reduction leaves C triangular, so x' = -C^-1 (G x + B u) is obtained
    # with triangular solves.
    amat = scipy.linalg.solve_triangular(cmat, -gmat)
    bmat_ss = scipy.linalg.solve_triangular(cmat, -bmat)
    # columns of E beyond num_in multiply input derivatives, which a standard
    # state-space model cannot represent; warn if they are significant.
    emat_abs = np.abs(emat)
    deriv_tol = np.amax(emat_abs) * self._udot_tol
    if np.count_nonzero(emat_abs[:, num_in:] > deriv_tol) > 0:
        print('WARNING: output depends on input derivatives. Ignored.')
        print('D matrix: ')
        print(emat)
    return StateSpaceContinuous(amat, bmat_ss, dmat, emat[:, :num_in])
def get_num_den(self, in_name, out_name, in_type='v', atol=0.0):
    # type: (str, str, str, float) -> Tuple[np.ndarray, np.ndarray]
    """Compute the transfer function between the two given nodes.

    Parameters
    ----------
    in_name : str
        the input voltage/current node name.
    out_name : Union[str, List[str]]
        the output voltage node name.
    in_type : str
        set to 'v' for input voltage sources. Otherwise, current sources.
    atol : float
        absolute tolerance for checking zeros in the numerator. Used to filter out scipy warnings.

    Returns
    -------
    num : np.ndarray
        the numerator polynomial.
    den : np.ndarray
        the denominator polynomial.
    """
    state_space = self.get_state_space(in_name, out_name, in_type=in_type)
    num, den = scipy.signal.ss2tf(state_space.A, state_space.B, state_space.C, state_space.D)
    num = num[0, :]
    # check if numerator has leading zeros.
    # this makes it so the user have full control over numerical precision, and
    # avoid scipy bad conditioning warnings.
    # BUG FIX: keep at least one coefficient; previously a numerator that was
    # entirely within atol of zero stripped every element and raised IndexError.
    while num.size > 1 and abs(num[0]) <= atol:
        num = num[1:]
    return num, den
def get_transfer_function(self, in_name, out_name, in_type='v', atol=0.0):
    # type: (str, str, str, float) -> TransferFunctionContinuous
    """Compute the transfer function between the two given nodes.

    Parameters
    ----------
    in_name : str
        the input voltage/current node name.
    out_name : Union[str, List[str]]
        the output voltage node name.
    in_type : str
        set to 'v' for input voltage sources. Otherwise, current sources.
    atol : float
        absolute tolerance for checking zeros in the numerator. Used to filter out scipy warnings.

    Returns
    -------
    system : TransferFunctionContinuous
        the scipy transfer function object. See scipy.signal package on how to use this object.
    """
    numerator, denominator = self.get_num_den(in_name, out_name, in_type=in_type, atol=atol)
    return TransferFunctionContinuous(numerator, denominator)
def get_impedance(self, node_name, freq, atol=0.0):
    # type: (str, float, float) -> complex
    """Computes the impedance looking into the given node.

    Parameters
    ----------
    node_name : str
        the node to compute impedance for. We will inject a current into this node and measure the voltage
        on this node.
    freq : float
        the frequency to compute the impedance at, in Hertz.
    atol : float
        absolute tolerance for checking zeros in the numerator. Used to filter out scipy warnings.

    Returns
    -------
    impedance : complex
        the impedance value, in Ohms.
    """
    # inject a unit AC current and read back the voltage at the same node;
    # the resulting current-to-voltage transfer function is the impedance.
    tf_sys = self.get_transfer_function(node_name, node_name, in_type='i', atol=atol)
    omega = 2 * np.pi * freq
    _, zin_vals = tf_sys.freqresp(w=[omega])
    return zin_vals[0]
def get_w_crossings(num, den, atol=1e-8):
    # type: (np.multiarray.ndarray, np.multiarray.ndarray, float) -> Tuple[Optional[float], Optional[float]]
    r"""Compute the gain-margin and phase-margin crossover frequencies.

    Writing the transfer function at :math:`s = jw` as

    .. math::
        \frac{A(w) + jB(w)}{C(w) + jD(w)}

    with real polynomials :math:`A, B, C, D`, the unity-gain (phase margin)
    frequency solves :math:`A^2 + B^2 - C^2 - D^2 = 0`, and the 180-degree
    (gain margin) frequency solves :math:`AD - BC = 0`.  The smallest real
    positive root of each equation is returned.

    Parameters
    ----------
    num : np.multiarray.ndarray
        the numerator polynomial coefficients array. index 0 is coefficient for highest term.
    den : np.multiarray.ndarray
        the denominator polynomial coefficients array. index 0 is coefficient for highest term.
    atol : float
        absolute tolerance used to check if the imaginary part of a root is 0, or if a root is greater than 0.

    Returns
    -------
    w_phase : Optional[float]
        lowest positive frequency in rad/s at which the gain becomes unity. None if no such frequency exist.
    w_gain : Optional[float]
        lowest positive frequency in rad/s at which the phase becomes 180 degrees. None if no such frequency exist.
    """
    # coefficients in ascending powers of w: index i multiplies w**i.
    asc_num = num[::-1]
    asc_den = den[::-1]
    # at s = jw, the factor j**i contributes signs +,+,-,- cyclically to the
    # real (even i) and imaginary (odd i) parts.
    sgn_num = np.resize([1.0, 1.0, -1.0, -1.0], len(asc_num))
    sgn_den = np.resize([1.0, 1.0, -1.0, -1.0], len(asc_den))
    a_coef = asc_num * sgn_num
    b_coef = asc_num * sgn_num
    c_coef = asc_den * sgn_den
    d_coef = asc_den * sgn_den
    a_coef[1::2] = 0  # real part keeps even powers only
    b_coef[0::2] = 0  # imaginary part keeps odd powers only
    c_coef[1::2] = 0
    d_coef[0::2] = 0
    apoly = np.poly1d(a_coef[::-1])
    bpoly = np.poly1d(b_coef[::-1])
    cpoly = np.poly1d(c_coef[::-1])
    dpoly = np.poly1d(d_coef[::-1])
    # unity-gain equation first, 180-degree equation second.
    eqn_list = [apoly * apoly + bpoly * bpoly - cpoly * cpoly - dpoly * dpoly,
                apoly * dpoly - bpoly * cpoly]
    ans = [None, None]  # type: List[Optional[float]]
    for idx, eqn in enumerate(eqn_list):
        for root in eqn.roots:
            wval = float(root.real)
            # accept only (numerically) real roots strictly above atol.
            if abs(root.imag) < atol < wval:
                cur_best = ans[idx]
                if cur_best is None or wval < cur_best:
                    ans[idx] = wval
    return ans[0], ans[1]
def get_w_3db(num, den, atol=1e-8):
    # type: (np.multiarray.ndarray, np.multiarray.ndarray, float) -> Optional[float]
    r"""Compute the 3dB frequency of the given transfer function.

    The transfer function is first normalized to unity DC gain, then written
    at :math:`s = jw` as

    .. math::
        \frac{A(w) + jB(w)}{C(w) + jD(w)}

    with real polynomials :math:`A, B, C, D`.  The 3dB frequency solves

    .. math::
        A^2(w) + B^2(w) - \left(C^2(w) + D^2(w)\right)/2 = 0

    and the smallest real positive root is returned.

    Parameters
    ----------
    num : np.multiarray.ndarray
        the numerator polynomial coefficients array. index 0 is coefficient for highest term.
    den : np.multiarray.ndarray
        the denominator polynomial coefficients array. index 0 is coefficient for highest term.
    atol : float
        absolute tolerance used to check if the imaginary part of a root is 0, or if a root is greater than 0.

    Returns
    -------
    w_3db : Optional[float]
        the 3dB frequency in rad/s. None if no such frequency exist.
    """
    # ascending-power coefficients of the DC-gain-normalized transfer function.
    asc_num = num[::-1] / num[-1]
    asc_den = den[::-1] / den[-1]
    # at s = jw, the factor j**i contributes signs +,+,-,- cyclically to the
    # real (even i) and imaginary (odd i) parts.
    sgn_num = np.resize([1.0, 1.0, -1.0, -1.0], asc_num.size)
    sgn_den = np.resize([1.0, 1.0, -1.0, -1.0], asc_den.size)
    a_coef = asc_num * sgn_num
    b_coef = asc_num * sgn_num
    c_coef = asc_den * sgn_den
    d_coef = asc_den * sgn_den
    a_coef[1::2] = 0  # real part keeps even powers only
    b_coef[0::2] = 0  # imaginary part keeps odd powers only
    c_coef[1::2] = 0
    d_coef[0::2] = 0
    apoly = np.poly1d(a_coef[::-1])
    bpoly = np.poly1d(b_coef[::-1])
    cpoly = np.poly1d(c_coef[::-1])
    dpoly = np.poly1d(d_coef[::-1])
    # half-power equation: |num|^2 = |den|^2 / 2.
    half_power = apoly * apoly + bpoly * bpoly - (cpoly * cpoly + dpoly * dpoly) / 2  # type: np.poly1d
    w_best = None
    for root in half_power.roots:
        wval = float(root.real)
        # accept only (numerically) real roots strictly above atol.
        if abs(root.imag) < atol < wval and (w_best is None or wval < w_best):
            w_best = wval
    return w_best
def get_stability_margins(num, den, rtol=1e-8, atol=1e-8):
    # type: (np.multiarray.ndarray, np.multiarray.ndarray, float, float) -> Tuple[float, float]
    """Given the numerator and denominator of the transfer function, compute phase and gain margins.

    Parameters
    ----------
    num : np.multiarray.ndarray
        the numerator polynomial coefficients array. index 0 is coefficient for highest term.
    den : np.multiarray.ndarray
        the denominator polynomial coefficients array. index 0 is coefficient for highest term.
    rtol : float
        relative tolerance. Used to check if two frequencies are equal.
    atol : float
        absolute tolerance. Used to check a number is equal to 0.

    Returns
    -------
    phase_margin : float
        the phase margin in degrees. If the system is unstable, a negative number is returned.
    gain_margin : float
        the gain margin.
    """
    poly_n = np.poly1d(num)
    poly_d = np.poly1d(den)
    # compute gain margin.
    w_phase, w_gain = get_w_crossings(num, den, atol=atol)
    if w_gain is None:
        # phase never reaches 180 degrees; infinite gain margin.
        gain_margin = float('inf')
    else:
        gain_margin = abs(poly_d(1j * w_gain) / poly_n(1j * w_gain))
    # compute phase margin
    if w_phase is None:
        # gain never equal to 1. That means gain is always greater than 1 or gain is always less than 1.
        # BUG FIX: compare the gain MAGNITUDE at DC; the signed value previously
        # classified any negative-DC-gain system as having gain < 1.
        dc_gain = abs(poly_n(0) / poly_d(0))
        if dc_gain < 1 - max(rtol, atol):
            # gain is always less than 1, infinite phase margin
            phase_margin = float('inf')
        else:
            # gain is always greater than 1, unstable
            phase_margin = -1
    elif w_gain is not None and w_phase > w_gain + max(w_gain * rtol, atol):
        # unity gain frequency > 180 degree frequency, we're unstable
        phase_margin = -1
    else:
        phase_margin = np.angle(poly_n(1j * w_phase) / poly_d(1j * w_phase), deg=True) + 180
    return phase_margin, gain_margin
================================================
FILE: bag/data/ltv.py
================================================
# -*- coding: utf-8 -*-
"""This module defines functions and classes for linear time-varying circuits data post-processing.
"""
import numpy as np
import scipy.interpolate as interp
import scipy.sparse as sparse
def _even_quotient(a, b, tol=1e-6):
"""Returns a / b if it is an integer, -1 if it is not.."""
num = int(round(a / b))
if abs(a - b * num) < abs(b * tol):
return num
return -1
class LTVImpulseFinite(object):
    r"""A class that computes finite impulse response of a linear time-varying circuit.

    This class computes the time-varying impulse response based on PSS/PAC simulation
    data, and provides several useful query methods. Your simulation should be set up
    as follows:

    #. Setup PSS as usual. We will denote system period as tper and fc = 1/tper.
    #. In PAC, set the maximum sidebands to m.
    #. In PAC, set the input frequency sweep to be absolute, and sweep from 0 to
       n * fstep in steps of fstep, where fstep = fc / k for some integer k.
       k should be chosen so that the output settles back to 0 after time k * tper. k
       should also be chosen such that fstep is a nice round frequency. Otherwise,
       numerical errors may introduce strange results.
       n should be chosen so that n * fstep is sufficiently large compared to system
       bandwidth.
    #. In PAC options, set the freqaxis option to be "in".
    #. After simulation, PAC should save the output frequency response as a function of
       output harmonic number and input frequency. Post-process this into a complex 2D
       matrix hmat with shape (2 * m + 1, n + 1), and pass it to this class's constructor.

    Parameters
    ----------
    hmat : np.ndarray
        the PAC simulation data matrix with shape (2 * m + 1, n + 1).
        hmat[a + m, b] is the complex AC gain from input frequency b * fc / k
        to output frequency a * fc + b * fc / k.
    m : int
        number of output sidebands.
    n : int
        number of input frequencies.
    tper : float
        the system period, in seconds.
    k : int
        the ratio between period of the input impulse train and the system period.
        Must be an integer.
    out0 : :class:`numpy.ndarray`
        steady-state output transient waveform with 0 input over 1 period. This should
        be a two-column array, where the first column is time vector and second column
        is the output. Used to compute transient response.

    Notes
    -----
    This class uses the algorithm described in [1]_ to compute impulse response from PSS/PAC
    simulation data. The impulse response :math:`h(t, \tau)` satisfies the following equation:

    .. math:: y(t) = \int_{-\infty}^{\infty} h(t, \tau) \cdot x(\tau)\ d\tau

    Intuitively, :math:`h(t, \tau)` represents the output at time :math:`t` subject to
    an impulse at time :math:`\tau`. As described in the paper, If :math:`w_c` is the system
    frequency, and :math:`H_m(jw)` is the frequency response of the system at :math:`mw_c + w`
    due to an input sinusoid with frequency :math:`w`, then the impulse response can be calculated as:

    .. math::
        h(t, \tau) = \frac{1}{kT}\sum_{n=-\infty}^{\infty}\sum_{m=-\infty}^{\infty}
        H_m\left (j\dfrac{nw_c}{k}\right) \exp \left[ jmw_ct + j\dfrac{nw_c}{k} (t - \tau)\right]

    where :math:`0 \le \tau < T` and :math:`\tau \le t \le \tau + kT`.

    References
    ----------
    .. [1] J. Kim, B. S. Leibowitz and M. Jeeradit, "Impulse sensitivity function analysis of
       periodic circuits," 2008 IEEE/ACM International Conference on Computer-Aided Design,
       San Jose, CA, 2008, pp. 386-391.

    .. automethod:: __call__
    """
    def __init__(self, hmat, m, n, tper, k, out0):
        hmat = np.asarray(hmat)
        if hmat.shape != (2 * m + 1, n + 1):
            raise ValueError('hmat shape = %s not compatible with M=%d, N=%d' %
                             (hmat.shape, m, n))
        # use symmetry to fill in negative input frequency data.
        # for a real system, the negative-frequency entries are the complex
        # conjugates of the positive ones, flipped along both axes.
        fullh = np.empty((2 * m + 1, 2 * n + 1), dtype=complex)
        # fold in the 1/(kT) normalization factor from the impulse formula.
        fullh[:, n:] = hmat / (k * tper)
        fullh[:, :n] = np.fliplr(np.flipud(fullh[:, n + 1:])).conj()
        self.hmat = fullh
        wc = 2.0 * np.pi / tper
        # column vectors of j*m*wc and j*n*wc/k used in the complex exponentials.
        self.m_col = np.arange(-m, m + 1) * (1.0j * wc)
        self.n_col = np.arange(-n, n + 1) * (1.0j * wc / k)
        self.m_col = self.m_col.reshape((-1, 1))
        self.n_col = self.n_col.reshape((-1, 1))
        self.tper = tper
        self.k = k
        # interpolator for the zero-input steady-state output waveform.
        self.outfun = interp.interp1d(out0[:, 0], out0[:, 1], bounds_error=True,
                                      assume_sorted=True)
    @staticmethod
    def _print_debug_msg(result):
        # report how large the (ideally zero) imaginary part is relative to the
        # real part; a large ratio indicates truncation/numerical problems.
        res_imag = np.imag(result).flatten()
        res_real = np.real(result).flatten()
        res_ratio = np.abs(res_imag / (res_real + 1e-18))
        idx = np.argmax(res_ratio)
        print('max imag/real ratio: %.4g, imag = %.4g, real = %.4g' %
              (res_ratio[idx], res_imag[idx], res_real[idx]))
    def __call__(self, t, tau, debug=False):
        """Calculate h(t, tau).

        Compute h(t, tau), which is the output at t subject to an impulse
        at time tau. standard numpy broadcasting rules apply.

        Parameters
        ----------
        t : array-like
            the output time.
        tau : array-like
            the input impulse time.
        debug : bool
            True to print debug messages.

        Returns
        -------
        val : :class:`numpy.ndarray`
            the time-varying impulse response evaluated at the given coordinates.
        """
        # broadcast arguments to same shape
        t, tau = np.broadcast_arrays(t, tau)
        # compute impulse using efficient matrix multiply and numpy broadcasting.
        dt = t - tau
        # the response is causal and settles to zero after k periods.
        zero_indices = (dt < 0) | (dt > self.k * self.tper)
        t_row = t.reshape((1, -1))
        dt_row = dt.reshape((1, -1))
        # sum over n via matrix product, then over m via the axis-0 sum below.
        tmp = np.dot(self.hmat, np.exp(np.dot(self.n_col, dt_row))) * np.exp(np.dot(self.m_col, t_row))
        result = np.sum(tmp, axis=0).reshape(dt.shape)
        # zero element such that dt < 0 or dt > k * T.
        result[zero_indices] = 0.0
        if debug:
            self._print_debug_msg(result)
        # discard imaginary part
        return np.real(result)
    def _get_core(self, num_points, debug=False):
        """Returns h(dt, tau) matrix and output waveform over 1 period. Used by lsim.

        Compute h(dt, tau) for 0 <= tau < T and 0 <= dt < kT, where dt = t - tau.
        """
        dt_vec = np.linspace(0.0, self.k * self.tper, self.k * num_points, endpoint=False)  # type: np.ndarray
        tvec_per = dt_vec[:num_points]
        tau_col = tvec_per.reshape((-1, 1))
        dt_row = dt_vec.reshape((1, -1))
        # use matrix multiply to sum across n
        tmp = np.dot(self.hmat, np.exp(np.dot(self.n_col, dt_row)))
        # use broadcast multiply for exp(-jwm*(t-tau)) term
        tmp = tmp * np.exp(np.dot(self.m_col, dt_row))
        # use matrix multiply to sum across m
        result = np.dot(np.exp(np.dot(tau_col, self.m_col.T)), tmp).T
        if debug:
            self._print_debug_msg(result)
        # discard imaginary part
        result = np.real(result)
        # compute output waveform
        wvfm = self.outfun(tvec_per)
        return result, wvfm
    def visualize(self, fig_idx, num_points, num_period,
                  plot_color=True, plot_3d=False, show=True):
        """Visualize the time-varying impulse response.

        Parameters
        ----------
        fig_idx : int
            starting figure index.
        num_points : int
            number of sample points in a period.
        num_period : int
            number of output period.
        plot_color : bool
            True to create a plot of the time-varying impulse response as 2D color plot.
        plot_3d : bool
            True to create a 3D plot of the impulse response.
        show : bool
            True to show the plots immediately. Set to False if you want to create some
            other plots.
        """
        if not plot_color and not plot_3d:
            # do nothing.
            return
        tot_points = num_period * num_points
        tau_vec = np.linspace(0, self.tper, num_points, endpoint=False)
        dt_vec = np.linspace(0, num_period * self.tper, tot_points, endpoint=False)
        dt, tau = np.meshgrid(dt_vec, tau_vec, indexing='ij', copy=False)
        t = tau + dt
        result, _ = self._get_core(num_points)
        # keep only the requested number of output periods.
        result = result[:num_period * num_points, :]
        import matplotlib.pyplot as plt
        from matplotlib import cm
        if plot_color:
            # plot 2D color
            fig = plt.figure(fig_idx)
            fig_idx += 1
            ax = fig.gca()
            cp = ax.pcolor(t, tau, result, cmap=cm.cubehelix)
            plt.colorbar(cp)
            ax.set_title('Impulse response contours')
            ax.set_ylabel('impulse time')
            ax.set_xlabel('output time')
        if plot_3d:
            # plot 3D impulse response
            # noinspection PyUnresolvedReferences
            from mpl_toolkits.mplot3d import Axes3D
            fig = plt.figure(fig_idx)
            ax = fig.add_subplot(111, projection='3d')
            ax.plot_surface(t, tau, result, rstride=1, cstride=1, linewidth=0, cmap=cm.cubehelix)
            ax.set_title('Impulse response')
            ax.set_ylabel('impulse time')
            ax.set_xlabel('output time')
        if show:
            plt.show()
    def lsim(self, u, tstep, tstart=0.0, ac_only=False, periodic=False, debug=False):
        r"""Compute the output waveform given input waveform.

        This method assumes zero initial state. The output waveform will be the
        same length as the input waveform, so pad zeros if necessary.

        Parameters
        ----------
        u : array-like
            the input waveform.
        tstep : float
            the input/output time step, in seconds. Must evenly divide system period.
        tstart : float
            the time corresponding to u[0]. Assume u = 0 for all time before tstart.
            Defaults to 0.
        ac_only : bool
            Return output waveform due to AC input only and without steady-state
            transient.
        periodic : bool
            True if the input is periodic. If so, returns steady state output.
        debug : bool
            True to print debug messages.

        Returns
        -------
        y : :class:`numpy.ndarray`
            the output waveform.

        Notes
        -----
        This method computes the integral:

        .. math:: y(t) = \int_{-\infty}^{\infty} h(t, \tau) \cdot x(\tau)\ d\tau

        using the following algorithm:

        #. set :math:`d\tau = \texttt{tstep}`.
        #. Compute :math:`h(\tau + dt, \tau)` for :math:`0 \le dt < kT` and
           :math:`0 \le \tau < T`, then express as a kN-by-N matrix. This matrix
           completely describes the time-varying impulse response.
        #. tile the impulse response matrix horizontally until its number of columns
           matches input signal length, then multiply column i by u[i].
        #. Compute y as the sum of all anti-diagonals of the matrix computed in
           previous step, multiplied by :math:`d\tau`. Truncate if necessary.
        """
        u = np.asarray(u)
        nstep = _even_quotient(self.tper, tstep)
        ndelay = _even_quotient(tstart, tstep)
        # error checking
        if len(u.shape) != 1:
            raise ValueError('u must be a 1D array.')
        if nstep < 0:
            # BUG FIX: trailing space was missing, fusing the two message halves.
            raise ValueError('Time step = %.4g does not evenly divide '
                             'System period = %.4g' % (tstep, self.tper))
        if ndelay < 0:
            # BUG FIX: same missing space; also "Startimg" typo corrected.
            raise ValueError('Time step = %.4g does not evenly divide '
                             'Starting time = %.4g' % (tstep, tstart))
        if periodic and nstep != u.size:
            raise ValueError('Periodic waveform must have same period as system period.')
        # calculate and tile hcore
        ntot = u.size
        hcore, outwv = self._get_core(nstep, debug=debug)
        # shift so that column 0 corresponds to tau = tstart.
        hcore = np.roll(hcore, -ndelay, axis=1)
        outwv = np.roll(outwv, -ndelay)
        if periodic:
            # input periodic; more efficient math.
            hcore *= u
            hcore = np.tile(hcore, (1, self.k + 1))
            # sum the anti-diagonals, then keep one fully-settled period.
            y = np.bincount(np.sum(np.indices(hcore.shape), axis=0).flat, hcore.flat)
            y = y[self.k * nstep:(self.k + 1) * nstep] * tstep
        else:
            ntile = int(np.ceil(ntot * 1.0 / nstep))
            hcore = np.tile(hcore, (1, ntile))
            outwv = np.tile(outwv, (ntile,))
            hcore = hcore[:, :ntot]
            outwv = outwv[:ntot]
            # broadcast multiply
            hcore *= u
            # magic code from stackoverflow
            # returns an array of the sums of all anti-diagonals.
            y = np.bincount(np.sum(np.indices(hcore.shape), axis=0).flat, hcore.flat)[:ntot] * tstep
        if not ac_only:
            # add output steady state transient
            y += outwv
        return y
    def lsim_digital(self, tsym, tstep, data, pulse, tstart=0.0, nchain=1, tdelta=0.0, **kwargs):
        """Compute output waveform given input pulse shape and data.

        This method is similar to :func:`~bag.data.ltv.LTVImpulseFinite.lsim`, but
        assumes the input is superposition of shifted and scaled copies of a given
        pulse waveform. This assumption speeds up the computation and is useful
        for high speed link design.

        Parameters
        ----------
        tsym : float
            the symbol period, in seconds. Must evenly divide system period.
        tstep : float
            the output time step, in seconds. Must evenly divide symbol period.
        data : list[float]
            list of symbol values.
        pulse : np.ndarray
            the pulse waveform as a two-column array. The first column is time,
            second column is pulse waveform value. Linear interpolation will be used
            if necessary. Time must start at 0.0 and be increasing.
        tstart : float
            time of the first data symbol. Defaults to 0.0
        nchain : int
            number of blocks in a chain. Defaults to 1. This argument is useful if
            you have multiple blocks cascaded together in a chain, and you wish to find
            the output waveform at the end of the chain.
        tdelta : float
            time difference between adjacent elements in a chain. Defaults to 0. This
            argument is useful for simulating a chain of latches, where blocks operate
            on alternate phases of the clock.
        kwargs : dict[str, any]
            additional keyword arguments for :func:`~bag.data.ltv.LTVImpulseFinite.lsim`.

        Returns
        -------
        output : :class:`numpy.ndarray`
            the output waveform over N symbol period, where N is the given data length.
        """
        # check tsym evenly divides system period
        nsym = _even_quotient(self.tper, tsym)
        if nsym < 0:
            raise ValueError('Symbol period %.4g does not evenly divide '
                             'system period %.4g' % (tsym, self.tper))
        # check tstep evenly divides tsym
        nstep = _even_quotient(tsym, tstep)
        if nstep < 0:
            raise ValueError('Time step %.4g does not evenly divide '
                             'symbol period %.4g' % (tstep, tsym))
        # check tstep evenly divides tstart
        ndelay = _even_quotient(tstart, tstep)
        if ndelay < 0:
            raise ValueError('Time step %.4g does not evenly divide '
                             'starting time %.4g' % (tstep, tstart))
        # number of output samples per system period.
        nper = nstep * nsym
        pulse = np.asarray(pulse)
        tvec = pulse[:, 0]
        pvec = pulse[:, 1]
        # find input length
        # noinspection PyUnresolvedReferences
        nlast = min(np.nonzero(pvec)[0][-1] + 1, tvec.size - 1)
        tlast = tvec[nlast]
        # pulse tail + chain settling time + remaining symbol offsets.
        ntot = int(np.ceil(tlast / tstep)) + nchain * self.k * nper + nstep * (nsym - 1)
        # interpolate input
        pfun = interp.interp1d(tvec, pvec, kind='linear', copy=False, bounds_error=False,
                               fill_value=0.0, assume_sorted=True)
        tin = np.linspace(0.0, ntot * tstep, ntot, endpoint=False)
        pin = pfun(tin)
        # super-impose pulse responses
        num_out = len(data) * nstep
        output = np.zeros(num_out)
        for idx in range(nsym):
            # get output pulse response
            pout = pin
            for j in range(nchain):
                pout = self.lsim(pout, tstep, tstart=tstart + j * tdelta, periodic=False,
                                 ac_only=True, **kwargs)
            # construct superposition matrix
            # symbols idx, idx + nsym, idx + 2*nsym, ... share this alignment.
            cur_data = data[idx::nsym]
            offsets = np.arange(0, len(cur_data) * nper, nper) * -1
            diags = np.tile(cur_data, (ntot, 1)).T
            dia_mat = sparse.dia_matrix((diags, offsets), shape=(num_out, ntot))
            # superimpose
            output += dia_mat.dot(pout)
            # shift input pulse.
            pin = np.roll(pin, nstep)
        # compute output steady state waveform
        out_pss = self.outfun(np.linspace(0.0, self.tper, nper, endpoint=False))
        out_pss = np.roll(out_pss, -ndelay)
        for j in range(1, nchain):
            out_pss = self.lsim(out_pss, tstep, tstart=tstart + j * tdelta, periodic=True,
                                ac_only=False, **kwargs)
        ntile = int(np.ceil(num_out * 1.0 / nper))
        out_pss = np.tile(out_pss, (ntile,))
        output += out_pss[:num_out]
        return output
================================================
FILE: bag/data/mos.py
================================================
# -*- coding: utf-8 -*-
"""This module defines classes for computing DC operating point.
"""
from typing import Dict
import numpy as np
def mos_y_to_ss(sim_data, char_freq, fg, ibias, cfit_method='average'):
    # type: (Dict[str, np.ndarray], float, int, np.ndarray, str) -> Dict[str, np.ndarray]
    """Convert 3-port transistor Y parameters to small-signal parameters.

    Fits Y parameter measurements taken on gate, drain and source (ports
    1/2/3, body bias fixed) to a capacitor-only small-signal model using
    least-mean-square error.  All returned values are normalized to a
    single transistor finger.

    Parameters
    ----------
    sim_data : Dict[str, np.ndarray]
        A dictionary of Y parameters values stored as complex numpy arrays.
    char_freq : float
        the frequency Y parameters are measured at.
    fg : int
        number of transistor fingers used for the Y parameter measurement.
    ibias : np.ndarray
        the DC bias current of the transistor. Always positive.
    cfit_method : str
        method used to extract capacitance from Y parameters. Currently
        supports 'average' or 'worst'

    Returns
    -------
    ss_dict : Dict[str, np.ndarray]
        A dictionary of small signal parameter values stored as numpy
        arrays. These values are normalized to 1-finger transistor.
    """
    omega = 2 * np.pi * char_freq
    # conductances come from the real parts of the Y parameters.
    gm = (sim_data['y21'].real - sim_data['y31'].real) / 2.0  # type: np.ndarray
    gds = (sim_data['y22'].real - sim_data['y32'].real) / 2.0  # type: np.ndarray
    gb = (sim_data['y33'].real - sim_data['y23'].real) / 2.0 - gm - gds  # type: np.ndarray
    # each inter-terminal capacitor is measured twice (both port orderings).
    cgd_pair = (-sim_data['y12'].imag / omega, -sim_data['y21'].imag / omega)
    cgs_pair = (-sim_data['y13'].imag / omega, -sim_data['y31'].imag / omega)
    cds_pair = (-sim_data['y23'].imag / omega, -sim_data['y32'].imag / omega)
    cgg = sim_data['y11'].imag / omega
    cdd = sim_data['y22'].imag / omega
    css = sim_data['y33'].imag / omega
    if cfit_method == 'average':
        cgd = (cgd_pair[0] + cgd_pair[1]) / 2  # type: np.ndarray
        cgs = (cgs_pair[0] + cgs_pair[1]) / 2  # type: np.ndarray
        cds = (cds_pair[0] + cds_pair[1]) / 2  # type: np.ndarray
    elif cfit_method == 'worst':
        cgd = np.maximum(*cgd_pair)
        cgs = np.maximum(*cgs_pair)
        cds = np.maximum(*cds_pair)
    else:
        raise ValueError('Unknown cfit_method = %s' % cfit_method)
    # body capacitances are whatever remains of the total terminal capacitance.
    ss_dict = dict(
        ibias=ibias,
        gm=gm,
        gds=gds,
        gb=gb,
        cgd=cgd,
        cgs=cgs,
        cds=cds,
        cgb=cgg - cgd - cgs,
        cdb=cdd - cds - cgd,
        csb=css - cgs - cds,
    )
    # normalize every parameter to a single finger.
    return {key: val / fg for key, val in ss_dict.items()}
================================================
FILE: bag/data/plot.py
================================================
# -*- coding: utf-8 -*-
"""This module contains utilities to improve waveform plotting in python.
"""
import numpy as np
import scipy.interpolate as interp
from matplotlib.lines import Line2D
from matplotlib.figure import Figure
from matplotlib.text import Annotation
import matplotlib.pyplot as plt
from ..math import float_to_si_string
# Vega category10 palette
color_cycle = ['#1f77b4', '#ff7f0e',
'#2ca02c', '#d62728',
'#9467bd', '#8c564b',
'#e377c2', '#7f7f7f',
'#bcbd22', '#17becf',
]
def figure(fig_id, picker=5.0):
    """Return a new WaveformPlotter for building interactive figures.

    Parameters
    ----------
    fig_id : int
        the figure ID.
    picker : float
        picker event pixel tolerance.

    Returns
    -------
    plotter : bag.data.plot.WaveformPlotter
        a plotter that helps you make interactive matplotlib figures.
    """
    return WaveformPlotter(fig_id, picker=picker)
def plot_waveforms(xvec, panel_list, fig=1):
    """Plot waveforms in vertical panels with shared X axis.

    Parameters
    ----------
    xvec : :class:`numpy.ndarray`
        the X data.
    panel_list : list[list[(str, :class:`numpy.ndarray`)]]
        list of lists of Y data. Each sub-list is one panel. Each element of the sub-list
        is a tuple of signal name and signal data.
    fig : int
        the figure ID.
    """
    nrow = len(panel_list)
    if nrow > 0:
        # use the custom MarkerFigure class; presumably it adds interactive
        # marker support -- see its definition elsewhere in this file.
        myfig = plt.figure(fig, FigureClass=MarkerFigure)  # type: MarkerFigure
        ax0 = None
        for idx, panel in enumerate(panel_list):
            # every panel after the first shares the X axis with the first one.
            if ax0 is None:
                ax = plt.subplot(nrow, 1, idx + 1)
                ax0 = ax
            else:
                ax = plt.subplot(nrow, 1, idx + 1, sharex=ax0)
            for name, sig in panel:
                # picker=5.0 enables pick events within 5 pixels of each line.
                ax.plot(xvec, sig, label=name, picker=5.0)
            # shrink the axes horizontally to make room for the legend.
            box = ax.get_position()
            ax.set_position([box.x0, box.y0, box.width * 0.9, box.height])
            # Put a legend to the right of the current axis
            ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
        myfig.setup_callbacks()
        plt.show(block=False)
def _fpart(x):
return x - int(x)
def _rfpart(x):
return 1 - _fpart(x)
def draw_line(x0, y0, x1, y1, xmax, grid):
    """Draws an anti-aliased line in img from p1 to p2 with the given color."""
    # Xiaolin Wu-style anti-aliased rasterization: each pixel the segment
    # passes near receives a weight proportional to its coverage, accumulated
    # into grid.  The first grid axis is treated as periodic with period xmax
    # (used by plot_eye_heatmap, where X wraps around the eye period).
    if x0 > x1:
        # x1 is wrapped around
        x1 += xmax
    dx, dy = x1 - x0, y1 - y0
    steep = dx < abs(dy)
    if steep:
        # swap the roles of X and Y so the main loop always steps along the
        # axis with the larger span (one pixel per iteration)
        x0, y0, x1, y1, dx, dy = y0, x0, y1, x1, dy, dx
    gradient = dy * 1.0 / dx
    # handle first endpoint
    xpxl1 = int(x0 + 0.5)
    yend = y0 + gradient * (xpxl1 - x0)
    xgap = _rfpart(x0 + 0.5)
    ypxl1 = int(yend)
    if steep:
        # the modulo wrap stays on the first grid index in both branches
        grid[ypxl1 % xmax, xpxl1] += _rfpart(yend) * xgap
        grid[(ypxl1 + 1) % xmax, xpxl1] += _fpart(yend) * xgap
    else:
        grid[xpxl1 % xmax, ypxl1] += _rfpart(yend) * xgap
        grid[xpxl1 % xmax, ypxl1 + 1] += _fpart(yend) * xgap
    intery = yend + gradient  # first y-intersection for the main loop
    # do not color second endpoint to avoid double coloring.
    xpxl2 = int(x1 + 0.5)
    # main loop: split weight between the two pixels straddling the ideal line
    if steep:
        for x in range(xpxl1 + 1, xpxl2):
            xval = int(intery)
            grid[xval % xmax, x] += _rfpart(intery)
            grid[(xval + 1) % xmax, x] += _fpart(intery)
            intery += gradient
    else:
        for x in range(xpxl1 + 1, xpxl2):
            xval = x % xmax
            grid[xval, int(intery)] += _rfpart(intery)
            grid[xval, int(intery) + 1] += _fpart(intery)
            intery += gradient
def plot_eye_heatmap(fig, tvec, yvec, tper, tstart=None, tend=None, toff=None,
                     tstep=None, vstep=None,
                     cmap=None, vmargin=0.05, interpolation='gaussian',
                     repeat=False):
    """Plot eye diagram heat map.

    Parameters
    ----------
    fig : int
        the figure ID.
    tvec : np.ndarray
        the time data.
    yvec : np.ndarray
        waveform data.
    tper : float
        the eye period.
    tstart : float
        starting time.  Defaults to first point.
    tend : float
        ending time.  Defaults to last point.
    toff : float
        eye offset.  Defaults to 0.
    tstep : float or None
        horizontal bin size.  Defaults to using 200 bins.
    vstep : float or None
        vertical bin size.  Defaults to using 200 bins.
    cmap :
        the colormap used for coloring the heat map.  If None, defaults to cubehelix_r
    vmargin : float
        vertical margin in percentage of maximum/minimum waveform values.  Defaults
        to 5 percent.  This is used so that there some room between top/bottom of
        eye and the plot.
    interpolation : str
        interpolation method.  Defaults to 'gaussian'.  Use 'none' for no interpolation.
    repeat : bool
        True to repeat the eye diagram once to the right.  This is useful if you
        want to look at edge transistions.
    """
    if not toff:
        toff = 0.0
    if tstart is None:
        tstart = tvec[0]
    if tend is None:
        tend = tvec[-1]
    if tstep is None:
        num_h = 200
    else:
        num_h = int(np.ceil(tper / tstep))
    # select samples in [tstart, tend) and fold time onto one eye period,
    # rescaled to horizontal bin units [0, num_h)
    arr_idx = (tstart <= tvec) & (tvec < tend)
    tplot = np.mod((tvec[arr_idx] - toff), tper) / tper * num_h  # type: np.ndarray
    yplot = yvec[arr_idx]
    # get vertical range
    ymin, ymax = np.amin(yplot), np.amax(yplot)
    yrang = (ymax - ymin) * (1 + vmargin)
    ymid = (ymin + ymax) / 2.0
    # expand the plot window symmetrically around the waveform midpoint
    ymin = ymid - yrang / 2.0
    ymax = ymin + yrang
    if vstep is None:
        num_v = 200
    else:
        num_v = int(np.ceil(yrang / vstep))
    # rescale Y axis
    yplot = (yplot - ymin) / yrang * num_v
    # accumulate anti-aliased segments between consecutive samples; draw_line
    # handles the horizontal wrap at the eye-period boundary (xmax=num_h)
    grid = np.zeros((num_h, num_v), dtype=float)
    for idx in range(yplot.size - 1):
        draw_line(tplot[idx], yplot[idx], tplot[idx + 1], yplot[idx + 1], num_h, grid)
    if cmap is None:
        from matplotlib import cm
        # noinspection PyUnresolvedReferences
        cmap = cm.cubehelix_r
    plt.figure(fig)
    # transpose so Y runs vertically, flip so larger values appear at the top
    grid = grid.T[::-1, :]
    if repeat:
        # tile a second eye to the right so edge transitions are visible
        grid = np.tile(grid, (1, 2))
        tper *= 2.0
    plt.imshow(grid, extent=[0, tper, ymin, ymax], cmap=cmap,
               interpolation=interpolation, aspect='auto')
    cb = plt.colorbar()
    cb.set_label('counts')
    return grid
def plot_eye(fig, tvec, yvec_list, tper, tstart=None, tend=None,
             toff_list=None, name_list=None, alpha=1.0):
    """Plot eye diagram.

    Parameters
    ----------
    fig : int
        the figure ID.
    tvec : np.ndarray
        the time data.
    yvec_list : list[np.ndarray]
        list of waveforms to plot in eye diagram.
    tper : float
        the period.
    tstart : float
        starting time.  Defaults to first point.
    tend : float
        ending time.  Defaults to last point.
    toff_list : list[float]
        offset to apply to each waveform.  Defaults to zeros.
    name_list : list[str] or None
        the name of each waveform.  Defaults to numbers.
    alpha : float
        the transparency of each trace.  Can be used to mimic heatmap.
    """
    if not yvec_list:
        return
    if not name_list:
        name_list = [str(num) for num in range(len(yvec_list))]
    if not toff_list:
        toff_list = [0.0] * len(yvec_list)
    if tstart is None:
        tstart = tvec[0]
    if tend is None:
        tend = tvec[-1]
    # get new tstep that evenly divides tper and new x vector
    # assumes tvec is uniformly spaced -- TODO confirm with callers
    tstep_given = (tvec[-1] - tvec[0]) / (tvec.size - 1)
    num_samp = int(round(tper / tstep_given))
    t_plot = np.linspace(0.0, tper, num_samp, endpoint=False)
    # find tstart and tend in number of tper.
    nstart = int(np.floor(tstart / tper))
    nend = int(np.ceil(tend / tper))
    ncycle = nend - nstart
    # sample times for every eye cycle; one row per cycle after the reshape
    teye = np.linspace(nstart * tper, nend * tper, num_samp * ncycle, endpoint=False)  # type: np.ndarray
    teye = teye.reshape((ncycle, num_samp))
    myfig = plt.figure(fig, FigureClass=MarkerFigure)  # type: MarkerFigure
    ax = plt.subplot()
    legend_lines = []
    for idx, yvec in enumerate(yvec_list):
        color = color_cycle[idx % len(color_cycle)]
        toff = toff_list[idx]
        # get eye traces; times outside the data range evaluate to NaN, so
        # partial cycles at the edges simply do not draw
        yfun = interp.interp1d(tvec - toff, yvec, kind='linear', copy=False, bounds_error=False,
                               fill_value=np.nan, assume_sorted=True)
        plot_list = []
        for cycle_idx in range(ncycle):
            plot_list.append(t_plot)
            plot_list.append(yfun(teye[cycle_idx, :]))
        # draw all cycles of this waveform with one plot() call so they share
        # the same color and a single legend entry
        lines = ax.plot(*plot_list, alpha=alpha, color=color, picker=4.0, linewidth=2)
        legend_lines.append(lines[0])
    # Put a legend to the right of the current axis
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width * 0.9, box.height])
    ax.legend(legend_lines, name_list, loc='center left', bbox_to_anchor=(1, 0.5))
    myfig.setup_callbacks()
    plt.show(block=False)
def _find_closest_point(x, y, xvec, yvec, xnorm, ynorm):
"""Find point on PWL waveform described by xvec, yvec closest to (x, y)"""
xnvec = xvec / xnorm
ynvec = yvec / ynorm
xn = x / xnorm
yn = y / ynorm
dx = np.diff(xnvec)
dy = np.diff(ynvec)
px = (xn - xnvec[:-1])
py = (yn - ynvec[:-1])
that = (px * dx + py * dy) / (dx ** 2 + dy ** 2)
t = np.minimum(np.maximum(that, 0), 1)
minx = xnvec[:-1] + t * dx
miny = ynvec[:-1] + t * dy
dist = (minx - xn) ** 2 + (miny - yn) ** 2
idx = np.argmin(dist)
return minx[idx] * xnorm, miny[idx] * ynorm
class WaveformPlotter(object):
    """A custom matplotlib interactive plotting class.

    This class adds many useful features, such as ability to add/remove markers,
    ability to toggle waveforms on and off, and so on.  Clicking a legend line
    toggles the corresponding waveform; clicking a legend label brings that
    waveform to the top.

    Parameters
    ----------
    fig_idx : int
        the figure index.
    picker : float
        picker event pixel tolerance.
    normal_width : float
        normal linewidth.
    select_width : float
        selected linewidth.
    """
    def __init__(self, fig_idx, picker=5.0, normal_width=1.5, select_width=3.0):
        self.figure = plt.figure(fig_idx, FigureClass=MarkerFigure)  # type: MarkerFigure
        self.picker = picker
        self.norm_lw = normal_width
        self.top_lw = select_width
        self.ax = self.figure.gca()
        self.ax.set_prop_cycle('color', color_cycle)
        # legend line -> (legend text, axes line)
        self.leline_lookup = {}
        # legend text -> (legend line, axes line)
        self.letext_lookup = {}
        # (legend text, axes line) of the currently highlighted waveform
        self.last_top = None
        self.legend = None
        # guards fix_legend_location() so the axes shrink only once per resize
        self.resized_legend = False
    def plot(self, *args, **kwargs):
        """Plot on this figure's axes; forwards to Axes.plot.

        Applies the configured picker tolerance and normal linewidth; any
        user-supplied 'lw' is discarded in favor of the plotter's linewidth.
        """
        if self.figure is None:
            raise ValueError('figure closed already')
        if 'picker' not in kwargs:
            kwargs['picker'] = self.picker
        kwargs['linewidth'] = self.norm_lw
        if 'lw' in kwargs:
            del kwargs['lw']
        return self.ax.plot(*args, **kwargs)
    def setup(self):
        """Build the legend, register pick/draw/close/resize callbacks.

        Must be called after all plot() calls; the last-plotted waveform
        starts as the highlighted (top) one.
        """
        if self.figure is None:
            raise ValueError('figure closed already')
        self.figure.tight_layout()
        # Put a legend to the right of the current axis
        ax_lines, ax_labels = self.ax.get_legend_handles_labels()
        self.legend = self.ax.legend(ax_lines, ax_labels, loc='center left',
                                     bbox_to_anchor=(1, 0.5), fancybox=True)
        le_lines = self.legend.get_lines()
        le_texts = self.legend.get_texts()
        for leline, letext, axline in zip(le_lines, le_texts, ax_lines):
            self.leline_lookup[leline] = (letext, axline)
            self.letext_lookup[letext] = (leline, axline)
            leline.set_picker(self.picker)
            letext.set_picker(self.picker)
            # dim all labels; the highlighted one is set opaque below
            letext.set_alpha(0.5)
        le_texts[-1].set_alpha(1.0)
        ax_lines[-1].set_zorder(2)
        ax_lines[-1].set_linewidth(self.top_lw)
        self.last_top = (le_texts[-1], ax_lines[-1])
        self.figure.register_pick_event(self.leline_lookup, self.legend_line_picked)
        self.figure.register_pick_event(self.letext_lookup, self.legend_text_picked)
        self.figure.setup_callbacks()
        self.figure.canvas.mpl_connect('draw_event', self.fix_legend_location)
        self.figure.canvas.mpl_connect('close_event', self.figure_closed)
        self.figure.canvas.mpl_connect('resize_event', self.figure_resized)
    # noinspection PyUnusedLocal
    def figure_closed(self, event):
        """Release all references when the window closes."""
        self.figure.close_figure()
        self.figure = None
        self.ax = None
        self.leline_lookup = None
        self.letext_lookup = None
        self.last_top = None
        self.legend = None
    # noinspection PyUnusedLocal
    def figure_resized(self, event):
        """Re-run the legend layout after a window resize."""
        self.resized_legend = False
        self.fix_legend_location(None)
    # noinspection PyUnusedLocal
    def fix_legend_location(self, event):
        """Shrink the axes so the right-hand legend fits inside the figure."""
        if not self.resized_legend:
            self.figure.tight_layout()
            inv_tran = self.figure.transFigure.inverted()
            # legend extent in figure coordinates
            leg_box = inv_tran.transform(self.legend.get_window_extent())
            leg_width = leg_box[1][0] - leg_box[0][0]
            box = self.ax.get_position()
            # print box.x0, box.y0, box.width, box.height, leg_width, leg_frame.get_height()
            self.ax.set_position([box.x0, box.y0, box.width - leg_width, box.height])
            # set the flag before draw() so the draw_event callback is a no-op
            self.resized_legend = True
            self.figure.canvas.draw()
    def legend_line_picked(self, artist):
        """Toggle visibility of the waveform whose legend line was clicked."""
        letext, axline = self.leline_lookup[artist]
        visible = not axline.get_visible()
        if visible:
            artist.set_alpha(1.0)
        else:
            artist.set_alpha(0.2)
        if visible and (self.last_top[1] is not axline):
            # set to be top line
            self.legend_text_picked(letext, draw=False)
        self.figure.set_line_visibility(axline, visible)
    def legend_text_picked(self, artist, draw=True):
        """Bring the waveform whose legend label was clicked to the top."""
        leline, axline = self.letext_lookup[artist]
        # demote the previous top waveform
        self.last_top[0].set_alpha(0.5)
        self.last_top[1].set_zorder(1)
        self.last_top[1].set_linewidth(self.norm_lw)
        axline.set_zorder(2)
        artist.set_alpha(1.0)
        axline.set_linewidth(self.top_lw)
        self.last_top = (artist, axline)
        # if draw is False, this method is not called from
        # legend_line_picked(), so we'll never have recursion issues.
        if draw:
            if not axline.get_visible():
                # set line to be visible if not
                # draw() will be called in legend_line_picked
                self.legend_line_picked(leline)
            else:
                self.figure.canvas.draw()
# noinspection PyAbstractClass
class MarkerFigure(Figure):
    """A matplotlib Figure subclass with interactive draggable data markers.

    Left-clicking a line creates an annotated marker snapped to the nearest
    point on that line; markers can be dragged along the line and deleted
    with a right-click.  Uses canvas blitting for smooth dragging.
    """
    def __init__(self, **kwargs):
        Figure.__init__(self, **kwargs)
        # each entry is [annotation, point-artist, source line, blit background]
        self.markers = []
        # pixel radius for grabbing an existing marker
        self.epsilon = 10.0
        # index into self.markers of the marker being dragged; -1 if none
        self.drag_idx = -1
        self.timer = None
        # pending (line, x, y, button, axes) for deferred marker creation
        self.marker_line_info = None
        # parallel lists: artist collections and their pick handlers
        self.pick_sets = []
        self.pick_funs = []
    def set_line_visibility(self, axline, visible):
        """Show/hide a line; hiding also removes all of its markers."""
        axline.set_visible(visible)
        if not visible:
            # delete all markers on this line
            del_idx_list = [idx for idx, item in enumerate(self.markers) if item[2] is axline]
            # delete from the back so earlier indices stay valid
            for targ_idx in reversed(del_idx_list):
                an, pt, _, _ = self.markers[targ_idx]
                del self.markers[targ_idx]
                # print targ_idx, an
                an.set_visible(False)
                pt.set_visible(False)
        self.canvas.draw()
    def register_pick_event(self, artist_set, fun):
        """Register handler fun for pick events on any artist in artist_set."""
        self.pick_sets.append(artist_set)
        self.pick_funs.append(fun)
    def on_button_release(self, event):
        """Disable data cursor dragging. """
        if event.button == 1:
            self.drag_idx = -1
    def on_motion(self, event):
        """Move data cursor around. """
        ax = event.inaxes
        if self.drag_idx >= 0 and ax is not None and event.button == 1:
            xmin, xmax = ax.get_xlim()
            ymin, ymax = ax.get_ylim()
            anno, pt, line, bg = self.markers[self.drag_idx]
            # snap the cursor to the closest point on the marker's line,
            # normalizing by the axis spans so X and Y are weighted equally
            x, y = _find_closest_point(event.xdata, event.ydata,
                                       line.get_xdata(), line.get_ydata(),
                                       xmax - xmin, ymax - ymin)
            pt.set_data([x], [y])
            xstr, ystr = float_to_si_string(x, 4), float_to_si_string(y, 4)
            anno.set_text('x: %s\ny: %s' % (xstr, ystr))
            anno.xy = (x, y)
            # blit: restore the saved background, redraw only the marker
            self.canvas.restore_region(bg)
            anno.set_visible(True)
            pt.set_visible(True)
            ax.draw_artist(anno)
            ax.draw_artist(pt)
            self.canvas.blit(ax.bbox)
    def _get_idx_under_point(self, event):
        """Find selected data cursor."""
        mx = event.x
        my = event.y
        mind = None
        minidx = None
        # find closest marker point
        for idx, (an, pt, _, _) in enumerate(self.markers):
            xv, yv = pt.get_xdata()[0], pt.get_ydata()[0]
            # compare in display (pixel) coordinates
            xp, yp = event.inaxes.transData.transform([xv, yv])
            # print xv, yv, xp, yp, mx, my
            d = ((mx - xp) ** 2 + (my - yp) ** 2) ** 0.5
            if mind is None or d < mind:
                mind = d
                minidx = idx
        if mind is not None and mind < self.epsilon:
            return minidx
        return -1
    def on_pick(self, event):
        """Dispatch pick events: registered handlers, marker drag, or marker creation/deletion."""
        artist = event.artist
        if not artist.get_visible():
            return
        # registered artist sets (e.g. legend entries) take priority
        for idx, artist_set in enumerate(self.pick_sets):
            if artist in artist_set:
                self.pick_funs[idx](artist)
                return
        if isinstance(artist, Line2D):
            mevent = event.mouseevent
            # figure out if we picked marker or line
            self.drag_idx = self._get_idx_under_point(mevent)
            if self.drag_idx >= 0:
                # picked marker.
                ax = mevent.inaxes
                an, pt, _, _ = self.markers[self.drag_idx]
                # hide the marker, snapshot the background for blitting,
                # then show it again -- this order is essential
                an.set_visible(False)
                pt.set_visible(False)
                self.canvas.draw()
                self.markers[self.drag_idx][-1] = self.canvas.copy_from_bbox(ax.bbox)
                an.set_visible(True)
                pt.set_visible(True)
                ax.draw_artist(an)
                ax.draw_artist(pt)
                self.canvas.blit(ax.bbox)
            else:
                # save data to plot marker later
                # (created by the timer so overlapping lines yield one marker)
                mxval = mevent.xdata
                button = mevent.button
                if mxval is not None and button == 1 and not self.marker_line_info:
                    self.marker_line_info = (artist, mxval, mevent.ydata,
                                             button, mevent.inaxes)
        elif isinstance(artist, Annotation):
            # delete marker.
            mevent = event.mouseevent
            if mevent.button == 3:
                targ_idx = None
                for idx, (an, pt, _, _) in enumerate(self.markers):
                    if an is artist:
                        targ_idx = idx
                        break
                if targ_idx is not None:
                    an, pt, _, _ = self.markers[targ_idx]
                    del self.markers[targ_idx]
                    an.set_visible(False)
                    pt.set_visible(False)
                    self.canvas.draw()
    def _create_marker(self):
        """Timer callback: create the pending marker saved by on_pick."""
        if self.marker_line_info:
            artist, mxval, myval, button, ax = self.marker_line_info
            xmin, xmax = ax.get_xlim()
            ymin, ymax = ax.get_ylim()
            # snap the click location onto the picked line
            mxval, myval = _find_closest_point(mxval, myval,
                                               artist.get_xdata(), artist.get_ydata(),
                                               xmax - xmin, ymax - ymin)
            pt = ax.plot(mxval, myval, 'ko', picker=5.0)[0]
            xstr, ystr = float_to_si_string(mxval, 4), float_to_si_string(myval, 4)
            msg = 'x: %s\ny: %s' % (xstr, ystr)
            anno = ax.annotate(msg, xy=(mxval, myval), bbox=dict(boxstyle='round', fc='yellow', alpha=0.3),
                               arrowprops=dict(arrowstyle="->"))
            anno.draggable()
            anno.set_picker(True)
            self.markers.append([anno, pt, artist, None])
            ax.draw_artist(anno)
            ax.draw_artist(pt)
            self.canvas.blit(ax.bbox)
            self.marker_line_info = None
    def close_figure(self):
        """Stop the marker-creation timer when the figure is closed."""
        # NOTE(review): assumes setup_callbacks() already ran; if the figure
        # is closed before that, self.timer is None and this raises -- confirm
        # whether that path is reachable from callers.
        self.timer.stop()
    def setup_callbacks(self):
        """Connect pick/motion/release handlers and start the marker timer."""
        self.canvas.mpl_connect('pick_event', self.on_pick)
        self.canvas.mpl_connect('motion_notify_event', self.on_motion)
        self.canvas.mpl_connect('button_release_event', self.on_button_release)
        # use timer to make sure we won't create multiple markers at once when
        # clicked on overlapping lines.
        self.timer = self.canvas.new_timer(interval=100)
        self.timer.add_callback(self._create_marker)
        self.timer.start()
================================================
FILE: bag/design/LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2018, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: bag/design/__init__.py
================================================
# -*- coding: utf-8 -*-
"""This package defines design template classes.
"""
from .module import Module, ModuleDB, SchInstance, MosModuleBase, ResPhysicalModuleBase, ResMetalModule
__all__ = ['Module', 'ModuleDB', 'SchInstance', 'MosModuleBase', 'ResPhysicalModuleBase', 'ResMetalModule']
================================================
FILE: bag/design/module.py
================================================
# -*- coding: utf-8 -*-
"""This module defines base design module class and primitive design classes.
"""
import os
import abc
from typing import TYPE_CHECKING, List, Dict, Optional, Tuple, Any, Type, Set, Sequence, \
Callable, Union
from ..math import float_to_si_string
from ..io import read_yaml
from ..util.cache import DesignMaster, MasterDB
if TYPE_CHECKING:
from ..core import BagProject
from ..layout.core import TechInfo
class ModuleDB(MasterDB):
    """A database of all schematic modules.

    Keeps track of schematic generator libraries and creates new schematic
    masters, delegating actual instantiation to the BagProject instance.

    Parameters
    ----------
    lib_defs : str
        path to the design library definition file.
    tech_info : TechInfo
        the TechInfo instance.
    sch_exc_libs : List[str]
        list of libraries that are excluded from import.
    prj : Optional[BagProject]
        the BagProject instance.
    name_prefix : str
        generated layout name prefix.
    name_suffix : str
        generated layout name suffix.
    lib_path : str
        path to create generated library in.
    """
    def __init__(self, lib_defs, tech_info, sch_exc_libs, prj=None, name_prefix='',
                 name_suffix='', lib_path=''):
        # type: (str, TechInfo, List[str], Optional[BagProject], str, str, str) -> None
        MasterDB.__init__(self, '', lib_defs=lib_defs, name_prefix=name_prefix,
                          name_suffix=name_suffix)
        self._prj = prj
        self._tech_info = tech_info
        self._exc_libs = set(sch_exc_libs)
        self.lib_path = lib_path
    def create_master_instance(self, gen_cls, lib_name, params, used_cell_names, **kwargs):
        # type: (Type[Module], str, Dict[str, Any], Set[str], **Any) -> Module
        """Create a new non-finalized master instance.

        The returned instance is used to determine whether an identical
        instance was created before.

        Parameters
        ----------
        gen_cls : Type[Module]
            the generator Python class.
        lib_name : str
            generated instance library name.
        params : Dict[str, Any]
            instance parameters dictionary.
        used_cell_names : Set[str]
            a set of all used cell names.
        **kwargs : Any
            optional arguments for the generator.

        Returns
        -------
        master : Module
            the non-finalized generated instance.
        """
        # merge the required generator arguments into a fresh copy of kwargs
        gen_kwargs = dict(kwargs, lib_name=lib_name, params=params,
                          used_names=used_cell_names)
        # noinspection PyTypeChecker
        return gen_cls(self, **gen_kwargs)
    def create_masters_in_db(self, lib_name, content_list, debug=False):
        # type: (str, Sequence[Any], bool) -> None
        """Create the given masters in the design database.

        Parameters
        ----------
        lib_name : str
            library to create the designs in.
        content_list : Sequence[Any]
            a list of the master contents.  Must be created in this order.
        debug : bool
            True to print debug messages.
        """
        prj = self._prj
        if prj is None:
            raise ValueError('BagProject is not defined.')
        prj.instantiate_schematic(lib_name, content_list, lib_path=self.lib_path)
    @property
    def tech_info(self):
        # type: () -> TechInfo
        """the :class:`~bag.layout.core.TechInfo` instance."""
        return self._tech_info
    def is_lib_excluded(self, lib_name):
        # type: (str) -> bool
        """Return True if the given schematic library contains no generators.

        Parameters
        ----------
        lib_name : str
            library name.

        Returns
        -------
        is_excluded : bool
            True if the given library is excluded.
        """
        return lib_name in self._exc_libs
class SchInstance(object):
    """A class representing a schematic instance.

    Parameters
    ----------
    database : ModuleDB
        the schematic generator database.
    gen_lib_name : str
        the schematic generator library name.
    gen_cell_name : str
        the schematic generator cell name.
    inst_name : str
        name of this instance.
    static : bool
        True if the schematic generator is static.
    connections : Optional[Dict[str, str]]
        If given, initialize instance terminal connections to this dictionary.
    master : Optional[Module]
        If given, set the master of this instance.
    parameters : Optional[Dict[str, Any]]
        If given, set the instance parameters to this dictionary.
    """
    def __init__(self,
                 database,  # type: MasterDB
                 gen_lib_name,  # type: str
                 gen_cell_name,  # type: str
                 inst_name,  # type: str
                 static=False,  # type: bool
                 connections=None,  # type: Optional[Dict[str, str]]
                 master=None,  # type: Optional[Module]
                 parameters=None,  # type: Optional[Dict[str, Any]]
                 ):
        # type: (...) -> None
        self._db = database
        self._master = master
        self._name = inst_name
        self._gen_lib_name = gen_lib_name
        self._gen_cell_name = gen_cell_name
        self._static = static
        # terminal name -> net name mapping
        self._term_mapping = {} if connections is None else connections
        self.parameters = {} if parameters is None else parameters
    def change_generator(self, gen_lib_name, gen_cell_name, static=False):
        # type: (str, str, bool) -> None
        """Change the master associated with this instance.

        All instance parameters and terminal mappings will be reset.

        Parameters
        ----------
        gen_lib_name : str
            the new schematic generator library name.
        gen_cell_name : str
            the new schematic generator cell name.
        static : bool
            True if the schematic generator is static.
        """
        self._master = None
        self._gen_lib_name = gen_lib_name
        self._gen_cell_name = gen_cell_name
        self._static = static
        # clear in place so external references to these dicts stay valid
        self.parameters.clear()
        self._term_mapping.clear()
    @property
    def name(self):
        # type: () -> str
        """Returns the instance name."""
        return self._name
    @property
    def connections(self):
        # type: () -> Dict[str, str]
        """Returns the instance terminals connection dictionary."""
        return self._term_mapping
    @property
    def is_primitive(self):
        # type: () -> bool
        """Returns true if this is an instance of a primitive schematic generator."""
        if self._static:
            return True
        if self._master is None:
            raise ValueError('Instance %s has no master. '
                             'Did you forget to call design()?' % self._name)
        return self._master.is_primitive()
    @property
    def should_delete(self):
        # type: () -> bool
        """Returns true if this instance should be deleted."""
        return self._master is not None and self._master.should_delete_instance()
    @property
    def master(self):
        # type: () -> Optional[Module]
        """The schematic master of this instance; None until design() is called."""
        return self._master
    @property
    def master_cell_name(self):
        # type: () -> str
        """Returns the schematic master cell name."""
        return self._gen_cell_name if self._master is None else self._master.cell_name
    @property
    def master_key(self):
        # type: () -> Any
        """A unique key identifying the schematic master.

        NOTE(review): raises AttributeError if the master has not been set
        via design() -- confirm callers always check is_primitive first.
        """
        return self._master.key
    def copy(self, inst_name, connections=None):
        # type: (str, Optional[Dict[str, str]]) -> SchInstance
        """Returns a copy of this SchInstance.

        Parameters
        ----------
        inst_name : str
            the new instance name.
        connections : Optional[Dict[str, str]]
            If given, will set the connections of this instance to this dictionary.

        Returns
        -------
        sch_inst : SchInstance
            a copy of this SchInstance, with connections potentially updated.
        """
        if connections is None:
            connections = self._term_mapping.copy()
        return SchInstance(self._db, self._gen_lib_name, self._gen_cell_name, inst_name,
                           static=self._static, connections=connections, master=self._master,
                           parameters=self.parameters.copy())
    def get_master_lib_name(self, impl_lib):
        # type: (str) -> str
        """Returns the schematic master library name.

        Primitive masters stay in their generator library; generated masters
        live in the implementation library.

        Parameters
        ----------
        impl_lib : str
            library where schematic masters will be created.

        Returns
        -------
        master_lib : str
            the schematic master library name.
        """
        return self._gen_lib_name if self.is_primitive else impl_lib
    def design_specs(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        """Update the instance master."""
        self._update_master('design_specs', args, kwargs)
    def design(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        """Update the instance master."""
        self._update_master('design', args, kwargs)
    def _update_master(self, design_fun, args, kwargs):
        # type: (str, Tuple[Any, ...], Dict[str, Any]) -> None
        """Create a new master by running the named design function."""
        if args:
            # pack positional arguments into kwargs under a key that is
            # guaranteed not to collide with an existing keyword argument;
            # Module.finalize() unpacks them using this key (design_args)
            key = 'args'
            idx = 1
            while key in kwargs:
                key = 'args_%d' % idx
                idx += 1
            kwargs[key] = args
        else:
            key = None
        self._master = self._db.new_master(self._gen_lib_name, self._gen_cell_name,
                                           params=kwargs, design_args=key,
                                           design_fun=design_fun)  # type: Module
        if self._master.is_primitive():
            # primitives carry their parameters on the instance itself
            self.parameters.update(self._master.get_schematic_parameters())
    def implement_design(self, lib_name, top_cell_name='', prefix='', suffix='', **kwargs):
        # type: (str, str, str, str, **Any) -> None
        """Implement this design module in the given library.

        If the given library already exists, this method will not delete or override
        any pre-existing cells in that library.

        If you use this method, you do not need to call update_structure(),
        as this method calls it for you.

        This method only works if BagProject is given.

        Parameters
        ----------
        lib_name : str
            name of the new library to put the generated schematics.
        top_cell_name : str
            the cell name of the top level design.
        prefix : str
            prefix to add to cell names.
        suffix : str
            suffix to add to cell names.
        **kwargs : Any
            additional arguments.
        """
        if 'erase' in kwargs:
            print('DEPRECATED WARNING: erase is no longer supported '
                  'in implement_design() and has no effect')
        debug = kwargs.get('debug', False)
        rename_dict = kwargs.get('rename_dict', None)
        if not top_cell_name:
            top_cell_name = None
        if 'lib_path' in kwargs:
            self._db.lib_path = kwargs['lib_path']
        self._db.cell_prefix = prefix
        self._db.cell_suffix = suffix
        self._db.instantiate_masters([self._master], [top_cell_name], lib_name=lib_name,
                                     debug=debug, rename_dict=rename_dict)
    def get_layout_params(self, **kwargs):
        # type: (Any) -> Dict[str, Any]
        """Backwards compatibility function."""
        if hasattr(self._master, 'get_layout_params'):
            return getattr(self._master, 'get_layout_params')(**kwargs)
        else:
            return kwargs
class Module(DesignMaster, metaclass=abc.ABCMeta):
"""The base class of all schematic generators. This represents a schematic master.
This class defines all the methods needed to implement a design in the CAD database.
Parameters
----------
database : ModuleDB
the design database object.
yaml_fname : str
the netlist information file name.
**kwargs :
additional arguments
Attributes
----------
parameters : dict[str, any]
the design parameters dictionary.
instances : dict[str, None or :class:`~bag.design.Module` or list[:class:`~bag.design.Module`]]
the instance dictionary.
"""
# noinspection PyUnusedLocal
def __init__(self, database, yaml_fname, **kwargs):
# type: (ModuleDB, str, **Any) -> None
lib_name = kwargs['lib_name']
params = kwargs['params']
used_names = kwargs['used_names']
design_fun = kwargs['design_fun']
design_args = kwargs['design_args']
self.tech_info = database.tech_info
self.instances = {} # type: Dict[str, Union[SchInstance, List[SchInstance]]]
self.pin_map = {}
self.new_pins = []
self.parameters = {}
self._pin_list = None
self._yaml_fname = os.path.abspath(yaml_fname)
self.sch_info = read_yaml(self._yaml_fname)
self._orig_lib_name = self.sch_info['lib_name']
self._orig_cell_name = self.sch_info['cell_name']
self._design_fun = design_fun
self._design_args = design_args
# create initial instances and populate instance map
for inst_name, inst_attr in self.sch_info['instances'].items():
lib_name = inst_attr['lib_name']
cell_name = inst_attr['cell_name']
static = database.is_lib_excluded(lib_name)
self.instances[inst_name] = SchInstance(database, lib_name, cell_name, inst_name,
static=static)
# fill in pin map
for pin in self.sch_info['pins']:
self.pin_map[pin] = pin
# initialize schematic master
DesignMaster.__init__(self, database, lib_name, params, used_names)
    @property
    def pin_list(self):
        # type: () -> List[str]
        """List of pin names of this master; None until finalize() runs."""
        return self._pin_list
    @abc.abstractmethod
    def design(self, **kwargs):
        """To be overridden by subclasses to design this module.

        To design instances of this module, you can
        call their :meth:`.design` method or any other ways you coded.

        To modify schematic structure, call:

        :meth:`.rename_pin`

        :meth:`.delete_instance`

        :meth:`.replace_instance_master`

        :meth:`.reconnect_instance_terminal`

        :meth:`.array_instance`

        This method is invoked by :meth:`.finalize` with the stored
        design parameters.
        """
        pass
    def finalize(self):
        # type: () -> None
        """Finalize this master instance.

        Runs the configured design function with the stored parameters, then
        collects the children master keys and computes the pin list.  The
        design function must run first, since it populates the instances.
        """
        # invoke design function
        fun = getattr(self, self._design_fun)
        if self._design_args:
            # positional arguments were packed under this key by
            # SchInstance._update_master()
            args = self.params.pop(self._design_args)
            fun(*args, **self.params)
        else:
            fun(**self.params)
        # backwards compatibility
        if self.key is None:
            # old-style master: rebuild params from the parameters dict
            # populated by the design function
            self.params.clear()
            self.params.update(self.parameters)
            self.update_master_info()
        self.children = set()
        for inst_list in self.instances.values():
            if isinstance(inst_list, SchInstance):
                # single instance
                if not inst_list.is_primitive:
                    self.children.add(inst_list.master_key)
            else:
                # arrayed instance: a list of SchInstance
                for inst in inst_list:
                    if not inst.is_primitive:
                        self.children.add(inst.master_key)
        # compute pins
        self._pin_list = [pin_name for pin_name, _ in self.new_pins]
        # pins renamed to '' (by remove_pin) are falsy and thus skipped here
        self._pin_list.extend((val for val in self.pin_map.values() if val))
        # call super finalize routine
        super(Module, self).finalize()
    @classmethod
    def get_params_info(cls):
        # type: () -> Optional[Dict[str, str]]
        """Returns a dictionary from parameter names to descriptions.

        Subclasses should override this; the base implementation returns None.

        Returns
        -------
        param_info : Optional[Dict[str, str]]
            dictionary from parameter names to descriptions.
        """
        return None
    def get_master_basename(self):
        # type: () -> str
        """Returns the base name to use for this instance.

        This is the cell name read from the schematic template YAML file.

        Returns
        -------
        basename : str
            the base name for this instance.
        """
        return self._orig_cell_name
    def get_content(self, lib_name, rename_fun):
        # type: (str, Callable[[str], str]) -> Optional[Tuple[Any,...]]
        """Returns the content of this master instance.

        Parameters
        ----------
        lib_name : str
            the library to create the design masters in.
        rename_fun : Callable[[str], str]
            a function that renames design masters.

        Returns
        -------
        content : Optional[Tuple[Any,...]]
            the master content data structure, or None for primitives (which
            are never copied into the implementation library).
        """
        if self.is_primitive():
            return None
        # populate instance transform mapping dictionary
        inst_map = {}
        for inst_name, inst_list in self.instances.items():
            if isinstance(inst_list, SchInstance):
                # normalize single instances to one-element lists
                inst_list = [inst_list]
            info_list = []
            for inst in inst_list:
                if not inst.should_delete:
                    cur_lib = inst.get_master_lib_name(lib_name)
                    # primitive cells keep their own name; generated cells are
                    # renamed into the implementation library
                    info_list.append(dict(
                        name=inst.name,
                        lib_name=cur_lib,
                        cell_name= inst.master_cell_name if inst.is_primitive else rename_fun(inst.master_cell_name),
                        params=inst.parameters,
                        term_mapping=inst.connections,
                    ))
            inst_map[inst_name] = info_list
        return (self._orig_lib_name, self._orig_cell_name, rename_fun(self.cell_name),
                self.pin_map, inst_map, self.new_pins)
    @property
    def cell_name(self):
        # type: () -> str
        """The master cell name.

        Primitives may pick a different cell based on their parameters.
        """
        if self.is_primitive():
            return self.get_cell_name_from_parameters()
        return super(Module, self).cell_name
    @property
    def orig_cell_name(self):
        # type: () -> str
        """The original schematic template cell name, as read from the YAML file."""
        return self._orig_cell_name
    def is_primitive(self):
        # type: () -> bool
        """Returns True if this Module represents a BAG primitive.

        NOTE: This method is only used by BAG and schematic primitives.  This method prevents
        the module from being copied during design implementation.  Custom subclasses should
        not override this method.

        Returns
        -------
        is_primitive : bool
            True if this Module represents a BAG primitive.  Defaults to False.
        """
        return False
    def should_delete_instance(self):
        # type: () -> bool
        """Returns True if this instance should be deleted based on its parameters.

        This method is mainly used to delete 0 finger or 0 width transistors.  However,
        You can override this method if there exists parameter settings which corresponds
        to an empty schematic.

        Returns
        -------
        delete : bool
            True if parent should delete this instance.  Defaults to False.
        """
        return False
def get_schematic_parameters(self):
# type: () -> Dict[str, str]
"""Returns the schematic parameter dictionary of this instance.
NOTE: This method is only used by BAG primitives, as they are
implemented with parameterized cells in the CAD database. Custom
subclasses should not override this method.
Returns
-------
params : Dict[str, str]
the schematic parameter dictionary.
"""
return {}
def get_cell_name_from_parameters(self):
"""Returns new cell name based on parameters.
NOTE: This method is only used by BAG primitives. This method
enables a BAG primitive to change the cell master based on
design parameters (e.g. change transistor instance based on the
intent parameter). Custom subclasses should not override this
method.
Returns
-------
cell : str
the cell name based on parameters.
"""
return super(Module, self).cell_name
def rename_pin(self, old_pin, new_pin):
# type: (str, str) -> None
"""Renames an input/output pin of this schematic.
NOTE: Make sure to call :meth:`.reconnect_instance_terminal` so that instances are
connected to the new pin.
Parameters
----------
old_pin : str
the old pin name.
new_pin : str
the new pin name.
"""
self.pin_map[old_pin] = new_pin
def add_pin(self, new_pin, pin_type):
# type: (str, str) -> None
"""Adds a new pin to this schematic.
NOTE: Make sure to call :meth:`.reconnect_instance_terminal` so that instances are
connected to the new pin.
Parameters
----------
new_pin : str
the new pin name.
pin_type : str
the new pin type. We current support "input", "output", or "inputOutput"
"""
self.new_pins.append([new_pin, pin_type])
def remove_pin(self, remove_pin):
# type: (str) -> None
"""Removes a pin from this schematic.
Parameters
----------
remove_pin : str
the pin to remove.
"""
self.rename_pin(remove_pin, '')
def delete_instance(self, inst_name):
# type: (str) -> None
"""Delete the instance with the given name.
Parameters
----------
inst_name : str
the child instance to delete.
"""
self.instances[inst_name] = []
def replace_instance_master(self, inst_name, lib_name, cell_name, static=False, index=None):
# type: (str, str, str, bool, Optional[int]) -> None
"""Replace the master of the given instance.
NOTE: all terminal connections will be reset. Call reconnect_instance_terminal() to modify
terminal connections.
Parameters
----------
inst_name : str
the child instance to replace.
lib_name : str
the new library name.
cell_name : str
the new cell name.
static : bool
True if we're replacing instance with a static schematic instead of a design module.
index : Optional[int]
If index is not None and the child instance has been arrayed, this is the instance
array index that we are replacing.
If index is None, the entire child instance (whether arrayed or not) will be replaced
by a single new instance.
"""
if inst_name not in self.instances:
raise ValueError('Cannot find instance with name: %s' % inst_name)
# check if this is arrayed
if index is not None and isinstance(self.instances[inst_name], list):
self.instances[inst_name][index].change_generator(lib_name, cell_name, static=static)
else:
self.instances[inst_name] = SchInstance(self.master_db, lib_name, cell_name, inst_name,
static=static)
def reconnect_instance_terminal(self, inst_name, term_name, net_name, index=None):
"""Reconnect the instance terminal to a new net.
Parameters
----------
inst_name : str
the child instance to modify.
term_name : Union[str, List[str]]
the instance terminal name to reconnect.
If a list is given, it is applied to each arrayed instance.
net_name : Union[str, List[str]]
the net to connect the instance terminal to.
If a list is given, it is applied to each arrayed instance.
index : Optional[int]
If not None and the given instance is arrayed, will only modify terminal
connection for the instance at the given index.
If None and the given instance is arrayed, all instances in the array
will be reconnected.
"""
if index is not None:
# only modify terminal connection for one instance in the array
if isinstance(term_name, str) and isinstance(net_name, str):
self.instances[inst_name][index].connections[term_name] = net_name
else:
raise ValueError('If index is not None, '
'both term_name and net_name must be string.')
else:
# modify terminal connection for all instances in the array
cur_inst_list = self.instances[inst_name]
if isinstance(cur_inst_list, SchInstance):
cur_inst_list = [cur_inst_list]
num_insts = len(cur_inst_list)
if not isinstance(term_name, list) and not isinstance(term_name, tuple):
if not isinstance(term_name, str):
raise ValueError('term_name = %s must be string.' % term_name)
term_name = [term_name] * num_insts
else:
if len(term_name) != num_insts:
raise ValueError('term_name length = %d != %d' % (len(term_name), num_insts))
if not isinstance(net_name, list) and not isinstance(net_name, tuple):
if not isinstance(net_name, str):
raise ValueError('net_name = %s must be string.' % net_name)
net_name = [net_name] * num_insts
else:
if len(net_name) != num_insts:
raise ValueError('net_name length = %d != %d' % (len(net_name), num_insts))
for inst, tname, nname in zip(cur_inst_list, term_name, net_name):
inst.connections[tname] = nname
def array_instance(self, inst_name, inst_name_list, term_list=None):
# type: (str, List[str], Optional[List[Dict[str, str]]]) -> None
"""Replace the given instance by an array of instances.
This method will replace self.instances[inst_name] by a list of
Modules. The user can then design each of those modules.
Parameters
----------
inst_name : str
the instance to array.
inst_name_list : List[str]
a list of the names for each array item.
term_list : Optional[List[Dict[str, str]]]
a list of modified terminal connections for each array item. The keys are
instance terminal names, and the values are the net names to connect
them to. Only terminal connections different than the parent instance
should be listed here.
If None, assume terminal connections are not changed.
"""
num_inst = len(inst_name_list)
if not term_list:
term_list = [None] * num_inst
if num_inst != len(term_list):
msg = 'len(inst_name_list) = %d != len(term_list) = %d'
raise ValueError(msg % (num_inst, len(term_list)))
orig_inst = self.instances[inst_name]
if not isinstance(orig_inst, SchInstance):
raise ValueError('Instance %s is already arrayed.' % inst_name)
self.instances[inst_name] = [orig_inst.copy(iname, connections=iterm)
for iname, iterm in zip(inst_name_list, term_list)]
def design_dc_bias_sources(self, # type: Module
vbias_dict, # type: Optional[Dict[str, List[str]]]
ibias_dict, # type: Optional[Dict[str, List[str]]]
vinst_name, # type: str
iinst_name, # type: str
define_vdd=True, # type: bool
):
# type: (...) -> None
"""Convenience function for generating DC bias sources.
Given DC voltage/current bias sources information, array the given voltage/current bias
sources and configure the voltage/current.
Each bias dictionary is a dictionary from bias source name to a 3-element list. The first
two elements are the PLUS/MINUS net names, respectively, and the third element is the DC
voltage/current value as a string or float. A variable name can be given to define a
testbench parameter.
Parameters
----------
vbias_dict : Optional[Dict[str, List[str]]]
the voltage bias dictionary. None or empty to disable.
ibias_dict : Optional[Dict[str, List[str]]]
the current bias dictionary. None or empty to disable.
vinst_name : str
the DC voltage source instance name.
iinst_name : str
the DC current source instance name.
define_vdd : bool
True to include a supply voltage source connected to VDD/VSS, with voltage value 'vdd'.
"""
if define_vdd and 'SUP' not in vbias_dict:
vbias_dict = vbias_dict.copy()
vbias_dict['SUP'] = ['VDD', 'VSS', 'vdd']
for bias_dict, name_template, param_name, inst_name in \
((vbias_dict, 'V%s', 'vdc', vinst_name), (ibias_dict, 'I%s', 'idc', iinst_name)):
if bias_dict:
name_list, term_list, val_list, param_dict_list = [], [], [], []
for name in sorted(bias_dict.keys()):
value_tuple = bias_dict[name]
pname, nname, bias_val = value_tuple[:3]
param_dict = value_tuple[3] if len(value_tuple) > 3 \
else None # type: Optional[Dict]
term_list.append(dict(PLUS=pname, MINUS=nname))
name_list.append(name_template % name)
param_dict_list.append(param_dict)
if isinstance(bias_val, str):
val_list.append(bias_val)
elif isinstance(bias_val, int) or isinstance(bias_val, float):
val_list.append(float_to_si_string(bias_val))
else:
raise ValueError('value %s of type %s '
'not supported' % (bias_val, type(bias_val)))
self.array_instance(inst_name, name_list, term_list=term_list)
for inst, val, param_dict in zip(self.instances[inst_name], val_list,
param_dict_list):
inst.parameters[param_name] = val
if param_dict is not None:
for k, v in param_dict.items():
if isinstance(v, str):
pass
elif isinstance(v, int) or isinstance(v, float):
v = float_to_si_string(v)
else:
raise ValueError('value %s of type %s not supported' % (v, type(v)))
inst.parameters[k] = v
else:
self.delete_instance(inst_name)
def design_dummy_transistors(self, dum_info, inst_name, vdd_name, vss_name, net_map=None):
# type: (List[Tuple[Any]], str, str, str, Optional[Dict[str, str]]) -> None
"""Convenience function for generating dummy transistor schematic.
Given dummy information (computed by AnalogBase) and a BAG transistor instance,
this method generates dummy schematics by arraying and modifying the BAG
transistor instance.
Parameters
----------
dum_info : List[Tuple[Any]]
the dummy information data structure.
inst_name : str
the BAG transistor instance name.
vdd_name : str
VDD net name. Used for PMOS dummies.
vss_name : str
VSS net name. Used for NMOS dummies.
net_map : Optional[Dict[str, str]]
optional net name transformation mapping.
"""
if not dum_info:
self.delete_instance(inst_name)
else:
num_arr = len(dum_info)
arr_name_list = ['XDUMMY%d' % idx for idx in range(num_arr)]
self.array_instance(inst_name, arr_name_list)
for idx, ((mos_type, w, lch, th, s_net, d_net), fg) in enumerate(dum_info):
if mos_type == 'pch':
cell_name = 'pmos4_standard'
sup_name = vdd_name
else:
cell_name = 'nmos4_standard'
sup_name = vss_name
if net_map is not None:
s_net = net_map.get(s_net, s_net)
d_net = net_map.get(d_net, d_net)
s_name = s_net if s_net else sup_name
d_name = d_net if d_net else sup_name
self.replace_instance_master(inst_name, 'BAG_prim', cell_name, index=idx)
self.reconnect_instance_terminal(inst_name, 'G', sup_name, index=idx)
self.reconnect_instance_terminal(inst_name, 'B', sup_name, index=idx)
self.reconnect_instance_terminal(inst_name, 'D', d_name, index=idx)
self.reconnect_instance_terminal(inst_name, 'S', s_name, index=idx)
self.instances[inst_name][idx].design(w=w, l=lch, nf=fg, intent=th)
class MosModuleBase(Module):
    """The base design class for the bag primitive transistor.

    Parameters
    ----------
    database : ModuleDB
        the design database object.
    yaml_file : str
        the netlist information file name.
    **kwargs :
        additional arguments
    """

    def __init__(self, database, yaml_file, **kwargs):
        Module.__init__(self, database, yaml_file, **kwargs)

    @classmethod
    def get_params_info(cls):
        # type: () -> Dict[str, str]
        """Describes every design parameter of this primitive."""
        return dict(
            w='transistor width, in meters or number of fins.',
            l='transistor length, in meters.',
            nf='transistor number of fingers.',
            intent='transistor threshold flavor.',
        )

    def design(self, w=1e-6, l=60e-9, nf=1, intent='standard'):
        """No-op; the primitive is fully described by its parameters."""
        pass

    def get_schematic_parameters(self):
        # type: () -> Dict[str, str]
        """Returns the CAD pcell parameter strings, snapped to resolution."""
        mos_tech = self.tech_info.tech_params['mos']
        w_res = mos_tech['width_resolution']
        l_res = mos_tech['length_resolution']
        w_val = self.params['w']
        l_val = self.params['l']
        nf_val = self.params['nf']
        # string parameters (e.g. variable names) pass through untouched;
        # numbers are quantized to the technology resolution
        if isinstance(w_val, str):
            wstr = w_val
        else:
            wstr = float_to_si_string(int(round(w_val / w_res)) * w_res)
        if isinstance(l_val, str):
            lstr = l_val
        else:
            lstr = float_to_si_string(int(round(l_val / l_res)) * l_res)
        nstr = nf_val if isinstance(nf_val, str) else '%d' % nf_val
        return dict(w=wstr, l=lstr, nf=nstr)

    def get_cell_name_from_parameters(self):
        # type: () -> str
        """Select the transistor cell from the threshold intent parameter."""
        prefix = self.orig_cell_name.split('_')[0]
        return '%s_%s' % (prefix, self.params['intent'])

    def is_primitive(self):
        # type: () -> bool
        """This module is a BAG primitive."""
        return True

    def should_delete_instance(self):
        # type: () -> bool
        """A zero-finger, zero-width, or zero-length transistor is deleted."""
        return any(self.params[key] == 0 for key in ('nf', 'w', 'l'))
class ResPhysicalModuleBase(Module):
    """The base design class for a real resistor parametrized by width and length.

    Parameters
    ----------
    database : ModuleDB
        the design database object.
    yaml_file : str
        the netlist information file name.
    **kwargs :
        additional arguments
    """

    def __init__(self, database, yaml_file, **kwargs):
        Module.__init__(self, database, yaml_file, **kwargs)

    @classmethod
    def get_params_info(cls):
        # type: () -> Dict[str, str]
        """Describes every design parameter of this primitive."""
        return dict(
            w='resistor width, in meters.',
            l='resistor length, in meters.',
            intent='resistor flavor.',
        )

    def design(self, w=1e-6, l=1e-6, intent='standard'):
        """No-op; the primitive is fully described by its parameters."""
        pass

    def get_schematic_parameters(self):
        # type: () -> Dict[str, str]
        """Returns the CAD pcell parameter strings for this resistor."""
        w_val = self.params['w']
        l_val = self.params['l']
        # string parameters (variable names) pass through untouched
        wstr = w_val if isinstance(w_val, str) else float_to_si_string(w_val)
        lstr = l_val if isinstance(l_val, str) else float_to_si_string(l_val)
        return dict(w=wstr, l=lstr)

    def get_cell_name_from_parameters(self):
        # type: () -> str
        """Select the resistor cell from the flavor parameter."""
        return 'res_%s' % self.params['intent']

    def is_primitive(self):
        # type: () -> bool
        """This module is a BAG primitive."""
        return True

    def should_delete_instance(self):
        # type: () -> bool
        """A zero-width or zero-length resistor is deleted."""
        return any(self.params[key] == 0 for key in ('w', 'l'))
class ResMetalModule(Module):
    """The base design class for a metal resistor.

    Parameters
    ----------
    database : ModuleDB
        the design database object.
    yaml_file : str
        the netlist information file name.
    **kwargs :
        additional arguments
    """

    def __init__(self, database, yaml_file, **kwargs):
        Module.__init__(self, database, yaml_file, **kwargs)

    @classmethod
    def get_params_info(cls):
        # type: () -> Dict[str, str]
        """Describes every design parameter of this primitive."""
        return dict(
            w='resistor width, in meters.',
            l='resistor length, in meters.',
            layer='the metal layer ID.',
        )

    def design(self, w, l, layer):
        # type: (float, float, int) -> None
        """No-op; the primitive is fully described by its parameters."""
        pass

    def get_schematic_parameters(self):
        # type: () -> Dict[str, str]
        """Returns the CAD pcell parameter strings for this metal resistor."""
        return dict(
            w=float_to_si_string(self.params['w']),
            l=float_to_si_string(self.params['l']),
            layer=str(self.params['layer']),
        )

    def is_primitive(self):
        # type: () -> bool
        """This module is a BAG primitive."""
        return True

    def should_delete_instance(self):
        # type: () -> bool
        """A zero-width or zero-length resistor is deleted."""
        return any(self.params[key] == 0 for key in ('w', 'l'))
================================================
FILE: bag/interface/LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2018, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: bag/interface/__init__.py
================================================
# -*- coding: utf-8 -*-
"""This packages defines classes to interface with CAD database and circuit simulators.
"""
from .server import SkillServer
from .zmqwrapper import ZMQRouter, ZMQDealer
__all__ = ['SkillServer', 'ZMQRouter', 'ZMQDealer', ]
================================================
FILE: bag/interface/base.py
================================================
# -*- coding: utf-8 -*-
"""This module defines the base of all interface classes.
"""
from typing import Dict, Any
from ..io.template import new_template_env
class InterfaceBase:
    """The base class of all interfaces.

    Provides helper methods shared by every interface implementation.
    """

    def __init__(self):
        # template environment used to render the files shipped with
        # the bag.interface package
        self._tmp_env = new_template_env('bag.interface', 'templates')

    def render_file_template(self, temp_name, params):
        # type: (str, Dict[str, Any]) -> str
        """Returns the rendered content from the given template file."""
        tmpl = self._tmp_env.get_template(temp_name)
        return tmpl.render(**params)
================================================
FILE: bag/interface/database.py
================================================
# -*- coding: utf-8 -*-
"""This module defines DbAccess, the base class for CAD database manipulation.
"""
from typing import TYPE_CHECKING, List, Dict, Tuple, Optional, Sequence, Any, Union
import os
import abc
import traceback
import yaml
from ..io.file import make_temp_dir, read_file, write_file
from ..verification import make_checker
from .base import InterfaceBase
if TYPE_CHECKING:
from ..verification import Checker
def dict_to_item_list(table):
    """Given a Python dictionary, convert to sorted item list.

    Parameters
    ----------
    table : dict[str, any]
        a Python dictionary where the keys are strings.

    Returns
    -------
    assoc_list : list[(str, str)]
        the sorted item list representation of the given dictionary.
    """
    # sort by key so the output is deterministic
    return [[key, table[key]] for key in sorted(table)]
def format_inst_map(inst_map):
    """Given instance map from DesignModule, format it for database changes.

    Parameters
    ----------
    inst_map : Dict[str, Any]
        the instance map created by DesignModule.

    Returns
    -------
    ans : List[(str, Any)]
        the database change instance map.
    """
    ans = []
    for old_inst_name, rinst_list in inst_map.items():
        formatted = []
        for rinst in rinst_list:
            # parameter/terminal dictionaries become sorted item lists
            formatted.append(dict(name=rinst['name'],
                                  lib_name=rinst['lib_name'],
                                  cell_name=rinst['cell_name'],
                                  params=dict_to_item_list(rinst['params']),
                                  term_mapping=dict_to_item_list(rinst['term_mapping']),
                                  ))
        ans.append([old_inst_name, formatted])
    return ans
class DbAccess(InterfaceBase, abc.ABC):
"""A class that manipulates the CAD database.
Parameters
----------
tmp_dir : str
temporary file directory for DbAccess.
db_config : Dict[str, Any]
the database configuration dictionary.
"""
def __init__(self, tmp_dir, db_config):
# type: (str, Dict[str, Any]) -> None
InterfaceBase.__init__(self)
self.tmp_dir = make_temp_dir('dbTmp', parent_dir=tmp_dir)
self.db_config = db_config
self.exc_libs = set(db_config['schematic']['exclude_libraries'])
# noinspection PyBroadException
try:
check_kwargs = self.db_config['checker'].copy()
check_kwargs['tmp_dir'] = self.tmp_dir
self.checker = make_checker(**check_kwargs) # type: Optional[Checker]
except Exception:
stack_trace = traceback.format_exc()
print('*WARNING* error creating Checker:\n%s' % stack_trace)
print('*WARNING* LVS/RCX will be disabled.')
self.checker = None # type: Optional[Checker]
# set default lib path
self._default_lib_path = self.get_default_lib_path(db_config)
@classmethod
def get_default_lib_path(cls, db_config):
lib_path_fallback = os.path.abspath('.')
default_lib_path = os.path.abspath(db_config.get('default_lib_path', lib_path_fallback))
if not os.path.isdir(default_lib_path):
default_lib_path = lib_path_fallback
return default_lib_path
@property
def default_lib_path(self):
"""Returns the default directory to create new libraries in.
Returns
-------
lib_path : string
directory to create new libraries in.
"""
return self._default_lib_path
@abc.abstractmethod
def close(self):
"""Terminate the database server gracefully.
"""
pass
@abc.abstractmethod
def parse_schematic_template(self, lib_name, cell_name):
"""Parse the given schematic template.
Parameters
----------
lib_name : str
name of the library.
cell_name : str
name of the cell.
Returns
-------
template : str
the content of the netlist structure file.
"""
return ""
@abc.abstractmethod
def get_cells_in_library(self, lib_name):
"""Get a list of cells in the given library.
Returns an empty list if the given library does not exist.
Parameters
----------
lib_name : str
the library name.
Returns
-------
cell_list : list[str]
a list of cells in the library
"""
return []
@abc.abstractmethod
def create_library(self, lib_name, lib_path=''):
"""Create a new library if one does not exist yet.
Parameters
----------
lib_name : string
the library name.
lib_path : string
directory to create the library in. If Empty, use default location.
"""
pass
@abc.abstractmethod
def create_implementation(self, lib_name, template_list, change_list, lib_path=''):
"""Create implementation of a design in the CAD database.
Parameters
----------
lib_name : str
implementation library name.
template_list : list
a list of schematic templates to copy to the new library.
change_list :
a list of changes to be performed on each copied templates.
lib_path : str
directory to create the library in. If Empty, use default location.
"""
pass
@abc.abstractmethod
def configure_testbench(self, tb_lib, tb_cell):
"""Configure testbench state for the given testbench.
This method fill in process-specific information for the given testbench.
Parameters
----------
tb_lib : str
testbench library name.
tb_cell : str
testbench cell name.
Returns
-------
cur_env : str
the current simulation environment.
envs : list[str]
a list of available simulation environments.
parameters : dict[str, str]
a list of testbench parameter values, represented as string.
"""
return "", [], {}
@abc.abstractmethod
def get_testbench_info(self, tb_lib, tb_cell):
"""Returns information about an existing testbench.
Parameters
----------
tb_lib : str
testbench library.
tb_cell : str
testbench cell.
Returns
-------
cur_envs : list[str]
the current simulation environments.
envs : list[str]
a list of available simulation environments.
parameters : dict[str, str]
a list of testbench parameter values, represented as string.
outputs : dict[str, str]
a list of testbench output expressions.
"""
return [], [], {}, {}
@abc.abstractmethod
def update_testbench(self, # type: DbAccess
lib, # type: str
cell, # type: str
parameters, # type: Dict[str, str]
sim_envs, # type: Sequence[str]
config_rules, # type: Sequence[List[str]]
env_parameters, # type: Sequence[List[Tuple[str, str]]]
):
# type: (...) -> None
"""Update the given testbench configuration.
Parameters
----------
lib : str
testbench library.
cell : str
testbench cell.
parameters : Dict[str, str]
testbench parameters.
sim_envs : Sequence[str]
list of enabled simulation environments.
config_rules : Sequence[List[str]]
config view mapping rules, list of (lib, cell, view) rules.
env_parameters : Sequence[List[Tuple[str, str]]]
list of param/value list for each simulation environment.
"""
pass
@abc.abstractmethod
def instantiate_layout_pcell(self, lib_name, cell_name, view_name,
inst_lib, inst_cell, params, pin_mapping):
"""Create a layout cell with a single pcell instance.
Parameters
----------
lib_name : str
layout library name.
cell_name : str
layout cell name.
view_name : str
layout view name, default is "layout".
inst_lib : str
pcell library name.
inst_cell : str
pcell cell name.
params : dict[str, any]
the parameter dictionary.
pin_mapping: dict[str, str]
the pin mapping dictionary.
"""
pass
@abc.abstractmethod
def instantiate_layout(self, lib_name, view_name, via_tech, layout_list):
# type: (str, str, str, Sequence[Any]) -> None
"""Create a batch of layouts.
Parameters
----------
lib_name : str
layout library name.
view_name : str
layout view name.
via_tech : str
via technology library name.
layout_list : Sequence[Any]
a list of layouts to create
"""
pass
@abc.abstractmethod
def release_write_locks(self, lib_name, cell_view_list):
# type: (str, Sequence[Tuple[str, str]]) -> None
"""Release write locks from all the given cells.
Parameters
----------
lib_name : str
the library name.
cell_view_list : Sequence[Tuple[str, str]]
list of cell/view name tuples.
"""
pass
@abc.abstractmethod
def create_schematic_from_netlist(self, netlist, lib_name, cell_name,
sch_view=None, **kwargs):
# type: (str, str, str, Optional[str], **Any) -> None
"""Create a schematic from a netlist.
This is mainly used to create extracted schematic from an extracted netlist.
Parameters
----------
netlist : str
the netlist file name.
lib_name : str
library name.
cell_name : str
cell_name
sch_view : Optional[str]
schematic view name. The default value is implemendation dependent.
**kwargs : Any
additional implementation-dependent arguments.
"""
pass
@abc.abstractmethod
def create_verilog_view(self, verilog_file, lib_name, cell_name, **kwargs):
# type: (str, str, str, **Any) -> None
"""Create a verilog view for mix-signal simulation.
Parameters
----------
verilog_file : str
the verilog file name.
lib_name : str
library name.
cell_name : str
cell name.
**kwargs : Any
additional implementation-dependent arguments.
"""
pass
def get_python_template(self, lib_name, cell_name, primitive_table):
# type: (str, str, Dict[str, str]) -> str
"""Returns the default Python Module template for the given schematic.
Parameters
----------
lib_name : str
the library name.
cell_name : str
the cell name.
primitive_table : Dict[str, str]
a dictionary from primitive cell name to module template file name.
Returns
-------
template : str
the default Python Module template.
"""
param_dict = dict(lib_name=lib_name, cell_name=cell_name)
if lib_name == 'BAG_prim':
if cell_name in primitive_table:
# load template from user defined file
template = self._tmp_env.from_string(read_file(primitive_table[cell_name]))
return template.render(**param_dict)
else:
if cell_name.startswith('nmos4_') or cell_name.startswith('pmos4_'):
# transistor template
module_name = 'MosModuleBase'
elif cell_name == 'res_ideal':
# ideal resistor template
module_name = 'ResIdealModuleBase'
elif cell_name == 'res_metal':
module_name = 'ResMetalModule'
elif cell_name == 'cap_ideal':
# ideal capacitor template
module_name = 'CapIdealModuleBase'
elif cell_name.startswith('res_'):
# physical resistor template
module_name = 'ResPhysicalModuleBase'
else:
raise Exception('Unknown primitive cell: %s' % cell_name)
param_dict['module_name'] = module_name
return self.render_file_template('PrimModule.pyi', param_dict)
else:
# use default empty template.
return self.render_file_template('Module.pyi', param_dict)
def _process_rcx_output(self, netlist, log_fname, lib_name, cell_name, create_schematic):
if create_schematic:
if netlist is None:
return False, log_fname
if netlist:
# create schematic only if netlist name is not empty.
self.create_schematic_from_netlist(netlist, lib_name, cell_name)
return True, log_fname
else:
return netlist, log_fname
async def async_run_lvs(self, lib_name: str, cell_name: str, **kwargs: Any) -> Tuple[bool, str]:
"""A coroutine for running LVS.
Parameters
----------
lib_name : str
library name.
cell_name : str
cell_name
**kwargs : Any
optional keyword arguments. See Checker class for details.
LVS parameters should be specified as lvs_params.
Returns
-------
value : bool
True if LVS succeeds
log_fname : str
name of the LVS log file.
"""
if self.checker is None:
raise Exception('LVS/RCX is disabled.')
kwargs['params'] = kwargs.pop('lvs_params', None)
return await self.checker.async_run_lvs(lib_name, cell_name, **kwargs)
async def async_run_rcx(self, # type: DbAccess
lib_name: str,
cell_name: str,
create_schematic: bool = True,
**kwargs: Any
) -> Tuple[Union[bool, Optional[str]], str]:
"""Run RCX on the given cell.
The behavior and the first return value of this method depends on the
input arguments. The second return argument will always be the RCX
log file name.
If create_schematic is True, this method will run RCX, then if it succeeds,
create a schematic of the extracted netlist in the database. It then returns
a boolean value which will be True if RCX succeeds.
If create_schematic is False, this method will run RCX, then return a string
which is the extracted netlist filename. If RCX failed, None will be returned
instead.
Parameters
----------
lib_name : str
library name.
cell_name : str
cell_name
override RCX parameter values.
create_schematic : bool
True to automatically create extracted schematic in database if RCX
is successful and it is supported.
**kwargs : Any
optional keyword arguments. See Checker class for details.
RCX parameters should be specified as rcx_params.
Returns
-------
value : Union[bool, Optional[str]]
The return value, as described.
log_fname : str
name of the RCX log file.
"""
kwargs['params'] = kwargs.pop('rcx_params', None)
netlist, log_fname = await self.checker.async_run_rcx(lib_name, cell_name, **kwargs)
return self._process_rcx_output(netlist, log_fname, lib_name, cell_name, create_schematic)
async def async_export_layout(self, lib_name: str, cell_name: str,
out_file: str, *args: Any, **kwargs: Any) -> str:
"""Export layout.
Parameters
----------
lib_name : str
library name.
cell_name : str
cell name.
out_file : str
output file name.
*args : Any
optional list arguments.
**kwargs : Any
optional keyword arguments. See Checker class for details.
Returns
-------
log_fname : str
log file name. Empty if task cancelled.
"""
if self.checker is None:
raise Exception('layout export is disabled.')
return await self.checker.async_export_layout(lib_name, cell_name, out_file,
*args, **kwargs)
def import_design_library(self, lib_name, dsn_db, new_lib_path):
"""Import all design templates in the given library from CAD database.
Parameters
----------
lib_name : str
name of the library.
dsn_db : ModuleDB
the design database object.
new_lib_path: str
location to import new libraries to.
"""
imported_cells = set()
for cell_name in self.get_cells_in_library(lib_name):
self._import_design(lib_name, cell_name, imported_cells, dsn_db, new_lib_path)
def import_sch_cellview(self, lib_name, cell_name, dsn_db, new_lib_path):
"""Import the given schematic and symbol template into Python.
This import process is done recursively.
Parameters
----------
lib_name : str
library name.
cell_name : str
cell name.
dsn_db : ModuleDB
the design database object.
new_lib_path: str
location to import new libraries to.
"""
imported_cells = set()
self._import_design(lib_name, cell_name, imported_cells, dsn_db, new_lib_path)
def _import_design(self, lib_name, cell_name, imported_cells, dsn_db, new_lib_path):
"""Recursive helper for import_design_library.
"""
# check if we already imported this schematic
key = '%s__%s' % (lib_name, cell_name)
if key in imported_cells:
return
imported_cells.add(key)
# create root directory if missing
root_path = dsn_db.get_library_path(lib_name)
if root_path is None:
root_path = new_lib_path
dsn_db.append_library(lib_name, new_lib_path)
package_path = os.path.join(root_path, lib_name)
python_file = os.path.join(package_path, '%s.py' % cell_name)
yaml_file = os.path.join(package_path, 'netlist_info', '%s.yaml' % cell_name)
yaml_dir = os.path.dirname(yaml_file)
if not os.path.exists(yaml_dir):
os.makedirs(yaml_dir)
write_file(os.path.join(package_path, '__init__.py'), '\n',
mkdir=False)
# update netlist file
content = self.parse_schematic_template(lib_name, cell_name)
sch_info = yaml.load(content, Loader=yaml.Loader)
try:
write_file(yaml_file, content)
except IOError:
print('Warning: cannot write to %s.' % yaml_file)
# generate new design module file if necessary.
if not os.path.exists(python_file):
content = self.get_python_template(lib_name, cell_name,
self.db_config.get('prim_table', {}))
write_file(python_file, content + '\n', mkdir=False)
# recursively import all children
for inst_name, inst_attrs in sch_info['instances'].items():
inst_lib_name = inst_attrs['lib_name']
if inst_lib_name not in self.exc_libs:
inst_cell_name = inst_attrs['cell_name']
self._import_design(inst_lib_name, inst_cell_name, imported_cells, dsn_db,
new_lib_path)
def instantiate_schematic(self, lib_name, content_list, lib_path=''):
    """Create the given schematics in CAD database.

    Parameters
    ----------
    lib_name : str
        name of the new library to put the concrete schematics.
    content_list : Sequence[Any]
        list of schematics to create.
    lib_path : str
        the path to create the library in.  If empty, use default location.
    """
    template_list = []
    change_list = []
    for content in content_list:
        if content is None:
            continue
        master_lib, master_cell, impl_cell, pin_map, inst_map, new_pins = content
        # record the template to copy into the implementation library
        template_list.append([master_lib, master_cell, impl_cell])
        # record the edits (pin renames / instance changes) for this template
        change_list.append(dict(
            name=impl_cell,
            pin_map=dict_to_item_list(pin_map),
            inst_list=format_inst_map(inst_map),
            new_pins=new_pins,
        ))
    self.create_implementation(lib_name, template_list, change_list, lib_path=lib_path)
================================================
FILE: bag/interface/ocean.py
================================================
# -*- coding: utf-8 -*-
"""This module implements bag's interaction with an ocean simulator.
"""
from typing import TYPE_CHECKING, Dict, Any, Optional
import os
import bag.io
from .simulator import SimProcessManager
if TYPE_CHECKING:
from .simulator import ProcInfo
class OceanInterface(SimProcessManager):
    """This class handles interaction with Ocean simulators.

    Parameters
    ----------
    tmp_dir : str
        temporary file directory for SimAccess.
    sim_config : Dict[str, Any]
        the simulation configuration dictionary.
    """

    def __init__(self, tmp_dir, sim_config):
        # type: (str, Dict[str, Any]) -> None
        """Initialize a new OceanInterface object."""
        SimProcessManager.__init__(self, tmp_dir, sim_config)

    def format_parameter_value(self, param_config, precision):
        # type: (Dict[str, Any], int) -> str
        """Format the given parameter value as a string.

        To support both single value parameter and parameter sweeps, each
        parameter value is represented as a string instead of simple floats.
        This method will cast a parameter configuration (which can either be
        a single value or a sweep) to a simulator-specific string.

        Parameters
        ----------
        param_config : Dict[str, Any]
            a dictionary that describes this parameter value.

            4 formats are supported.  This is best explained by example.

            single value:
                dict(type='single', value=1.0)
            sweep a given list of values:
                dict(type='list', values=[1.0, 2.0, 3.0])
            linear sweep with inclusive start, inclusive stop, and step size:
                dict(type='linstep', start=1.0, stop=3.0, step=1.0)
            logarithmic sweep with given number of points per decade:
                dict(type='decade', start=1.0, stop=10.0, num=10)
        precision : int
            the parameter value precision.

        Returns
        -------
        param_str : str
            a string representation of param_config
        """
        fmt = '%.{}e'.format(precision)
        swp_type = param_config['type']
        if swp_type == 'single':
            return fmt % param_config['value']
        elif swp_type == 'list':
            return ' '.join((fmt % val for val in param_config['values']))
        elif swp_type == 'linstep':
            syntax = '{From/To}Linear:%s:%s:%s{From/To}' % (fmt, fmt, fmt)
            return syntax % (param_config['start'], param_config['step'], param_config['stop'])
        elif swp_type == 'decade':
            syntax = '{From/To}Decade:%s:%s:%s{From/To}' % (fmt, '%d', fmt)
            return syntax % (param_config['start'], param_config['num'], param_config['stop'])
        else:
            raise Exception('Unsupported param_config: %s' % param_config)

    def _get_ocean_info(self, save_dir, script_fname, log_fname):
        """Private helper function that assembles the ocean process info tuple.

        Returns the (command, log, env, cwd, save_dir) tuple consumed by
        :class:`SimProcessManager`.
        """
        # get the simulation command.
        sim_kwargs = self.sim_config['kwargs']
        ocn_cmd = sim_kwargs['command']
        env = sim_kwargs.get('env', None)
        cwd = sim_kwargs.get('cwd', None)
        sim_cmd = [ocn_cmd, '-nograph', '-replay', script_fname, '-log', log_fname]

        if cwd is None:
            # set working directory to BAG_WORK_DIR if None
            cwd = os.environ['BAG_WORK_DIR']

        # create empty log file to make sure it exists.
        # fix: the comment above promised this, but no code ever created the
        # file, so readers of the log could fail before ocean first writes it.
        bag.io.write_file(log_fname, '')

        return sim_cmd, log_fname, env, cwd, save_dir

    def setup_sim_process(self, lib, cell, outputs, precision, sim_tag):
        # type: (str, str, Dict[str, str], int, Optional[str]) -> ProcInfo
        """Set up an ocean simulation run.

        Renders the 'run_simulation.ocn' template into a temporary directory
        and returns the process info tuple for launching ocean.
        """
        sim_tag = sim_tag or 'BagSim'
        job_options = self.sim_config['job_options']
        init_file = self.sim_config['init_file']
        view = self.sim_config['view']
        state = self.sim_config['state']

        # format job options as skill list of string
        job_opt_str = "'( "
        for key, val in job_options.items():
            job_opt_str += '"%s" "%s" ' % (key, val)
        job_opt_str += " )"

        # create temporary save directory and log/script names
        save_dir = bag.io.make_temp_dir(prefix='%s_data' % sim_tag, parent_dir=self.tmp_dir)
        log_fname = os.path.join(save_dir, 'ocn_output.log')
        script_fname = os.path.join(save_dir, 'run.ocn')

        # setup ocean simulation script
        script = self.render_file_template('run_simulation.ocn',
                                           dict(
                                               lib=lib,
                                               cell=cell,
                                               view=view,
                                               state=state,
                                               init_file=init_file,
                                               save_dir=save_dir,
                                               precision=precision,
                                               sim_tag=sim_tag,
                                               outputs=outputs,
                                               job_opt_str=job_opt_str,
                                           ))
        bag.io.write_file(script_fname, script)

        return self._get_ocean_info(save_dir, script_fname, log_fname)

    def setup_load_process(self, lib, cell, hist_name, outputs, precision):
        # type: (str, str, str, Dict[str, str], int) -> ProcInfo
        """Set up an ocean result-loading run.

        Renders the 'load_results.ocn' template into a temporary directory
        and returns the process info tuple for launching ocean.
        """
        init_file = self.sim_config['init_file']
        view = self.sim_config['view']

        # create temporary save directory and log/script names
        save_dir = bag.io.make_temp_dir(prefix='%s_data' % hist_name, parent_dir=self.tmp_dir)
        log_fname = os.path.join(save_dir, 'ocn_output.log')
        script_fname = os.path.join(save_dir, 'run.ocn')

        # setup ocean load script
        script = self.render_file_template('load_results.ocn',
                                           dict(
                                               lib=lib,
                                               cell=cell,
                                               view=view,
                                               init_file=init_file,
                                               save_dir=save_dir,
                                               precision=precision,
                                               hist_name=hist_name,
                                               outputs=outputs,
                                           ))
        bag.io.write_file(script_fname, script)

        # launch ocean
        return self._get_ocean_info(save_dir, script_fname, log_fname)
================================================
FILE: bag/interface/server.py
================================================
# -*- coding: utf-8 -*-
"""This class defines SkillOceanServer, a server that handles skill/ocean requests.
The SkillOceanServer listens for skill/ocean requests from bag. Skill commands will
be forwarded to Virtuoso for execution, and Ocean simulation requests will be handled
by starting an Ocean subprocess. It also provides utility for bag to query simulation
progress and allows parallel simulation.
Client-side communication:
the client will always send a request object, which is a python dictionary.
This script processes the request and sends the appropriate commands to
Virtuoso.
Virtuoso side communication:
To ensure this process receive all the data from Virtuoso properly, Virtuoso
will print a single line of integer indicating the number of bytes to read.
Then, virtuoso will print out exactly that many bytes of data, followed by
a newline (to flush the standard input). This script handles that protcol
and will strip the newline before sending result back to client.
"""
import traceback
import numpy as np
from .. import io
def _object_to_skill_file_helper(py_obj, file_obj):
    """Recursive helper function for object_to_skill_file

    Parameters
    ----------
    py_obj : any
        the object to convert.
    file_obj : file
        the file object to write to. Must be created with io
        package so that encodings are handled correctly.
    """
    # fix potential raw bytes
    py_obj = io.fix_string(py_obj)

    if isinstance(py_obj, str):
        # string
        file_obj.write(py_obj)
    elif isinstance(py_obj, (bool, np.bool_)):
        # NOTE: bool is checked before int because bool is an int subclass.
        # np.bool_ added so numpy booleans are serialized as bools instead of
        # falling through to the unsupported-type error.
        bool_val = 1 if py_obj else 0
        file_obj.write('#bool {:d}'.format(bool_val))
    elif isinstance(py_obj, (float, np.floating)):
        # prepend type flag
        file_obj.write('#float {:f}'.format(py_obj))
    elif isinstance(py_obj, (int, np.integer)):
        # prepend type flag
        file_obj.write('#int {:d}'.format(py_obj))
    elif isinstance(py_obj, (list, tuple)):
        # a list of other objects.
        file_obj.write('#list\n')
        for val in py_obj:
            _object_to_skill_file_helper(val, file_obj)
            file_obj.write('\n')
        file_obj.write('#end')
    elif isinstance(py_obj, dict):
        # disembodied property lists
        file_obj.write('#prop_list\n')
        for key, val in py_obj.items():
            file_obj.write('{}\n'.format(key))
            _object_to_skill_file_helper(val, file_obj)
            file_obj.write('\n')
        file_obj.write('#end')
    else:
        raise Exception('Unsupported python data type: %s' % type(py_obj))
def object_to_skill_file(py_obj, file_obj):
    """Write the given python object to a file readable by Skill.

    Writes a Python object to a file that Virtuoso can parse into the
    equivalent skill object.  Currently only strings, numbers, booleans,
    lists/tuples and dictionaries are supported.

    Parameters
    ----------
    py_obj : any
        the object to convert.
    file_obj : file
        the file object to write to. Must be created with io
        package so that encodings are handled correctly.
    """
    _object_to_skill_file_helper(py_obj, file_obj)
    # terminate the output with a newline so Skill sees a complete line
    file_obj.write('\n')
# Prompt string constant.  NOTE(review): not referenced anywhere in this
# file's visible code — presumably used by the process-communication layer
# to delimit output; confirm against callers before removing.
bag_proc_prompt = 'BAG_PROMPT>>> '
class SkillServer(object):
    """A server that handles skill commands.

    This server is started and run by virtuoso.  It listens for commands
    from bag on a ZMQ socket, then passes each command to virtuoso.  It then
    gathers the result and sends it back to bag.

    Parameters
    ----------
    router : :class:`bag.interface.ZMQRouter`
        the :class:`~bag.interface.ZMQRouter` object used for socket communication.
    virt_in : file
        the virtuoso input file. Must be created with io
        package so that encodings are handled correctly.
    virt_out : file
        the virtuoso output file. Must be created with io
        package so that encodings are handled correctly.
    tmpdir : str or None
        if given, will save all temporary files to this folder.
    """

    def __init__(self, router, virt_in, virt_out, tmpdir=None):
        """Create a new SkillServer instance."""
        self.handler = router
        self.virt_in = virt_in
        self.virt_out = virt_out
        # create a directory for all temporary files
        self.dtmp = io.make_temp_dir('skillTmp', parent_dir=tmpdir)

    def run(self):
        """Start this server; returns when the socket is closed."""
        while not self.handler.is_closed():
            # check if socket received message
            if self.handler.poll_for_read(5):
                req = self.handler.recv_obj()
                if isinstance(req, dict) and 'type' in req:
                    if req['type'] == 'exit':
                        self.close()
                    elif req['type'] == 'skill':
                        expr, out_file = self.process_skill_request(req)
                        if expr is not None:
                            # send expression to virtuoso
                            self.send_skill(expr)
                            msg = self.recv_skill()
                            self.process_skill_result(msg, out_file)
                    else:
                        msg = '*Error* bag server error: bag request:\n%s' % str(req)
                        self.handler.send_obj(dict(type='error', data=msg))
                else:
                    msg = '*Error* bag server error: bag request:\n%s' % str(req)
                    self.handler.send_obj(dict(type='error', data=msg))

    def send_skill(self, expr):
        """Sends expr to virtuoso for evaluation.

        Parameters
        ----------
        expr : string
            the skill expression.
        """
        self.virt_in.write(expr)
        self.virt_in.flush()

    def recv_skill(self):
        """Receive one length-prefixed response from virtuoso.

        Virtuoso first prints the byte count on its own line, then exactly
        that many bytes, followed by a newline used to flush standard input.
        The trailing newline, if present, is stripped.
        """
        num_bytes = int(self.virt_out.readline())
        msg = self.virt_out.read(num_bytes)
        # fix: guard against an empty message; indexing msg[-1] raised
        # IndexError when num_bytes was 0.
        if msg.endswith('\n'):
            msg = msg[:-1]
        return msg

    def close(self):
        """Close this server."""
        self.handler.close()

    def process_skill_request(self, request):
        """Process the given skill request.

        Based on the given request object, returns the skill expression
        to be evaluated by Virtuoso.  This method creates temporary
        files for long input arguments and long output.

        Parameters
        ----------
        request : dict
            the request object.

        Returns
        -------
        expr : str or None
            expression to be evaluated by Virtuoso.  If None, an error
            occurred and nothing needs to be evaluated.
        out_file : str or None
            if not None, the result will be written to this file.
        """
        try:
            expr = request['expr']
            input_files = request['input_files'] or {}
            out_file = request['out_file']
        except KeyError as e:
            msg = '*Error* bag server error: %s' % str(e)
            self.handler.send_obj(dict(type='error', data=msg))
            return None, None

        fname_dict = {}
        # write input parameters to files
        for key, val in input_files.items():
            with io.open_temp(prefix=key, delete=False, dir=self.dtmp) as file_obj:
                fname_dict[key] = '"%s"' % file_obj.name
                # noinspection PyBroadException
                try:
                    object_to_skill_file(val, file_obj)
                except Exception:
                    # report serialization failures back to the client
                    stack_trace = traceback.format_exc()
                    msg = '*Error* bag server error: \n%s' % stack_trace
                    self.handler.send_obj(dict(type='error', data=msg))
                    return None, None

        # generate output file
        if out_file:
            with io.open_temp(prefix=out_file, delete=False, dir=self.dtmp) as file_obj:
                fname_dict[out_file] = '"%s"' % file_obj.name
                out_file = file_obj.name

        # fill in parameters to expression
        expr = expr.format(**fname_dict)
        return expr, out_file

    def process_skill_result(self, msg, out_file=None):
        """Process the given skill output, then send result to socket.

        Parameters
        ----------
        msg : str
            skill expression evaluation output.
        out_file : str or None
            if not None, read result from this file.
        """
        # read file if needed, and only if there are no errors.
        if msg.startswith('*Error*'):
            # an error occurred, forward error message directly
            self.handler.send_obj(dict(type='error', data=msg))
        elif out_file:
            # read result from file.
            try:
                msg = io.read_file(out_file)
                data = dict(type='str', data=msg)
            except IOError:
                stack_trace = traceback.format_exc()
                msg = '*Error* error reading file:\n%s' % stack_trace
                data = dict(type='error', data=msg)
            self.handler.send_obj(data)
        else:
            # return output from virtuoso directly
            self.handler.send_obj(dict(type='str', data=msg))
================================================
FILE: bag/interface/simulator.py
================================================
# -*- coding: utf-8 -*-
"""This module handles high level simulation routines.
This module defines SimAccess, which provides methods to run simulations
and retrieve results.
"""
from typing import Dict, Optional, Sequence, Any, Tuple, Union
import abc
from ..io import make_temp_dir
from ..concurrent.core import SubProcessManager
from .base import InterfaceBase
class SimAccess(InterfaceBase, abc.ABC):
    """Abstract base class for interacting with a simulator.

    Parameters
    ----------
    tmp_dir : str
        temporary file directory for SimAccess.
    sim_config : Dict[str, Any]
        the simulation configuration dictionary.
    """

    def __init__(self, tmp_dir, sim_config):
        # type: (str, Dict[str, Any]) -> None
        InterfaceBase.__init__(self)
        # keep the raw configuration around for subclasses
        self.sim_config = sim_config
        # all simulation temporaries live in their own subdirectory
        self.tmp_dir = make_temp_dir('simTmp', parent_dir=tmp_dir)

    @abc.abstractmethod
    def format_parameter_value(self, param_config, precision):
        # type: (Dict[str, Any], int) -> str
        """Format the given parameter value as a string.

        A parameter value is always represented as a string so that single
        values and parameter sweeps can be described uniformly.  Subclasses
        convert the given configuration into their simulator's syntax.

        Parameters
        ----------
        param_config : Dict[str, Any]
            a dictionary that describes this parameter value.

            4 formats are supported.  This is best explained by example.

            single value:
                dict(type='single', value=1.0)
            sweep a given list of values:
                dict(type='list', values=[1.0, 2.0, 3.0])
            linear sweep with inclusive start, inclusive stop, and step size:
                dict(type='linstep', start=1.0, stop=3.0, step=1.0)
            logarithmic sweep with given number of points per decade:
                dict(type='decade', start=1.0, stop=10.0, num=10)
        precision : int
            the parameter value precision.

        Returns
        -------
        param_str : str
            a string representation of param_config
        """
        return ""

    @abc.abstractmethod
    async def async_run_simulation(self, tb_lib, tb_cell, outputs, precision=6, sim_tag=None):
        # type: (str, str, Dict[str, str], int, Optional[str]) -> str
        """Coroutine that simulates the given testbench.

        Parameters
        ----------
        tb_lib : str
            testbench library name.
        tb_cell : str
            testbench cell name.
        outputs : Dict[str, str]
            the variable-to-expression dictionary.
        precision : int
            precision of floating point results.
        sim_tag : Optional[str]
            a descriptive tag describing this simulation run.

        Returns
        -------
        value : str
            the save directory path.
        """
        pass

    @abc.abstractmethod
    async def async_load_results(self, lib, cell, hist_name, outputs, precision=6):
        # type: (str, str, str, Dict[str, str], int) -> str
        """Coroutine that loads previously saved simulation results.

        Parameters
        ----------
        lib : str
            testbench library name.
        cell : str
            testbench cell name.
        hist_name : str
            simulation history name.
        outputs : Dict[str, str]
            the variable-to-expression dictionary.
        precision : int
            precision of floating point results.

        Returns
        -------
        value : str
            the save directory path.
        """
        pass
# (args, log, env, cwd, save_dir) tuple describing how to launch a simulator
# subprocess; see SimProcessManager.setup_sim_process for field meanings.
ProcInfo = Tuple[Union[str, Sequence[str]], str, Optional[Dict[str, str]], Optional[str], str]
class SimProcessManager(SimAccess, metaclass=abc.ABCMeta):
    """An implementation of :class:`SimAccess` using :class:`SubProcessManager`.

    Parameters
    ----------
    tmp_dir : str
        temporary file directory for SimAccess.
    sim_config : Dict[str, Any]
        the simulation configuration dictionary.
    """

    def __init__(self, tmp_dir, sim_config):
        # type: (str, Dict[str, Any]) -> None
        SimAccess.__init__(self, tmp_dir, sim_config)
        # configuration stores the timeout in milliseconds; the manager wants seconds
        timeout_ms = sim_config.get('cancel_timeout_ms', None)
        cancel_timeout = None if timeout_ms is None else timeout_ms / 1e3
        self._manager = SubProcessManager(max_workers=sim_config.get('max_workers', None),
                                          cancel_timeout=cancel_timeout)

    @abc.abstractmethod
    def setup_sim_process(self, lib, cell, outputs, precision, sim_tag):
        # type: (str, str, Dict[str, str], int, Optional[str]) -> ProcInfo
        """Perform any setup necessary to configure a simulation process.

        Parameters
        ----------
        lib : str
            testbench library name.
        cell : str
            testbench cell name.
        outputs : Dict[str, str]
            the variable-to-expression dictionary.
        precision : int
            precision of floating point results.
        sim_tag : Optional[str]
            a descriptive tag describing this simulation run.

        Returns
        -------
        args : Union[str, Sequence[str]]
            command to run, as string or list of string arguments.
        log : str
            log file name.
        env : Optional[Dict[str, str]]
            environment variable dictionary.  None to inherit from parent.
        cwd : Optional[str]
            working directory path.  None to inherit from parent.
        save_dir : str
            save directory path.
        """
        return '', '', None, None, ''

    @abc.abstractmethod
    def setup_load_process(self, lib, cell, hist_name, outputs, precision):
        # type: (str, str, str, Dict[str, str], int) -> ProcInfo
        """Perform any setup necessary to configure a result loading process.

        Parameters
        ----------
        lib : str
            testbench library name.
        cell : str
            testbench cell name.
        hist_name : str
            simulation history name.
        outputs : Dict[str, str]
            the variable-to-expression dictionary.
        precision : int
            precision of floating point results.

        Returns
        -------
        args : Union[str, Sequence[str]]
            command to run, as string or list of string arguments.
        log : str
            log file name.
        env : Optional[Dict[str, str]]
            environment variable dictionary.  None to inherit from parent.
        cwd : Optional[str]
            working directory path.  None to inherit from parent.
        save_dir : str
            save directory path.
        """
        return '', '', None, None, ''

    async def async_run_simulation(self, tb_lib: str, tb_cell: str,
                                   outputs: Dict[str, str],
                                   precision: int = 6,
                                   sim_tag: Optional[str] = None) -> str:
        """Run a simulation subprocess and return its save directory path."""
        proc_info = self.setup_sim_process(tb_lib, tb_cell, outputs, precision, sim_tag)
        args, log, env, cwd, save_dir = proc_info
        await self._manager.async_new_subprocess(args, log, env=env, cwd=cwd)
        return save_dir

    async def async_load_results(self, lib: str, cell: str, hist_name: str,
                                 outputs: Dict[str, str],
                                 precision: int = 6) -> str:
        """Run a result-loading subprocess and return its save directory path."""
        proc_info = self.setup_load_process(lib, cell, hist_name, outputs, precision)
        args, log, env, cwd, save_dir = proc_info
        await self._manager.async_new_subprocess(args, log, env=env, cwd=cwd)
        return save_dir
================================================
FILE: bag/interface/skill.py
================================================
# -*- coding: utf-8 -*-
"""This module implements all CAD database manipulations using skill commands.
"""
from typing import List, Dict, Optional, Any, Tuple
import os
import shutil
import yaml
from ..io.common import get_encoding, fix_string
from ..io.file import open_temp
from .database import DbAccess
try:
import cybagoa
except ImportError:
cybagoa = None
def _dict_to_pcell_params(table):
    """Convert given parameter dictionary to pcell parameter list format.

    Parameters
    ----------
    table : dict[str, any]
        the parameter dictionary.

    Returns
    -------
    param_list : list[any]
        the Pcell parameter list
    """
    param_list = []
    for key, val in table.items():
        # python 2/3 compatibility: convert raw bytes to string.
        val = fix_string(val)
        # NOTE: bool must be tested before int; bool is a subclass of int, so
        # the previous int-first ordering made the bool branch unreachable and
        # tagged booleans as "int".
        if isinstance(val, bool):
            param_list.append([key, "bool", val])
        elif isinstance(val, float):
            param_list.append([key, "float", val])
        elif isinstance(val, str):
            # unicode string
            param_list.append([key, "string", val])
        elif isinstance(val, int):
            param_list.append([key, "int", val])
        else:
            raise Exception('Unsupported parameter %s with type: %s' % (key, type(val)))

    return param_list
def to_skill_list_str(pylist):
    """Convert given python list to a skill list string.

    Parameters
    ----------
    pylist : list[str]
        a list of string.

    Returns
    -------
    ans : str
        a string representation of the equivalent skill list.
    """
    # each element is double-quoted, then joined inside a quoted skill list
    quoted = ['"%s"' % val for val in pylist]
    return "'( %s )" % ' '.join(quoted)
def _handle_reply(reply):
"""Process the given reply."""
if isinstance(reply, dict):
if reply.get('type') == 'error':
if 'data' not in reply:
raise Exception('Unknown reply format: %s' % reply)
raise VirtuosoException(reply['data'])
else:
try:
return reply['data']
except Exception:
raise Exception('Unknown reply format: %s' % reply)
else:
raise Exception('Unknown reply format: %s' % reply)
class VirtuosoException(Exception):
    """Raised when Virtuoso reports an error while evaluating an expression."""

    def __init__(self, *args, **kwargs):
        # noinspection PyArgumentList
        super(VirtuosoException, self).__init__(*args, **kwargs)
class SkillInterface(DbAccess):
"""Skill interface between bag and Virtuoso.
This class sends all bag's database and simulation operations to
an external Virtuoso process, then get the result from it.
Parameters
----------
dealer : :class:`bag.interface.ZMQDealer`
the socket used to communicate with :class:`~bag.interface.SkillOceanServer`.
tmp_dir : string
temporary file directory for DbAccess.
db_config : dict[str, any]
the database configuration dictionary.
"""
def __init__(self, dealer, tmp_dir, db_config):
    """Initialize a new SkillInterface object."""
    DbAccess.__init__(self, tmp_dir, db_config)
    # ZMQ socket used to talk with the Virtuoso-side server
    self.handler = dealer
    # bookkeeping for in-flight RCX jobs
    self._rcx_jobs = {}
def close(self):
    """Terminate the database server gracefully."""
    # ask the server to shut down before closing our end of the socket
    self.handler.send_obj(dict(type='exit'))
    self.handler.close()
def _eval_skill(self, expr, input_files=None, out_file=None):
    # type: (str, Optional[Dict[str, Any]], Optional[str]) -> str
    """Send a request to evaluate the given skill expression.

    Virtuoso limits the amount of data that can go through its standard
    input/output (< 4096 bytes).  Large inputs therefore have to be written
    to files that Virtuoso opens and parses, and large outputs have to be
    written by Virtuoso to a file that we read back.  The input_files and
    out_file parameters provide this functionality.

    For example, to evaluate "skill_fun(arg fname)", where arg is a file
    containing the list [1 2 3] and fname is the output file name, call
    this method with:

    expr = "skill_fun({arg} {fname})"
    input_files = { "arg": [1 2 3] }
    out_file = "fname"

    The bag server will then create a temporary file for arg and fname,
    write the list [1 2 3] into the file for arg, call Virtuoso, then read
    the output file fname and return the result.

    Parameters
    ----------
    expr : string
        the skill expression to evaluate.
    input_files : dict[string, any] or None
        A dictionary of input files content.
    out_file : string or None
        the output file name argument in expr.

    Returns
    -------
    result : str
        a string representation of the result.

    Raises
    ------
    :class: `.VirtuosoException` :
        if virtuoso encounters errors while evaluating the expression.
    """
    request = dict(
        type='skill',
        expr=expr,
        input_files=input_files,
        out_file=out_file,
    )
    self.handler.send_obj(request)
    return _handle_reply(self.handler.recv_obj())
def parse_schematic_template(self, lib_name, cell_name):
    """Parse the given schematic template.

    Parameters
    ----------
    lib_name : str
        name of the library.
    cell_name : str
        name of the cell.

    Returns
    -------
    template : str
        the content of the netlist structure file.
    """
    # {netlist_info} is left literal; _eval_skill substitutes a temp file name
    cmd = 'parse_cad_sch( "{}" "{}" {{netlist_info}} )'.format(lib_name, cell_name)
    return self._eval_skill(cmd, out_file='netlist_info')
def get_cells_in_library(self, lib_name):
    """Get a list of cells in the given library.

    Returns an empty list if the given library does not exist.

    Parameters
    ----------
    lib_name : str
        the library name.

    Returns
    -------
    cell_list : list[str]
        a list of cells in the library
    """
    cmd = 'get_cells_in_library_file( "{}" {{cell_file}} )'.format(lib_name)
    reply = self._eval_skill(cmd, out_file='cell_file')
    # the server returns whitespace-separated cell names
    return reply.split()
def create_library(self, lib_name, lib_path=''):
    """Create a new library if one does not exist yet.

    Parameters
    ----------
    lib_name : string
        the library name.
    lib_path : string
        directory to create the library in.  If Empty, use default location.
    """
    if not lib_path:
        lib_path = self.default_lib_path
    tech_lib = self.db_config['schematic']['tech_lib']
    cmd = 'create_or_erase_library("{}" "{}" "{}" nil)'.format(lib_name, tech_lib, lib_path)
    return self._eval_skill(cmd)
def create_implementation(self, lib_name, template_list, change_list, lib_path=''):
    """Create implementation of a design in the CAD database.

    Parameters
    ----------
    lib_name : str
        implementation library name.
    template_list : list
        a list of schematic templates to copy to the new library.
    change_list :
        a list of changes to be performed on each copied templates.
    lib_path : str
        directory to create the library in.  If Empty, use default location.
    """
    lib_path = lib_path or self.default_lib_path
    tech_lib = self.db_config['schematic']['tech_lib']

    if cybagoa is not None and self.db_config['schematic'].get('use_cybagoa', False):
        # fast path: write schematics directly through the cybagoa
        # OpenAccess extension instead of going through Skill.
        cds_lib_path = os.environ.get('CDS_LIB_PATH', './cds.lib')
        sch_name = 'schematic'
        sym_name = 'symbol'
        encoding = get_encoding()
        # release write locks
        cell_view_list = []
        for _, _, cell_name in template_list:
            cell_view_list.append((cell_name, sch_name))
            cell_view_list.append((cell_name, sym_name))
        self.release_write_locks(lib_name, cell_view_list)

        # create library in case it doesn't exist
        self.create_library(lib_name, lib_path)

        # write schematic
        with cybagoa.PyOASchematicWriter(cds_lib_path, lib_name, encoding) as writer:
            for temp_info, change_info in zip(template_list, change_list):
                sch_cell = cybagoa.PySchCell(temp_info[0], temp_info[1], temp_info[2], encoding)
                for old_pin, new_pin in change_info['pin_map']:
                    sch_cell.rename_pin(old_pin, new_pin)
                for inst_name, rinst_list in change_info['inst_list']:
                    sch_cell.add_inst(inst_name, lib_name, rinst_list)
                writer.add_sch_cell(sch_cell)
            writer.create_schematics(sch_name, sym_name)
        # presumably the flag tells Skill whether it still needs to copy the
        # template cells; cybagoa already wrote them here — confirm against
        # create_concrete_schematic.
        copy = 'nil'
    else:
        copy = "'t"

    in_files = {'template_list': template_list,
                'change_list': change_list}
    sympin = to_skill_list_str(self.db_config['schematic']['sympin'])
    ipin = to_skill_list_str(self.db_config['schematic']['ipin'])
    opin = to_skill_list_str(self.db_config['schematic']['opin'])
    iopin = to_skill_list_str(self.db_config['schematic']['iopin'])
    simulators = to_skill_list_str(self.db_config['schematic']['simulators'])

    cmd = ('create_concrete_schematic( "%s" "%s" "%s" {template_list} '
           '{change_list} %s %s %s %s %s %s)' % (lib_name, tech_lib, lib_path,
                                                 sympin, ipin, opin, iopin, simulators, copy))

    return self._eval_skill(cmd, input_files=in_files)
def configure_testbench(self, tb_lib, tb_cell):
    """Update testbench state for the given testbench.

    This method fill in process-specific information for the given testbench.

    Parameters
    ----------
    tb_lib : str
        testbench library name.
    tb_cell : str
        testbench cell name.

    Returns
    -------
    cur_env : str
        the current simulation environment.
    envs : list[str]
        a list of available simulation environments.
    parameters : dict[str, str]
        a list of testbench parameter values, represented as string.
    outputs : dict[str, str]
        a dictionary of testbench output expressions.
    """
    tb_config = self.db_config['testbench']

    cmd = ('instantiate_testbench("{tb_cell}" "{targ_lib}" ' +
           '"{config_libs}" "{config_views}" "{config_stops}" ' +
           '"{default_corner}" "{corner_file}" {def_files} ' +
           '"{tech_lib}" {result_file})')
    # result_file is deliberately left as a literal placeholder; _eval_skill
    # substitutes the temporary output file name.
    cmd = cmd.format(tb_cell=tb_cell,
                     targ_lib=tb_lib,
                     config_libs=tb_config['config_libs'],
                     config_views=tb_config['config_views'],
                     config_stops=tb_config['config_stops'],
                     default_corner=tb_config['default_env'],
                     corner_file=tb_config['env_file'],
                     def_files=to_skill_list_str(tb_config['def_files']),
                     tech_lib=self.db_config['schematic']['tech_lib'],
                     result_file='{result_file}')
    # NOTE(review): yaml.Loader can construct arbitrary objects; input comes
    # from the trusted CAD server, but safe_load would be more defensive.
    output = yaml.load(self._eval_skill(cmd, out_file='result_file'), Loader=yaml.Loader)
    return tb_config['default_env'], output['corners'], output['parameters'], output['outputs']
def get_testbench_info(self, tb_lib, tb_cell):
    """Returns information about an existing testbench.

    Parameters
    ----------
    tb_lib : str
        testbench library.
    tb_cell : str
        testbench cell.

    Returns
    -------
    cur_envs : list[str]
        the current simulation environments.
    envs : list[str]
        a list of available simulation environments.
    parameters : dict[str, str]
        a list of testbench parameter values, represented as string.
    outputs : dict[str, str]
        a list of testbench output expressions.
    """
    # result_file stays a literal placeholder for _eval_skill to substitute
    cmd = 'get_testbench_info("{tb_lib}" "{tb_cell}" {result_file})'.format(
        tb_lib=tb_lib, tb_cell=tb_cell, result_file='{result_file}')
    reply = self._eval_skill(cmd, out_file='result_file')
    output = yaml.load(reply, Loader=yaml.Loader)
    return (output['enabled_corners'], output['corners'],
            output['parameters'], output['outputs'])
def update_testbench(self,
                     lib,  # type: str
                     cell,  # type: str
                     parameters,  # type: Dict[str, str]
                     sim_envs,  # type: List[str]
                     config_rules,  # type: List[List[str]]
                     env_parameters  # type: List[List[Tuple[str, str]]]
                     ):
    # type: (...) -> None
    """Update the given testbench configuration.

    Parameters
    ----------
    lib : str
        testbench library.
    cell : str
        testbench cell.
    parameters : Dict[str, str]
        testbench parameters.
    sim_envs : List[str]
        list of enabled simulation environments.
    config_rules : List[List[str]]
        config view mapping rules, list of (lib, cell, view) rules.
    env_parameters : List[List[Tuple[str, str]]]
        list of param/value list for each simulation environment.
    """
    cmd = ('modify_testbench("%s" "%s" {conf_rules} {run_opts} '
           '{sim_envs} {params} {env_params})' % (lib, cell))
    # all arguments are passed through temporary files
    in_files = dict(
        conf_rules=config_rules,
        run_opts=[],
        sim_envs=sim_envs,
        params=list(parameters.items()),
        env_params=list(zip(sim_envs, env_parameters)),
    )
    self._eval_skill(cmd, input_files=in_files)
def instantiate_layout_pcell(self, lib_name, cell_name, view_name,
                             inst_lib, inst_cell, params, pin_mapping):
    """Create a layout cell with a single pcell instance.

    Parameters
    ----------
    lib_name : str
        layout library name.
    cell_name : str
        layout cell name.
    view_name : str
        layout view name, default is "layout".
    inst_lib : str
        pcell library name.
    inst_cell : str
        pcell cell name.
    params : dict[str, any]
        the parameter dictionary.
    pin_mapping : dict[str, str]
        the pin mapping dictionary.
    """
    # create library in case it doesn't exist
    self.create_library(lib_name)

    # convert parameter dictionary to pcell params list format
    in_files = dict(
        params=_dict_to_pcell_params(params),
        pin_mapping=list(pin_mapping.items()),
    )
    cmd = ('create_layout_with_pcell( "%s" "%s" "%s" "%s" "%s"'
           '{params} {pin_mapping} )' % (lib_name, cell_name,
                                         view_name, inst_lib, inst_cell))
    return self._eval_skill(cmd, input_files=in_files)
def instantiate_layout(self, lib_name, view_name, via_tech, layout_list):
    """Create a batch of layouts.

    Parameters
    ----------
    lib_name : str
        layout library name.
    view_name : str
        layout view name.
    via_tech : str
        via technology library name.
    layout_list : list[any]
        a list of layouts to create
    """
    # create library in case it doesn't exist
    self.create_library(lib_name)

    # convert parameter dictionary to pcell params list format
    new_layout_list = []
    for info_list in layout_list:
        new_inst_list = []
        for inst in info_list[1]:
            # fix: copy before any modification.  Previously the dict was
            # only copied when 'params' was present, so popping 'master_key'
            # from an instance without 'params' mutated the caller's data.
            if 'params' in inst or 'master_key' in inst:
                inst = inst.copy()
            if 'params' in inst:
                inst['params'] = _dict_to_pcell_params(inst['params'])
            # SKILL interface cannot handle master_key info, so we remove it
            # from InstanceInfo if we find it
            inst.pop('master_key', None)
            new_inst_list.append(inst)

        new_info_list = info_list[:]
        new_info_list[1] = new_inst_list
        new_layout_list.append(new_info_list)
    cmd = 'create_layout( "%s" "%s" "%s" {layout_list} )' % (lib_name, view_name, via_tech)
    in_files = {'layout_list': new_layout_list}
    return self._eval_skill(cmd, input_files=in_files)
def release_write_locks(self, lib_name, cell_view_list):
    """Release write locks from all the given cells.

    Parameters
    ----------
    lib_name : string
        the library name.
    cell_view_list : List[(string, string)]
        list of cell/view name tuples.
    """
    # the cell/view list is passed to SKILL through a temporary input file
    file_table = dict(cell_view_list=cell_view_list)
    return self._eval_skill('release_write_locks( "%s" {cell_view_list} )' % lib_name,
                            input_files=file_table)
def create_schematic_from_netlist(self, netlist, lib_name, cell_name,
                                  sch_view=None, **kwargs):
    # type: (str, str, str, Optional[str], **Any) -> None
    """Create a schematic from a netlist.

    This is mainly used to create extracted schematic from an extracted netlist.

    Parameters
    ----------
    netlist : str
        the netlist file name.
    lib_name : str
        library name.
    cell_name : str
        cell_name
    sch_view : Optional[str]
        schematic view name. The default value is implementation dependent.
    **kwargs : Any
        additional implementation-dependent arguments.
    """
    calview_config = self.db_config.get('calibreview', None)
    use_calibreview = self.db_config.get('use_calibreview', True)
    if calview_config is not None and use_calibreview:
        # create calibre view from extraction netlist
        cell_map = calview_config['cell_map']
        sch_view = sch_view or calview_config['view_name']

        # create calibre view config file.  delete=False because Virtuoso
        # reads this file after the context manager has closed it.
        tmp_params = dict(
            netlist_file=netlist,
            lib_name=lib_name,
            cell_name=cell_name,
            calibre_cellmap=cell_map,
            view_name=sch_view,
        )
        content = self.render_file_template('calibreview_setup.txt', tmp_params)
        with open_temp(prefix='calview', dir=self.tmp_dir, delete=False) as f:
            fname = f.name
            f.write(content)

        # delete old calibre view
        cmd = f'delete_cellview( "{lib_name}" "{cell_name}" "{sch_view}" )'
        self._eval_skill(cmd)
        # make extracted schematic.
        # NOTE(review): assumes the MGC_HOME directory name contains a
        # 4-digit release year before the first '.' -- confirm for your
        # Calibre installation.
        calibre_root_version = os.path.basename(os.environ['MGC_HOME']).split('.')[0]
        calibre_year = int(calibre_root_version[-4:])
        if calibre_year > 2011:
            # newer Calibre releases can load the whole setup file directly
            cmd = f'mgc_rve_load_setup_file( "{fname}" )'
        else:
            # older releases: set each option individually via mgc_eview_globals
            cmd0 = f'mgc_eview_globals->outputLibrary = "{lib_name}"'
            self._eval_skill(cmd0)
            cmd0 = f'mgc_eview_globals->schematicLibrary = "{lib_name}"'
            self._eval_skill(cmd0)
            cmd0 = f'mgc_eview_globals->cellMapFile = "{cell_map}"'
            self._eval_skill(cmd0)
            cmd0 = 'mgc_eview_globals->createUnmatchedTerminals = t'
            self._eval_skill(cmd0)
            # cmd0 = 'mgc_eview_globals->preserveDeviceCase = t'
            # self._eval_skill(cmd0)
            cmd0 = 'mgc_eview_globals->devicePlacementArrayed = t'
            self._eval_skill(cmd0)
            cmd0 = 'mgc_eview_globals->showCalviewDlg = nil'
            self._eval_skill(cmd0)
            cmd = f'mgc_rve_create_cellview("{netlist}")'
        self._eval_skill(cmd)
    else:
        # no calibre view configuration: copy the extracted netlists into
        # the cell directory of the CAD database instead.
        netlist_dir = os.path.dirname(netlist)
        netlist_files = self.checker.get_rcx_netlists(lib_name, cell_name)
        if not netlist_files:
            # some error checking. Shouldn't be needed but just in case
            raise ValueError('RCX did not generate any netlists')

        # copy netlists to a "netlist" subfolder in the CAD database
        cell_dir = self.get_cell_directory(lib_name, cell_name)
        targ_dir = os.path.join(cell_dir, 'netlist')
        os.makedirs(targ_dir, exist_ok=True)
        for fname in netlist_files:
            shutil.copy(os.path.join(netlist_dir, fname), targ_dir)

        # create symbolic link as aliases; remove any stale link first
        symlink = os.path.join(targ_dir, 'netlist')
        try:
            os.remove(symlink)
        except FileNotFoundError:
            pass
        os.symlink(netlist_files[0], symlink)
def get_cell_directory(self, lib_name, cell_name):
    # type: (str, str) -> str
    """Returns the directory name of the given cell.

    Parameters
    ----------
    lib_name : str
        library name.
    cell_name : str
        cell name.

    Returns
    -------
    cell_dir : str
        path to the cell directory.
    """
    cmd = f'get_lib_directory( "{lib_name}" )'
    # the SKILL reply is a quoted string; yaml.load strips the
    # outermost quotation marks.
    lib_dir = yaml.load(self._eval_skill(cmd), Loader=yaml.Loader)
    if not lib_dir:
        raise ValueError('Library %s not found.' % lib_name)
    return os.path.join(lib_dir, cell_name)
def create_verilog_view(self, verilog_file, lib_name, cell_name, **kwargs):
    # type: (str, str, str, **Any) -> None
    """Create a verilog view for mix-signal simulation.

    Parameters
    ----------
    verilog_file : str
        the verilog file name.
    lib_name : str
        library name.
    cell_name : str
        cell name.
    **kwargs : Any
        additional implementation-dependent arguments.
    """
    # remove any stale verilog view first
    self._eval_skill('delete_cellview( "%s" "%s" "verilog" )' % (lib_name, cell_name))
    # then register the given file as the cell's verilog view
    self._eval_skill('schInstallHDL("%s" "%s" "verilog" "%s" t)'
                     % (lib_name, cell_name, verilog_file))
================================================
FILE: bag/interface/templates/LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2018, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: bag/interface/templates/Module.pyi
================================================
# -*- coding: utf-8 -*-
from typing import Dict
import os
import pkg_resources
from bag.design.module import Module
# noinspection PyPep8Naming
class {{ lib_name }}__{{ cell_name }}(Module):
"""Module for library {{ lib_name }} cell {{ cell_name }}.
Fill in high level description here.
"""
yaml_file = pkg_resources.resource_filename(__name__,
os.path.join('netlist_info',
'{{ cell_name }}.yaml'))
def __init__(self, database, parent=None, prj=None, **kwargs):
Module.__init__(self, database, self.yaml_file, parent=parent, prj=prj, **kwargs)
@classmethod
def get_params_info(cls):
# type: () -> Dict[str, str]
"""Returns a dictionary from parameter names to descriptions.
Returns
-------
param_info : Optional[Dict[str, str]]
dictionary from parameter names to descriptions.
"""
return dict(
)
def design(self):
"""To be overridden by subclasses to design this module.
This method should fill in values for all parameters in
self.parameters. To design instances of this module, you can
call their design() method or any other ways you coded.
To modify schematic structure, call:
rename_pin()
delete_instance()
replace_instance_master()
reconnect_instance_terminal()
restore_instance()
array_instance()
"""
pass
================================================
FILE: bag/interface/templates/PrimModule.pyi
================================================
# -*- coding: utf-8 -*-
import os
import pkg_resources
from bag.design.module import {{ module_name }}
# noinspection PyPep8Naming
class {{ lib_name }}__{{ cell_name }}({{ module_name }}):
"""design module for {{ lib_name }}__{{ cell_name }}.
"""
yaml_file = pkg_resources.resource_filename(__name__,
os.path.join('netlist_info',
'{{ cell_name }}.yaml'))
def __init__(self, database, parent=None, prj=None, **kwargs):
{{ module_name }}.__init__(self, database, self.yaml_file, parent=parent, prj=prj, **kwargs)
================================================
FILE: bag/interface/templates/calibreview_setup.txt
================================================
calibre_view_netlist_file : {{ netlist_file }}
output_library : {{ lib_name }}
schematic_library : {{ lib_name }}
cell_name : {{ cell_name }}
cellmap_file : {{ calibre_cellmap }}
calibreview_log_file : ./calview.log
calibreview_name : {{ view_name }}
calibreview_type : schematic
create_terminals : all
preserve_device_case : on
execute_callbacks : off
reset_properties : (m=1)
magnify_devices_by : 1
magnify_parasitics_by : 1
device_placement : arrayed
parasitic_placement : arrayed
show_parasitic_polygons : off
open_calibreview : don't_open
generate_spectre_netlist : off
================================================
FILE: bag/interface/templates/load_results.ocn
================================================
lib = "{{ lib }}"
cell = "{{ cell }}"
view = "{{ view }}"
init_file = "{{ init_file }}"
save_dir = "{{ save_dir }}"
precision = {{ precision }}
hist_name = "{{ hist_name }}"
; initialize environment variables
when( strlen(init_file) > 0
load(init_file)
)
; save parametric waveform values as a flattened list.
procedure( save_param_wave_values(wave fmt line_fmt fhandle)
let( (vec wave_cls tmp_val)
if( drIsWaveform(wave) then
; 1D waveform, simply print all values
vec = drGetWaveformYVec(wave)
wave_cls = className(classOf(drGetElem(vec 0)))
if( wave_cls == 'adtComplex then
; print complex
for( i 0 drVectorLength(vec) - 1
tmp_val = drGetElem(vec i)
if( imag(tmp_val) < 0 then
; fix for negative imaginary part.
sprintf(line_fmt "%s%sj\n" fmt fmt)
else
sprintf(line_fmt "%s+%sj\n" fmt fmt)
)
fprintf(fhandle line_fmt real(tmp_val) imag(tmp_val))
)
else
; print real value
for( i 0 drVectorLength(vec) - 1
fprintf(fhandle line_fmt drGetElem(vec i))
)
)
else
; parametric waveform, recurse
foreach(val sweepValues(wave)
save_param_wave_values(famValue(wave val) fmt line_fmt fhandle)
)
)
)
)
; define save functions
; save a waveform to file.
; the given waveform will be saved to the file "/.data" as a flattened 1D array.
; the sweep parameter names of this waveform will be saved to the file "/.sweep",
; and the values of each parameter will be saved to the file "/.info".
; data_list_struct is a tconc struct of (waveform_name, waveform_data_file_handle) pairs.
procedure( save_waveform(directory var_name wave precision data_list_struct)
let( (fmt line_fmt wave_cls entry data_file sweep_file fhandle
name_list val_list sweep_df iter_wave)
sprintf(fmt "%%.%de" precision)
sprintf(line_fmt "%s\n" fmt)
wave_cls = className(classOf(wave))
if( not( entry = assoc( var_name cdar(data_list_struct) ) ) then
; first time saving this variable
sprintf(data_file "%s/%s.data" directory var_name)
sprintf(sweep_file "%s/%s.sweep" directory var_name)
cond(
( or( drIsWaveform(wave) drIsParamWave(wave) )
; save sweep names
fhandle = outfile( sweep_file "w" )
name_list = sweepNames(wave)
foreach(swp_name name_list
fprintf(fhandle "%s\n" swp_name)
)
close(fhandle)
; save sweep values
iter_wave = wave
foreach(swp_name name_list
; save outermost sweep values
val_list = sweepValues(iter_wave)
sprintf(sweep_df "%s/%s.info" directory swp_name)
unless( isFile(sweep_df)
fhandle = outfile( sweep_df "w" )
foreach(val val_list
fprintf(fhandle line_fmt val)
)
close(fhandle)
)
; remove outer sweep
when( drIsParamWave(iter_wave)
iter_wave = famValue(iter_wave car(val_list))
)
)
fhandle = outfile( data_file "w" )
)
( or( wave_cls == 'flonum wave_cls == 'fixnum wave_cls == 'adtComplex )
; scalar data, make empty sweep file
fhandle = outfile( sweep_file "w")
close(fhandle)
fhandle = outfile( data_file "w" )
)
( t
; unsupported type
error("Unsupported data for output %s: %A\n" var_name wave)
)
)
tconc( data_list_struct list(var_name fhandle) )
else
fhandle = cadr(entry)
)
; append data to file
if( or( drIsWaveform(wave) drIsParamWave(wave) ) then
save_param_wave_values(wave fmt line_fmt fhandle)
else
; print single point value
if( wave_cls == 'adtComplex then
; print complex
if( imag(wave) < 0 then
; fix for negative imaginary part.
sprintf(line_fmt "%s%sj\n" fmt fmt)
else
sprintf(line_fmt "%s+%sj\n" fmt fmt)
)
fprintf(fhandle line_fmt real(wave) imag(wave))
else
fprintf(fhandle line_fmt wave)
)
)
't
)
)
ocnSetXLMode()
ocnxlTargetCellView(lib cell view)
; load result database
rdb = axlReadHistoryResDB(hist_name)
unless( rdb
error("Cannot find database associated with name %s" hist_name)
)
point_list = rdb->points()
sprintf(sweep_fname "%s/sweep.info" save_dir)
sweep_f = outfile( sweep_fname "w" )
; write sweep parameters title
when( point_list
point = car(point_list)
test_list = point->tests()
when( test_list
corner = car(test_list)->cornerName
par_names = setof( name point->params(?corner corner ?sortBy 'name)~>name
and( (name != "corModelSpec") (name != "temperature") ) )
fprintf(sweep_f "corner ")
fprintf(sweep_f "%s\n" buildString( par_names " " ))
)
)
; iterate through each design point and save data.
data_list_struct = tconc(nil 0)
total_points = length(point_list)
cur_idx = 1
foreach(point point_list
printf("*Info* saving process: %d/%d\n" cur_idx total_points)
cur_idx = cur_idx + 1
foreach(test point->tests()
; write param values to file.
corner = test->cornerName
params = setof(par point->params(?corner corner ?sortBy 'name)
and( (par->name != "corModelSpec") (par->name != "temperature") ) )
param_vals = mapcar( lambda( (par) par->valueAsString(?digits precision ?notation 'eng) ) params )
fprintf(sweep_f "%s " corner)
fprintf(sweep_f "%s\n" buildString( param_vals " " ))
; open results
openResults(test->resultsDir)
{% for var, expr in outputs.items() %}
tmp = {{ expr }}
save_waveform( save_dir "{{ var }}" tmp precision data_list_struct )
{% endfor %}
)
)
; close opened files
close(sweep_f)
foreach( entry cdar(data_list_struct)
close(cadr(entry))
)
ocnxlEndXLMode()
exit()
================================================
FILE: bag/interface/templates/run_simulation.ocn
================================================
lib = "{{ lib }}"
cell = "{{ cell }}"
view = "{{ view }}"
state = "{{ state }}"
init_file = "{{ init_file }}"
save_dir = "{{ save_dir }}"
precision = {{ precision }}
sim_tag = "{{ sim_tag }}"
job_opt_list = {{ job_opt_str }}
; initialize environment variables
when( strlen(init_file) > 0
load(init_file)
)
; save parametric waveform values as a flattened list.
procedure( save_param_wave_values(wave fmt line_fmt fhandle)
let( (vec wave_cls tmp_val)
if( drIsWaveform(wave) then
; 1D waveform, simply print all values
vec = drGetWaveformYVec(wave)
wave_cls = className(classOf(drGetElem(vec 0)))
if( wave_cls == 'adtComplex then
; print complex
for( i 0 drVectorLength(vec) - 1
tmp_val = drGetElem(vec i)
if( imag(tmp_val) < 0 then
; fix for negative imaginary part.
sprintf(line_fmt "%s%sj\n" fmt fmt)
else
sprintf(line_fmt "%s+%sj\n" fmt fmt)
)
fprintf(fhandle line_fmt real(tmp_val) imag(tmp_val))
)
else
; print real value
for( i 0 drVectorLength(vec) - 1
fprintf(fhandle line_fmt drGetElem(vec i))
)
)
else
; parametric waveform, recurse
foreach(val sweepValues(wave)
save_param_wave_values(famValue(wave val) fmt line_fmt fhandle)
)
)
)
)
; define save functions
; save a waveform to file.
; the given waveform will be saved to the file "/.data" as a flattened 1D array.
; the sweep parameter names of this waveform will be saved to the file "/.sweep",
; and the values of each parameter will be saved to the file "/.info".
; data_list_struct is a tconc struct of (waveform_name, waveform_data_file_handle) pairs.
procedure( save_waveform(directory var_name wave precision data_list_struct)
let( (fmt line_fmt wave_cls entry data_file sweep_file fhandle
name_list val_list sweep_df iter_wave)
sprintf(fmt "%%.%de" precision)
sprintf(line_fmt "%s\n" fmt)
wave_cls = className(classOf(wave))
if( not( entry = assoc( var_name cdar(data_list_struct) ) ) then
; first time saving this variable
sprintf(data_file "%s/%s.data" directory var_name)
sprintf(sweep_file "%s/%s.sweep" directory var_name)
cond(
( or( drIsWaveform(wave) drIsParamWave(wave) )
; save sweep names
fhandle = outfile( sweep_file "w" )
name_list = sweepNames(wave)
foreach(swp_name name_list
fprintf(fhandle "%s\n" swp_name)
)
close(fhandle)
; save sweep values
iter_wave = wave
foreach(swp_name name_list
; save outermost sweep values
val_list = sweepValues(iter_wave)
sprintf(sweep_df "%s/%s.info" directory swp_name)
unless( isFile(sweep_df)
fhandle = outfile( sweep_df "w" )
foreach(val val_list
fprintf(fhandle line_fmt val)
)
close(fhandle)
)
; remove outer sweep
when( drIsParamWave(iter_wave)
iter_wave = famValue(iter_wave car(val_list))
)
)
fhandle = outfile( data_file "w" )
)
( or( wave_cls == 'flonum wave_cls == 'fixnum wave_cls == 'adtComplex )
; scalar data, make empty sweep file
fhandle = outfile( sweep_file "w")
close(fhandle)
fhandle = outfile( data_file "w" )
)
( t
; unsupported type
error("Unsupported data for output %s: %A\n" var_name wave)
)
)
tconc( data_list_struct list(var_name fhandle) )
else
fhandle = cadr(entry)
)
; append data to file
if( or( drIsWaveform(wave) drIsParamWave(wave) ) then
save_param_wave_values(wave fmt line_fmt fhandle)
else
; print single point value
if( wave_cls == 'adtComplex then
; print complex
if( imag(wave) < 0 then
; fix for negative imaginary part.
sprintf(line_fmt "%s%sj\n" fmt fmt)
else
sprintf(line_fmt "%s+%sj\n" fmt fmt)
)
fprintf(fhandle line_fmt real(wave) imag(wave))
else
fprintf(fhandle line_fmt wave)
)
)
't
)
)
ocnSetXLMode()
ocnxlTargetCellView(lib cell view)
ocnxlLoadSetupState(state 'overwrite)
ocnxlHistoryPrefix(sim_tag)
ocnxlJobSetup(job_opt_list)
printf("*Info* Creating netlist...\n")
createNetlist( ?recreateAll t ?display nil )
printf("*Info* Starting simulation...\n")
ocnxlRun(?mode 'sweepAndCorners ?nominalCornerEnabled nil ?allCornersEnabled 't
?allSweepsEnabled 't)
; load result database
hist_name = ocnxlGetCurrentHistory()
rdb = axlReadHistoryResDB(hist_name)
point_list = rdb->points()
sprintf(sweep_fname "%s/sweep.info" save_dir)
sweep_f = outfile( sweep_fname "w" )
; write sweep parameters title
when( point_list
point = car(point_list)
test_list = point->tests()
when( test_list
corner = car(test_list)->cornerName
par_names = setof( name point->params(?corner corner ?sortBy 'name)~>name
and( (name != "corModelSpec") (name != "temperature") ) )
fprintf(sweep_f "corner ")
fprintf(sweep_f "%s\n" buildString( par_names " " ))
)
)
; iterate through each design point and save data.
data_list_struct = tconc(nil 0)
total_points = length(point_list)
cur_idx = 1
foreach(point point_list
printf("*Info* saving process: %d/%d\n" cur_idx total_points)
cur_idx = cur_idx + 1
foreach(test point->tests()
; write param values to file.
corner = test->cornerName
params = setof(par point->params(?corner corner ?sortBy 'name)
and( (par->name != "corModelSpec") (par->name != "temperature") ) )
param_vals = mapcar( lambda( (par) par->valueAsString(?digits precision ?notation 'eng) ) params )
fprintf(sweep_f "%s " corner)
fprintf(sweep_f "%s\n" buildString( param_vals " " ))
; open results
openResults(test->resultsDir)
{% for var, expr in outputs.items() %}
tmp = {{ expr }}
save_waveform( save_dir "{{ var }}" tmp precision data_list_struct )
{% endfor %}
)
)
; close opened files
close(sweep_f)
foreach( entry cdar(data_list_struct)
close(cadr(entry))
)
ocnxlEndXLMode()
exit()
================================================
FILE: bag/interface/zmqwrapper.py
================================================
# -*- coding: utf-8 -*-
"""This module defines various wrapper around ZMQ sockets."""
from datetime import datetime
import zlib
import pprint
from pathlib import Path
import os
import yaml
import zmq
from .. import io
class ZMQDealer(object):
    """A class that interacts with a ZMQ dealer socket.

    a dealer socket is an asynchronous socket that can issue multiple requests
    without needing to wait for a reply. This class encapsulates the ZMQ
    socket details and provide more convenient API to use.

    Parameters
    ----------
    port : int
        the port to connect to.
    pipeline : int
        number of messages allowed in a pipeline. Only affects file
        transfer performance.
    host : str
        the host to connect to.
    log_file : str or None
        the log file. None to disable logging.
    """
    def __init__(self, port, pipeline=100, host='localhost', log_file=None):
        """Create a new ZMQDealer object.
        """
        context = zmq.Context.instance()
        # noinspection PyUnresolvedReferences
        self.socket = context.socket(zmq.DEALER)
        # hwm (high-water mark) bounds the number of in-flight messages
        self.socket.hwm = pipeline
        self.socket.connect('tcp://%s:%d' % (host, port))
        self._log_file = log_file
        self.poller = zmq.Poller()
        # noinspection PyUnresolvedReferences
        self.poller.register(self.socket, zmq.POLLIN)
        if self._log_file is not None:
            self._log_file = Path(self._log_file).resolve()
            # If log file directory does not exist, create it
            log_dir: Path = self._log_file.parent
            log_dir.mkdir(parents=True, exist_ok=True)
            # time stamp the file so each session gets its own log
            now = datetime.now()
            time_stamp = now.strftime('%Y%m%d_%H%M%S%f')
            ext = self._log_file.suffix
            self._log_file = str(log_dir / f'{self._log_file.stem}_{time_stamp}{ext}')
    def log_msg(self, msg):
        """Log the given message to the log file, if logging is enabled."""
        if self._log_file is not None:
            io.write_file(self._log_file, '%s\n' % msg, append=True)
    def log_obj(self, msg, obj):
        """Log the given message and pretty-printed object, if logging is enabled."""
        if self._log_file is not None:
            obj_str = pprint.pformat(obj)
            io.write_file(self._log_file, '%s\n%s\n' % (msg, obj_str), append=True)
    def close(self):
        """Close the underlying socket."""
        self.socket.close()
    def send_obj(self, obj):
        """Sends a python object using YAML serialization and zlib compression.

        Parameters
        ----------
        obj : any
            the object to send.
        """
        p = io.to_bytes(yaml.dump(obj))
        z = zlib.compress(p)
        self.log_obj('sending data:', obj)
        self.socket.send(z)
    def recv_obj(self, timeout=None, enable_cancel=False):
        """Receive a python object, serialized with YAML and compressed with zlib.

        Parameters
        ----------
        timeout : int or None
            the timeout to wait in milliseconds. If None, wait indefinitely.
        enable_cancel : bool
            If True, allows the user to press Ctrl-C to abort. For this to work,
            the other end must know how to process the stop request dictionary.

        Returns
        -------
        obj : any
            the received object. None if timeout reached.
        """
        try:
            events = self.poller.poll(timeout=timeout)
        except KeyboardInterrupt:
            if not enable_cancel:
                # re-raise exception if cancellation is not enabled.
                raise
            self.send_obj(dict(type='stop'))
            print('Stop signal sent, waiting for reply. Press Ctrl-C again to force exit.')
            try:
                events = self.poller.poll(timeout=timeout)
            except KeyboardInterrupt:
                print('Force exiting.')
                return None
        if events:
            data = self.socket.recv()
            z = io.fix_string(zlib.decompress(data))
            # NOTE(review): yaml.Loader can construct arbitrary python objects;
            # assumes the peer is trusted -- confirm before exposing this socket.
            obj = yaml.load(z, Loader=yaml.Loader)
            self.log_obj('received data:', obj)
            return obj
        else:
            # empty events means the poll timed out; this branch is only
            # reachable when timeout is not None (poll(None) blocks forever),
            # so the %d format below is safe.
            self.log_msg('timeout with %d ms reached.' % timeout)
            return None
    def recv_msg(self):
        """Receive a string message.

        Returns
        -------
        msg : bytes
            the received message, as raw bytes from the socket (not decoded).
        """
        data = self.socket.recv()
        self.log_msg('received message:\n%s' % data)
        return data
class ZMQRouter(object):
    """A class that interacts with a ZMQ router socket.

    a router socket is an asynchronous socket that can receive multiple requests
    without needing to issue a reply. This class encapsulates the ZMQ socket
    details and provide more convenient API to use.

    Parameters
    ----------
    port : int or None
        the port to connect to. If None, then a random port between min_port and max_port
        will be chosen.
    min_port : int
        the minimum random port number (inclusive).
    max_port : int
        the maximum random port number (exclusive).
    pipeline : int
        number of messages allowed in a pipeline. Only affects file
        transfer performance.
    log_file : str or None
        the log file. None to disable logging.
    """
    def __init__(self, port=None, min_port=5000, max_port=9999, pipeline=100, log_file=None):
        """Create a new ZMQRouter object.
        """
        context = zmq.Context.instance()
        # noinspection PyUnresolvedReferences
        self.socket = context.socket(zmq.ROUTER)
        # hwm (high-water mark) bounds the number of in-flight messages
        self.socket.hwm = pipeline
        if port is not None:
            self.socket.bind('tcp://*:%d' % port)
            self.port = port
        else:
            self.port = self.socket.bind_to_random_port('tcp://*', min_port=min_port, max_port=max_port)
        # address frames of the last sender; used as the default reply address
        self.addr = None
        self._log_file = log_file
        if self._log_file is not None:
            self._log_file = os.path.abspath(self._log_file)
            # If log file directory does not exist, create it
            log_dir = os.path.dirname(self._log_file)
            if not os.path.exists(log_dir):
                os.makedirs(log_dir)
            # clears any existing log
            if os.path.exists(self._log_file):
                os.remove(self._log_file)
    def get_port(self):
        """Returns the port number."""
        return self.port
    def is_closed(self):
        """Returns True if this router is closed."""
        return self.socket.closed
    def close(self):
        """Close the underlying socket."""
        self.socket.close()
    def log_msg(self, msg):
        """Log the given message to the log file, if logging is enabled."""
        if self._log_file is not None:
            io.write_file(self._log_file, '%s\n' % msg, append=True)
    def log_obj(self, msg, obj):
        """Log the given message and pretty-printed object, if logging is enabled."""
        if self._log_file is not None:
            obj_str = pprint.pformat(obj)
            io.write_file(self._log_file, '%s\n%s\n' % (msg, obj_str), append=True)
    def send_msg(self, msg, addr=None):
        """Sends a string message

        Parameters
        ----------
        msg : str
            the message to send.
        addr : str or None
            the address to send the object to. If None, send to last sender.
        """
        addr = addr or self.addr
        if addr is None:
            # no known recipient yet; log a warning instead of raising
            warn_msg = '*WARNING* No receiver address specified. Message not sent:\n%s' % msg
            self.log_msg(warn_msg)
        else:
            self.log_msg('sending message:\n%s' % msg)
            self.socket.send_multipart([addr, msg])
    def send_obj(self, obj, addr=None):
        """Sends a python object using YAML serialization and zlib compression.

        Parameters
        ----------
        obj : any
            the object to send.
        addr : str or None
            the address to send the object to. If None, send to last sender.
        """
        addr = addr or self.addr
        if addr is None:
            # no known recipient yet; log a warning instead of raising
            warn_msg = '*WARNING* No receiver address specified. Message not sent:'
            self.log_obj(warn_msg, obj)
        else:
            p = io.to_bytes(yaml.dump(obj))
            z = zlib.compress(p)
            self.log_obj('sending data:', obj)
            self.socket.send_multipart([addr, z])
    def poll_for_read(self, timeout):
        """Poll this socket for given timeout for read event.

        Parameters
        ----------
        timeout : int
            timeout in milliseconds.

        Returns
        -------
        status : int
            nonzero value means that this socket is ready for read.
        """
        return self.socket.poll(timeout=timeout)
    def recv_obj(self):
        """Receive a python object, serialized with YAML and compressed with zlib.

        Returns
        -------
        obj : any
            the received object.
        """
        # remember the sender address so replies default back to it
        self.addr, data = self.socket.recv_multipart()
        z = io.fix_string(zlib.decompress(data))
        # NOTE(review): yaml.Loader can construct arbitrary python objects;
        # assumes the peer is trusted -- confirm before exposing this socket.
        obj = yaml.load(z, Loader=yaml.Loader)
        self.log_obj('received data:', obj)
        return obj
    def get_last_sender_addr(self):
        """Returns the address of the sender of last received message.

        Returns
        -------
        addr : str
            the last sender address
        """
        return self.addr
================================================
FILE: bag/io/LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2018, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: bag/io/__init__.py
================================================
# -*- coding: utf-8 -*-
"""This package provides all IO related functionalities for BAG.
Most importantly, this module sorts out all the bytes v.s. unicode differences
and simplifies writing python2/3 compatible code.
"""
from .common import fix_string, to_bytes, set_encoding, get_encoding, \
set_error_policy, get_error_policy
from .sim_data import load_sim_results, save_sim_results, load_sim_file
from .file import read_file, read_resource, read_yaml, read_yaml_env, readlines_iter, \
write_file, make_temp_dir, open_temp, open_file, Pickle, Yaml
from . import process
__all__ = ['fix_string', 'to_bytes', 'set_encoding', 'get_encoding',
'set_error_policy', 'get_error_policy',
'load_sim_results', 'save_sim_results', 'load_sim_file',
'read_file', 'read_resource', 'read_yaml', 'read_yaml_env', 'readlines_iter',
'write_file', 'make_temp_dir', 'open_temp', 'open_file',
'Pickle', 'Yaml'
]
================================================
FILE: bag/io/common.py
================================================
# -*- coding: utf-8 -*-
"""This module contains some commonly used IO functions.
In particular, this module keeps track of BAG's system-wide encoding/decoding settings.
"""
# default BAG file encoding; used by fix_string()/to_bytes() and by file IO.
bag_encoding = 'utf-8'
# default codec error-handling policy (the "errors" argument of
# str.encode()/bytes.decode(); see the codecs module documentation).
bag_codec_error = 'replace'
def fix_string(obj):
    """Fix the given potential string object to ensure python 2/3 compatibility.

    If the given object is raw bytes, decode it into a string using the
    current BAG encoding and return it.  Any other object is returned
    unchanged.  This method is useful for writing python 2/3 compatible code.

    Parameters
    ----------
    obj :
        any python object.

    Returns
    -------
    val :
        the given object, or a decoded string if the given object is bytes.
    """
    # guard clause: non-bytes objects pass through untouched
    if not isinstance(obj, bytes):
        return obj
    return obj.decode(encoding=bag_encoding, errors=bag_codec_error)
def to_bytes(my_str):
    """Convert the given string to raw bytes.

    Parameters
    ----------
    my_str : string
        the string to encode to bytes.

    Returns
    -------
    val : bytes
        raw bytes of the string, encoded with the module-wide BAG
        encoding and error policy.
    """
    # str.encode() already returns bytes; the old bytes(...) wrapper was a
    # redundant extra copy left over from python 2 compatibility code.
    return my_str.encode(encoding=bag_encoding, errors=bag_codec_error)
def set_encoding(new_encoding):
    """Sets the BAG input/output encoding.

    Parameters
    ----------
    new_encoding : string
        the new encoding name.

    Raises
    ------
    TypeError
        if the given encoding name is not a string.
    """
    global bag_encoding
    if not isinstance(new_encoding, str):
        # raise a specific exception type instead of the bare Exception the
        # original used; TypeError is still caught by callers catching Exception.
        raise TypeError('encoding name must be string/unicode.')
    bag_encoding = new_encoding
def get_encoding():
    """Returns the BAG input/output encoding.

    Returns
    -------
    bag_encoding : unicode
        the name of the encoding currently in use.
    """
    # simple accessor for the module-wide setting
    return bag_encoding
def set_error_policy(new_policy):
    """Sets the policy applied on encoding/decoding errors.

    Parameters
    ----------
    new_policy : string
        the new error policy name; see the codecs package documentation
        for the accepted values.
    """
    global bag_codec_error
    bag_codec_error = new_policy
def get_error_policy():
    """Returns the current BAG encoding/decoding error policy.

    Returns
    -------
    policy : unicode
        the name of the error policy currently in use.
    """
    # simple accessor for the module-wide setting
    return bag_codec_error
================================================
FILE: bag/io/file.py
================================================
# -*- coding: utf-8 -*-
"""This module handles file related IO.
"""
from typing import Dict, Any
import os
import tempfile
import time
import pkg_resources
import codecs
import string
import yaml
import pickle
from .common import bag_encoding, bag_codec_error
class Pickle:
    """Namespace class grouping pickle-based save/load helpers.

    Provided so that pickle and YAML serialization expose a uniform
    save/load interface.
    """

    @staticmethod
    def save(obj, file, **kwargs) -> None:
        """Serialize obj to the given file path using the highest protocol."""
        with open(file, 'wb') as handle:
            pickle.dump(obj, handle, pickle.HIGHEST_PROTOCOL)

    @staticmethod
    def load(file, **kwargs):
        """Deserialize and return the object stored at the given file path."""
        with open(file, 'rb') as handle:
            return pickle.load(handle)
class Yaml:
    """Namespace class grouping YAML-based save/load helpers.

    For backward compatibility some module functions may overlap with this.
    """

    @staticmethod
    def save(obj, file, **kwargs) -> None:
        """Dump obj to the given file path in YAML format."""
        with open(file, 'w') as handle:
            yaml.dump(obj, handle)

    @staticmethod
    def load(file, **kwargs):
        """Parse and return the YAML content of the given file path."""
        # NOTE(review): yaml.Loader can construct arbitrary Python objects;
        # only load trusted files.
        with open(file, 'r') as handle:
            return yaml.load(handle, Loader=yaml.Loader)
def open_file(fname, mode):
    """Open a file that reads/writes strings with the BAG system encoding.

    Use this method if you need to have a file handle.

    Parameters
    ----------
    fname : string
        the file name.
    mode : string
        the mode, either 'r', 'w', or 'a'.

    Returns
    -------
    file_obj : file
        a file object that reads/writes strings with the BAG system encoding.
    """
    if mode not in ('r', 'w', 'a'):
        raise ValueError("Only supports 'r', 'w', or 'a' mode.")
    return open(fname, mode, encoding=bag_encoding, errors=bag_codec_error)
def read_file(fname):
    """Return the entire content of the given file as a string.

    Parameters
    ----------
    fname : string
        the file name.

    Returns
    -------
    content : unicode
        the content as a unicode string.
    """
    with open_file(fname, 'r') as handle:
        return handle.read()
def readlines_iter(fname):
    """Iterate over lines in a file.

    Parameters
    ----------
    fname : string
        the file name.

    Yields
    ------
    line : unicode
        a line in the file.
    """
    with open_file(fname, 'r') as handle:
        yield from handle
def read_yaml_env(fname):
    # type: (str) -> Dict[str, Any]
    """Parse a YAML file after substituting environment variables into it.

    Parameters
    ----------
    fname : str
        yaml file name.

    Returns
    -------
    table : Dict[str, Any]
        the yaml file as a dictionary.
    """
    raw = read_file(fname)
    # replace $VAR / ${VAR} references with environment variable values
    # (string.Template.substitute raises KeyError on an unset variable)
    expanded = string.Template(raw).substitute(os.environ)
    return yaml.load(expanded, Loader=yaml.Loader)
def read_yaml(fname):
    """Parse the given file using YAML and return the result.

    Parameters
    ----------
    fname : string
        the file name.

    Returns
    -------
    content : Any
        the object returned by YAML.
    """
    with open_file(fname, 'r') as handle:
        return yaml.load(handle, Loader=yaml.Loader)
def read_resource(package, fname):
    """Read the given package resource file and return its content as a string.

    Parameters
    ----------
    package : string
        the package name.
    fname : string
        the resource file name.

    Returns
    -------
    content : unicode
        the content as a unicode string.
    """
    # resource_string returns raw bytes; decode with the BAG system encoding.
    raw = pkg_resources.resource_string(package, fname)
    return raw.decode(encoding=bag_encoding, errors=bag_codec_error)
def write_file(fname, content, append=False, mkdir=True):
    """Write the given content to file.

    Parameters
    ----------
    fname : string
        the file name.
    content : unicode
        the unicode string to write to file.
    append : bool
        True to append instead of overwrite.
    mkdir : bool
        If True, will create parent directories if they don't exist.
    """
    if mkdir:
        fname = os.path.abspath(fname)
        os.makedirs(os.path.dirname(fname), exist_ok=True)
    with open_file(fname, 'a' if append else 'w') as handle:
        handle.write(content)
def make_temp_dir(prefix, parent_dir=None):
    """Create a new timestamped temporary directory and return its path.

    Parameters
    ----------
    prefix : string
        the directory prefix.
    parent_dir : string
        the parent directory; falls back to the system temp directory.
    """
    stamped = prefix + time.strftime("_%Y%m%d_%H%M%S")
    root = parent_dir if parent_dir else tempfile.gettempdir()
    return tempfile.mkdtemp(prefix=stamped, dir=root)
def open_temp(**kwargs):
    """Open a new timestamped temporary file for writing with unicode interface.

    Parameters
    ----------
    **kwargs
        the tempfile keyword arguments.  See documentation for
        :func:`tempfile.NamedTemporaryFile`.

    Returns
    -------
    file : file
        the opened file that accepts unicode input.
    """
    stamp = time.strftime("_%Y%m%d_%H%M%S")
    # append the timestamp to any user-supplied prefix
    kwargs['prefix'] = kwargs.get('prefix', '') + stamp
    handle = tempfile.NamedTemporaryFile(**kwargs)
    # wrap the binary file object so callers can write str directly
    return codecs.getwriter(bag_encoding)(handle, errors=bag_codec_error)
================================================
FILE: bag/io/gui.py
================================================
# -*- coding: utf-8 -*-
import os
import sys
import subprocess
import json
import select
import PyQt5.QtWidgets as QtWidgets
import PyQt5.QtCore as QtCore
from .file import write_file, open_file
from .common import to_bytes
# StdinThread below polls sys.stdin with select.select(), which only works on
# POSIX file descriptors; refuse to import this module on other platforms.
if os.name != 'posix':
    raise Exception('bag.io.gui module current only works for POSIX systems.')
class StdinThread(QtCore.QThread):
    """A QT worker thread that reads stdin.

    Polls stdin with a 50 ms select() timeout so the loop can notice the
    ``stop`` flag promptly; each received line is forwarded through the
    ``update`` signal.  The line 'exit' (or any read failure) stops the loop.
    """

    # signal carrying one stripped line read from stdin (or 'exit')
    update = QtCore.pyqtSignal('QString')

    def __init__(self, parent):
        QtCore.QThread.__init__(self, parent=parent)
        # set to True (by this thread or by the owner) to stop the polling loop
        self.stop = False

    def run(self):
        while not self.stop:
            try:
                stdin, _, _ = select.select([sys.stdin], [], [], 0.05)
                if stdin:
                    cmd = sys.stdin.readline().strip()
                else:
                    cmd = None
            except:
                # any failure reading stdin (e.g. a closed pipe) is treated
                # as a request to exit; deliberate best-effort catch-all
                cmd = 'exit'
            if cmd is not None:
                self.stop = (cmd == 'exit')
                self.update.emit(cmd)
class LogWidget(QtWidgets.QFrame):
    """A Logger window widget.

    Note: due to QPlainTextEdit always adding an extra newline when calling
    appendPlainText(), we keep track of internal buffer and only print output
    one line at a time. This may cause some message to not display immediately.
    """

    def __init__(self, parent=None):
        QtWidgets.QFrame.__init__(self, parent=parent)
        # read-only text area displaying the log content
        self.logger = QtWidgets.QPlainTextEdit(parent=self)
        self.logger.setReadOnly(True)
        self.logger.setLineWrapMode(QtWidgets.QPlainTextEdit.NoWrap)
        self.logger.setMinimumWidth(1100)
        # holds a partial line until its trailing newline arrives
        self.buffer = ''
        self.clear_button = QtWidgets.QPushButton('Clear Log', parent=self)
        self.clear_button.clicked.connect(self.clear_log)
        self.save_button = QtWidgets.QPushButton('Save Log As...', parent=self)
        self.save_button.clicked.connect(self.save_log)
        self.lay = QtWidgets.QVBoxLayout(self)
        self.lay.addWidget(self.logger)
        self.lay.addWidget(self.clear_button)
        self.lay.addWidget(self.save_button)

    def clear_log(self):
        """Erase the displayed log text and the pending partial-line buffer."""
        self.logger.setPlainText('')
        self.buffer = ''

    def save_log(self):
        """Prompt for a file name and save the displayed log text to it."""
        root_dir = os.getcwd()
        fname, _ = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File', root_dir)
        if fname:
            write_file(fname, self.logger.toPlainText() + '\n')

    def print_file(self, file_obj):
        """Append all complete lines readable from file_obj to the display."""
        # this code converts all types of newlines (such as '\r\n') to '\n',
        # and make sure any ending newlines are preserved.
        for line in file_obj:
            if self.buffer:
                # prepend the previously buffered partial line
                line = self.buffer + line
                self.buffer = ''
            if line.endswith('\n'):
                self.logger.appendPlainText(line[:-1])
            else:
                # incomplete line: stash it until the rest arrives
                self.buffer = line
class LogViewer(QtWidgets.QWidget):
    """A Simple window to see process log in real time..

    Commands arrive as JSON lines on stdin (via StdinThread): ['add', tag,
    file] registers a log file, ['remove', tag] unregisters it, and 'exit'
    closes the window.  A QFileSystemWatcher refreshes the display whenever
    the selected log file changes.
    """

    def __init__(self):
        QtWidgets.QWidget.__init__(self)
        # combo box label
        self.label = QtWidgets.QLabel('Log File: ', parent=self)
        # populate log selection combo box.
        self.combo_box = QtWidgets.QComboBox(parent=self)
        # log_files[i] is the file displayed when combo box index i is selected
        self.log_files = []
        # open file handle of the currently displayed log, or None
        self.reader = None
        self.logger = LogWidget(parent=self)
        # setup GUI
        self.setWindowTitle('BAG Simulation Log Viewer')
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
        self.layout = QtWidgets.QGridLayout(self)
        self.layout.addWidget(self.label, 0, 0, alignment=QtCore.Qt.AlignRight)
        self.layout.addWidget(self.combo_box, 0, 1, alignment=QtCore.Qt.AlignLeft)
        self.layout.addWidget(self.logger, 1, 0, -1, -1)
        self.layout.setRowStretch(0, 0.0)
        self.layout.setRowStretch(1, 1.0)
        self.layout.setColumnStretch(0, 0.0)
        self.layout.setColumnStretch(1, 0.0)
        # setup file watcher
        self.cur_paths = None
        self.watcher = QtCore.QFileSystemWatcher(parent=self)
        # setup signals
        self.watcher.fileChanged.connect(self.update_logfile)
        self.combo_box.currentIndexChanged.connect(self.change_log)
        # start thread
        self.thread = StdinThread(self)
        self.thread.update.connect(self.parse_cmd)
        self.thread.start()

    def closeEvent(self, evt):
        """Stop the stdin polling thread before the window is destroyed."""
        if not self.thread.stop:
            self.thread.stop = True
            self.thread.wait()
        QtWidgets.QWidget.closeEvent(self, evt)

    @QtCore.pyqtSlot('QString')
    def parse_cmd(self, cmd):
        """Handle one line from stdin: 'exit' or a JSON add/remove command."""
        if cmd == 'exit':
            self.close()
        else:
            try:
                cmd = json.loads(cmd)
                if cmd[0] == 'add':
                    self.add_log(cmd[1], cmd[2])
                elif cmd[0] == 'remove':
                    self.remove_log(cmd[1])
            except:
                # malformed commands are silently ignored (best effort)
                pass

    @QtCore.pyqtSlot('int')
    def change_log(self, new_idx):
        """Switch the display to the log file at the given combo box index."""
        # print('log change called, switching to index %d' % new_idx)
        if self.cur_paths is not None:
            self.watcher.removePaths(self.cur_paths)
        self.logger.clear_log()
        if self.reader is not None:
            self.reader.close()
            self.reader = None
        if new_idx >= 0:
            fname = os.path.abspath(self.log_files[new_idx])
            dname = os.path.dirname(fname)
            self.reader = open_file(fname, 'r')
            self.logger.print_file(self.reader)
            # watch both the file and its directory
            self.cur_paths = [dname, fname]
            self.watcher.addPaths(self.cur_paths)

    @QtCore.pyqtSlot('QString')
    def update_logfile(self, fname):
        """Append newly written content when the watched log file changes."""
        # print('filechanged called, fname = %s' % fname)
        if self.reader is not None:
            self.logger.print_file(self.reader)

    def remove_log(self, log_tag):
        """Remove the log entry with the given tag, if present."""
        idx = self.combo_box.findText(log_tag)
        if idx >= 0:
            del self.log_files[idx]
            self.combo_box.removeItem(idx)

    def add_log(self, log_tag, log_file):
        """Register a log file under the given tag, replacing any old entry."""
        self.remove_log(log_tag)
        if os.path.isfile(log_file):
            self.log_files.append(log_file)
            self.combo_box.addItem(log_tag)
def app_start():
    """Create the Qt application and run the log viewer until it is closed."""
    application = QtWidgets.QApplication([])
    viewer = LogViewer()
    # keep a reference on the application object so the window is not
    # garbage collected while the event loop runs
    application.window_reference = viewer
    viewer.show()
    application.exec_()
def start_viewer():
    """Launch the log viewer GUI in a detached subprocess.

    Returns
    -------
    proc : subprocess.Popen
        handle to the viewer process; commands are sent through its stdin.
    """
    cmd = [sys.executable, '-m', 'bag.io.gui']
    # use subprocess.DEVNULL instead of open(os.devnull, 'w'); the original
    # file handle was never closed (resource leak).
    proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.DEVNULL,
                            stderr=subprocess.STDOUT,
                            preexec_fn=os.setpgrp)
    return proc
def add_log(proc, tag, fname):
    """Ask the viewer process to start displaying the given log file.

    Returns True if the command was sent, False if the viewer has exited
    (None when proc is None).
    """
    if proc is not None:
        if proc.poll() is not None or proc.stdin.closed:
            # process finished
            return False
        payload = json.dumps(['add', tag, fname]) + '\n'
        proc.stdin.write(to_bytes(payload))
        proc.stdin.flush()
        return True
def remove_log(proc, tag):
    """Ask the viewer process to stop displaying the log with the given tag.

    Returns True if the command was sent, False if the viewer has exited
    (None when proc is None).
    """
    if proc is not None:
        if proc.poll() is not None or proc.stdin.closed:
            # process finished
            return False
        payload = json.dumps(['remove', tag]) + '\n'
        proc.stdin.write(to_bytes(payload))
        proc.stdin.flush()
        return True
def close(proc):
    """Request that the viewer process exit by closing its stdin pipe."""
    if proc is None or proc.poll() is not None:
        # nothing to do: no process, or it already terminated
        return
    proc.stdin.close()
if __name__ == '__main__':
app_start()
================================================
FILE: bag/io/process.py
================================================
# -*- coding: utf-8 -*-
"""This module provides functions to help you run external processes.
"""
import os
import sys
from .common import bag_encoding, bag_codec_error
from .file import write_file
import multiprocessing
# noinspection PyCompatibility
import concurrent.futures
if sys.version_info[0] < 3:
# use subprocess32 for timeout feature.
if os.name != 'posix':
raise Exception('bag.io.process module current only works for POSIX systems.')
# noinspection PyUnresolvedReferences,PyPackageRequirements
import subprocess32 as subprocess
else:
import subprocess
def run_proc_with_quit(proc_id, quit_dict, args, logfile=None, append=False, env=None, cwd=None):
    """Run a subprocess while polling quit_dict for a cancellation request.

    Output is redirected to ``logfile`` (or discarded when None).  While the
    process runs, the presence of ``proc_id`` in ``quit_dict`` triggers a
    terminate(), then a kill() if the process does not exit within
    ``quit_dict[proc_id]`` seconds.  Returns the process return code, or
    None if the task was cancelled before the process was launched.
    """
    log_path = os.devnull if logfile is None else logfile
    with open(log_path, 'ab' if append else 'wb') as log_handle:
        if proc_id in quit_dict:
            # cancelled before launch
            return None
        child = subprocess.Popen(args, stdout=log_handle, stderr=subprocess.STDOUT,
                                 env=env, cwd=cwd)
        result = None
        kill_stage = 0
        wait_time = 0.05
        while result is None and kill_stage <= 2:
            try:
                result = child.wait(timeout=wait_time)
            except subprocess.TimeoutExpired:
                if proc_id in quit_dict:
                    if kill_stage == 0:
                        # first ask politely, then wait the requested timeout
                        child.terminate()
                        wait_time = quit_dict[proc_id]
                    elif kill_stage == 1:
                        child.kill()
                    kill_stage += 1
        return child.returncode
def run_and_wait(args, timeout=None, logfile=None, append=False,
                 env=None, cwd=None):
    """Run a command in a subprocess and block until it finishes.

    Parameters
    ----------
    args : string or list[string]
        the command to run.  Should be either a command string or a list
        of command string and its arguments as strings.  A list is
        preferred; see Python subprocess documentation.
    timeout : float or None
        the amount of time to wait for the command to finish, in seconds.
        If None, waits indefinitely.
    logfile : string or None
        If given, stdout and stderr will be written to this file.
    append : bool
        True to append to the logfile.  Defaults to False.
    env : dict[string, any]
        If not None, environment variables of the subprocess will be set
        according to this dictionary instead of inheriting from current
        process.
    cwd : string or None
        The current working directory of the subprocess.

    Returns
    -------
    output : string
        the standard output and standard error from the command.

    Raises
    ------
    subprocess.CalledProcessError
        if any error occurred in the subprocess.
    """
    raw = subprocess.check_output(args, stderr=subprocess.STDOUT,
                                  timeout=timeout, env=env, cwd=cwd)
    text = raw.decode(encoding=bag_encoding, errors=bag_codec_error)
    if logfile is not None:
        write_file(logfile, text, append=append)
    return text
class ProcessManager(object):
    """A class that manages subprocesses.

    This class is for starting processes that you do not need to wait on,
    and allows you to query for their status or terminate/kill them if needed.

    Parameters
    ----------
    max_workers : int or None
        number of maximum allowed subprocesses.  If None, defaults to system
        CPU count.
    cancel_timeout : float or None
        Number of seconds to wait for a process to terminate once SIGTERM or
        SIGKILL is issued.  Defaults to 10 seconds.
    """

    def __init__(self, max_workers=None, cancel_timeout=10.0):
        if max_workers is None:
            max_workers = multiprocessing.cpu_count()
        if cancel_timeout is None:
            cancel_timeout = 10.0
        # thread pool running the process-monitoring functions
        self._exec = concurrent.futures.ThreadPoolExecutor(max_workers=max_workers)
        self._cancel_timeout = cancel_timeout
        # proc_id -> Future of the monitoring thread
        self._future_dict = {}
        # proc_id -> termination timeout; presence signals "please quit"
        self._quit_dict = {}

    def close(self, timeout=10.0):
        """Cancel all processes.

        Parameters
        ----------
        timeout : float
            time to wait in seconds for each process to terminate.
        """
        # BUG FIX: cancel() deletes entries from _future_dict; iterating the
        # live keys view while deleting raises "dictionary changed size
        # during iteration" in Python 3, so iterate over a snapshot.
        for proc_id in list(self._future_dict.keys()):
            self.cancel(proc_id, timeout=timeout)
        self._exec.shutdown()
        self._quit_dict.clear()
        self._future_dict.clear()

    def _new_proc_id(self, basename):
        """Return a process ID based on basename that is not currently in use."""
        proc_id = basename or 'proc'
        cur_idx = 1
        while proc_id in self._future_dict:
            proc_id = '%s_%d' % (proc_id, cur_idx)
            cur_idx += 1
        return proc_id

    def new_thread(self, fun, basename=None, callback=None):
        """Put a new custom task in queue.

        Execute the given function in a thread asynchronously.  The given
        function must take two arguments.  The first argument is a unique
        string that represents this task, and the second argument is a
        dictionary.  The dictionary will map the unique string to a timeout
        (in second) if this task is being cancelled.  The function should
        periodically check the dictionary and terminate gracefully.  Before
        function returns, it should also delete the unique string from
        dictionary if it exists.

        Parameters
        ----------
        fun : callable
            the function to execute in a thread, as described above.
        basename : string or None
            If given, this will be used as the basis for generating the
            unique process ID.
        callback : callable
            If given, this function will automatically be executed when the
            process finished.  This function should take a single argument,
            which is a Future object that returns the return code of the
            process.

        Returns
        -------
        proc_id : string
            a unique string representing this process.  Can be used later
            to query process status or cancel process.
        """
        proc_id = self._new_proc_id(basename)
        future = self._exec.submit(fun, proc_id, self._quit_dict)
        if callback is not None:
            future.add_done_callback(callback)
        self._future_dict[proc_id] = future
        return proc_id

    def new_process(self, args, basename=None, logfile=None, append=False,
                    env=None, cwd=None, callback=None):
        """Put a new process in queue.

        When the process is done, its return code will be returned.

        Parameters
        ----------
        args : string or list[string]
            the command to run as a string or list of string arguments.  See
            Python subprocess documentation.  list of string format is
            preferred.
        basename : string or None
            If given, this will be used as the basis for generating the
            unique process ID.
        logfile : string or None
            If given, stdout and stderr will be written to this file.
            Otherwise, they will be redirected to `os.devnull`.
        append : bool
            True to append to ``logfile`` instead of overwritng it.
        env : dict[string, string] or None
            If given, environment variables of the process will be set
            according to this dictionary.
        cwd : string or None
            current working directory of the process.
        callback : callable
            If given, this function will automatically be executed when the
            process finished.  This function should take a single argument,
            which is a Future object that returns the return code of the
            process.

        Returns
        -------
        proc_id : string
            a unique string representing this process.  Can be used later
            to query process status or cancel process.
        """
        proc_id = self._new_proc_id(basename)
        future = self._exec.submit(self._start_cmd, args, proc_id,
                                   logfile=logfile, append=append, env=env, cwd=cwd)
        if callback is not None:
            future.add_done_callback(callback)
        self._future_dict[proc_id] = future
        return proc_id

    @staticmethod
    def _get_output(future, timeout=None):
        """Get output from future.  Return None when exception."""
        try:
            if future.exception(timeout=timeout) is None:
                return future.result()
            else:
                return None
        except concurrent.futures.CancelledError:
            return None

    def cancel(self, proc_id, timeout=None):
        """Cancel the given process.

        If the process haven't started, this method prevents it from started.
        Otherwise, we first send a SIGTERM signal to kill the process.  If
        after ``timeout`` seconds the process is still alive, we will send a
        SIGKILL signal.  If after another ``timeout`` seconds the process is
        still alive, an Exception will be raised.

        Parameters
        ----------
        proc_id : string
            the process ID to cancel.
        timeout : float or None
            number of seconds to wait for cancellation.  If None, use
            default timeout.

        Returns
        -------
        output :
            output of the thread if it successfully terminates.
            Otherwise, return None.
        """
        if timeout is None:
            timeout = self._cancel_timeout
        future = self._future_dict.get(proc_id, None)
        if future is None:
            return None
        if future.done():
            # process already done, return status.
            del self._future_dict[proc_id]
            return self._get_output(future)
        if future.cancel():
            # we cancelled process before it made into the thread pool.
            del self._future_dict[proc_id]
            return None
        else:
            # inform thread it should try to quit.
            self._quit_dict[proc_id] = timeout
            try:
                # allow time for terminate, kill, and final wait
                output = self._get_output(future, timeout=4 * timeout)
                del self._future_dict[proc_id]
                return output
            except concurrent.futures.TimeoutError:
                # shouldn't get here, but we did
                print("*WARNING* worker thread refuse to die...")
                del self._future_dict[proc_id]
                return None

    def done(self, proc_id):
        """Returns True if the given process finished or is cancelled successfully.

        Parameters
        ----------
        proc_id : string
            the process ID.

        Returns
        -------
        done : bool
            True if the process is cancelled or completed.
        """
        return self._future_dict[proc_id].done()

    def wait(self, proc_id, timeout=None, cancel_timeout=None):
        """Wait for the given process to finish, then return its return code.

        If ``timeout`` is None, waits indefinitely.  Otherwise, if after
        ``timeout`` seconds the process is still running, a
        :class:`concurrent.futures.TimeoutError` will be raised.
        However, it is safe to catch this error and call wait again.

        If Ctrl-C is pressed before process finish or before timeout
        is reached, the process will be cancelled.

        Parameters
        ----------
        proc_id : string
            the process ID.
        timeout : float or None
            number of seconds to wait.  If None, waits indefinitely.
        cancel_timeout : float or None
            number of seconds to wait for process cancellation.  If None,
            use default timeout.

        Returns
        -------
        output :
            output of the thread if it successfully terminates.  Otherwise
            return None.
        """
        if cancel_timeout is None:
            cancel_timeout = self._cancel_timeout
        future = self._future_dict[proc_id]
        try:
            output = future.result(timeout=timeout)
            # remove future from dictionary.
            del self._future_dict[proc_id]
            return output
        except KeyboardInterrupt:
            # cancel the process
            print('KeyboardInterrupt received, cancelling %s...' % proc_id)
            return self.cancel(proc_id, timeout=cancel_timeout)

    def _start_cmd(self, args, proc_id, logfile=None, append=False, env=None, cwd=None):
        """The function that actually starts the subprocess.  Executed by thread."""
        retcode = run_proc_with_quit(proc_id, self._quit_dict, args, logfile=logfile,
                                     append=append, env=env, cwd=cwd)
        if proc_id in self._quit_dict:
            del self._quit_dict[proc_id]
        return retcode
================================================
FILE: bag/io/sim_data.py
================================================
# -*- coding: utf-8 -*-
"""This module handles simulation data related IO.
Note : when reading data files, we use Numpy to handle the encodings,
so BAG encoding settings will not apply.
"""
import os
import glob
import numpy as np
import h5py
from .common import bag_encoding, bag_codec_error
illegal_var_name = ['sweep_params']
class SweepArray(np.ndarray):
    """Subclass of numpy array that adds sweep parameters attribute.

    ``sweep_params`` is the list of sweep variable names corresponding to the
    array dimensions; it is carried through views, slices and pickling.
    """

    def __new__(cls, data, sweep_params=None):
        # Input array is an already formed ndarray instance
        # We first cast to be our class type
        obj = np.asarray(data).view(cls)
        # add the new attribute to the created instance
        obj.sweep_params = sweep_params
        # Finally, we must return the newly created object:
        return obj

    def __array_finalize__(self, obj):
        # called by numpy for every new instance (views/slices included);
        # propagate sweep_params from the source array when present.
        # see InfoArray.__array_finalize__ for comments
        if obj is None:
            return
        self.sweep_params = getattr(obj, 'sweep_params', None)

    def __reduce__(self):
        # pickling support: append sweep_params to the ndarray state tuple
        # Get the parent's __reduce__ tuple
        pickled_state = super(SweepArray, self).__reduce__()
        # Create our own tuple to pass to __setstate__
        new_state = pickled_state[2] + (self.sweep_params,)
        # Return a tuple that replaces the parent's __setstate__ tuple with our own
        return pickled_state[0], pickled_state[1], new_state

    # noinspection PyMethodOverriding
    def __setstate__(self, state):
        self.sweep_params = state[-1]  # Set the info attribute
        # Call the parent's __setstate__ with the other tuple elements.
        # noinspection PyArgumentList
        super(SweepArray, self).__setstate__(state[0:-1])
def _get_sweep_params(fname):
"""Parse the sweep information file and reverse engineer sweep parameters.
Parameters
----------
fname : str
the sweep information file name.
Returns
-------
swp_list : list[str]
list of sweep parameter names. index 0 is the outer-most loop.
values_list : list[list[float or str]]
list of values list for each sweep parameter.
"""
mat = np.genfromtxt(fname, dtype=np.unicode_)
header = mat[0, :]
data = mat[1:, :]
# eliminate same data
idx_list = []
for idx in range(len(header)):
bool_vec = data[:, idx] == data[0, idx] # type: np.ndarray
if not np.all(bool_vec):
idx_list.append(idx)
header = header[idx_list]
data = data[:, idx_list]
# find the first index of last element of each column.
last_first_idx = [np.where(data[:, idx] == data[-1, idx])[0][0] for idx in range(len(header))]
# sort by first index of last element; the column where the last element
# appears the earliest is the inner most loop.
order_list = np.argsort(last_first_idx) # type: np.ndarray
# get list of values
values_list = []
skip_len = 1
for idx in order_list:
end_idx = last_first_idx[idx] + 1
values = data[0:end_idx:skip_len, idx]
if header[idx] != 'corner':
values = values.astype(np.float)
skip_len *= len(values)
values_list.append(values)
swp_list = header[order_list][::-1].tolist()
values_list.reverse()
return swp_list, values_list
def load_sim_results(save_dir):
    """Load exported simulation results from the given directory.

    Parameters
    ----------
    save_dir : str
        the save directory path.

    Returns
    -------
    results : dict[str, any]
        the simulation data dictionary.  Most keys map a sweep parameter or
        an output signal name to its data as a numpy array.  The special key
        'sweep_params' maps each output name to the list of sweep parameters
        of that output.
    """
    if not save_dir:
        return None
    results = {}
    sweep_params = {}
    # load the master sweep parameter values
    top_swp_list, values_list = _get_sweep_params(os.path.join(save_dir, 'sweep.info'))
    top_shape = []
    for par, vals in zip(top_swp_list, values_list):
        results[par] = vals
        top_shape.append(len(vals))
    for swp_fname in glob.glob(os.path.join(save_dir, '*.sweep')):
        base_name = os.path.basename(swp_fname).split('.')[0]
        data_fname = os.path.join(save_dir, '%s.data' % base_name)
        try:
            arr = np.loadtxt(data_fname)
        except ValueError:
            # try loading complex
            arr = np.loadtxt(data_fname, dtype=complex)
        # read the per-output inner sweep variable names
        with open(swp_fname, 'r', encoding='utf-8') as f:
            swp_list = [str(line.strip()) for line in f]
        # start from a copy of the master sweep list and shape
        cur_swp_list = list(top_swp_list)
        cur_shape = list(top_shape)
        for par in swp_list:
            if par not in results:
                results[par] = np.loadtxt(os.path.join(save_dir, '%s.info' % par))
            # only sweeps with more than one element add a dimension
            if results[par].shape:
                cur_swp_list.append(par)
                cur_shape.append(results[par].shape[0])
        # sanity check
        if base_name in results:
            raise Exception('Error: output named %s already in results' % base_name)
        # reshape flat data to the sweep dimensions
        results[base_name] = SweepArray(arr.reshape(cur_shape), cur_swp_list)
        # record sweep parameters for this data
        sweep_params[base_name] = cur_swp_list
    if 'sweep_params' in results:
        raise Exception('illegal output name: sweep_params')
    results['sweep_params'] = sweep_params
    return results
def save_sim_results(results, fname, compression='gzip'):
    """Saves the given simulation results dictionary as a HDF5 file.

    Parameters
    ----------
    results : dict[string, any]
        the results dictionary.
    fname : str
        the file to save results to.
    compression : str
        HDF5 compression method.  Defaults to 'gzip'.
    """
    # create directory if it didn't exist.  exist_ok avoids a race with other
    # processes creating the same directory (and matches bag.io.file.write_file).
    fname = os.path.abspath(fname)
    os.makedirs(os.path.dirname(fname), exist_ok=True)
    sweep_info = results['sweep_params']
    with h5py.File(fname, 'w') as f:
        for name, swp_vars in sweep_info.items():
            # store data
            data = np.asarray(results[name])
            if not data.shape:
                # scalar datasets do not support compression
                dset = f.create_dataset(name, data=data)
            else:
                dset = f.create_dataset(name, data=data, compression=compression)
            # h5py workaround: need to explicitly store unicode
            dset.attrs['sweep_params'] = [swp.encode(encoding=bag_encoding, errors=bag_codec_error)
                                          for swp in swp_vars]
            # store sweep parameter values
            for var in swp_vars:
                if var not in f:
                    swp_data = results[var]
                    # np.unicode_ was removed in NumPy 2.0; np.str_ is the
                    # supported name for the unicode string dtype.
                    if np.issubdtype(swp_data.dtype, np.str_):
                        # we need to explicitly encode unicode strings to bytes
                        swp_data = [v.encode(encoding=bag_encoding, errors=bag_codec_error)
                                    for v in swp_data]
                    f.create_dataset(var, data=swp_data, compression=compression)
def load_sim_file(fname):
    """Read simulation results from HDF5 file.

    Parameters
    ----------
    fname : str
        the file to read.

    Returns
    -------
    results : dict[str, any]
        the result dictionary.
    """
    if not os.path.isfile(fname):
        raise ValueError('%s is not a file.' % fname)
    results = {}
    sweep_params = {}
    with h5py.File(fname, 'r') as f:
        for name in f:
            dset = f[name]
            dset_data = dset[()]
            if np.issubdtype(dset.dtype, np.bytes_):
                # decode byte values back into a unicode array
                dset_data = np.array([v.decode(encoding=bag_encoding, errors=bag_codec_error)
                                      for v in dset_data])
            if 'sweep_params' in dset.attrs:
                # output signal: recover its sweep variable names
                swp_names = [swp.decode(encoding=bag_encoding, errors=bag_codec_error)
                             for swp in dset.attrs['sweep_params']]
                results[name] = SweepArray(dset_data, swp_names)
                sweep_params[name] = swp_names
            else:
                results[name] = dset_data
    results['sweep_params'] = sweep_params
    return results
================================================
FILE: bag/io/template.py
================================================
# -*- coding: utf-8 -*-
"""This module defines methods to create files from templates.
"""
from jinja2 import Environment, PackageLoader, select_autoescape
def new_template_env(parent_package, tmp_folder):
    # type: (str, str) -> Environment
    """Return a jinja2 Environment loading templates from the given package folder."""
    loader = PackageLoader(parent_package, package_path=tmp_folder)
    escape = select_autoescape(default_for_string=False)
    return Environment(trim_blocks=True,
                       lstrip_blocks=True,
                       keep_trailing_newline=True,
                       autoescape=escape,
                       loader=loader,
                       enable_async=False,
                       )
================================================
FILE: bag/layout/LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2018, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: bag/layout/__init__.py
================================================
# -*- coding: utf-8 -*-
"""This package contains code for templated based layout.
"""
from .core import BagLayout, TechInfo
from .routing import RoutingGrid
from .template import TemplateDB
from . import util
__all__ = ['BagLayout', 'TechInfo',
'RoutingGrid',
'TemplateDB',
]
================================================
FILE: bag/layout/core.py
================================================
# -*- coding: utf-8 -*-
"""This module defines the base template class.
"""
from typing import Dict, List, Iterator, Tuple, Optional, Union, Callable, Any
import abc
import math
import numpy as np
from itertools import chain
from .. import io
from .util import BBox
from .objects import Rect, Via, ViaInfo, Instance, InstanceInfo, PinInfo
from .objects import Path, Polygon, Blockage, Boundary
from ..util.search import BinaryIterator
# try to import cybagoa module
try:
import cybagoa
except ImportError:
cybagoa = None
class TechInfo(object, metaclass=abc.ABCMeta):
"""A base class that create vias.
This class provides the API for making vias. Each process should subclass this class and
implement the make_via method.
Parameters
----------
res : float
the grid resolution of this technology.
layout_unit : float
the layout unit, in meters.
via_tech : string
the via technology library name. This is usually the PDK library name.
process_params : dict[str, any]
process specific parameters.
Attributes
----------
tech_params : dict[str, any]
technology specific parameters.
"""
def __init__(self, res, layout_unit, via_tech, process_params):
self._resolution = res
self._layout_unit = layout_unit
self._via_tech = via_tech
self.tech_params = process_params
@abc.abstractmethod
def get_well_layers(self, sub_type):
# type: (str) -> List[Tuple[str, str]]
"""Returns a list of well layers associated with the given substrate type."""
return []
@abc.abstractmethod
def get_implant_layers(self, mos_type, res_type=None):
# type: (str, Optional[str]) -> List[Tuple[str, str]]
"""Returns a list of implant layers associated with the given device type.
Parameters
----------
mos_type : str
one of 'nch', 'pch', 'ntap', or 'ptap'
res_type : Optional[str]
If given, the return layers will be for the substrate of the given resistor type.
Returns
-------
imp_list : List[Tuple[str, str]]
list of implant layers.
"""
return []
    @abc.abstractmethod
    def get_threshold_layers(self, mos_type, threshold, res_type=None):
        # type: (str, str, Optional[str]) -> List[Tuple[str, str]]
        """Returns a list of threshold layers.

        Parameters
        ----------
        mos_type : str
            the device type.
        threshold : str
            the threshold flavor name.
        res_type : Optional[str]
            If given, the return layers will be for the substrate of the given resistor type.

        Returns
        -------
        thres_list : List[Tuple[str, str]]
            list of threshold (layer, purpose) pairs.
        """
        return []
    @abc.abstractmethod
    def get_exclude_layer(self, layer_id):
        # type: (int) -> Tuple[str, str]
        """Returns the metal exclude layer.

        Parameters
        ----------
        layer_id : int
            the metal layer ID.

        Returns
        -------
        excl_layer : Tuple[str, str]
            the exclude (layer, purpose) pair for the given metal layer.
        """
        return '', ''
    @abc.abstractmethod
    def get_dnw_margin_unit(self, dnw_mode):
        # type: (str) -> int
        """Returns the required DNW margin given the DNW mode.

        Subclasses must implement this method.

        Parameters
        ----------
        dnw_mode : str
            the DNW mode string.

        Returns
        -------
        dnw_margin : int
            the DNW margin in resolution units.
        """
        return 0
    @abc.abstractmethod
    def get_dnw_layers(self):
        # type: () -> List[Tuple[str, str]]
        """Returns a list of layers that defines DNW.

        Returns
        -------
        lay_list : List[Tuple[str, str]]
            list of deep N-well (layer, purpose) pairs.
        """
        return []
    @abc.abstractmethod
    def get_res_metal_layers(self, layer_id):
        # type: (int) -> List[Tuple[str, str]]
        """Returns a list of layers associated with the given metal resistor.

        Parameters
        ----------
        layer_id : int
            the metal layer ID.

        Returns
        -------
        res_list : List[Tuple[str, str]]
            list of metal resistor (layer, purpose) pairs.
        """
        return []
    @abc.abstractmethod
    def get_metal_dummy_layers(self, layer_id):
        # type: (int) -> List[Tuple[str, str]]
        """Returns a list of layers associated with the given metal dummy layers.

        Parameters
        ----------
        layer_id : int
            the metal layer ID.

        Returns
        -------
        dum_list : List[Tuple[str, str]]
            list of metal dummy (layer, purpose) pairs.
        """
        return []
    @abc.abstractmethod
    def add_cell_boundary(self, template, box):
        """Adds a cell boundary object to the given template.

        This is usually the PR boundary.  Subclasses must implement this method.

        Parameters
        ----------
        template : TemplateBase
            the template to draw the cell boundary in.
        box : BBox
            the cell boundary bounding box.
        """
        pass
    @abc.abstractmethod
    def draw_device_blockage(self, template):
        """Draw device blockage layers on the given template.

        Subclasses must implement this method.

        Parameters
        ----------
        template : TemplateBase
            the template to draw the device block layers on
        """
        pass
    @abc.abstractmethod
    def get_via_drc_info(self, vname, vtype, mtype, mw_unit, is_bot):
        """Return data structures used to identify VIA DRC rules.

        Implementations should raise ValueError if no DRC rules exist for the
        given via/metal combination; get_best_via_array() catches ValueError
        and skips that via type.

        Parameters
        ----------
        vname : string
            the via type name.
        vtype : string
            the via type, square/hrect/vrect/etc.
        mtype : string
            name of the metal layer via is connecting.  Can be either top or bottom.
        mw_unit : int
            width of the metal, in resolution units.
        is_bot : bool
            True if the given metal is the bottom metal.

        Returns
        -------
        sp : Tuple[int, int]
            horizontal/vertical space between adjacent vias, in resolution units.
        sp2_list : List[Tuple[int, int]] or None
            horizontal/vertical space between adjacent vias if the via has 2 or more neighbors.
            None if no constraint.
        sp3_list : List[Tuple[int, int]] or None
            horizontal/vertical space between adjacent vias if the via has 3 or more neighbors.
            None if no constraint.
        sp6_list : List[Tuple[int, int]] or None
            horizontal/vertical space between adjacent vias if the via has 6 or more neighbors.
            None if no constraint.
        dim : Tuple[int, int]
            the via width/height in resolution units.
        enc : List[Tuple[int, int]]
            a list of valid horizontal/vertical enclosure of the via on the given metal
            layer, in resolution units.
        arr_enc : List[Tuple[int, int]] or None
            a list of valid horizontal/vertical enclosure of the via on the given metal
            layer if this is a "via array", in layout units.
            None if no constraint.
        arr_test : callable or None
            a function that accepts two inputs, the number of via rows and number of via
            columns, and returns True if those numbers describe a "via array".
            None if no constraint.
        """
        return (0, 0), [(0, 0)], [(0, 0)], [(0, 0)], (0, 0), [(0, 0)], None, None
    @abc.abstractmethod
    def get_min_space(self, layer_type, width, unit_mode=False, same_color=False):
        """Returns the minimum spacing needed around a wire on the given layer with the given width.

        Parameters
        ----------
        layer_type : str
            the wiring layer type.
        width : Union[float, int]
            the width of the wire, in layout units.
        unit_mode : bool
            True if dimension are given/returned in resolution units.
        same_color : bool
            True to use same-color spacing.

        Returns
        -------
        sp : Union[float, int]
            the minimum spacing needed.  Returned in resolution units if
            unit_mode is True, in layout units otherwise.
        """
        return 0.0
    @abc.abstractmethod
    def get_min_line_end_space(self, layer_type, width, unit_mode=False):
        """Returns the minimum line-end spacing of a wire with given width.

        Parameters
        ----------
        layer_type : str
            the wiring layer type.
        width : Union[float, int]
            the width of the wire, in layout units.
        unit_mode : bool
            True if dimension are given/returned in resolution units.

        Returns
        -------
        sp : Union[float, int]
            the minimum line-end space.  Returned in resolution units if
            unit_mode is True, in layout units otherwise.
        """
        return 0.0
    @abc.abstractmethod
    def get_min_length(self, layer_type, width):
        # type: (str, float) -> float
        """Returns the minimum length of a wire on the given layer with the given width.

        Subclasses must implement this method.

        Parameters
        ----------
        layer_type : str
            the wiring layer type.
        width : float
            the width of the wire, in layout units.

        Returns
        -------
        min_length : float
            the minimum length, in layout units.
        """
        return 0.0
    @abc.abstractmethod
    def get_layer_id(self, layer_name):
        """Return the layer id for the given layer name.

        Inverse of get_layer_name().

        Parameters
        ----------
        layer_name : string
            the layer name.

        Returns
        -------
        layer_id : int
            the layer ID.
        """
        return 0
    @abc.abstractmethod
    def get_layer_name(self, layer_id):
        """Return the layer name(s) for the given routing grid layer ID.

        Parameters
        ----------
        layer_id : int
            the routing grid layer ID.

        Returns
        -------
        name : string or Tuple[string]
            name of the layer.  Returns a tuple of names if this is a double
            patterning layer.
        """
        return ''
    @abc.abstractmethod
    def get_layer_type(self, layer_name):
        """Returns the metal type of the given wiring layer.

        The type string is the key used to look up DRC/EM rules for this layer.

        Parameters
        ----------
        layer_name : str
            the wiring layer name.

        Returns
        -------
        metal_type : string
            the metal layer type.
        """
        return ''
    @abc.abstractmethod
    def get_via_name(self, bot_layer_id):
        """Returns the via type name of the given via.

        Parameters
        ----------
        bot_layer_id : int
            the via bottom layer ID

        Returns
        -------
        name : string
            the via type name.
        """
        return ''
    @abc.abstractmethod
    def get_metal_em_specs(self, layer_name, w, l=-1, vertical=False, **kwargs):
        """Returns a tuple of EM current/resistance specs of the given wire.

        Subclasses must implement this method.

        Parameters
        ----------
        layer_name : str
            the metal layer name.
        w : float
            the width of the metal in layout units (dimension perpendicular to current flow).
        l : float
            the length of the metal in layout units (dimension parallel to current flow).
            If negative, disable length enhancement.
        vertical : bool
            True to compute vertical current.
        **kwargs :
            optional EM specs parameters.

        Returns
        -------
        idc : float
            maximum DC current, in Amperes.
        iac_rms : float
            maximum AC RMS current, in Amperes.
        iac_peak : float
            maximum AC peak current, in Amperes.
        """
        return float('inf'), float('inf'), float('inf')
    @abc.abstractmethod
    def get_via_em_specs(self, via_name,  # type: str
                         bm_layer,  # type: str
                         tm_layer,  # type: str
                         via_type='square',  # type: str
                         bm_dim=(-1, -1),  # type: Tuple[float, float]
                         tm_dim=(-1, -1),  # type: Tuple[float, float]
                         array=False,  # type: bool
                         **kwargs):
        # type: (...) -> Tuple[float ,float, float]
        """Returns a tuple of EM current/resistance specs of the given via.

        All current limits are per single via cut; callers scale by the
        number of cuts (see get_via_info()).

        Parameters
        ----------
        via_name : str
            the via type name.
        bm_layer : str
            the bottom layer name.
        tm_layer : str
            the top layer name.
        via_type : str
            the via type, square/vrect/hrect/etc.
        bm_dim : Tuple[float, float]
            bottom layer metal width/length in layout units.  If negative,
            disable length/width enhancement.
        tm_dim : Tuple[float, float]
            top layer metal width/length in layout units.  If negative,
            disable length/width enhancement.
        array : bool
            True if this via is in a via array.
        **kwargs :
            optional EM specs parameters.

        Returns
        -------
        idc : float
            maximum DC current per via, in Amperes.
        iac_rms : float
            maximum AC RMS current per via, in Amperes.
        iac_peak : float
            maximum AC peak current per via, in Amperes.
        """
        return float('inf'), float('inf'), float('inf')
    @abc.abstractmethod
    def get_res_rsquare(self, res_type):
        """Returns R-square for the given resistor type.

        This is used to do some approximate resistor dimension calculation.

        Parameters
        ----------
        res_type : string
            the resistor type.

        Returns
        -------
        rsquare : float
            resistance in Ohms per unit square of the given resistor type.
        """
        return 0.0
    @abc.abstractmethod
    def get_res_width_bounds(self, res_type):
        """Returns the maximum and minimum resistor width for the given resistor type.

        Parameters
        ----------
        res_type : string
            the resistor type.

        Returns
        -------
        wmin : float
            minimum resistor width, in layout units.
        wmax : float
            maximum resistor width, in layout units.
        """
        return 0.0, 0.0
    @abc.abstractmethod
    def get_res_length_bounds(self, res_type):
        """Returns the maximum and minimum resistor length for the given resistor type.

        Parameters
        ----------
        res_type : string
            the resistor type.

        Returns
        -------
        lmin : float
            minimum resistor length, in layout units.
        lmax : float
            maximum resistor length, in layout units.
        """
        return 0.0, 0.0
    @abc.abstractmethod
    def get_res_min_nsquare(self, res_type):
        """Returns the minimum allowable number of squares for the given resistor type.

        Parameters
        ----------
        res_type : string
            the resistor type.

        Returns
        -------
        nsq_min : float
            minimum number of squares needed.
        """
        return 1.0
    @abc.abstractmethod
    def get_res_em_specs(self, res_type, w, l=-1, **kwargs):
        # type: (str, float, float, **Any) -> Tuple[float, float, float]
        """Returns a tuple of EM current/resistance specs of the given resistor.

        Subclasses must implement this method.

        Parameters
        ----------
        res_type : string
            the resistor type string.
        w : float
            the width of the metal in layout units (dimension perpendicular to current flow).
        l : float
            the length of the metal in layout units (dimension parallel to current flow).
            If negative, disable length enhancement.
        **kwargs : Any
            optional EM specs parameters.

        Returns
        -------
        idc : float
            maximum DC current, in Amperes.
        iac_rms : float
            maximum AC RMS current, in Amperes.
        iac_peak : float
            maximum AC peak current, in Amperes.
        """
        return float('inf'), float('inf'), float('inf')
    @property
    def via_tech_name(self):
        # type: () -> str
        """Returns the via technology library name (usually the PDK library name)."""
        return self._via_tech
    @property
    def pin_purpose(self):
        # type: () -> str
        """Returns the layout pin purpose name.  Defaults to 'pin'; subclasses may override."""
        return 'pin'
    @property
    def resolution(self):
        # type: () -> float
        """Returns the grid resolution, in layout units."""
        return self._resolution
    @property
    def layout_unit(self):
        # type: () -> float
        """Returns the layout unit length, in meters."""
        return self._layout_unit
def merge_well(self, template, inst_list, sub_type, threshold=None, res_type=None,
merge_imp=False):
# type: ('TemplateBase', List[Instance], str, Optional[str], Optional[str], bool) -> None
"""Merge the well of the given instances together."""
if threshold is not None:
lay_iter = chain(self.get_well_layers(sub_type),
self.get_threshold_layers(sub_type, threshold, res_type=res_type))
else:
lay_iter = self.get_well_layers(sub_type)
if merge_imp:
lay_iter = chain(lay_iter, self.get_implant_layers(sub_type, res_type=res_type))
for lay in lay_iter:
tot_box = BBox.get_invalid_bbox()
for inst in inst_list:
cur_box = inst.master.get_rect_bbox(lay)
tot_box = tot_box.merge(inst.translate_master_box(cur_box))
if tot_box.is_physical():
template.add_rect(lay, tot_box)
    def use_flip_parity(self):
        # type: () -> bool
        """Returns True if flip_parity dictionary is needed in this technology.

        Defaults to True; subclasses may override.
        """
        return True
    def finalize_template(self, template):
        """Perform any operations necessary on the given layout template before finalizing it.

        By default, nothing is done.  Subclasses may override to add
        technology-specific post-processing.

        Parameters
        ----------
        template : TemplateBase
            the template object.
        """
        pass
def get_res_info(self, res_type, w, l, **kwargs):
"""Returns a dictionary containing EM information of the given resistor.
Parameters
----------
res_type : string or (string, string)
the resistor type.
w : float
the resistor width in layout units (dimension perpendicular to current flow).
l : float
the resistor length in layout units (dimension parallel to current flow).
**kwargs :
optional parameters for EM rule calculations, such as nominal temperature,
AC rms delta-T, etc.
Returns
-------
info : dict[string, any]
A dictionary of wire information. Should have the following:
resistance : float
The resistance, in Ohms.
idc : float
The maximum allowable DC current, in Amperes.
iac_rms : float
The maximum allowable AC RMS current, in Amperes.
iac_peak : float
The maximum allowable AC peak current, in Amperes.
"""
rsq = self.get_res_rsquare(res_type)
res = l / w * rsq
idc, irms, ipeak = self.get_res_em_specs(res_type, w, l=l, **kwargs)
return dict(
resistance=res,
idc=idc,
iac_rms=irms,
iac_peak=ipeak,
)
    def get_via_types(self, bmtype, tmtype):
        """Returns a list of (via_type, weight) tuples to try between the given metal types.

        The weight expresses a preference when comparing solutions in
        get_best_via_array(); a larger weight makes that via type win ties.
        Subclasses may override to restrict or extend the choices.
        """
        return [('square', 1), ('vrect', 2), ('hrect', 2)]
    def get_best_via_array(self, vname, bmtype, tmtype, bot_dir, top_dir, w, h, extend):
        """Maximize the number of vias in the given bounding box.

        Parameters
        ----------
        vname : str
            the via type name.
        bmtype : str
            the bottom metal type name.
        tmtype : str
            the top metal type name.
        bot_dir : str
            the bottom wire direction.  Either 'x' or 'y'.
        top_dir : str
            the top wire direction.  Either 'x' or 'y'.
        w : float
            width of the via array bounding box, in layout units.
        h : float
            height of the via array bounding box, in layout units.
        extend : bool
            True if via can extend beyond bounding box.

        Returns
        -------
        result : Optional[Tuple]
            None if no via array fits in the given bounding box; otherwise a
            tuple of the following items:

            best_nxy : Tuple[int, int]
                optimal number of vias per row/column.
            best_mdim_list : List[Tuple[int, int]]
                a list of bottom/top layer width/height, in resolution units.
            vtype : str
                the via type to draw, square/hrect/vrect/etc.
            vdim : Tuple[int, int]
                the via width/height, in resolution units.
            via_space : Tuple[int, int]
                the via horizontal/vertical spacing, in resolution units.
            via_arr_dim : Tuple[int, int]
                the via array width/height, in resolution units.
        """
        # This entire optimization routine relies on the bounding box being measured integer units
        res = self._resolution
        w = int(round(w / res))
        h = int(round(h / res))
        # Depending on the routing direction of the metal, the provided width/height of the
        # bounding box may correspond to either the x direction or y direction.
        if bot_dir == 'x':
            bb, be = h, w
        else:
            bb, be = w, h
        if top_dir == 'x':
            tb, te = h, w
        else:
            tb, te = w, h
        # Initialize variables that will hold optimal via size at the end of the algorithm
        best_num = None
        best_nxy = [-1, -1]
        best_mdim_list = None
        best_type = None
        best_vdim = None
        best_sp = None
        best_adim = None
        # Perform via optimization algorithm for all available via types.  Some technologies have
        # both square and rectangular via types, which can be used in different situations.  Each
        # via_type has a weight which signifies a preference for choosing one type over another
        via_type_list = self.get_via_types(bmtype, tmtype)
        for vtype, weight in via_type_list:
            # Extract via drc information from the loaded tech yaml file.  Some drc info is optional
            # so catch ValueErrors from missing info and move on
            try:
                # get space and enclosure rules for top and bottom layer
                bot_drc_info = self.get_via_drc_info(vname, vtype, bmtype, bb, True)
                top_drc_info = self.get_via_drc_info(vname, vtype, tmtype, tb, False)
                sp, sp2_list, sp3_list, sp6_list, dim, encb, arr_encb, arr_testb = bot_drc_info
                _, _, _, _, _, enct, arr_enct, arr_testt = top_drc_info
            except ValueError:
                continue
            # optional sp2/sp3 rules enable different spacing rules for via arrays with 2 or 3 neighbors
            if sp2_list is None:
                sp2_list = [sp]
            if sp3_list is None:
                sp3_list = sp2_list
            if sp6_list is None:
                sp6_list = sp3_list
            # Get minimum possible spacing between vias
            spx_min, spy_min = sp
            for high_sp_list in (sp2_list, sp3_list, sp6_list):
                for high_spx, high_spy in high_sp_list:
                    spx_min = min(spx_min, high_spx)
                    spy_min = min(spy_min, high_spy)
            # Get minimum possible enclosure size for top or bottom layers
            extx = 0
            exty = 0
            for enc in chain(encb, enct):
                extx = min(extx, enc[0])
                exty = min(exty, enc[1])
            # Allocate area in the bounding box for minimum enclosure, then find
            # maximum number of vias that can fit in the remaining area with the minimum spacing.
            # An infinite minimum spacing means at most one via fits in that direction.
            if np.isinf(spx_min):
                nx_max = 1 if (w - 2 * extx) // dim[0] else 0
            else:
                nx_max = (w + spx_min - 2 * extx) // (dim[0] + spx_min)
            if np.isinf(spy_min):
                ny_max = 1 if (h - 2 * exty) // dim[1] else 0
            else:
                ny_max = (h + spy_min - 2 * exty) // (dim[1] + spy_min)
            # Theoretically any combination of via array size from (1, 1) to (nx_max, ny_max) may actually
            # work within the given bound box.  Here we enumerate a list all of these possible via combinations
            # starting from the max via number
            nxy_list = [(a * b, a, b) for a in range(1, nx_max + 1) for b in range(1, ny_max + 1)]
            nxy_list = sorted(nxy_list, reverse=True)
            # Initialize variables that will hold the best working via array size for this via type
            opt_nxy = None
            opt_mdim_list = None
            opt_adim = None
            opt_sp = None
            # This looping procedure will iterate over all possible via array configurations and select
            # one that maximizes the number of vias while meeting all rules
            for num, nx, ny in nxy_list:
                # Determine whether we should be using sp/sp2/sp3 rules for the current via configuration
                if (nx == 1 and ny >= 1) or (nx >= 1 and ny == 1):
                    sp_combo = [sp]
                elif nx == 2 and ny == 2:
                    sp_combo = sp2_list
                elif nx >= 6 and ny >= 6:
                    sp_combo = sp6_list
                else:
                    sp_combo = sp3_list
                # DRC rules can typically be satisfied with a number of different spacing rules, so here we
                # iterate over each to find the best one.  Note that since we break out of the loop immediately upon
                # finding a valid via configuration, this code prioritizes spacing rules that are early on in the list
                for spx, spy in sp_combo:
                    # Compute a bounding box for the via array without the enclosure
                    w_arr = dim[0] if nx == 1 else nx * (spx + dim[0]) - spx
                    h_arr = dim[1] if ny == 1 else ny * (spy + dim[1]) - spy
                    mdim_list = [None, None]
                    # Loop over all possible enclosure types and check whether this via configuration satisfies
                    # one of them for both the bottom metal (idx 0) and top metal (idx 1)
                    for idx, (mdir, tot_enc_list, arr_enc, arr_test) in \
                            enumerate([(bot_dir, encb, arr_encb, arr_testb),
                                       (top_dir, enct, arr_enct, arr_testt)]):
                        # arr_test is a function that takes an array size as input and returns a boolean.  If its
                        # is true the array size is valid and is added to the list of valid enclosures
                        if arr_test is not None and arr_test(ny, nx):
                            tot_enc_list = tot_enc_list + arr_enc
                        # If the routing direction is y, start by computing x-direction enclosure.  ext_dim
                        # corresponds to x-direction.  Vice-versa if the routing direction is x
                        if mdir == 'y':
                            enc_idx = 0
                            enc_dim = w_arr
                            ext_dim = h_arr
                            dim_lim = w
                            max_ext_dim = h
                        else:
                            enc_idx = 1
                            enc_dim = h_arr
                            ext_dim = w_arr
                            dim_lim = h
                            max_ext_dim = w
                        # Initialize variable to hold opposite direction enclosure size
                        min_ext_dim = None
                        # This loop selects the minimum opposite direction size that satisfies the enclosure
                        # rules
                        for enc in tot_enc_list:
                            cur_ext_dim = ext_dim + 2 * enc[1 - enc_idx]
                            # Check that the enclosure rule is satisfied.  If extend is true, this passing enclosure
                            # size can exceed the maximum size set by the user provided bounding box
                            if (enc[enc_idx] * 2 + enc_dim <= dim_lim) and (extend or cur_ext_dim <= max_ext_dim):
                                # Select the minimum of all enclosures in the non-routing direction that satisfies
                                # the enclosure rules
                                if min_ext_dim is None or min_ext_dim > cur_ext_dim:
                                    min_ext_dim = cur_ext_dim
                        # If none of the enclosures in the list meet the rules, the current spacing rules cannot
                        # be used to create a valid via, so we continue on to the next set of spacing rules
                        if min_ext_dim is None:
                            break
                        # Otherwise record the computed via dimensions that pass all checks
                        else:
                            min_ext_dim = max(min_ext_dim, max_ext_dim)
                            mdim_list[idx] = [min_ext_dim, min_ext_dim]
                            mdim_list[idx][enc_idx] = dim_lim
                    # If we've found a valid via configuration immediately break out of the loop
                    if mdim_list[0] is not None and mdim_list[1] is not None:
                        # passed
                        opt_mdim_list = mdim_list
                        opt_nxy = (nx, ny)
                        opt_adim = (w_arr, h_arr)
                        opt_sp = (spx, spy)
                        break
                # If we've found a valid via array size immediately break out of the loop
                if opt_nxy is not None:
                    break
            # Select the best via out of all the passing via types.  Vias are selected by choosing the
            # highest 'best_num'.  This is calculated by multiplying the via array size by the via weight
            # Ties between vias are broken by minimizing drawn via area
            if opt_nxy is not None:
                opt_num = weight * opt_nxy[0] * opt_nxy[1]
                if (best_num is None or opt_num > best_num or
                        (opt_num == best_num and self._via_better(best_mdim_list, opt_mdim_list))):
                    best_num = opt_num
                    best_nxy = opt_nxy
                    best_mdim_list = opt_mdim_list
                    best_type = vtype
                    best_vdim = dim
                    best_sp = opt_sp
                    best_adim = opt_adim
        if best_num is None:
            return None
        return best_nxy, best_mdim_list, best_type, best_vdim, best_sp, best_adim
def _via_better(self, mdim_list1, mdim_list2):
"""Returns true if the via in mdim_list1 has smaller area compared with via in mdim_list2"""
res = self._resolution
better = False
for mdim1, mdim2 in zip(mdim_list1, mdim_list2):
area1 = int(round(mdim1[0] / res)) * int(round(mdim1[1] / res))
area2 = int(round(mdim2[0] / res)) * int(round(mdim2[1] / res))
if area1 < area2:
better = True
elif area1 > area2:
return False
return better
# noinspection PyMethodMayBeStatic
def get_via_id(self, bot_layer, top_layer):
"""Returns the via ID string given bottom and top layer name.
Defaults to "_"
Parameters
----------
bot_layer : string
the bottom layer name.
top_layer : string
the top layer name.
Returns
-------
via_id : string
the via ID string.
"""
return '%s_%s' % (top_layer, bot_layer)
    def get_via_info(self, bbox, bot_layer, top_layer, bot_dir, bot_len=-1, top_len=-1,
                     extend=True, top_dir=None, **kwargs):
        """Create a via on the routing grid given the bounding box.

        Parameters
        ----------
        bbox : ..layout.util.BBox
            the bounding box of the via.
        bot_layer : Union[str, Tuple[str, str]]
            the bottom layer name, or a tuple of layer name and purpose name.
            If purpose name not given, defaults to 'drawing'.
        top_layer : Union[str, Tuple[str, str]]
            the top layer name, or a tuple of layer name and purpose name.
            If purpose name not given, defaults to 'drawing'.
        bot_dir : str
            the bottom layer extension direction.  Either 'x' or 'y'
        bot_len : float
            length of bottom wire connected to this Via, in layout units.
            Used for length enhancement EM calculation.
        top_len : float
            length of top wire connected to this Via, in layout units.
            Used for length enhancement EM calculation.
        extend : bool
            True if via extension can be drawn outside of bounding box.
        top_dir : Optional[str]
            top layer extension direction.  Can force to extend in same direction as bottom.
        **kwargs :
            optional parameters for EM rule calculations, such as nominal temperature,
            AC rms delta-T, etc.

        Returns
        -------
        info : dict[string, any]
            A dictionary of via information, or None if no solution.  Should have the following:

            resistance : float
                The total via array resistance, in Ohms.
            idc : float
                The total via array maximum allowable DC current, in Amperes.
            iac_rms : float
                The total via array maximum allowable AC RMS current, in Amperes.
            iac_peak : float
                The total via array maximum allowable AC peak current, in Amperes.
            params : dict[str, any]
                A dictionary of via parameters.
            top_box : ..layout.util.BBox
                the top via layer bounding box, including extensions.
            bot_box : ..layout.util.BBox
                the bottom via layer bounding box, including extensions.
        """
        # remove purpose
        if isinstance(bot_layer, tuple):
            bot_layer = bot_layer[0]
        if isinstance(top_layer, tuple):
            top_layer = top_layer[0]
        bot_layer = io.fix_string(bot_layer)
        top_layer = io.fix_string(top_layer)
        bot_id = self.get_layer_id(bot_layer)
        bmtype = self.get_layer_type(bot_layer)
        tmtype = self.get_layer_type(top_layer)
        vname = self.get_via_name(bot_id)
        # default top direction is perpendicular to the bottom direction
        if not top_dir:
            top_dir = 'x' if bot_dir == 'y' else 'y'
        via_result = self.get_best_via_array(vname, bmtype, tmtype, bot_dir, top_dir,
                                             bbox.width, bbox.height, extend)
        if via_result is None:
            # no solution found
            return None
        (nx, ny), mdim_list, vtype, vdim, (spx, spy), (warr_norm, harr_norm) = via_result
        res = self.resolution
        # via array center and metal dimensions, in resolution units
        xc_norm = bbox.xc_unit
        yc_norm = bbox.yc_unit
        wbot_norm = mdim_list[0][0]
        hbot_norm = mdim_list[0][1]
        wtop_norm = mdim_list[1][0]
        htop_norm = mdim_list[1][1]
        # OpenAccess Via can't handle even + odd enclosure, so we truncate.
        enc1_x = (wbot_norm - warr_norm) // 2 * res
        enc1_y = (hbot_norm - harr_norm) // 2 * res
        enc2_x = (wtop_norm - warr_norm) // 2 * res
        enc2_y = (htop_norm - harr_norm) // 2 * res
        # compute EM rule dimensions; the EM width is perpendicular to the wire direction
        if bot_dir == 'x':
            bw, tw = hbot_norm * res, wtop_norm * res
        else:
            bw, tw = wbot_norm * res, htop_norm * res
        bot_xl_norm = xc_norm - wbot_norm // 2
        bot_yb_norm = yc_norm - hbot_norm // 2
        top_xl_norm = xc_norm - wtop_norm // 2
        top_yb_norm = yc_norm - htop_norm // 2
        bot_box = BBox(bot_xl_norm, bot_yb_norm, bot_xl_norm + wbot_norm,
                       bot_yb_norm + hbot_norm, res, unit_mode=True)
        top_box = BBox(top_xl_norm, top_yb_norm, top_xl_norm + wtop_norm,
                       top_yb_norm + htop_norm, res, unit_mode=True)
        idc, irms, ipeak = self.get_via_em_specs(vname, bot_layer, top_layer, via_type=vtype,
                                                 bm_dim=(bw, bot_len), tm_dim=(tw, top_len),
                                                 array=nx > 1 or ny > 1, **kwargs)
        params = {'id': self.get_via_id(bot_layer, top_layer),
                  'loc': (xc_norm * res, yc_norm * res),
                  'orient': 'R0',
                  'num_rows': ny,
                  'num_cols': nx,
                  'sp_rows': spy * res,
                  'sp_cols': spx * res,
                  # increase left/bottom enclosure if off-center.
                  'enc1': [enc1_x, enc1_x, enc1_y, enc1_y],
                  'enc2': [enc2_x, enc2_x, enc2_y, enc2_y],
                  'cut_width': vdim[0] * res,
                  'cut_height': vdim[1] * res,
                  }
        # EM limits scale with the total number of via cuts
        ntot = nx * ny
        return dict(
            resistance=0.0,
            idc=idc * ntot,
            iac_rms=irms * ntot,
            iac_peak=ipeak * ntot,
            params=params,
            top_box=top_box,
            bot_box=bot_box,
        )
def design_resistor(self, res_type, res_targ, idc=0.0, iac_rms=0.0,
iac_peak=0.0, num_even=True, **kwargs):
"""Finds the optimal resistor dimension that meets the given specs.
Assumes resistor length does not effect EM specs.
Parameters
----------
res_type : string
the resistor type.
res_targ : float
target resistor, in Ohms.
idc : float
maximum DC current spec, in Amperes.
iac_rms : float
maximum AC RMS current spec, in Amperes.
iac_peak : float
maximum AC peak current spec, in Amperes.
num_even : int
True to return even number of resistors.
**kwargs :
optional EM spec calculation parameters.
Returns
-------
num_par : int
number of resistors needed in parallel.
num_ser : int
number of resistors needed in series.
w : float
width of a unit resistor, in meters.
l : float
length of a unit resistor, in meters.
"""
resolution = self.resolution
rsq = self.get_res_rsquare(res_type)
wmin, wmax = self.get_res_width_bounds(res_type)
lmin, lmax = self.get_res_length_bounds(res_type)
min_nsq = self.get_res_min_nsquare(res_type)
wmin_unit = int(round(wmin / resolution))
wmax_unit = int(round(wmax / resolution))
lmin_unit = int(round(lmin / resolution))
lmax_unit = int(round(lmax / resolution))
# make sure width is always even
wmin_unit = -2 * (-wmin_unit // 2)
wmax_unit = 2 * (wmax_unit // 2)
# step 1: find number of parallel resistors and minimum resistor width.
if num_even:
npar_iter = BinaryIterator(2, None, step=2)
else:
npar_iter = BinaryIterator(1, None, step=1)
while npar_iter.has_next():
npar = npar_iter.get_next()
res_targ_par = res_targ * npar
idc_par = idc / npar
iac_rms_par = iac_rms / npar
iac_peak_par = iac_peak / npar
res_idc, res_irms, res_ipeak = self.get_res_em_specs(res_type, wmax, **kwargs)
if (0.0 < res_idc < idc_par or 0.0 < res_irms < iac_rms_par or
0.0 < res_ipeak < iac_peak_par):
npar_iter.up()
else:
# This could potentially work, find width solution
w_iter = BinaryIterator(wmin_unit, wmax_unit + 1, step=2)
while w_iter.has_next():
wcur_unit = w_iter.get_next()
lcur_unit = int(math.ceil(res_targ_par / rsq * wcur_unit))
if lcur_unit < max(lmin_unit, int(math.ceil(min_nsq * wcur_unit))):
w_iter.down()
else:
tmp = self.get_res_em_specs(res_type, wcur_unit * resolution,
l=lcur_unit * resolution, **kwargs)
res_idc, res_irms, res_ipeak = tmp
if (0.0 < res_idc < idc_par or 0.0 < res_irms < iac_rms_par or
0.0 < res_ipeak < iac_peak_par):
w_iter.up()
else:
w_iter.save_info((wcur_unit, lcur_unit))
w_iter.down()
w_info = w_iter.get_last_save_info()
if w_info is None:
# no solution; we need more parallel resistors
npar_iter.up()
else:
# solution!
npar_iter.save_info((npar, w_info[0], w_info[1]))
npar_iter.down()
# step 3: fix maximum length violation by having resistor in series.
num_par, wopt_unit, lopt_unit = npar_iter.get_last_save_info()
wopt = wopt_unit * resolution
if lopt_unit > lmax_unit:
num_ser = -(-lopt_unit // lmax_unit)
lopt = round(lopt_unit / num_ser / resolution) * resolution
else:
num_ser = 1
lopt = lopt_unit * resolution
# step 4: return answer
return num_par, num_ser, wopt * self.layout_unit, lopt * self.layout_unit
class DummyTechInfo(TechInfo):
    """A dummy TechInfo class.

    All query methods return empty or trivial values; EM limits are infinite.

    Parameters
    ----------
    tech_params : dict[str, any]
        technology parameters dictionary.
    """
    def __init__(self, tech_params):
        # resolution of 1 nm, layout unit of 1 um, empty via tech library
        TechInfo.__init__(self, 0.001, 1e-6, '', tech_params)
    # --- layer queries: no layers defined ---
    def get_well_layers(self, sub_type):
        return []
    def get_implant_layers(self, mos_type, res_type=None):
        return []
    def get_threshold_layers(self, mos_type, threshold, res_type=None):
        return []
    def get_dnw_layers(self):
        # type: () -> List[Tuple[str, str]]
        return []
    def get_exclude_layer(self, layer_id):
        # type: (int) -> Tuple[str, str]
        """Returns the metal exclude layer"""
        return '', ''
    def get_dnw_margin_unit(self, dnw_mode):
        # type: (str) -> int
        return 0
    def get_res_metal_layers(self, layer_id):
        # type: (int) -> List[Tuple[str, str]]
        return []
    def get_metal_dummy_layers(self, layer_id):
        # type: (int) -> List[Tuple[str, str]]
        return []
    # --- drawing hooks: no-ops ---
    def add_cell_boundary(self, template, box):
        pass
    def draw_device_blockage(self, template):
        pass
    # --- DRC queries: trivial zero-valued rules ---
    def get_via_drc_info(self, vname, vtype, mtype, mw_unit, is_bot):
        return (0, 0), [(0, 0)], [(0, 0)], [(0, 0)], (0, 0), [(0, 0)], None, None
    def get_min_space(self, layer_type, width, unit_mode=False, same_color=False):
        return 0
    def get_min_line_end_space(self, layer_type, width, unit_mode=False):
        return 0
    def get_min_length(self, layer_type, width):
        return 0.0
    def get_layer_id(self, layer_name):
        return -1
    def get_layer_name(self, layer_id):
        return ''
    def get_layer_type(self, layer_name):
        return ''
    def get_via_name(self, bot_layer_id):
        return ''
    # --- EM queries: unlimited current ---
    def get_metal_em_specs(self, layer_name, w, l=-1, vertical=False, **kwargs):
        return float('inf'), float('inf'), float('inf')
    def get_via_em_specs(self, via_name, bm_layer, tm_layer, via_type='square',
                         bm_dim=(-1, -1), tm_dim=(-1, -1), array=False, **kwargs):
        return float('inf'), float('inf'), float('inf')
    # --- resistor queries: trivial values ---
    def get_res_rsquare(self, res_type):
        return 0.0
    def get_res_width_bounds(self, res_type):
        return 0.0, 0.0
    def get_res_length_bounds(self, res_type):
        return 0.0, 0.0
    def get_res_min_nsquare(self, res_type):
        return 1.0
    def get_res_em_specs(self, res_type, w, l=-1, **kwargs):
        return float('inf'), float('inf'), float('inf')
class BagLayout(object):
"""This class contains layout information of a cell.
Parameters
----------
grid : :class:`..layout.routing.RoutingGrid`
the routing grid instance.
use_cybagoa : bool
True to use cybagoa package to accelerate layout.
"""
    def __init__(self, grid, use_cybagoa=False):
        """Create a new, empty layout using the given routing grid's settings."""
        # cache technology constants from the routing grid
        self._res = grid.resolution
        self._via_tech = grid.tech_info.via_tech_name
        self._pin_purpose = grid.tech_info.pin_purpose
        self._make_pin_rect = True
        # geometry containers, populated by add_* methods
        self._inst_list = []  # type: List[Instance]
        self._inst_primitives = []  # type: List[InstanceInfo]
        self._rect_list = []  # type: List[Rect]
        self._via_list = []  # type: List[Via]
        self._via_primitives = []  # type: List[ViaInfo]
        self._pin_list = []  # type: List[PinInfo]
        self._path_list = []  # type: List[Path]
        self._polygon_list = []  # type: List[Polygon]
        self._blockage_list = []  # type: List[Blockage]
        self._boundary_list = []  # type: List[Boundary]
        # name bookkeeping to avoid duplicate instance/pin names
        self._used_inst_names = set()
        self._used_pin_names = set()
        # populated by finalize()
        self._raw_content = None
        self._is_empty = True
        self._finalized = False
        self._use_cybagoa = use_cybagoa
    @property
    def pin_purpose(self):
        # type: () -> str
        """Returns the default pin layer purpose name."""
        return self._pin_purpose
    @property
    def is_empty(self):
        # type: () -> bool
        """Returns True if this layout is empty.  Only meaningful after finalize()."""
        return self._is_empty
def inst_iter(self):
# type: () -> Iterator[Instance]
return iter(self._inst_list)
def finalize(self):
# type: () -> None
"""Prevents any further changes to this layout.
"""
self._finalized = True
# get rectangles
rect_list = []
for obj in self._rect_list:
if obj.valid:
if not obj.bbox.is_physical():
print('WARNING: rectangle with non-physical bounding box found.', obj.layer)
else:
obj_content = obj.content
rect_list.append(obj_content)
# filter out invalid geometries
path_list, polygon_list, blockage_list, boundary_list, via_list = [], [], [], [], []
for targ_list, obj_list in ((path_list, self._path_list),
(polygon_list, self._polygon_list),
(blockage_list, self._blockage_list),
(boundary_list, self._boundary_list),
(via_list, self._via_list)):
for obj in obj_list:
if obj.valid:
targ_list.append(obj.content)
# get via primitives
via_list.extend(self._via_primitives)
# get instances
inst_list = [] # type: List[InstanceInfo]
for obj in self._inst_list:
if obj.valid:
obj_content = self._format_inst(obj)
inst_list.append(obj_content)
self._raw_content = [inst_list,
self._inst_primitives,
rect_list,
via_list,
self._pin_list,
path_list,
blockage_list,
boundary_list,
polygon_list,
]
if (not inst_list and not self._inst_primitives and not rect_list and not blockage_list and
not boundary_list and not via_list and not self._pin_list and not path_list and
not polygon_list):
self._is_empty = True
else:
self._is_empty = False
def get_rect_bbox(self, layer):
# type: (Union[str, Tuple[str, str]]) -> BBox
"""Returns the overall bounding box of all rectangles on the given layer.
Note: currently this does not check primitive instances or vias.
"""
if isinstance(layer, str):
layer = (layer, 'drawing')
box = BBox.get_invalid_bbox()
for rect in self._rect_list:
if layer == rect.layer:
box = box.merge(rect.bbox_array.get_overall_bbox())
for inst in self._inst_list:
box = box.merge(inst.get_rect_bbox(layer))
return box
def get_masters_set(self):
"""Returns a set of all template master keys used in this layout."""
return set((inst.master.key for inst in self._inst_list))
def _get_unused_inst_name(self, inst_name):
"""Returns a new inst name."""
if inst_name is None or inst_name in self._used_inst_names:
cnt = 0
inst_name = 'X%d' % cnt
while inst_name in self._used_inst_names:
cnt += 1
inst_name = 'X%d' % cnt
return inst_name
def _format_inst(self, inst):
# type: (Instance) -> InstanceInfo
"""Convert the given instance into dictionary representation."""
content = inst.content
inst_name = self._get_unused_inst_name(content.name)
content.name = inst_name
self._used_inst_names.add(inst_name)
return content
def get_content(self, # type: BagLayout
lib_name, # type: str
cell_name, # type: str
rename_fun, # type: Callable[[str], str]
):
# type: (...) -> Union[List[Any], Tuple[str, 'cybagoa.PyOALayout']]
"""returns a list describing geometries in this layout.
Parameters
----------
lib_name : str
the layout library name.
cell_name : str
the layout top level cell name.
rename_fun : Callable[[str], str]
the layout cell renaming function.
Returns
-------
content : Union[List[Any], Tuple[str, 'cybagoa.PyOALayout']]
a list describing this layout, or PyOALayout if cybagoa package is enabled.
"""
if not self._finalized:
raise Exception('Layout is not finalized.')
cell_name = rename_fun(cell_name)
(inst_list, inst_prim_list, rect_list, via_list, pin_list,
path_list, blockage_list, boundary_list, polygon_list) = self._raw_content
# update library name and apply layout cell renaming on instances
inst_tot_list = []
for inst in inst_list:
inst_temp = inst.copy()
inst_temp['lib'] = lib_name
inst_temp['cell'] = rename_fun(inst_temp['cell'])
inst_tot_list.append(inst_temp)
inst_tot_list.extend(inst_prim_list)
if self._use_cybagoa and cybagoa is not None:
encoding = io.get_encoding()
oa_layout = cybagoa.PyLayout(encoding)
for obj in inst_tot_list:
obj.pop('master_key', None)
oa_layout.add_inst(**obj)
for obj in rect_list:
oa_layout.add_rect(**obj)
for obj in via_list:
oa_layout.add_via(**obj)
for obj in pin_list:
oa_layout.add_pin(**obj)
for obj in path_list:
oa_layout.add_path(**obj)
for obj in blockage_list:
oa_layout.add_blockage(**obj)
for obj in boundary_list:
oa_layout.add_boundary(**obj)
for obj in polygon_list:
oa_layout.add_polygon(**obj)
return cell_name, oa_layout
else:
ans = [cell_name, inst_tot_list, rect_list, via_list, pin_list, path_list,
blockage_list, boundary_list, polygon_list]
return ans
def add_instance(self, instance):
"""Adds the given instance to this layout.
Parameters
----------
instance : ..layout.objects.Instance
the instance to add.
"""
if self._finalized:
raise Exception('Layout is already finalized.')
# if isinstance(instance.nx, float) or isinstance(instance.ny, float):
# raise Exception('float nx/ny')
self._inst_list.append(instance)
def move_all_by(self, dx=0.0, dy=0.0, unit_mode=False):
# type: (Union[float, int], Union[float, int], bool) -> None
"""Move all layout objects in this layout by the given amount.
Parameters
----------
dx : Union[float, int]
the X shift.
dy : Union[float, int]
the Y shift.
unit_mode : bool
True if shift values are given in resolution units.
"""
if self._finalized:
raise Exception('Layout is already finalized.')
for obj in chain(self._inst_list, self._inst_primitives, self._rect_list,
self._via_primitives, self._via_list, self._pin_list,
self._path_list, self._blockage_list, self._boundary_list,
self._polygon_list):
obj.move_by(dx=dx, dy=dy, unit_mode=unit_mode)
def add_instance_primitive(self, # type: BagLayout
lib_name, # type: str
cell_name, # type: str
loc, # type: Tuple[Union[float, int], Union[float, int]]
view_name='layout', # type: str
inst_name=None, # type: Optional[str]
orient="R0", # type: str
num_rows=1, # type: int
num_cols=1, # type: int
sp_rows=0, # type: Union[float, int]
sp_cols=0, # type: Union[float, int]
params=None, # type: Optional[Dict[str, Any]]
unit_mode=False, # type: bool
**kwargs
):
"""Adds a new (arrayed) primitive instance to this layout.
Parameters
----------
lib_name : str
instance library name.
cell_name : str
instance cell name.
loc : Tuple[Union[float, int], Union[float, int]]
instance location.
view_name : str
instance view name. Defaults to 'layout'.
inst_name : Optional[str]
instance name. If None or an instance with this name already exists,
a generated unique name is used.
orient : str
instance orientation. Defaults to "R0"
num_rows : int
number of rows. Must be positive integer.
num_cols : int
number of columns. Must be positive integer.
sp_rows : Union[float, int]
row spacing. Used for arraying given instance.
sp_cols : Union[float, int]
column spacing. Used for arraying given instance.
params : Optional[Dict[str, Any]]
the parameter dictionary. Used for adding pcell instance.
unit_mode : bool
True if distances are specified in resolution units.
**kwargs :
additional arguments. Usually implementation specific.
"""
if self._finalized:
raise Exception('Layout is already finalized.')
res = self._res
if not unit_mode:
loc = [round(loc[0] / res) * res,
round(loc[1] / res) * res]
sp_rows = round(sp_rows / res) * res
sp_cols = round(sp_cols / res) * res
else:
loc = [loc[0] * res, loc[1] * res]
sp_rows *= res
sp_cols *= res
# get unique instance name
inst_name = self._get_unused_inst_name(inst_name)
self._used_inst_names.add(inst_name)
inst_info = InstanceInfo(self._res, lib=lib_name,
cell=cell_name,
view=view_name,
name=inst_name,
loc=loc,
orient=orient,
num_rows=num_rows,
num_cols=num_cols,
sp_rows=sp_rows,
sp_cols=sp_cols)
# if isinstance(num_rows, float) or isinstance(num_cols, float):
# raise Exception('float nx/ny')
if params is not None:
inst_info.params = params
inst_info.update(kwargs)
self._inst_primitives.append(inst_info)
def add_rect(self, rect):
"""Add a new (arrayed) rectangle.
Parameters
----------
rect : ..layout.objects.Rect
the rectangle object to add.
"""
if self._finalized:
raise Exception('Layout is already finalized.')
self._rect_list.append(rect)
def add_path(self, path):
# type: (Path) -> None
"""Add a new path.
Parameters
----------
path : Path
the path object to add.
"""
if self._finalized:
raise Exception('Layout is already finalized.')
self._path_list.append(path)
def add_polygon(self, polygon):
# type: (Polygon) -> None
"""Add a new polygon.
Parameters
----------
polygon : Polygon
the polygon object to add.
"""
if self._finalized:
raise Exception('Layout is already finalized.')
self._polygon_list.append(polygon)
def add_blockage(self, blockage):
# type: (Blockage) -> None
"""Add a new blockage.
Parameters
----------
blockage : Blockage
the blockage object to add.
"""
if self._finalized:
raise Exception('Layout is already finalized.')
self._blockage_list.append(blockage)
def add_boundary(self, boundary):
# type: (Boundary) -> None
"""Add a new boundary.
Parameters
----------
boundary : Boundary
the boundary object to add.
"""
if self._finalized:
raise Exception('Layout is already finalized.')
self._boundary_list.append(boundary)
def add_via(self, via):
"""Add a new (arrayed) via.
Parameters
----------
via : ..layout.objects.Via
the via object to add.
"""
if self._finalized:
raise Exception('Layout is already finalized.')
# if isinstance(via.nx, float) or isinstance(via.ny, float):
# raise Exception('float nx/ny')
self._via_list.append(via)
def add_via_primitive(self, via_type, loc, num_rows=1, num_cols=1, sp_rows=0.0, sp_cols=0.0,
enc1=None, enc2=None, orient='R0', cut_width=None, cut_height=None,
arr_nx=1, arr_ny=1, arr_spx=0.0, arr_spy=0.0):
"""Adds a primitive via by specifying all parameters.
Parameters
----------
via_type : str
the via type name.
loc : Tuple[float, float]
the via location as a two-element tuple.
num_rows : int
number of via cut rows.
num_cols : int
number of via cut columns.
sp_rows : float
spacing between via cut rows.
sp_cols : float
spacing between via cut columns.
enc1 : list[float]
a list of left, right, top, and bottom enclosure values on bottom layer.
Defaults to all 0.
enc2 : list[float]
a list of left, right, top, and bottom enclosure values on top layer.
Defaults. to all 0.
orient : str
orientation of the via.
cut_width : float or None
via cut width. This is used to create rectangle via.
cut_height : float or None
via cut height. This is used to create rectangle via.
arr_nx : int
number of columns.
arr_ny : int
number of rows.
arr_spx : float
column pitch.
arr_spy : float
row pitch.
"""
if self._finalized:
raise Exception('Layout is already finalized.')
if arr_nx > 0 and arr_ny > 0:
if enc1 is None:
enc1 = [0.0, 0.0, 0.0, 0.0]
if enc2 is None:
enc2 = [0.0, 0.0, 0.0, 0.0]
# if isinstance(arr_nx, float) or isinstance(arr_ny, float):
# raise Exception('float nx/ny')
par = ViaInfo(self._res, id=via_type, loc=loc, orient=orient, num_rows=num_rows,
num_cols=num_cols,
sp_rows=sp_rows, sp_cols=sp_cols, enc1=enc1, enc2=enc2, )
if cut_width is not None:
par['cut_width'] = cut_width
if cut_height is not None:
par['cut_height'] = cut_height
if arr_nx > 1 or arr_ny > 1:
par['arr_nx'] = arr_nx
par['arr_ny'] = arr_ny
par['arr_spx'] = arr_spx
par['arr_spy'] = arr_spy
self._via_primitives.append(par)
def add_pin(self, net_name, layer, bbox, pin_name=None, label=None):
"""Add a new pin.
Parameters
----------
net_name : str
the net name associated with this pin.
layer : string or (string, string)
the layer name, or (layer, purpose) pair.
if purpose is not specified, defaults to 'pin'.
bbox : ..layout.util.BBox
the rectangle bounding box
pin_name : str or None
the pin name. If None or empty, auto-generate from net name.
label : str or None
the pin label text. If None or empty, will use net name as the text.
"""
if self._finalized:
raise Exception('Layout is already finalized.')
if isinstance(layer, bytes):
# interpret as unicode
layer = layer.decode('utf-8')
if isinstance(layer, str):
layer = (layer, self._pin_purpose)
else:
layer = layer[0], layer[1]
if not label:
label = net_name
pin_name = pin_name or net_name
idx = 1
while pin_name in self._used_pin_names:
pin_name = '%s_%d' % (net_name, idx)
idx += 1
par = PinInfo(self._res, net_name=net_name,
pin_name=pin_name,
label=label,
layer=list(layer),
bbox=[[bbox.left, bbox.bottom], [bbox.right, bbox.top]],
make_rect=self._make_pin_rect)
self._used_pin_names.add(pin_name)
self._pin_list.append(par)
def add_label(self, label, layer, bbox):
"""Add a new label.
This is mainly used to add voltage text labels.
Parameters
----------
label : str
the label text.
layer : Union[str, Tuple[str, str]]
the layer name, or (layer, purpose) pair.
if purpose is not specified, defaults to 'pin'.
bbox : ..layout.util.BBox
the rectangle bounding box
"""
if self._finalized:
raise Exception('Layout is already finalized.')
if isinstance(layer, bytes):
# interpret as unicode
layer = layer.decode('utf-8')
if isinstance(layer, str):
layer = (layer, self._pin_purpose)
else:
layer = layer[0], layer[1]
par = PinInfo(self._res, net_name='',
pin_name='',
label=label,
layer=list(layer),
bbox=[[bbox.left, bbox.bottom], [bbox.right, bbox.top]],
make_rect=False)
self._pin_list.append(par)
================================================
FILE: bag/layout/digital.py
================================================
# -*- coding: utf-8 -*-
"""This module defines layout template classes for digital standard cells.
"""
from typing import Dict, Any, Set, Tuple, List, Optional
import abc
from bag.io import read_yaml
from ..util.interval import IntervalSet
from .util import BBox
from .template import TemplateDB, TemplateBase
from .objects import Instance
from .routing import TrackID, WireArray
class StdCellBase(TemplateBase, metaclass=abc.ABCMeta):
    """The base class of all standard cell templates.

    Reads the standard cell configuration file and provides placement
    helpers (column/row bookkeeping, space filling, boundary drawing) on a
    fixed column/row grid.

    Parameters
    ----------
    temp_db : TemplateDB
        the template database.
    lib_name : str
        the layout library name.
    params : Dict[str, Any]
        the parameter values.
    used_names : Set[str]
        a set of already used cell names.
    **kwargs
        dictionary of optional parameters.  See documentation of
        :class:`bag.layout.template.TemplateBase` for details.
    """

    def __init__(self, temp_db, lib_name, params, used_names, **kwargs):
        # type: (TemplateDB, str, Dict[str, Any], Set[str], **Any) -> None
        self._config = read_yaml(params['config_file'])
        self._tech_params = self._config['tech_params']
        self._cells = self._config['cells']
        self._spaces = self._config['spaces']
        self._bound_params = self._config['boundaries']
        TemplateBase.__init__(self, temp_db, lib_name, params, used_names, **kwargs)
        # size in std. columns/rows including boundaries; set by set_std_size()
        self._std_size = None  # type: Optional[Tuple[int, int]]
        # size in std. columns/rows excluding boundaries
        self._std_size_bare = None  # type: Optional[Tuple[int, int]]
        self._draw_boundaries = False  # type: bool
        # one IntervalSet per std. row, tracking occupied column intervals
        self._used_blocks = []  # type: List[IntervalSet]

    @property
    def min_space_width(self):
        # type: () -> int
        """Returns the minimum space block width in number of standard cell columns."""
        return self._spaces[-1]['num_col']

    @property
    def std_col_width(self):
        # type: () -> float
        """Returns the standard cell column width."""
        return self._tech_params['col_pitch']

    @property
    def std_col_width_unit(self):
        # type: () -> int
        """Returns the standard cell column width in resolution units."""
        res = self.grid.resolution
        return int(round(self._tech_params['col_pitch'] / res))

    @property
    def std_row_height(self):
        # type: () -> float
        """Returns the standard cell row height."""
        return self._tech_params['height']

    @property
    def std_row_height_unit(self):
        # type: () -> int
        """Returns the standard cell row height in resolution units."""
        res = self.grid.resolution
        return int(round(self._tech_params['height'] / res))

    @property
    def std_size(self):
        # type: () -> Optional[Tuple[int, int]]
        """Returns the number of columns/rows that this standard cell occupies."""
        return self._std_size

    @property
    def std_routing_layers(self):
        # type: () -> List[int]
        """Returns the routing layers used by this standard cell."""
        return self._tech_params['layers']

    def get_num_columns(self, layer_id, num_tr):
        # type: (int, int) -> int
        """Returns the number of standard cell columns needed to contain the given amount of tracks.

        Parameters
        ----------
        layer_id : int
            the track layer ID.
        num_tr : int
            number of tracks.

        Returns
        -------
        num_col : int
            number of standard cell columns that span the given number of tracks.
        """
        col_width_unit = int(round(self._tech_params['col_pitch'] / self.grid.resolution))
        tr_pitch = int(self.grid.get_track_pitch(layer_id, unit_mode=True))  # type: int
        return -(-(tr_pitch * num_tr) // col_width_unit)  # ceiling division

    def set_draw_boundaries(self, draw_boundaries):
        # type: (bool) -> None
        """Sets whether this standard cell have boundaries drawn around it.

        To draw boundaries around a standard cell, first call this method
        with draw_boundaries=True, then call set_std_size() method when
        all blocks have been placed.  Finally, call draw_boundaries()
        to draw the bounded cells.

        Parameters
        ----------
        draw_boundaries : bool
            True to draw boundaries around this standard cell.
        """
        self._draw_boundaries = draw_boundaries

    def get_space_blocks(self):
        # type: () -> List[Dict[str, Any]]
        """Returns the space blocks parameters.  Used internally."""
        return self._spaces

    def get_cell_params(self, cell_name):
        # type: (str) -> Dict[str, Any]
        """Returns parameters for the given standard cell.  Used internally.

        Parameters
        ----------
        cell_name : str
            the standard cell name.

        Raises
        ------
        ValueError
            if no standard cell with the given name exists.
        """
        # direct dict lookup instead of a linear scan over items()
        if cell_name in self._cells:
            return self._cells[cell_name]
        raise ValueError('Cannot find standard cell with name %s' % cell_name)

    def set_std_size(self, std_size, top_layer=-1):
        # type: (Tuple[int, int], int) -> None
        """Sets the size of this standard cell.

        This method computes self.size, self.array_box, and self.std_size.
        If you will draw boundaries around this standard cell,
        self.set_draw_boundaries(True) should be called first.

        Parameters
        ----------
        std_size : Tuple[int, int]
            the standard cell size as (number of std. columns, number of std. rows) Tuple.
        top_layer : int
            the top level routing layer.  If negative, default to standard cell top routing layer.
        """
        num_col, num_row = std_size
        self._std_size_bare = std_size
        if self._draw_boundaries:
            # pad the size with boundary cells on all four sides
            dx = self._bound_params['lr_width'] * self.std_col_width
            dy = self._bound_params['tb_height'] * self.std_row_height
            self._std_size = (int(std_size[0] + 2 * self._bound_params['lr_width']),
                              int(std_size[1] + 2 * self._bound_params['tb_height']))
        else:
            self._std_size = std_size
            dx, dy = 0, 0
        self.array_box = BBox(0.0, 0.0, num_col * self.std_col_width + 2 * dx,
                              num_row * self.std_row_height + 2 * dy, self.grid.resolution)
        if top_layer < 0:
            top_layer = self.std_routing_layers[-1]
        if self.grid.size_defined(top_layer):
            self.set_size_from_array_box(top_layer)
        else:
            # top layer has no size quantization; use primitive size instead
            self.prim_top_layer = top_layer
            self.prim_bound_box = self.array_box

    def update_routing_grid(self):
        # type: () -> None
        """Register standard cell routing layers in the RoutingGrid.

        This method must be called first in draw_layout().
        """
        layers = self._tech_params['layers']
        widths = self._tech_params['widths']
        spaces = self._tech_params['spaces']
        directions = self._tech_params['directions']

        # copy the grid so the shared RoutingGrid is not mutated
        self.grid = self.grid.copy()
        for lay_id, w, sp, tdir in zip(layers, widths, spaces, directions):
            self.grid.add_new_layer(lay_id, sp, w, tdir, override=True)
        self.grid.update_block_pitch()

    def get_num_tracks(self, layer_id):
        # type: (int) -> int
        """Get number of tracks in this standard cell.

        Parameters
        ----------
        layer_id : int
            the layer ID.

        Returns
        -------
        num_tracks : int
            number of tracks on the given layer in this standard cell.
        """
        std_size = self.std_size
        if std_size is None:
            raise ValueError("std_size is unset. Try calling set_std_size()?")
        ncol, nrow = std_size
        tdir = self.grid.get_direction(layer_id)
        pitch = int(self.grid.get_track_pitch(layer_id, unit_mode=True))
        if tdir == 'x':
            # horizontal tracks span the cell height
            tot_dim = nrow * int(round(self.std_row_height / self.grid.resolution))
        else:
            tot_dim = ncol * int(round(self.std_col_width / self.grid.resolution))

        return tot_dim // pitch

    def add_std_instance(self, master, inst_name=None, loc=(0, 0), nx=1, ny=1,
                         spx=0, spy=0, flip_lr=False):
        # type: (StdCellBase, Optional[str], Tuple[int, int], int, int, int, int, bool) -> Instance
        """Add a new standard cell instance.

        Parameters
        ----------
        master : StdCellBase
            the standard cell template master to add.
        inst_name : Optional[str]
            the instance name.
        loc : Tuple[int, int]
            lower-left corner of the instance in number of standard cell columns/rows.
        nx : int
            horizontal array count.
        ny : int
            vertical array count.
        spx : int
            horizontal pitch in number of standard cell columns.
        spy : int
            vertical pitch in number of standard cell rows.  Must be even.
        flip_lr : bool
            True to flip the standard cell over Y axis.

        Returns
        -------
        inst : Instance
            the standard cell instance.
        """
        # odd row pitch would break the alternating R0/MX row orientation
        if spy % 2 != 0:
            raise ValueError('row pitch must be even')

        # update self._used_blocks
        master_std_size = master.std_size
        if master_std_size is None:
            raise ValueError("master.std_size is unset. Try calling master.set_std_size()?")
        inst_ncol, inst_nrow = master_std_size
        cur_nrow = loc[1] + inst_nrow + (ny - 1) * spy
        while len(self._used_blocks) < cur_nrow:
            self._used_blocks.append(IntervalSet())
        for col_off in range(nx):
            xoff = col_off * spx + loc[0]
            for row_off in range(ny):
                yoff = row_off * spy + loc[1]
                for std_row_idx in range(yoff, yoff + inst_nrow):
                    success = self._used_blocks[std_row_idx].add((xoff, xoff + inst_ncol))
                    if not success:
                        raise ValueError('Cannot add instance at std loc (%d, %d)' % (xoff, yoff))

        col_pitch = self.std_col_width
        row_pitch = self.std_row_height
        # odd rows are flipped about the X axis so supply rails abut
        if loc[1] % 2 == 0:
            orient = 'R0'
            dy = loc[1] * row_pitch
        else:
            orient = 'MX'
            dy = (loc[1] + 1) * row_pitch

        dx = loc[0] * col_pitch
        if flip_lr:
            dx += inst_ncol * col_pitch
            if orient == 'R0':
                orient = 'MY'
            else:
                orient = 'R180'

        spx_new = spx * col_pitch
        spy_new = spy * row_pitch
        if self._draw_boundaries:
            # shift the instance past the left/bottom boundary cells
            dx += self._bound_params['lr_width'] * self.std_col_width
            dy += self._bound_params['tb_height'] * self.std_row_height

        return self.add_instance(master, inst_name=inst_name, loc=(dx, dy),
                                 orient=orient, nx=nx, ny=ny, spx=spx_new, spy=spy_new)

    def draw_boundaries(self):
        # type: () -> None
        """Draw the boundary cells around this standard cell."""
        lib_name = self._bound_params['lib_name']
        suffix = self._bound_params.get('suffix', '')
        std_size_bare = self._std_size_bare
        if std_size_bare is None:
            raise ValueError("std_size_bare is unset. Try calling set_std_size()?")
        num_col, num_row = std_size_bare
        # rows alternate orientation; even-index rows use R0, odd use MX
        num_row_even = (num_row + 1) // 2
        num_row_odd = num_row - num_row_even
        wcol, hrow = self.std_col_width, self.std_row_height
        dx = self._bound_params['lr_width'] * wcol
        dy = self._bound_params['tb_height'] * hrow

        # add bottom-left
        self.add_instance_primitive(lib_name, 'boundary_bottomleft' + suffix, (0, 0))
        # add left
        self.add_instance_primitive(lib_name, 'boundary_left' + suffix, (0, dy), ny=num_row_even,
                                    spy=hrow * 2)
        if num_row_odd > 0:
            self.add_instance_primitive(lib_name, 'boundary_left' + suffix, (0, dy + 2 * hrow),
                                        orient='MX', ny=num_row_odd, spy=hrow * 2)
        # add top-left
        if num_row % 2 == 1:
            yc = dy + num_row * hrow
            self.add_instance_primitive(lib_name, 'boundary_topleft' + suffix, (0, yc))
        else:
            # even row count: reuse the bottom-left cell mirrored about X
            yc = 2 * dy + num_row * hrow
            self.add_instance_primitive(lib_name, 'boundary_bottomleft' + suffix, (0, yc),
                                        orient='MX')
        # add bottom
        self.add_instance_primitive(lib_name, 'boundary_bottom' + suffix, (dx, 0), nx=num_col,
                                    spx=wcol)
        # add top
        if num_row % 2 == 1:
            self.add_instance_primitive(lib_name, 'boundary_top' + suffix, (dx, yc), nx=num_col,
                                        spx=wcol)
        else:
            self.add_instance_primitive(lib_name, 'boundary_bottom' + suffix, (dx, yc), orient='MX',
                                        nx=num_col, spx=wcol)
        # add bottom right
        xc = dx + num_col * wcol
        self.add_instance_primitive(lib_name, 'boundary_bottomright' + suffix, (xc, 0))
        # add right
        self.add_instance_primitive(lib_name, 'boundary_right' + suffix, (xc, dy), ny=num_row_even,
                                    spy=hrow * 2)
        if num_row_odd > 0:
            self.add_instance_primitive(lib_name, 'boundary_right' + suffix, (xc, dy + 2 * hrow),
                                        orient='MX', ny=num_row_odd, spy=hrow * 2)
        # add top right
        if num_row % 2 == 1:
            self.add_instance_primitive(lib_name, 'boundary_topright' + suffix, (xc, yc))
        else:
            self.add_instance_primitive(lib_name, 'boundary_bottomright' + suffix, (xc, yc),
                                        orient='MX')

    def fill_space(self):
        # type: () -> None
        """Fill all unused blocks with spaces."""
        std_size_bare = self._std_size_bare
        if std_size_bare is None:
            raise ValueError("std_size_bare is unset. Try calling set_std_size()?")
        tot_intv = (0, std_size_bare[0])
        for row_idx, intv_set in enumerate(self._used_blocks):
            # fill every gap in this row with space blocks
            for intv in intv_set.get_complement(tot_intv).intervals():
                loc = (intv[0], row_idx)
                num_spaces = intv[1] - intv[0]
                self.add_std_space(loc, num_spaces, update_used_blks=False)

    def add_std_space(self, loc, num_col, update_used_blks=True):
        # type: (Tuple[int, int], int, bool) -> None
        """Add standard cell spaces at the given location.

        Parameters
        ----------
        loc : Tuple[int, int]
            the lower-left corner of the space block.
        num_col : int
            the space block width in number of columns.
        update_used_blks : bool
            True to register space blocks.  This flag is for internal use only.
        """
        if update_used_blks:
            # update self._used_blocks
            while len(self._used_blocks) < loc[1] + 1:
                self._used_blocks.append(IntervalSet())
            success = self._used_blocks[loc[1]].add((loc[0], loc[0] + num_col))
            if not success:
                raise ValueError('Cannot add space at std loc (%d, %d)' % (loc[0], loc[1]))

        col_pitch = self.std_col_width
        xcur = loc[0] * col_pitch
        if loc[1] % 2 == 0:
            orient = 'R0'
            ycur = loc[1] * self.std_row_height
        else:
            orient = 'MX'
            ycur = (loc[1] + 1) * self.std_row_height
        if self._draw_boundaries:
            dx = self._bound_params['lr_width'] * self.std_col_width
            dy = self._bound_params['tb_height'] * self.std_row_height
        else:
            dx = dy = 0

        # greedily tile the gap with the largest space blocks first
        for blk_params in self.get_space_blocks():
            lib_name = blk_params['lib_name']
            cell_name = blk_params['cell_name']
            blk_col = blk_params['num_col']
            num_blk, num_col = divmod(num_col, blk_col)
            blk_width = blk_col * col_pitch
            if num_blk > 0:
                self.add_instance_primitive(lib_name, cell_name, (xcur + dx, ycur + dy),
                                            orient=orient, nx=num_blk, spx=blk_width)
                xcur += num_blk * blk_width

        if num_col > 0:
            raise ValueError('has %d columns remaining' % num_col)
class StdCellTemplate(StdCellBase):
    """A template wrapper around a standard cell block.

    Parameters
    ----------
    temp_db : TemplateDB
        the template database.
    lib_name : str
        the layout library name.
    params : Dict[str, Any]
        the parameter values.
    used_names : Set[str]
        a set of already used cell names.
    **kwargs :
        dictionary of optional parameters.  See documentation of
        :class:`bag.layout.template.TemplateBase` for details.
    """

    def __init__(self, temp_db, lib_name, params, used_names, **kwargs):
        # type: (TemplateDB, str, Dict[str, Any], Set[str], **Any) -> None
        StdCellBase.__init__(self, temp_db, lib_name, params, used_names, **kwargs)
        # schematic parameters; populated by draw_layout()
        self._sch_params = None

    @property
    def sch_params(self):
        # type: () -> Optional[Dict[str, Any]]
        """Returns the schematic parameters; None until draw_layout() runs."""
        return self._sch_params

    @classmethod
    def get_params_info(cls):
        # type: () -> Dict[str, str]
        """Returns a dictionary containing parameter descriptions.

        Override this method to return a dictionary from parameter names to descriptions.

        Returns
        -------
        param_info : Dict[str, str]
            dictionary from parameter name to description.
        """
        return dict(
            cell_name='standard cell cell name.',
            config_file='standard cell configuration file name.',
        )

    def get_layout_basename(self):
        # type: () -> str
        """Returns the base name for the generated layout cell."""
        return 'stdcell_%s' % self.params['cell_name']

    def compute_unique_key(self):
        # keyed on the underlying library/cell so identical std cells share a master
        cell_params = self.get_cell_params(self.params['cell_name'])
        return 'stdcell_%s_%s' % (cell_params['lib_name'], cell_params['cell_name'])

    def get_sch_master_info(self):
        # type: () -> Tuple[str, str]
        """Returns the schematic master library/cell name tuple."""
        cell_params = self.get_cell_params(self.params['cell_name'])
        return cell_params['lib_name'], cell_params['cell_name']

    def draw_layout(self):
        # type: () -> None
        cell_params = self.get_cell_params(self.params['cell_name'])
        lib_name = cell_params['lib_name']
        cell_name = cell_params['cell_name']
        size = cell_params['size']
        ports = cell_params['ports']

        # update routing grid
        self.update_routing_grid()
        # add instance
        self.add_instance_primitive(lib_name, cell_name, (0, 0))
        # compute size
        self.set_std_size(size)

        # add pins
        res = self.grid.resolution
        for port_name, pin_list in ports.items():
            for pin in pin_list:
                port_lay_id = pin['layer']
                bbox = pin['bbox']
                layer_dir = self.grid.get_direction(port_lay_id)
                # pick the bbox edges along/across the track direction
                if layer_dir == 'x':
                    intv = bbox[1], bbox[3]
                    lower, upper = bbox[0], bbox[2]
                else:
                    intv = bbox[0], bbox[2]
                    lower, upper = bbox[1], bbox[3]
                tr_idx, tr_w = self.grid.interval_to_track(port_lay_id, intv)
                warr = WireArray(TrackID(port_lay_id, tr_idx, width=tr_w), lower, upper,
                                 res=res, unit_mode=False)
                self.add_pin(port_name, warr, show=False)

        # set properties
        self._sch_params = cell_params.get('sch_params', None)
================================================
FILE: bag/layout/objects.py
================================================
# -*- coding: utf-8 -*-
"""This module defines various layout objects one can add and manipulate in a template.
"""
from typing import TYPE_CHECKING, Union, List, Tuple, Optional, Dict, Any, Iterator, Iterable, \
Generator
import abc
import numpy as np
from copy import deepcopy
from .util import transform_table, BBox, BBoxArray, transform_point, get_inverse_transform
from .routing.base import Port, WireArray
from .. import io
if TYPE_CHECKING:
from .template import TemplateBase
from .routing.grid import RoutingGrid
# Type aliases: layout dimensions may be given either as float (layout units)
# or as int (resolution units); a location is an (x, y) pair of such values.
ldim = Union[float, int]
loc_type = Tuple[ldim, ldim]
class Figure(object, metaclass=abc.ABCMeta):
    """Base class of all layout objects.

    Tracks the layout resolution and a destroyed flag; subclasses must
    implement transform() and move_by().

    Parameters
    ----------
    resolution : float
        layout unit resolution.
    """

    def __init__(self, resolution):
        # type: (float) -> None
        self._res = resolution
        self._destroyed = False

    @abc.abstractmethod
    def transform(self, loc=(0, 0), orient='R0', unit_mode=False, copy=False):
        # type: (Tuple[ldim, ldim], str, bool, bool) -> Figure
        """Transform this figure."""
        pass

    @abc.abstractmethod
    def move_by(self, dx=0, dy=0, unit_mode=False):
        # type: (ldim, ldim, bool) -> None
        """Move this path by the given amount.

        Parameters
        ----------
        dx : float
            the X shift.
        dy : float
            the Y shift.
        unit_mode : bool
            True if shifts are given in resolution units.
        """
        pass

    @property
    def resolution(self):
        # type: () -> float
        """Returns the layout unit resolution."""
        return self._res

    @property
    def destroyed(self):
        # type: () -> bool
        """Returns True if this instance is destroyed"""
        return self._destroyed

    @property
    def valid(self):
        # type: () -> bool
        """Returns True if this figure is valid."""
        return not self._destroyed

    def check_destroyed(self):
        # type: () -> None
        """Raises an exception if this object is already destroyed."""
        if self._destroyed:
            raise Exception('This %s is already destroyed.' % self.__class__.__name__)

    def destroy(self):
        # type: () -> None
        """Destroy this instance."""
        self._destroyed = True
# noinspection PyAbstractClass
class Arrayable(Figure, metaclass=abc.ABCMeta):
    """A layout object that can be replicated on a regular grid.

    Also handles destroy support.

    Parameters
    ----------
    res : float
        layout unit resolution.
    nx : int
        number of columns.
    ny : int
        number of rows.
    spx : Union[float, int]
        column pitch.
    spy : Union[float, int]
        row pitch.
    unit_mode : bool
        True if spx/spy are specified in resolution units.
    """

    def __init__(self, res, nx=1, ny=1, spx=0, spy=0, unit_mode=False):
        # type: (float, int, int, ldim, ldim, bool) -> None
        Figure.__init__(self, res)
        self._nx = nx
        self._ny = ny
        if unit_mode:
            self._spx_unit = spx
            self._spy_unit = spy
        else:
            # snap the physical pitches to the resolution grid
            self._spx_unit = int(round(spx / res))
            self._spy_unit = int(round(spy / res))

    @property
    def nx(self):
        # type: () -> int
        """Number of columns."""
        return self._nx

    @nx.setter
    def nx(self, value):
        # type: (int) -> None
        """Sets the number of columns."""
        self.check_destroyed()
        if value <= 0:
            raise ValueError('Cannot have non-positive number of columns.')
        self._nx = value

    @property
    def ny(self):
        # type: () -> int
        """Number of rows."""
        return self._ny

    @ny.setter
    def ny(self, value):
        # type: (int) -> None
        """Sets the number of rows."""
        self.check_destroyed()
        if value <= 0:
            raise ValueError('Cannot have non-positive number of rows.')
        self._ny = value

    @property
    def spx(self):
        # type: () -> float
        """The column pitch, in layout units."""
        return self._spx_unit * self.resolution

    @spx.setter
    def spx(self, value):
        # type: (float) -> None
        """Sets the new column pitch."""
        self.check_destroyed()
        if value < 0:
            raise ValueError('Currently does not support negative pitches.')
        self._spx_unit = int(round(value / self.resolution))

    @property
    def spx_unit(self):
        # type: () -> int
        """The column pitch, in resolution units."""
        return self._spx_unit

    @spx_unit.setter
    def spx_unit(self, value):
        # type: (int) -> None
        """Sets the new column pitch in resolution units."""
        self.check_destroyed()
        if value < 0:
            raise ValueError('Currently does not support negative pitches.')
        self._spx_unit = value

    @property
    def spy(self):
        # type: () -> float
        """The row pitch, in layout units."""
        return self._spy_unit * self.resolution

    @spy.setter
    def spy(self, value):
        # type: (float) -> None
        """Sets the new row pitch."""
        self.check_destroyed()
        if value < 0:
            raise ValueError('Currently does not support negative pitches.')
        self._spy_unit = int(round(value / self.resolution))

    @property
    def spy_unit(self):
        # type: () -> int
        """The row pitch, in resolution units."""
        return self._spy_unit

    @spy_unit.setter
    def spy_unit(self, value):
        # type: (int) -> None
        """Sets the new row pitch in resolution units."""
        self.check_destroyed()
        if value < 0:
            raise ValueError('Currently does not support negative pitches.')
        self._spy_unit = value

    @Figure.valid.getter
    def valid(self):
        # type: () -> bool
        """Returns True if this instance is valid, i.e. not destroyed and nx, ny >= 1."""
        return not self.destroyed and self.nx >= 1 and self.ny >= 1

    def get_item_location(self, row=0, col=0, unit_mode=False):
        # type: (int, int, bool) -> Tuple[ldim, ldim]
        """Returns the location of the given item in the array.

        Parameters
        ----------
        row : int
            the item row index.  0 is the bottom-most row.
        col : int
            the item column index.  0 is the left-most column.
        unit_mode : bool
            True to return coordinates in resolution units.

        Returns
        -------
        xo : Union[float, int]
            the item X coordinate.
        yo : Union[float, int]
            the item Y coordinate.
        """
        if not (0 <= row < self.ny) or not (0 <= col < self.nx):
            raise ValueError('Invalid row/col index: row=%d, col=%d' % (row, col))
        xo = col * self._spx_unit
        yo = row * self._spy_unit
        if unit_mode:
            return xo, yo
        return xo * self.resolution, yo * self.resolution
class InstanceInfo(dict):
    """A dictionary that represents a layout instance."""

    # keys every InstanceInfo carries, in canonical order
    param_list = ['lib', 'cell', 'view', 'name', 'loc', 'orient', 'num_rows',
                  'num_cols', 'sp_rows', 'sp_cols', 'master_key']

    def __init__(self, res, change_orient=True, **kwargs):
        dict.__init__(self, {key: kwargs.get(key) for key in self.param_list})
        self._resolution = res
        if 'params' in kwargs:
            self.params = kwargs['params']

        # skill/OA array before rotation, while we're doing the opposite.
        # this is supposed to fix it.
        if change_orient:
            orient = self['orient']
            sr, sc = self['sp_rows'], self['sp_cols']
            nr, nc = self['num_rows'], self['num_cols']
            if orient == 'R0':
                return
            if orient == 'R180':
                sr, sc = -sr, -sc
            elif orient == 'MX':
                sr = -sr
            elif orient == 'MY':
                sc = -sc
            elif orient == 'R90':
                sr, sc, nr, nc = sc, -sr, nc, nr
            elif orient == 'MXR90':
                sr, sc, nr, nc = sc, sr, nc, nr
            elif orient == 'MYR90':
                sr, sc, nr, nc = -sc, -sr, nc, nr
            elif orient == 'R270':
                sr, sc, nr, nc = -sc, sr, nc, nr
            else:
                raise ValueError('Unknown orientation: %s' % orient)
            self['sp_rows'], self['sp_cols'] = sr, sc
            self['num_rows'], self['num_cols'] = nr, nc

    @property
    def lib(self):
        # type: () -> str
        """The instance library name."""
        return self['lib']

    @property
    def cell(self):
        # type: () -> str
        """The instance cell name."""
        return self['cell']

    @property
    def view(self):
        # type: () -> str
        """The instance view name."""
        return self['view']

    @property
    def name(self):
        # type: () -> str
        """The instance name."""
        return self['name']

    @name.setter
    def name(self, new_name):
        # type: (str) -> None
        self['name'] = new_name

    @property
    def loc(self):
        # type: () -> Tuple[float, float]
        """The instance origin, as an (x, y) tuple."""
        pts = self['loc']
        return pts[0], pts[1]

    @property
    def orient(self):
        # type: () -> str
        """The instance orientation string."""
        return self['orient']

    @property
    def num_rows(self):
        # type: () -> int
        """Number of rows in the instance array."""
        return self['num_rows']

    @property
    def num_cols(self):
        # type: () -> int
        """Number of columns in the instance array."""
        return self['num_cols']

    @property
    def sp_rows(self):
        # type: () -> float
        """The row pitch of the instance array."""
        return self['sp_rows']

    @property
    def sp_cols(self):
        # type: () -> float
        """The column pitch of the instance array."""
        return self['sp_cols']

    @property
    def params(self):
        # type: () -> Optional[Dict[str, Any]]
        """Optional master parameter dictionary, or None if not set."""
        return self.get('params', None)

    @params.setter
    def params(self, new_params):
        # type: (Optional[Dict[str, Any]]) -> None
        self['params'] = new_params

    @property
    def master_key(self):
        """Optional unique key of the instance master, or None if not set."""
        return self.get('master_key', None)

    @master_key.setter
    def master_key(self, value):
        self['master_key'] = value

    @property
    def angle_reflect(self):
        # type: () -> Tuple[int, bool]
        """The (rotation angle, reflect) pair equivalent to this orientation."""
        table = {'R0': (0, False), 'R180': (180, False),
                 'MX': (0, True), 'MY': (180, True),
                 'R90': (90, False), 'MXR90': (90, True),
                 'MYR90': (270, True), 'R270': (270, False)}
        orient = self['orient']
        if orient not in table:
            raise ValueError('Unknown orientation: %s' % orient)
        return table[orient]

    def copy(self):
        """Override copy method of dictionary to return an InstanceInfo instead."""
        return InstanceInfo(self._resolution, change_orient=False, **self)

    def move_by(self, dx=0, dy=0):
        # type: (float, float) -> None
        """Move this instance by the given amount, snapping to the resolution grid.

        Parameters
        ----------
        dx : float
            the X shift.
        dy : float
            the Y shift.
        """
        res = self._resolution
        xo, yo = self.loc
        self['loc'] = [round((xo + dx) / res) * res,
                       round((yo + dy) / res) * res]
class Instance(Arrayable):
    """A layout instance, with optional arraying parameters.

    Parameters
    ----------
    parent_grid : RoutingGrid
        the parent RoutingGrid object.
    lib_name : str
        the layout library name.
    master : TemplateBase
        the master template of this instance.
    loc : Tuple[Union[float, int], Union[float, int]]
        the origin of this instance.
    orient : str
        the orientation of this instance.
    name : Optional[str]
        name of this instance.
    nx : int
        number of columns.
    ny : int
        number of rows.
    spx : Union[float, int]
        column pitch.
    spy : Union[float, int]
        row pitch.
    unit_mode : bool
        True if layout dimensions are specified in resolution units.
    """

    def __init__(self,
                 parent_grid,  # type: RoutingGrid
                 lib_name,  # type: str
                 master,  # type: TemplateBase
                 loc,  # type: Tuple[ldim, ldim]
                 orient,  # type: str
                 name=None,  # type: Optional[str]
                 nx=1,  # type: int
                 ny=1,  # type: int
                 spx=0,  # type: ldim
                 spy=0,  # type: ldim
                 unit_mode=False,  # type: bool
                 ):
        # type: (...) -> None
        res = parent_grid.resolution
        Arrayable.__init__(self, res, nx=nx, ny=ny, spx=spx, spy=spy, unit_mode=unit_mode)
        self._parent_grid = parent_grid
        self._lib_name = lib_name
        self._inst_name = name
        self._master = master
        if unit_mode:
            self._loc_unit = loc[0], loc[1]
        else:
            # snap the origin to the resolution grid
            self._loc_unit = int(round(loc[0] / res)), int(round(loc[1] / res))
        self._orient = orient

    def new_master_with(self, **kwargs):
        # type: (**Any) -> None
        """Change the master template of this instance.

        This method will get the old master template layout parameters, update
        the parameter values with the given dictionary, then create a new master
        template with those parameters and associate it with this instance.

        Parameters
        ----------
        **kwargs
            a dictionary of new parameter values.
        """
        self._master = self._master.new_template_with(**kwargs)

    def blockage_iter(self, layer_id, test_box, spx=0, spy=0):
        # type: (int, BBox, int, int) -> Generator[BBox, None, None]
        """Iterate over master blockage boxes that can interact with test_box.

        spx/spy expand the test box before computing which array elements to
        visit, and are forwarded to the master query.  All coordinates are in
        resolution units.
        """
        # transform the given BBox to master coordinate
        if self.destroyed:
            return
        base_box = self._master.get_track_bbox(layer_id)
        if not base_box.is_physical():
            return
        base_box = self.translate_master_box(base_box)
        test = test_box.expand(dx=spx, dy=spy, unit_mode=True)
        # clamp pitches to >= 1 so the divisions below are well-defined
        # even for non-arrayed instances (pitch 0)
        inst_spx = max(self.spx_unit, 1)
        inst_spy = max(self.spy_unit, 1)
        xl = base_box.left_unit
        yb = base_box.bottom_unit
        xr = base_box.right_unit
        yt = base_box.top_unit
        # -(-a // b) is ceiling division; [nx0, nx1] x [ny0, ny1] is the range
        # of array indices whose copy of base_box can overlap the test box
        nx0 = max(0, -(-(test.left_unit - xr) // inst_spx))
        nx1 = min(self.nx - 1, (test.right_unit - xl) // inst_spx)
        ny0 = max(0, -(-(test.bottom_unit - yt) // inst_spy))
        ny1 = min(self.ny - 1, (test.top_unit - yb) // inst_spy)
        orient = self._orient
        x0, y0 = self._loc_unit
        if (orient == 'R90' or orient == 'R270' or
                orient == 'MXR90' or orient == 'MYR90'):
            # 90-degree rotations swap X and Y in master coordinates, so swap
            # the margins passed down to the master query
            spx, spy = spy, spx
        for row in range(ny0, ny1 + 1):
            for col in range(nx0, nx1 + 1):
                dx, dy = self.get_item_location(row=row, col=col, unit_mode=True)
                loc = dx + x0, dy + y0
                # map the test box into master coordinates, query the master,
                # then map the results back to instance coordinates
                inv_loc, inv_orient = get_inverse_transform(loc, orient)
                cur_box = test_box.transform(inv_loc, inv_orient, unit_mode=True)
                for box in self._master.blockage_iter(layer_id, cur_box, spx=spx, spy=spy):
                    yield box.transform(loc, orient, unit_mode=True)

    def all_rect_iter(self):
        # type: () -> Generator[Tuple[int, BBox, int, int], None, None]
        """Iterate over all master rectangles for every element of this array.

        Yields (layer_id, transformed box, sdx, sdy) tuples, where sdx/sdy are
        the pitch values reported by the master (swapped for 90-degree
        orientations).
        """
        if self.destroyed:
            return
        orient = self._orient
        x0, y0 = self._loc_unit
        flip = (orient == 'R90' or orient == 'R270' or orient == 'MXR90' or orient == 'MYR90')
        for layer_id, box, sdx, sdy in self._master.all_rect_iter():
            if flip:
                # 90-degree rotations swap the X/Y pitches
                sdx, sdy = sdy, sdx
            for row in range(self.ny):
                for col in range(self.nx):
                    dx, dy = self.get_item_location(row=row, col=col, unit_mode=True)
                    loc = dx + x0, dy + y0
                    yield layer_id, box.transform(loc, orient, unit_mode=True), sdx, sdy

    def intersection_rect_iter(self, layer_id, test_box):
        # type: (int, BBox) -> Generator[BBox, None, None]
        """Iterate over master rectangles on the given layer intersecting test_box.

        Boxes are yielded in instance coordinates, in resolution units.
        """
        if self.destroyed:
            return
        base_box = self._master.get_track_bbox(layer_id)
        if not base_box.is_physical():
            return
        base_box = self.translate_master_box(base_box)
        # clamp pitches to >= 1 so the divisions below are well-defined
        inst_spx = max(self.spx_unit, 1)
        inst_spy = max(self.spy_unit, 1)
        xl = base_box.left_unit
        yb = base_box.bottom_unit
        xr = base_box.right_unit
        yt = base_box.top_unit
        # -(-a // b) is ceiling division; restrict to array elements that can
        # possibly overlap the test box
        nx0 = max(0, -(-(test_box.left_unit - xr) // inst_spx))
        nx1 = min(self.nx - 1, (test_box.right_unit - xl) // inst_spx)
        ny0 = max(0, -(-(test_box.bottom_unit - yt) // inst_spy))
        ny1 = min(self.ny - 1, (test_box.top_unit - yb) // inst_spy)
        orient = self._orient
        x0, y0 = self._loc_unit
        for row in range(ny0, ny1 + 1):
            for col in range(nx0, nx1 + 1):
                dx, dy = self.get_item_location(row=row, col=col, unit_mode=True)
                loc = dx + x0, dy + y0
                # query the master in its own coordinates, then map results back
                inv_loc, inv_orient = get_inverse_transform(loc, orient)
                cur_box = test_box.transform(inv_loc, inv_orient, unit_mode=True)
                for box in self._master.intersection_rect_iter(layer_id, cur_box):
                    yield box.transform(loc, orient, unit_mode=True)

    def get_rect_bbox(self, layer):
        """Returns the overall bounding box of all rectangles on the given layer.

        Note: currently this does not check primitive instances or vias.
        """
        bbox = self._master.get_rect_bbox(layer)
        if not bbox.is_valid():
            return bbox
        box_arr = BBoxArray(self.translate_master_box(bbox), nx=self.nx, ny=self.ny,
                            spx=self.spx_unit, spy=self.spy_unit, unit_mode=True)
        return box_arr.get_overall_bbox()

    def track_bbox_iter(self):
        """Iterate over (layer_id, overall track bounding box) pairs of this array."""
        for layer_id, bbox in self._master.track_bbox_iter():
            box_arr = BBoxArray(self.translate_master_box(bbox), nx=self.nx, ny=self.ny,
                                spx=self.spx_unit, spy=self.spy_unit, unit_mode=True)
            yield layer_id, box_arr.get_overall_bbox()

    @property
    def master(self):
        # type: () -> TemplateBase
        """The master template of this instance."""
        return self._master

    @property
    def location(self):
        # type: () -> Tuple[float, float]
        """The instance location, in layout units."""
        return self._loc_unit[0] * self.resolution, self._loc_unit[1] * self.resolution

    @location.setter
    def location(self, new_loc):
        # type: (Tuple[float, float]) -> None
        """Sets the instance location."""
        self.check_destroyed()
        self._loc_unit = (int(round(new_loc[0] / self.resolution)),
                          int(round(new_loc[1] / self.resolution)))

    @property
    def location_unit(self):
        # type: () -> Tuple[int, int]
        """The instance location, in resolution units."""
        return self._loc_unit

    @location_unit.setter
    def location_unit(self, new_loc):
        # type: (Tuple[int, int]) -> None
        """Sets the instance location."""
        self.check_destroyed()
        self._loc_unit = (new_loc[0], new_loc[1])

    @property
    def orientation(self):
        # type: () -> str
        """The instance orientation"""
        return self._orient

    @orientation.setter
    def orientation(self, val):
        # type: (str) -> None
        """Sets the instance orientation."""
        self.check_destroyed()
        if val not in transform_table:
            raise ValueError('Unsupported orientation: %s' % val)
        self._orient = val

    @property
    def content(self):
        # type: () -> InstanceInfo
        """A dictionary representation of this instance."""
        return InstanceInfo(self.resolution,
                            lib=self._lib_name,
                            cell=self.master.cell_name,
                            view='layout',
                            name=self._inst_name,
                            loc=list(self.location),
                            orient=self.orientation,
                            num_rows=self.ny,
                            num_cols=self.nx,
                            sp_rows=self.spy,
                            sp_cols=self.spx,
                            master_key=self.master.key
                            )

    @property
    def bound_box(self):
        # type: () -> BBox
        """Returns the overall bounding box of this instance."""
        box_arr = BBoxArray(self._master.bound_box, nx=self.nx, ny=self.ny,
                            spx=self._spx_unit, spy=self._spy_unit, unit_mode=True)
        return box_arr.get_overall_bbox().transform(self.location_unit, self.orientation,
                                                    unit_mode=True)

    @property
    def array_box(self):
        # type: () -> BBox
        """Returns the array box of this instance."""
        master_box = getattr(self._master, 'array_box', None)  # type: BBox
        if master_box is None:
            raise ValueError('Master template array box is not defined.')
        box_arr = BBoxArray(master_box, nx=self.nx, ny=self.ny,
                            spx=self._spx_unit, spy=self._spy_unit, unit_mode=True)
        return box_arr.get_overall_bbox().transform(self.location_unit, self.orientation,
                                                    unit_mode=True)

    @property
    def fill_box(self):
        # type: () -> BBox
        """Returns the fill box of this instance."""
        master_box = getattr(self._master, 'fill_box', None)  # type: BBox
        if master_box is None:
            raise ValueError('Master template fill box is not defined.')
        box_arr = BBoxArray(master_box, nx=self.nx, ny=self.ny,
                            spx=self._spx_unit, spy=self._spy_unit, unit_mode=True)
        return box_arr.get_overall_bbox().transform(self.location_unit, self.orientation,
                                                    unit_mode=True)

    def get_bound_box_of(self, row=0, col=0):
        """Returns the bounding box of an instance in this mosaic."""
        dx, dy = self.get_item_location(row=row, col=col, unit_mode=True)
        xshift, yshift = self._loc_unit
        xshift += dx
        yshift += dy
        return self._master.bound_box.transform((xshift, yshift), self.orientation, unit_mode=True)

    def move_by(self, dx=0, dy=0, unit_mode=False):
        # type: (Union[float, int], Union[float, int], bool) -> None
        """Move this instance by the given amount.

        Parameters
        ----------
        dx : Union[float, int]
            the X shift.
        dy : Union[float, int]
            the Y shift.
        unit_mode : bool
            True if shifts are given in resolution units
        """
        if not unit_mode:
            dx = int(round(dx / self.resolution))
            dy = int(round(dy / self.resolution))
        self._loc_unit = self._loc_unit[0] + dx, self._loc_unit[1] + dy

    def translate_master_box(self, box):
        # type: (BBox) -> BBox
        """Transform the bounding box in master template.

        Parameters
        ----------
        box : BBox
            the BBox in master template coordinate.

        Returns
        -------
        new_box : BBox
            the corresponding BBox in instance coordinate.
        """
        return box.transform(self.location_unit, self.orientation, unit_mode=True)

    def translate_master_location(self,
                                  mloc,  # type: Tuple[Union[float, int], Union[float, int]]
                                  unit_mode=False,  # type: bool
                                  ):
        # type: (...) -> Tuple[Union[float, int], Union[float, int]]
        """Returns the actual location of the given point in master template.

        Parameters
        ----------
        mloc : Tuple[Union[float, int], Union[float, int]]
            the location in master coordinate.
        unit_mode : bool
            True if location is given in resolution units.

        Returns
        -------
        xi : Union[float, int]
            the actual X coordinate.  Integer if unit_mode is True.
        yi : Union[float, int]
            the actual Y coordinate.  Integer if unit_mode is True.
        """
        res = self.resolution
        if unit_mode:
            mx, my = mloc[0], mloc[1]
        else:
            mx, my = int(round(mloc[0] / res)), int(round(mloc[1] / res))
        p = transform_point(mx, my, self.location_unit, self.orientation)
        if unit_mode:
            return p[0], p[1]
        return p[0] * res, p[1] * res

    def translate_master_track(self, layer_id, track_idx):
        # type: (int, Union[float, int]) -> Union[float, int]
        """Returns the actual track index of the given track in master template.

        Parameters
        ----------
        layer_id : int
            the layer ID.
        track_idx : Union[float, int]
            the track index.

        Returns
        -------
        new_idx : Union[float, int]
            the new track index.
        """
        dx, dy = self.location_unit
        return self._parent_grid.transform_track(layer_id, track_idx, dx=dx, dy=dy,
                                                 orient=self.orientation, unit_mode=True)

    def get_port(self, name='', row=0, col=0):
        # type: (Optional[str], int, int) -> Port
        """Returns the port object of the given instance in the array.

        Parameters
        ----------
        name : Optional[str]
            the port terminal name.  If None or empty, check if this
            instance has only one port, then return it.
        row : int
            the instance row index.  Index 0 is the bottom-most row.
        col : int
            the instance column index.  Index 0 is the left-most column.

        Returns
        -------
        port : Port
            the port object.
        """
        dx, dy = self.get_item_location(row=row, col=col, unit_mode=True)
        xshift, yshift = self._loc_unit
        loc = (xshift + dx, yshift + dy)
        return self._master.get_port(name).transform(self._parent_grid, loc=loc,
                                                     orient=self.orientation, unit_mode=True)

    def get_pin(self, name='', row=0, col=0, layer=-1):
        # type: (Optional[str], int, int, int) -> Union[WireArray, BBox]
        """Returns the first pin with the given name.

        This is an efficient method if you know this instance has exactly one pin.

        Parameters
        ----------
        name : Optional[str]
            the port terminal name.  If None or empty, check if this
            instance has only one port, then return it.
        row : int
            the instance row index.  Index 0 is the bottom-most row.
        col : int
            the instance column index.  Index 0 is the left-most column.
        layer : int
            the pin layer.  If negative, check to see if the given port has only one layer.
            If so then use that layer.

        Returns
        -------
        pin : Union[WireArray, BBox]
            the first pin associated with the port of given name.
        """
        port = self.get_port(name, row, col)
        return port.get_pins(layer)[0]

    def get_all_port_pins(self, name='', layer=-1):
        # type: (Optional[str], int) -> List[WireArray]
        """Returns a list of all pins of all ports with the given name in this instance array.

        This method gathers ports from all instances in this array with the given name,
        then find all pins of those ports on the given layer, then return as list of WireArrays.

        Parameters
        ----------
        name : Optional[str]
            the port terminal name.  If None or empty, check if this
            instance has only one port, then return it.
        layer : int
            the pin layer.  If negative, check to see if the given port has only one layer.
            If so then use that layer.

        Returns
        -------
        pin_list : List[WireArray]
            the list of pins as WireArrays.
        """
        results = []
        for col in range(self.nx):
            for row in range(self.ny):
                port = self.get_port(name, row, col)
                results.extend(port.get_pins(layer))
        return results

    def port_pins_iter(self, name='', layer=-1):
        # type: (Optional[str], int) -> Iterator[WireArray]
        """Iterate through all pins of all ports with the given name in this instance array.

        Parameters
        ----------
        name : Optional[str]
            the port terminal name.  If None or empty, check if this
            instance has only one port, then return it.
        layer : int
            the pin layer.  If negative, check to see if the given port has only one layer.
            If so then use that layer.

        Yields
        ------
        pin : WireArray
            the pin as WireArray.
        """
        for col in range(self.nx):
            for row in range(self.ny):
                try:
                    port = self.get_port(name, row, col)
                except KeyError:
                    # stop iteration entirely if the port does not exist
                    return
                for warr in port.get_pins(layer):
                    yield warr

    def port_names_iter(self):
        # type: () -> Iterable[str]
        """Iterates over port names in this instance.

        Yields
        ------
        port_name : str
            name of a port in this instance.
        """
        return self._master.port_names_iter()

    def has_port(self, port_name):
        # type: (str) -> bool
        """Returns True if this instance has the given port."""
        return self._master.has_port(port_name)

    def has_prim_port(self, port_name):
        # type: (str) -> bool
        """Returns True if this instance has the given primitive port."""
        return self._master.has_prim_port(port_name)

    def transform(self, loc=(0, 0), orient='R0', unit_mode=False, copy=False):
        # type: (Tuple[ldim, ldim], str, bool, bool) -> Optional[Figure]
        """Transform this figure.

        Note: this sets the location/orientation directly; it does not compose
        with the current transform.  If copy is True the original is unchanged
        and a deep copy is returned.
        """
        if not unit_mode:
            res = self.resolution
            loc = int(round(loc[0] / res)), int(round(loc[1] / res))
        if not copy:
            ans = self
        else:
            ans = deepcopy(self)
        ans._loc_unit = loc
        ans._orient = orient
        return ans
class Rect(Arrayable):
    """A layout rectangle, with optional arraying parameters.

    Parameters
    ----------
    layer : string or (string, string)
        the layer name, or a tuple of layer name and purpose name.
        If purpose name not given, defaults to 'drawing'.
    bbox : ..layout.util.BBox or ..layout.util.BBoxArray
        the base bounding box.  If this is a BBoxArray, the BBoxArray's
        arraying parameters are used.
    nx : int
        number of columns.
    ny : int
        number of rows.
    spx : float
        column pitch.
    spy : float
        row pitch.
    unit_mode : bool
        True if layout dimensions are specified in resolution units.
    """

    def __init__(self, layer, bbox, nx=1, ny=1, spx=0, spy=0, unit_mode=False):
        # python 2/3 compatibility: convert raw bytes to string.
        lay = io.fix_string(layer)
        if isinstance(lay, str):
            # purpose defaults to 'drawing'
            lay = (lay, 'drawing')
        self._layer = lay[0], lay[1]
        if isinstance(bbox, BBoxArray):
            # inherit arraying parameters from the given BBoxArray
            self._bbox = bbox.base
            Arrayable.__init__(self, self._bbox.resolution, nx=bbox.nx, ny=bbox.ny,
                               spx=bbox.spx_unit, spy=bbox.spy_unit, unit_mode=True)
        else:
            self._bbox = bbox
            Arrayable.__init__(self, self._bbox.resolution, nx=nx, ny=ny,
                               spx=spx, spy=spy, unit_mode=unit_mode)

    @property
    def bbox_array(self):
        """The BBoxArray representing this (Arrayed) rectangle.

        Returns
        -------
        barr : :class:`..layout.util.BBoxArray`
            the BBoxArray representing this (Arrayed) rectangle.
        """
        return BBoxArray(self._bbox, nx=self.nx, ny=self.ny,
                         spx=self.spx_unit, spy=self.spy_unit, unit_mode=True)

    @property
    def layer(self):
        """The rectangle (layer, purpose) pair."""
        return self._layer

    @layer.setter
    def layer(self, val):
        """Sets the rectangle layer."""
        self.check_destroyed()
        # python 2/3 compatibility: convert raw bytes to string.
        new_lay = io.fix_string(val)
        if isinstance(new_lay, str):
            new_lay = (new_lay, 'drawing')
        self._layer = new_lay[0], new_lay[1]
        print("WARNING: USING THIS BREAKS POWER FILL ALGORITHM.")

    @property
    def bbox(self):
        """The rectangle bounding box."""
        return self._bbox

    @bbox.setter
    def bbox(self, val):
        """Sets the rectangle bounding box."""
        self.check_destroyed()
        if not val.is_physical():
            raise ValueError('Bounding box %s is not physical' % val)
        print("WARNING: USING THIS BREAKS POWER FILL ALGORITHM.")
        self._bbox = val

    @property
    def content(self):
        """A dictionary representation of this rectangle."""
        box = self.bbox
        ans = dict(layer=list(self.layer),
                   bbox=[[box.left, box.bottom], [box.right, box.top]],
                   )
        if self.nx > 1 or self.ny > 1:
            # only emit arraying parameters for true arrays
            ans['arr_nx'] = self.nx
            ans['arr_ny'] = self.ny
            ans['arr_spx'] = self.spx
            ans['arr_spy'] = self.spy
        return ans

    def move_by(self, dx=0, dy=0, unit_mode=False):
        """Move the base rectangle by the given amount.

        Parameters
        ----------
        dx : float
            the X shift.
        dy : float
            the Y shift.
        unit_mode : bool
            True if layout dimensions are specified in resolution units.
        """
        print("WARNING: USING THIS BREAKS POWER FILL ALGORITHM.")
        self._bbox = self._bbox.move_by(dx=dx, dy=dy, unit_mode=unit_mode)

    def extend(self, x=None, y=None):
        """Extend the base rectangle horizontally or vertically so it overlaps
        the given X/Y coordinate.

        Parameters
        ----------
        x : float or None
            if not None, make sure the base rectangle overlaps this X coordinate.
        y : float or None
            if not None, make sure the base rectangle overlaps this Y coordinate.
        """
        print("WARNING: USING THIS BREAKS POWER FILL ALGORITHM.")
        self._bbox = self._bbox.extend(x=x, y=y)

    def transform(self, loc=(0, 0), orient='R0', unit_mode=False, copy=False):
        # type: (Tuple[ldim, ldim], str, bool, bool) -> Optional[Figure]
        """Transform this figure."""
        new_box = self._bbox.transform(loc=loc, orient=orient, unit_mode=unit_mode)
        if copy:
            ans = deepcopy(self)
        else:
            print("WARNING: USING THIS BREAKS POWER FILL ALGORITHM.")
            ans = self
        ans._bbox = new_box
        return ans

    def destroy(self):
        # type: () -> None
        """Destroy this instance."""
        print("WARNING: USING THIS BREAKS POWER FILL ALGORITHM.")
        Arrayable.destroy(self)
class Path(Figure):
    """A layout path.  Only 45/90 degree turns are allowed.

    Parameters
    ----------
    resolution : float
        the layout grid resolution.
    layer : string or (string, string)
        the layer name, or a tuple of layer name and purpose name.
        If purpose name not given, defaults to 'drawing'.
    width : float
        width of this path, in layout units.
    points : List[Tuple[float, float]]
        list of path points.
    end_style : str
        the path ends style.  Currently support 'truncate', 'extend', and 'round'.
    join_style : str
        the ends style at intermediate points of the path.
        Currently support 'extend' and 'round'.
    unit_mode : bool
        True if width and points are given as resolution units instead of layout units.
    """

    def __init__(self,
                 resolution,  # type: float
                 layer,  # type: Union[str, Tuple[str, str]]
                 width,  # type: Union[int, float]
                 points,  # type: List[Tuple[Union[int, float], Union[int, float]]]
                 end_style='truncate',  # type: str
                 join_style='extend',  # type: str
                 unit_mode=False,  # type: bool
                 ):
        # type: (...) -> None
        layer = io.fix_string(layer)
        Figure.__init__(self, resolution)
        if isinstance(layer, str):
            layer = (layer, 'drawing')
        self._layer = layer
        self._end_style = end_style
        self._join_style = join_style
        self._destroyed = False
        self._width = 0
        self._points = None
        if unit_mode:
            self._width = width
            pt_list = self.compress_points(points)
        else:
            # snap width and points to the resolution grid first
            self._width = int(round(width / resolution))
            pt_list = self.compress_points(
                (int(round(x / resolution)), int(round(y / resolution))) for x, y in points)
        self._points = np.array(pt_list, dtype=int)

    @classmethod
    def compress_points(cls, pts_unit):
        """Remove duplicate and collinear points, and check that every segment
        is horizontal, vertical, or 45 degrees.  Coordinates are in resolution
        units."""
        pt_list = []
        for x, y in pts_unit:
            if not pt_list:
                pt_list.append((x, y))
                continue
            px, py = pt_list[-1]
            if x == px and y == py:
                # skip duplicate point
                continue
            dx, dy = x - px, y - py
            if dx != 0 and dy != 0 and abs(dx) != abs(dy):
                # neither axis-aligned nor 45 degrees
                raise ValueError('Cannot have line segment (%d, %d)->(%d, %d) in path'
                                 % (px, py, x, y))
            if len(pt_list) >= 2:
                # check for collinearity with the previous segment
                dx0, dy0 = px - pt_list[-2][0], py - pt_list[-2][1]
                if (dx == 0 and dx0 == 0) or (dx != 0 and dx0 != 0 and
                                              dy / dx == dy0 / dx0):
                    # collinear: drop the middle point
                    del pt_list[-1]
            pt_list.append((x, y))
        return pt_list

    @property
    def layer(self):
        # type: () -> Tuple[str, str]
        """The path (layer, purpose) pair."""
        return self._layer

    @Figure.valid.getter
    def valid(self):
        # type: () -> bool
        """True if this path is not destroyed, has positive width, and at least 2 points."""
        return not self.destroyed and len(self._points) >= 2 and self._width > 0

    @property
    def width(self):
        """The path width, in layout units."""
        return self._width * self._res

    @property
    def points(self):
        """The path points, in layout units."""
        res = self._res
        return [(pt[0] * res, pt[1] * res) for pt in self._points]

    @property
    def points_unit(self):
        """The path points, in resolution units."""
        return [(pt[0], pt[1]) for pt in self._points]

    @property
    def content(self):
        # type: () -> Dict[str, Any]
        """A dictionary representation of this path."""
        return dict(layer=list(self.layer),
                    width=self._width * self._res,
                    points=self.points,
                    end_style=self._end_style,
                    join_style=self._join_style,
                    )

    def move_by(self, dx=0, dy=0, unit_mode=False):
        # type: (ldim, ldim, bool) -> None
        """Move this path by the given amount.

        Parameters
        ----------
        dx : float
            the X shift.
        dy : float
            the Y shift.
        unit_mode : bool
            True if shifts are given in resolution units.
        """
        if not unit_mode:
            dx = int(round(dx / self._res))
            dy = int(round(dy / self._res))
        self._points += np.array([dx, dy])

    def transform(self, loc=(0, 0), orient='R0', unit_mode=False, copy=False):
        # type: (Tuple[ldim, ldim], str, bool, bool) -> Figure
        """Transform this figure."""
        res = self.resolution
        if unit_mode:
            dx, dy = loc
        else:
            dx, dy = int(round(loc[0] / res)), int(round(loc[1] / res))
        # rotate/mirror every point, then translate
        new_points = np.dot(transform_table[orient], self._points.T).T + np.array([dx, dy])
        ans = deepcopy(self) if copy else self
        ans._points = new_points
        return ans
class PathCollection(Figure):
    """A layout figure that consists of one or more paths.

    This class makes it easy to draw bus/transmission line objects.

    Parameters
    ----------
    resolution : float
        layout unit resolution.
    paths : List[Path]
        paths in this collection.
    """

    def __init__(self, resolution, paths, poly_paths=None):
        Figure.__init__(self, resolution)
        self._paths = paths
        self._poly_paths = poly_paths

    def move_by(self, dx=0, dy=0, unit_mode=False):
        # type: (ldim, ldim, bool) -> None
        """Move every path in this collection by the given amount.

        Parameters
        ----------
        dx : float
            the X shift.
        dy : float
            the Y shift.
        unit_mode : bool
            True if shifts are given in resolution units.
        """
        for p in self._paths:
            p.move_by(dx=dx, dy=dy, unit_mode=unit_mode)

    def transform(self, loc=(0, 0), orient='R0', unit_mode=False, copy=True):
        # type: (Tuple[ldim, ldim], str, bool, bool) -> PathCollection
        """Transform this figure.  Note: by default a transformed deep copy is returned."""
        ans = deepcopy(self) if copy else self
        for p in ans._paths:
            p.transform(loc=loc, orient=orient, unit_mode=unit_mode, copy=False)
        return ans
class TLineBus(PathCollection):
"""A transmission line bus drawn using Path.
assumes only 45 degree turns are used, and begin and end line segments are straight.
Parameters
----------
resolution : float
layout unit resolution.
layer : Union[str, Tuple[str, str]]
the bus layer.
points : List[Tuple[Union[float, int], Union[float, int]]]
list of center points of the bus.
widths : List[Union[float, int]]
list of wire widths. 0 index is left/bottom most wire.
spaces : List[Union[float, int]]
list of wire spacings.
end_style : str
the path ends style. Currently support 'truncate', 'extend', and 'round'.
unit_mode : bool
True if width and points are given as resolution units instead of layout units.
"""
def __init__(self, resolution, layer, points, widths, spaces, end_style='truncate',
             unit_mode=False):
    """Create the bus center line, then one Path per wire offset from it."""
    if len(points) < 2:
        raise ValueError('Must have >= 2 points.')
    if not unit_mode:
        # snap everything to the resolution grid; widths/spaces are rounded
        # to even numbers of resolution units so half-values stay integral
        points = ((int(round(x / resolution)), int(round(y / resolution)))
                  for x, y in points)
        widths = [int(round(w / resolution / 2.0)) * 2 for w in widths]
        spaces = [int(round(s / resolution / 2.0)) * 2 for s in spaces]
    self._points = np.array(Path.compress_points(points), dtype=int)
    self._layer = layer
    self._widths = widths
    self._spaces = spaces
    self._end_style = end_style
    # delta_list[i] is the perpendicular offset of wire i from the bus center
    tot_width = sum(self._widths) + sum(self._spaces)
    delta_list = [(self._widths[0] - tot_width) // 2]
    for w0, w1, sp in zip(self._widths, self._widths[1:], self._spaces):
        delta_list.append(delta_list[-1] + sp + (w0 + w1) // 2)
    paths = self.create_paths(delta_list, resolution)
    poly_paths = self.create_poly_paths(delta_list, resolution)
    PathCollection.__init__(self, resolution, paths, poly_paths)
def paths_iter(self):
    """Iterate over the Path objects in this bus."""
    return iter(self._paths)
def poly_paths_iter(self):
    """Iterate over the polygon paths in this bus."""
    return iter(self._poly_paths)
def create_paths(self, delta_list, res):
    """Create one Path per wire, offset from the bus center line.

    delta_list gives each wire's signed perpendicular offset (in resolution
    units) from the center line; res is the layout resolution forwarded to
    the Path constructor.
    """
    npoints = len(self._points)
    npaths = len(self._widths)
    path_points = [[] for _ in range(npaths)]
    # add first point
    p0 = self._points[0, :]
    s0 = self._points[1, :] - p0
    # reduce the segment vector to a unit step (components in {-1, 0, 1};
    # assumes 45/90 degree segments, as enforced by Path.compress_points)
    s0 //= np.amax(np.absolute(s0))
    s0_norm = np.linalg.norm(s0)
    # d0 is s0 rotated 90 degrees CCW, i.e. the perpendicular direction
    d0 = np.array([-s0[1], s0[0]])
    for path, delta in zip(path_points, delta_list):
        tmp = p0 + d0 * int(round(delta / s0_norm))
        path.append((tmp[0], tmp[1]))
    # add intermediate points
    for last_idx in range(2, npoints):
        p1 = self._points[last_idx - 1, :]
        p0 = self._points[last_idx - 2, :]
        s0 = p1 - p0
        s1 = self._points[last_idx, :] - p1
        s0 //= np.amax(np.absolute(s0))
        s1 //= np.amax(np.absolute(s1))
        s0_norm = np.linalg.norm(s0)
        s1_norm = np.linalg.norm(s1)
        dir0 = np.array([-s0[1], s0[0]])
        dir1 = np.array([-s1[1], s1[0]])
        for path, delta in zip(path_points, delta_list):
            # d0/d1: the two adjacent segments shifted by this wire's offset;
            # solve for the intersection of the two shifted lines
            d0 = p0 + dir0 * int(round(delta / s0_norm))
            d1 = p1 + dir1 * int(round(delta / s1_norm))
            a = np.array([[-s1[1], s1[0]],
                          [s0[1], s0[0]]], dtype=int) // (s0[1] * s1[0] - s0[0] * s1[1])
            sol = np.dot(a, d1 - d0)
            tmp = sol[0] * s0 + d0
            path.append((tmp[0], tmp[1]))
    # add last points
    p1 = self._points[-1, :]
    s0 = p1 - self._points[-2, :]
    s0 //= np.amax(np.absolute(s0))
    s0_norm = np.linalg.norm(s0)
    d0 = np.array([-s0[1], s0[0]])
    for path, delta in zip(path_points, delta_list):
        tmp = p1 + d0 * int(round(delta / s0_norm))
        path.append((tmp[0], tmp[1]))
    paths = [Path(res, self._layer, w, pp, end_style=self._end_style,
                  join_style='round', unit_mode=True)
             for w, pp in zip(self._widths, path_points)]
    return paths
def create_poly_paths(self, delta_list, res):
npoints = len(self._points)
npaths = len(self._widths)
path_points = [[] for _ in range(npaths)]
#print(self._points)
# add first point
p0 = self._points[0, :]
s0 = self._points[1, :] - p0
s0 //= np.amax(np.absolute(s0))
s0_norm = np.linalg.norm(s0)
d0 = np.array([-s0[1], s0[0]])
for path, delta in zip(path_points, delta_list):
tmp = p0 + d0 * int(round(delta / s0_norm))
path.append((tmp[0], tmp[1]))
# add intermediate points
for last_idx in range(2, npoints):
p1 = self._points[last_idx - 1, :]
p0 = self._points[last_idx - 2, :]
s0 = p1 - p0
s1 = self._points[last_idx, :] - p1
s0 //= np.amax(np.absolute(s0))
s1 //= np.amax(np.absolute(s1))
s0_norm = np.linalg.norm(s0)
s1_norm = np.linalg.norm(s1)
dir0 = np.array([-s0[1], s0[0]])
dir1 = np.array([-s1[1], s1[0]])
for path, delta in zip(path_points, delta_list):
d0 = p0 + dir0 * int(round(delta / s0_norm))
d1 = p1 + dir1 * int(round(delta / s1_norm))
a = np.array([[-s1[1], s1[0]],
[s0[1], s0[0]]], dtype=int) // (s0[1] * s1[0] - s0[0] * s1[1])
sol = np.dot(a, d1 - d0)
tmp = sol[0] * s0 + d0
path.append((tmp[0], tmp[1]))
# add last points
p1 = self._points[-1, :]
s0 = p1 - self._points[-2, :]
s0 //= np.amax(np.absolute(s0))
s0_norm = np.linalg.norm(s0)
d0 = np.array([-s0[1], s0[0]])
for path, delta in zip(path_points, delta_list):
tmp = p1 + d0 * int(round(delta / s0_norm))
path.append((tmp[0], tmp[1]))
#print(path_points)
path_polygons_points=[]
for w, pp in zip(self._widths, path_points):
pright = []
pleft = []
for point_index in range(0,len(pp)-1):
p0_x = pp[point_index][0]
p0_y = pp[point_index][1]
p1_x = pp[point_index+1][0]
p1_y = pp[point_index+1][1]
if p0_x == p1_x:
#Vert
if p1_y > p0_y:
#print('up')
p0_x_right = p0_x + w //2
p0_y_right = p0_y
p0_x_left = p0_x - w //2
p0_y_left = p0_y
p1_x_right = p1_x + w //2
p1_y_right = p1_y
p1_x_left = p1_x - w //2
p1_y_left = p1_y
else:
#print('down')
p0_x_right = p0_x - w //2
p0_y_right = p0_y
p0_x_left = p0_x + w //2
p0_y_left = p0_y
p1_x_right = p1_x - w //2
p1_y_right = p1_y
p1_x_left = p1_x + w //2
p1_y_left = p1_y
elif p0_y == p1_y:
#horz
if p1_x > p0_x:
#print('right')
p0_x_right = p0_x
p0_y_right = p0_y - w //2
p0_x_left = p0_x
p0_y_left = p0_y + w //2
p1_x_right = p1_x
p1_y_right = p1_y - w //2
p1_x_left = p1_x
p1_y_left = p1_y + w //2
else:
#print('left')
p0_x_right = p0_x
p0_y_right = p0_y + w //2
p0_x_left = p0_x
p0_y_left = p0_y - w //2
p1_x_right = p1_x
p1_y_right = p1_y + w //2
p1_x_left = p1_x
p1_y_left = p1_y - w //2
else:
if (point_index == 0):
pP_x = None
pP_y = None
else:
pP_x = pp[point_index-1][0]
pP_y = pp[point_index-1][1]
if (point_index == len(pp)-2):
pN_x = None
pN_y = None
else:
pN_x = pp[point_index+2][0]
pN_y = pp[point_index+2][1]
if p1_y > p0_y and p1_x > p0_x:
#print('up and right')
if pP_y is None:
p0_x_right = p0_x + 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))
p0_y_right = p0_y - 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))
p0_x_left = p0_x - 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))
p0_y_left = p0_y + 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))
elif pP_y != p0_y:
#from right
p0_x_right = p0_x + w //2
p0_y_right = p0_y - 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))
p0_x_left = p0_x - w //2
p0_y_left = p0_y + 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))
else:
#from up
p0_x_right = p0_x + 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))
p0_y_right = p0_y - w //2
p0_x_left = p0_x - 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))
p0_y_left = p0_y + w //2
if pN_y is None:
p1_x_right = p1_x + 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))
p1_y_right = p1_y - 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))
p1_x_left = p1_x - 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))
p1_y_left = p1_y + 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))
elif pN_y != p1_y:
#to right
p1_x_right = p1_x + w //2
p1_y_right = p1_y - 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))
p1_x_left = p1_x - w //2
p1_y_left = p1_y + 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))
else:
#to up
p1_x_right = p1_x + 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))
p1_y_right = p1_y - w //2
p1_x_left = p1_x - 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))
p1_y_left = p1_y + w //2
elif p1_y < p0_y and p1_x > p0_x:
#print('down and right')
if pP_y is None:
p0_x_right = p0_x - 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))
p0_y_right = p0_y - 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))
p0_x_left = p0_x + 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))
p0_y_left = p0_y + 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))
elif pP_y != p0_y:
#from right
p0_x_right = p0_x - w //2
p0_y_right = p0_y - 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))
p0_x_left = p0_x + w //2
p0_y_left = p0_y + 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))
else:
#from up
p0_x_right = p0_x - 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))
p0_y_right = p0_y - w //2
p0_x_left = p0_x + 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))
p0_y_left = p0_y + w //2
if pN_y is None:
p1_x_right = p1_x - 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))
p1_y_right = p1_y - 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))
p1_x_left = p1_x + 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))
p1_y_left = p1_y + 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))
elif pN_y != p1_y:
#to right
p1_x_right = p1_x - w //2
p1_y_right = p1_y - 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))
p1_x_left = p1_x + w //2
p1_y_left = p1_y + 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))
else:
#to up
p1_x_right = p1_x - 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))
p1_y_right = p1_y - w //2
p1_x_left = p1_x + 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))
p1_y_left = p1_y + w //2
elif p1_y < p0_y and p1_x < p0_x:
#print('down and left')
if pP_y is None:
p0_x_right = p0_x - 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))
p0_y_right = p0_y + 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))
p0_x_left = p0_x + 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))
p0_y_left = p0_y - 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))
elif pP_y != p0_y:
#from right
p0_x_right = p0_x - w //2
p0_y_right = p0_y + 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))
p0_x_left = p0_x + w //2
p0_y_left = p0_y - 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))
else:
#from up
p0_x_right = p0_x - 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))
p0_y_right = p0_y + w //2
p0_x_left = p0_x + 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))
p0_y_left = p0_y - w //2
if pN_y is None:
p1_x_right = p1_x - 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))
p1_y_right = p1_y + 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))
p1_x_left = p1_x + 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))
p1_y_left = p1_y - 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))
elif pN_y != p1_y:
#to right
p1_x_right = p1_x - w //2
p1_y_right = p1_y + 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))
p1_x_left = p1_x + w //2
p1_y_left = p1_y - 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))
else:
#to up
p1_x_right = p1_x - 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))
p1_y_right = p1_y + w //2
p1_x_left = p1_x + 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))
p1_y_left = p1_y - w //2
elif p1_y > p0_y and p1_x < p0_x:
#print('up and left')
if pP_y is None:
p0_x_right = p0_x + 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))
p0_y_right = p0_y + 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))
p0_x_left = p0_x - 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))
p0_y_left = p0_y - 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))
elif pP_y != p0_y:
#from right
p0_x_right = p0_x + w //2
p0_y_right = p0_y + 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))
p0_x_left = p0_x - w //2
p0_y_left = p0_y - 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))
else:
#from up
p0_x_right = p0_x + 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))
p0_y_right = p0_y + w //2
p0_x_left = p0_x - 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))
p0_y_left = p0_y - w //2
if pN_y is None:
p1_x_right = p1_x + 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))
p1_y_right = p1_y + 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))
p1_x_left = p1_x - 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))
p1_y_left = p1_y - 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))
elif pN_y != p1_y:
#to right
p1_x_right = p1_x + w //2
p1_y_right = p1_y + 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))
p1_x_left = p1_x - w //2
p1_y_left = p1_y - 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))
else:
#to up
p1_x_right = p1_x + 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))
p1_y_right = p1_y + w //2
p1_x_left = p1_x - 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))
p1_y_left = p1_y - w //2
else:
raise RuntimeError
# if (p0_x_right%2==1) or (p0_y_right%2==1) or (p0_x_left%2==1) or (p0_y_left%2==1) or \
# (p1_x_right%2==1) or (p1_y_right%2==1) or (p1_x_left%2==1) or (p1_y_left%2==1):
# pdb.set_trace()
pright.append( ( p0_x_right, p0_y_right) )
pleft.append( ( p0_x_left, p0_y_left) )
pright.append( ( p1_x_right, p1_y_right) )
pleft.append( ( p1_x_left , p1_y_left) )
current_path_polygons_points = pright + pleft[::-1]
current_path_polygons_array = np.array(current_path_polygons_points)
current_path_diff = np.diff(np.diff(current_path_polygons_array,axis=0),axis=0)
current_path_diff_x = current_path_diff[:,0]
current_path_diff_x_0_ind = np.where(current_path_diff_x == 0)[0] + 1
current_path_polygons_array = np.delete(current_path_polygons_array, current_path_diff_x_0_ind,axis=0)
current_path_diff = np.diff(np.diff(current_path_polygons_array,axis=0),axis=0)
current_path_diff_y = current_path_diff[:,1]
current_path_diff_y_0_ind = np.where(current_path_diff_y == 0)[0] + 1
current_path_polygons_array = np.delete(current_path_polygons_array, current_path_diff_y_0_ind,axis=0)
current_path_polygons_points = [(np_point[0],np_point[1]) for np_point in current_path_polygons_array.tolist()]
path_polygons_points.append(current_path_polygons_points)
paths = [Polygon(res, self._layer, pp ,unit_mode=True) for pp in path_polygons_points]
return paths
class Polygon(Figure):
    """A layout polygon object.

    Parameters
    ----------
    resolution : float
        the layout grid resolution.
    layer : Union[str, Tuple[str, str]]
        the layer name, or a tuple of layer name and purpose name.
        If purpose name not given, defaults to 'drawing'.
    points : List[Tuple[Union[float, int], Union[float, int]]]
        the points defining the polygon.
    unit_mode : bool
        True if the points are given in resolution units.
    """

    def __init__(self,
                 resolution,  # type: float
                 layer,  # type: Union[str, Tuple[str, str]]
                 points,  # type: List[Tuple[Union[float, int], Union[float, int]]]
                 unit_mode=False,  # type: bool
                 ):
        # type: (...) -> None
        Figure.__init__(self, resolution)
        layer = io.fix_string(layer)
        if isinstance(layer, str):
            layer = (layer, 'drawing')
        self._layer = layer
        if not unit_mode:
            # round to the nearest grid point; a bare astype(int) truncates
            # and can be off by one unit due to floating point error.
            self._points = np.round(np.array(points) / resolution).astype(int)
        else:
            self._points = np.array(points, dtype=int)

    @property
    def layer(self):
        # type: () -> Tuple[str, str]
        """The polygon (layer, purpose) pair."""
        return self._layer

    @property
    def points(self):
        """The polygon vertices in layout units."""
        return [(self._points[idx][0] * self._res, self._points[idx][1] * self._res)
                for idx in range(self._points.shape[0])]

    @property
    def points_unit(self):
        """The polygon vertices in resolution units."""
        return [(self._points[idx][0], self._points[idx][1])
                for idx in range(self._points.shape[0])]

    @property
    def content(self):
        # type: () -> Dict[str, Any]
        """A dictionary representation of this polygon."""
        content = dict(layer=self.layer,
                       points=self.points,
                       )
        return content

    def move_by(self, dx=0, dy=0, unit_mode=False):
        # type: (ldim, ldim, bool) -> None
        """Move this polygon by the given amount.

        Parameters
        ----------
        dx : Union[float, int]
            the X shift.
        dy : Union[float, int]
            the Y shift.
        unit_mode : bool
            True if shifts are given in resolution units.
        """
        if not unit_mode:
            dx = int(round(dx / self._res))
            dy = int(round(dy / self._res))
        self._points += np.array([dx, dy])

    def transform(self, loc=(0, 0), orient='R0', unit_mode=False, copy=False):
        # type: (Tuple[ldim, ldim], str, bool, bool) -> Figure
        """Transform this figure."""
        res = self.resolution
        if unit_mode:
            dx, dy = loc
        else:
            dx = int(round(loc[0] / res))
            dy = int(round(loc[1] / res))
        dvec = np.array([dx, dy])
        mat = transform_table[orient]
        new_points = np.dot(mat, self._points.T).T + dvec
        # either mutate in place or transform a deep copy.
        if not copy:
            ans = self
        else:
            ans = deepcopy(self)
        ans._points = new_points
        return ans
class Blockage(Polygon):
    """A blockage object.

    Subclass Polygon for code reuse.

    Parameters
    ----------
    resolution : float
        the layout grid resolution.
    block_type : str
        the blockage type.  Currently supports 'routing' and 'placement'.
    block_layer : str
        the blockage layer.  This value is ignored if blockage type is 'placement'.
    points : List[Tuple[Union[float, int], Union[float, int]]]
        the points defining the blockage.
    unit_mode : bool
        True if the points are given in resolution units.
    """

    def __init__(self, resolution, block_type, block_layer, points, unit_mode=False):
        # type: (float, str, str, List[Tuple[Union[float, int], Union[float, int]]], bool) -> None
        Polygon.__init__(self, resolution, block_layer, points, unit_mode=unit_mode)
        self._block_layer = block_layer
        self._type = block_type

    @property
    def layer(self):
        """The blockage layer."""
        return self._block_layer

    @property
    def type(self):
        # type: () -> str
        """The blockage type."""
        return self._type

    @property
    def content(self):
        # type: () -> Dict[str, Any]
        """A dictionary representation of this blockage."""
        return dict(layer=self.layer,
                    btype=self.type,
                    points=self.points)
class Boundary(Polygon):
    """A boundary object.

    Subclass Polygon for code reuse.

    Parameters
    ----------
    resolution : float
        the layout grid resolution.
    boundary_type : str
        the boundary type.  Currently supports 'PR', 'snap', and 'area'.
    points : List[Tuple[Union[float, int], Union[float, int]]]
        the points defining the boundary.
    unit_mode : bool
        True if the points are given in resolution units.
    """

    def __init__(self, resolution, boundary_type, points, unit_mode=False):
        # type: (float, str, List[Tuple[Union[float, int], Union[float, int]]], bool) -> None
        # boundaries carry no drawing layer; use an empty (layer, purpose) pair.
        Polygon.__init__(self, resolution, ('', ''), points, unit_mode=unit_mode)
        self._type = boundary_type

    @property
    def type(self):
        # type: () -> str
        """The boundary type."""
        return self._type

    @property
    def content(self):
        # type: () -> Dict[str, Any]
        """A dictionary representation of this boundary."""
        return dict(btype=self.type,
                    points=self.points)
class ViaInfo(dict):
    """A dictionary that represents a layout via.

    Required via parameters are listed in param_list; optional array/cut
    parameters are stored only when explicitly given.
    """

    param_list = ['id', 'loc', 'orient', 'num_rows', 'num_cols', 'sp_rows', 'sp_cols',
                  'enc1', 'enc2']

    def __init__(self, res, **kwargs):
        dict.__init__(self, ((name, kwargs[name]) for name in self.param_list))
        # only record optional parameters that the caller supplied.
        self.update((name, kwargs[name])
                    for name in ('cut_width', 'cut_height', 'arr_nx', 'arr_ny',
                                 'arr_spx', 'arr_spy')
                    if name in kwargs)
        self._resolution = res

    @property
    def id(self):
        # type: () -> str
        return self['id']

    @property
    def loc(self):
        # type: () -> Tuple[float, float]
        xo, yo = self['loc'][0], self['loc'][1]
        return xo, yo

    @property
    def orient(self):
        # type: () -> str
        return self['orient']

    @property
    def num_rows(self):
        # type: () -> int
        return self['num_rows']

    @property
    def num_cols(self):
        # type: () -> int
        return self['num_cols']

    @property
    def sp_rows(self):
        # type: () -> float
        return self['sp_rows']

    @property
    def sp_cols(self):
        # type: () -> float
        return self['sp_cols']

    @property
    def enc1(self):
        # type: () -> Tuple[float, float, float, float]
        enc = self['enc1']
        return enc[0], enc[1], enc[2], enc[3]

    @property
    def enc2(self):
        # type: () -> Tuple[float, float, float, float]
        enc = self['enc2']
        return enc[0], enc[1], enc[2], enc[3]

    @property
    def cut_width(self):
        # type: () -> float
        return self.get('cut_width', -1)

    @property
    def cut_height(self):
        # type: () -> float
        return self.get('cut_height', -1)

    @property
    def arr_nx(self):
        # type: () -> int
        return self.get('arr_nx', 1)

    @property
    def arr_ny(self):
        # type: () -> int
        return self.get('arr_ny', 1)

    @property
    def arr_spx(self):
        # type: () -> float
        return self.get('arr_spx', 0)

    @property
    def arr_spy(self):
        # type: () -> float
        return self.get('arr_spy', 0)

    def move_by(self, dx=0, dy=0):
        # type: (float, float) -> None
        """Move this instance by the given amount.

        Parameters
        ----------
        dx : float
            the X shift.
        dy : float
            the Y shift.
        """
        res = self._resolution
        xo, yo = self.loc
        # snap the shifted location back onto the resolution grid.
        self['loc'] = [round((xo + dx) / res) * res,
                       round((yo + dy) / res) * res]
class Via(Arrayable):
    """A layout via, with optional arraying parameters.

    Parameters
    ----------
    tech : ..layout.core.TechInfo
        the technology class used to calculate via information.
    bbox : ..layout.util.BBox or ..layout.util.BBoxArray
        the via bounding box, not including extensions.
        If this is a BBoxArray, the BBoxArray's arraying parameters are used.
    bot_layer : str or (str, str)
        the bottom layer name, or a tuple of layer name and purpose name.
        If purpose name not given, defaults to 'drawing'.
    top_layer : str or (str, str)
        the top layer name, or a tuple of layer name and purpose name.
        If purpose name not given, defaults to 'drawing'.
    bot_dir : str
        the bottom layer extension direction.  Either 'x' or 'y'.
    nx : int
        arraying parameter.  Number of columns.
    ny : int
        arraying parameter.  Number of rows.
    spx : float
        arraying parameter.  Column pitch.
    spy : float
        arraying parameter.  Row pitch.
    extend : bool
        True if via extension can be drawn outside of bounding box.
    top_dir : Optional[str]
        top layer extension direction.  Can force to extend in same direction as bottom.
    unit_mode : bool
        True if array pitches are given in resolution units.
    """

    def __init__(self, tech, bbox, bot_layer, top_layer, bot_dir,
                 nx=1, ny=1, spx=0, spy=0, extend=True, top_dir=None, unit_mode=False):
        if isinstance(bbox, BBoxArray):
            # a BBoxArray overrides the explicit arraying parameters.
            self._bbox = bbox.base
            Arrayable.__init__(self, tech.resolution, nx=bbox.nx, ny=bbox.ny,
                               spx=bbox.spx_unit, spy=bbox.spy_unit, unit_mode=True)
        else:
            self._bbox = bbox
            Arrayable.__init__(self, tech.resolution, nx=nx, ny=ny, spx=spx, spy=spy,
                               unit_mode=unit_mode)
        # python 2/3 compatibility: convert raw bytes to string.
        bot_layer = io.fix_string(bot_layer)
        top_layer = io.fix_string(top_layer)
        if isinstance(bot_layer, str):
            bot_layer = (bot_layer, 'drawing')
        if isinstance(top_layer, str):
            top_layer = (top_layer, 'drawing')
        self._tech = tech
        self._bot_layer = bot_layer[0], bot_layer[1]
        self._top_layer = top_layer[0], top_layer[1]
        self._bot_dir = bot_dir
        self._top_dir = top_dir
        self._extend = extend
        self._info = self._tech.get_via_info(self._bbox, bot_layer, top_layer, bot_dir,
                                             top_dir=top_dir, extend=extend)
        if self._info is None:
            raise ValueError('Cannot make via with bounding box %s' % self._bbox)

    def _update(self):
        """Update via parameters."""
        self._info = self._tech.get_via_info(self.bbox, self.bot_layer, self.top_layer,
                                             self.bottom_direction, top_dir=self.top_direction,
                                             extend=self.extend)

    @property
    def top_box(self):
        # type: () -> BBox
        """the top via layer bounding box."""
        return self._info['top_box']

    @property
    def bottom_box(self):
        # type: () -> BBox
        """the bottom via layer bounding box."""
        return self._info['bot_box']

    @property
    def bot_layer(self):
        """The bottom via (layer, purpose) pair."""
        return self._bot_layer

    @property
    def top_layer(self):
        """The top via (layer, purpose) pair."""
        return self._top_layer

    @property
    def bottom_direction(self):
        """the bottom via extension direction."""
        return self._bot_dir

    @bottom_direction.setter
    def bottom_direction(self, new_bot_dir):
        """Sets the bottom via extension direction."""
        self.check_destroyed()
        self._bot_dir = new_bot_dir
        self._update()

    @property
    def top_direction(self):
        """the top via extension direction.

        Defaults to the direction perpendicular to the bottom direction.
        """
        if not self._top_dir:
            return 'x' if self._bot_dir == 'y' else 'y'
        return self._top_dir

    @top_direction.setter
    def top_direction(self, new_top_dir):
        """Sets the top via extension direction."""
        self.check_destroyed()
        self._top_dir = new_top_dir
        self._update()

    @property
    def extend(self):
        """True if via extension can grow beyond bounding box."""
        return self._extend

    @extend.setter
    def extend(self, new_val):
        self._extend = new_val

    @property
    def bbox(self):
        """The via bounding box not including extensions."""
        return self._bbox

    @property
    def bbox_array(self):
        """The via bounding box array, not including extensions.

        Returns
        -------
        barr : :class:`..layout.util.BBoxArray`
            the BBoxArray representing this (Arrayed) rectangle.
        """
        return BBoxArray(self._bbox, nx=self.nx, ny=self.ny, spx=self.spx_unit,
                         spy=self.spy_unit, unit_mode=True)

    @bbox.setter
    def bbox(self, new_bbox):
        """Sets the via bounding box.  Will redraw the via."""
        self.check_destroyed()
        if not new_bbox.is_physical():
            raise ValueError('Bounding box %s is not physical' % new_bbox)
        self._bbox = new_bbox
        self._update()

    @property
    def content(self):
        """A dictionary representation of this via."""
        via_params = self._info['params']
        content = ViaInfo(self._tech.resolution, **via_params)
        if self.nx > 1 or self.ny > 1:
            content['arr_nx'] = self.nx
            content['arr_ny'] = self.ny
            content['arr_spx'] = self.spx
            content['arr_spy'] = self.spy
        return content

    def move_by(self, dx=0, dy=0, unit_mode=False):
        # type: (ldim, ldim, bool) -> None
        """Move this via by the given amount.

        Parameters
        ----------
        dx : float
            the X shift.
        dy : float
            the Y shift.
        unit_mode : bool
            True if shifts are given in resolution units.
        """
        self._bbox = self._bbox.move_by(dx=dx, dy=dy, unit_mode=unit_mode)
        self._info['top_box'] = self._info['top_box'].move_by(dx=dx, dy=dy, unit_mode=unit_mode)
        self._info['bot_box'] = self._info['bot_box'].move_by(dx=dx, dy=dy, unit_mode=unit_mode)
        self._info['params']['loc'] = [self._bbox.xc, self._bbox.yc]

    def transform(self, loc=(0, 0), orient='R0', unit_mode=False, copy=False):
        # type: (Tuple[ldim, ldim], str, bool, bool) -> Figure
        """Transform this figure."""
        new_box = self._bbox.transform(loc=loc, orient=orient, unit_mode=unit_mode)
        if copy:
            # carry the extension settings over to the copy so it is
            # equivalent to this via.
            return Via(self._tech, new_box, self._bot_layer, self._top_layer, self._bot_dir,
                       nx=self.nx, ny=self.ny, spx=self.spx_unit, spy=self.spy_unit,
                       extend=self._extend, top_dir=self._top_dir, unit_mode=True)
        else:
            self._bbox = new_box
            self._info['top_box'] = self._info['top_box'].transform(loc=loc, orient=orient,
                                                                    unit_mode=unit_mode)
            self._info['bot_box'] = self._info['bot_box'].transform(loc=loc, orient=orient,
                                                                    unit_mode=unit_mode)
            self._info['params']['loc'] = [self._bbox.xc, self._bbox.yc]
            # return self so both branches yield the transformed figure,
            # matching the declared return type and Polygon.transform.
            return self
class PinInfo(dict):
    """A dictionary that represents a layout pin.
    """

    param_list = ['net_name', 'pin_name', 'label', 'layer', 'bbox', 'make_rect']

    def __init__(self, res, **kwargs):
        dict.__init__(self, ((name, kwargs[name]) for name in self.param_list))
        self._resolution = res

    @property
    def net_name(self):
        # type: () -> str
        return self['net_name']

    @property
    def pin_name(self):
        # type: () -> str
        return self['pin_name']

    @property
    def label(self):
        # type: () -> str
        return self['label']

    @property
    def layer(self):
        # type: () -> Tuple[str, str]
        lay = self['layer']
        return lay[0], lay[1]

    @property
    def bbox(self):
        # type: () -> BBox
        (xl, yb), (xr, yt) = self['bbox']
        return BBox(xl, yb, xr, yt, self._resolution)

    @property
    def make_rect(self):
        # type: () -> bool
        return self['make_rect']

    def move_by(self, dx=0, dy=0):
        # type: (float, float) -> None
        """Move this instance by the given amount.

        Parameters
        ----------
        dx : float
            the X shift.
        dy : float
            the Y shift.
        """
        box = self.bbox.move_by(dx=dx, dy=dy)
        self['bbox'] = [[box.left, box.bottom], [box.right, box.top]]
================================================
FILE: bag/layout/routing/LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2018, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: bag/layout/routing/__init__.py
================================================
# -*- coding: utf-8 -*-
"""This package provide routing classes.
"""
from .base import TrackID, WireArray, Port, TrackManager
from .grid import RoutingGrid
from .fill import UsedTracks
================================================
FILE: bag/layout/routing/base.py
================================================
# -*- coding: utf-8 -*-
"""This module provides basic routing classes.
"""
from typing import Tuple, Union, Generator, Dict, List, Sequence
import numbers
from ...util.search import BinaryIterator
from ..util import BBox, BBoxArray
from .grid import RoutingGrid
class TrackID(object):
    """A class that represents locations of track(s) on the routing grid.

    Parameters
    ----------
    layer_id : int
        the layer ID.
    track_idx : Union[float, int]
        the smallest middle track index in the array.  Multiples of 0.5
    width : int
        width of one track in number of tracks.
    num : int
        number of tracks in this array.
    pitch : Union[float, int]
        pitch between adjacent tracks, in number of track pitches.
    """

    def __init__(self, layer_id, track_idx, width=1, num=1, pitch=0.0):
        # type: (int, Union[float, int], int, int, Union[float, int]) -> None
        if num < 1:
            raise ValueError('TrackID must have 1 or more tracks.')
        self._layer_id = layer_id
        # indices are stored internally as half-track integers to avoid
        # floating point error: track index t maps to 2 * t + 1.
        self._hidx = int(round(2 * track_idx)) + 1
        self._w = width
        self._n = num
        # round (not truncate) so a float pitch carrying representation
        # error (e.g. 1.4999999) still maps to the intended half-track,
        # consistent with the rounding of track_idx above.
        self._hpitch = 0 if num == 1 else int(round(2 * pitch))

    def __repr__(self):
        arg_list = ['layer=%d' % self._layer_id]
        if self._hidx % 2 == 1:
            arg_list.append('track=%d' % ((self._hidx - 1) // 2))
        else:
            arg_list.append('track=%.1f' % ((self._hidx - 1) / 2))
        if self._w != 1:
            arg_list.append('width=%d' % self._w)
        if self._n != 1:
            arg_list.append('num=%d' % self._n)
        if self._hpitch % 2 == 0:
            arg_list.append('pitch=%d' % (self._hpitch // 2))
        else:
            arg_list.append('pitch=%.1f' % (self._hpitch / 2))
        return '%s(%s)' % (self.__class__.__name__, ', '.join(arg_list))

    def __str__(self):
        return repr(self)

    @property
    def layer_id(self):
        # type: () -> int
        return self._layer_id

    @property
    def width(self):
        # type: () -> int
        return self._w

    @property
    def base_index(self):
        # type: () -> Union[float, int]
        """The smallest middle track index; int when on a whole track."""
        if self._hidx % 2 == 1:
            return (self._hidx - 1) // 2
        return (self._hidx - 1) / 2

    @property
    def index_htr(self):
        # type: () -> int
        """The base index in half-track units."""
        return self._hidx

    @property
    def num(self):
        # type: () -> int
        return self._n

    @property
    def pitch(self):
        # type: () -> Union[float, int]
        """The track pitch; int when a whole number of tracks."""
        if self._hpitch % 2 == 0:
            return self._hpitch // 2
        return self._hpitch / 2

    @property
    def pitch_htr(self):
        # type: () -> int
        """The pitch in half-track units."""
        return self._hpitch

    def get_immutable_key(self):
        """Return a hashable tuple uniquely identifying this TrackID."""
        return self.__class__.__name__, self._layer_id, self._hidx, self._w, self._n, self._hpitch

    def get_bounds(self, grid, unit_mode=False):
        # type: (RoutingGrid, bool) -> Tuple[Union[float, int], Union[float, int]]
        """Calculate the track bounds coordinate.

        Parameters
        ----------
        grid : RoutingGrid
            the RoutingGrid object.
        unit_mode : bool
            True to return coordinates in resolution units.

        Returns
        -------
        lower : Union[float, int]
            the lower bound coordinate perpendicular to track direction.
        upper : Union[float, int]
            the upper bound coordinate perpendicular to track direction.
        """
        lower, upper = grid.get_wire_bounds(self.layer_id, self.base_index,
                                            width=self.width, unit_mode=True)
        # extend the upper bound to cover the last track in the array.
        pitch_dim = (self._hpitch * grid.get_track_pitch(self._layer_id, unit_mode=True)) // 2
        upper += (self.num - 1) * pitch_dim
        if unit_mode:
            return lower, upper
        else:
            res = grid.resolution
            return lower * res, upper * res

    def __iter__(self):
        # type: () -> Generator[Union[float, int]]
        """Iterate over all middle track indices in this TrackID."""
        for idx in range(self._n):
            num = self._hidx + idx * self._hpitch
            if num % 2 == 1:
                yield (num - 1) // 2
            else:
                yield (num - 1) / 2

    def sub_tracks_iter(self, grid):
        # type: (RoutingGrid) -> Generator[TrackID]
        """Iterate through sub-TrackIDs where every track in sub-TrackID has the same layer name.

        This method is used to deal with double patterning layer.  If this TrackID is not
        on a double patterning layer, it simply yields itself.

        Parameters
        ----------
        grid : RoutingGrid
            the RoutingGrid object.

        Yields
        ------
        sub_id : TrackID
            a TrackID where all tracks has the same layer name.
        """
        layer_id = self._layer_id
        layer_names = grid.tech_info.get_layer_name(layer_id)
        if isinstance(layer_names, tuple):
            den = 2 * len(layer_names)
            if self._hpitch % den == 0:
                # layer name will never change
                yield self
            else:
                # TODO: have more robust solution than just yielding tracks one by one?
                for tr_idx in self:
                    yield TrackID(layer_id, tr_idx, width=self.width)
        else:
            yield self

    def transform(self, grid, loc=(0, 0), orient="R0", unit_mode=False):
        # type: (RoutingGrid, Tuple[Union[float, int], Union[float, int]], str, bool) -> TrackID
        """returns a transformation of this TrackID."""
        layer_id = self._layer_id
        is_x = grid.get_direction(layer_id) == 'x'
        # mirroring along the track direction negates the half-track index
        # and makes the last track of the array the new base track.
        if orient == 'R0':
            base_hidx = self._hidx
        elif orient == 'MX':
            if is_x:
                base_hidx = -self._hidx - (self._n - 1) * self._hpitch
            else:
                base_hidx = self._hidx
        elif orient == 'MY':
            if is_x:
                base_hidx = self._hidx
            else:
                base_hidx = -self._hidx - (self._n - 1) * self._hpitch
        elif orient == 'R180':
            base_hidx = -self._hidx - (self._n - 1) * self._hpitch
        else:
            raise ValueError('Unsupported orientation: %s' % orient)
        # translate by the location component perpendicular to the tracks.
        delta = loc[1] if is_x else loc[0]
        delta = grid.coord_to_track(layer_id, delta, unit_mode=unit_mode) + 0.5
        return TrackID(layer_id, (base_hidx - 1) / 2 + delta, width=self._w,
                       num=self._n, pitch=self.pitch)
class WireArray(object):
    """An array of wires on the routing grid.

    Coordinates along the track direction are stored internally in
    resolution units (ints).

    Parameters
    ----------
    track_id : :class:`bag.layout.routing.TrackID`
        TrackArray representing the track locations of this wire array.
    lower : Union[float, int]
        the lower coordinate along the track direction.
    upper : Union[float, int]
        the upper coordinate along the track direction.
    res : Optional[float]
        the resolution unit.
    unit_mode : bool
        True if lower/upper are specified in resolution units.
    """

    def __init__(self, track_id, lower, upper, res=None, unit_mode=False):
        # type: (TrackID, Union[float, int], Union[float, int], Optional[float], bool) -> None
        if res is None:
            raise ValueError('Please specify the layout distance resolution.')
        self._track_id = track_id
        self._res = res
        # store lower/upper in resolution units
        if unit_mode:
            self._lower_unit = int(lower)  # type: int
            self._upper_unit = int(upper)  # type: int
        else:
            self._lower_unit = int(round(lower / res))
            self._upper_unit = int(round(upper / res))

    def __repr__(self):
        # type: () -> str
        return '%s(%s, %.d, %.d, %.4g)' % (self.__class__.__name__, self._track_id,
                                           self._lower_unit, self._upper_unit, self._res)

    def __str__(self):
        # type: () -> str
        return repr(self)

    @property
    def resolution(self):
        """the layout distance resolution."""
        return self._res

    @property
    def lower(self):
        """the lower coordinate along the track direction."""
        return self._lower_unit * self._res

    @property
    def upper(self):
        """the upper coordinate along the track direction."""
        return self._upper_unit * self._res

    @property
    def middle(self):
        """the middle coordinate along the track direction."""
        return (self._lower_unit + self._upper_unit) // 2 * self._res

    @property
    def lower_unit(self):
        # type: () -> int
        """the lower coordinate, in resolution units."""
        return self._lower_unit

    @property
    def upper_unit(self):
        # type: () -> int
        """the upper coordinate, in resolution units."""
        return self._upper_unit

    @property
    def middle_unit(self):
        # type: () -> int
        """the middle coordinate, in resolution units."""
        return (self._lower_unit + self._upper_unit) // 2

    @property
    def track_id(self):
        # type: () -> TrackID
        """Returns the TrackID of this WireArray."""
        return self._track_id

    @property
    def layer_id(self):
        # type: () -> int
        """Returns the layer ID of this WireArray."""
        return self.track_id.layer_id

    @property
    def width(self):
        """the width of each wire, in number of tracks."""
        return self.track_id.width

    @classmethod
    def list_to_warr(cls, warr_list):
        # type: (List[WireArray]) -> WireArray
        """Convert a list of WireArrays to a single WireArray.

        this method assumes all WireArrays have the same layer, width, and lower/upper coordinates.
        Overlapping WireArrays will be compacted.

        Raises ValueError if the track indices are not evenly spaced.
        """
        if len(warr_list) == 1:
            return warr_list[0]

        tid0 = warr_list[0].track_id
        layer = tid0.layer_id
        width = tid0.width
        res = warr_list[0].resolution
        lower, upper = warr_list[0].lower_unit, warr_list[0].upper_unit
        # gather all track indices in doubled (half-track) units; set() compacts
        # overlapping wires
        tid_list = sorted(set((int(idx * 2) for warr in warr_list for idx in warr.track_id)))

        base_idx2 = tid_list[0]
        # convert back to track index; int when it lands on a whole track
        base_idx = base_idx2 // 2 if base_idx2 % 2 == 0 else base_idx2 / 2
        if len(tid_list) < 2:
            return WireArray(TrackID(layer, base_idx, width=width), lower, upper,
                             res=res, unit_mode=True)
        # all tracks must be evenly spaced to form a single TrackID
        diff = tid_list[1] - tid_list[0]
        for idx in range(1, len(tid_list) - 1):
            if tid_list[idx + 1] - tid_list[idx] != diff:
                raise ValueError('pitch mismatch.')
        pitch = diff // 2 if diff % 2 == 0 else diff / 2
        return WireArray(TrackID(layer, base_idx, width=width, num=len(tid_list), pitch=pitch),
                         lower, upper, res=res, unit_mode=True)

    @classmethod
    def single_warr_iter(cls, warr):
        """Iterate over single-wire WireArrays in the given WireArray or list of WireArrays."""
        if isinstance(warr, WireArray):
            yield from warr.warr_iter()
        else:
            for w in warr:
                yield from w.warr_iter()

    def get_immutable_key(self):
        """Returns a hashable tuple uniquely identifying this WireArray."""
        return (self.__class__.__name__, self._track_id.get_immutable_key(), self._lower_unit,
                self._upper_unit, self._res)

    def to_warr_list(self):
        """Returns this WireArray as a list of single-wire WireArrays."""
        return list(self.warr_iter())

    def warr_iter(self):
        """Iterate over each wire in this WireArray as a single-wire WireArray."""
        tid = self._track_id
        layer = tid.layer_id
        width = tid.width
        for tr in tid:
            yield WireArray(TrackID(layer, tr, width=width), self._lower_unit,
                            self._upper_unit, res=self._res, unit_mode=True)

    def get_bbox_array(self, grid):
        # type: ('RoutingGrid') -> BBoxArray
        """Returns the BBoxArray representing this WireArray.

        Parameters
        ----------
        grid : RoutingGrid
            the RoutingGrid of this WireArray.

        Returns
        -------
        bbox_arr : BBoxArray
            the BBoxArray of the wires.
        """
        track_id = self.track_id
        tr_w = track_id.width
        layer_id = track_id.layer_id
        base_idx = track_id.base_index
        num = track_id.num
        base_box = grid.get_bbox(layer_id, base_idx, self._lower_unit, self._upper_unit,
                                 width=tr_w, unit_mode=True)
        # pitch_htr is in half-track units; convert to resolution units
        tot_pitch = (track_id.pitch_htr * grid.get_track_pitch(layer_id, unit_mode=True)) // 2
        # wires repeat perpendicular to the track direction
        if grid.get_direction(layer_id) == 'x':
            return BBoxArray(base_box, ny=num, spy=tot_pitch, unit_mode=True)
        else:
            return BBoxArray(base_box, nx=num, spx=tot_pitch, unit_mode=True)

    def wire_iter(self, grid):
        """Iterate over all wires in this WireArray as layer/BBox pair.

        Parameters
        ----------
        grid : :class:`bag.layout.routing.RoutingGrid`
            the RoutingGrid of this WireArray.

        Yields
        ------
        layer : string
            the wire layer name.
        bbox : :class:`bag.layout.util.BBox`
            the wire bounding box.
        """
        tr_w = self.track_id.width
        layer_id = self.layer_id
        for tr_idx in self.track_id:
            # layer name may depend on track index (double patterning)
            layer_name = grid.get_layer_name(layer_id, tr_idx)
            bbox = grid.get_bbox(layer_id, tr_idx, self._lower_unit, self._upper_unit,
                                 width=tr_w, unit_mode=True)
            yield layer_name, bbox

    def wire_arr_iter(self, grid):
        """Iterate over all wires in this WireArray as layer/BBoxArray pair.

        This method group all rectangles in the same layer together.

        Parameters
        ----------
        grid : :class:`bag.layout.routing.RoutingGrid`
            the RoutingGrid of this WireArray.

        Yields
        ------
        layer : string
            the wire layer name.
        bbox : :class:`bag.layout.util.BBoxArray`
            the wire bounding boxes.
        """
        res = self._res
        tid = self.track_id
        layer_id = tid.layer_id
        tr_width = tid.width
        track_pitch = grid.get_track_pitch(layer_id, unit_mode=True)
        is_x = grid.get_direction(layer_id) == 'x'
        # sub_tracks_iter groups tracks sharing one layer name together
        for track_idx in tid.sub_tracks_iter(grid):
            base_idx = track_idx.base_index
            cur_layer = grid.get_layer_name(layer_id, base_idx)
            cur_num = track_idx.num
            wire_pitch = (track_idx.pitch_htr * track_pitch) // 2
            tl, tu = grid.get_wire_bounds(layer_id, base_idx, width=tr_width, unit_mode=True)
            if is_x:
                base_box = BBox(self._lower_unit, tl, self._upper_unit, tu, res, unit_mode=True)
                box_arr = BBoxArray(base_box, ny=cur_num, spy=wire_pitch, unit_mode=True)
            else:
                base_box = BBox(tl, self._lower_unit, tu, self._upper_unit, res, unit_mode=True)
                box_arr = BBoxArray(base_box, nx=cur_num, spx=wire_pitch, unit_mode=True)

            yield cur_layer, box_arr

    def transform(self, grid, loc=(0, 0), orient='R0', unit_mode=False):
        """Return a new transformed WireArray.

        Parameters
        ----------
        grid : :class:`bag.layout.routing.RoutingGrid`
            the RoutingGrid of this WireArray.
        loc : Tuple[Union[float, int], Union[float, int]]
            the X/Y coordinate shift.
        orient : str
            the new orientation.
        unit_mode : bool
            True if location is given in unit mode.

        Returns
        -------
        warr : WireArray
            the transformed WireArray.
        """
        res = self._res
        if not unit_mode:
            loc = int(round(loc[0] / res)), int(round(loc[1] / res))

        layer_id = self.layer_id
        is_x = grid.get_direction(layer_id) == 'x'
        # wires on x-direction layers run along x, so MY (mirror about the Y
        # axis) and R180 negate their lower/upper coordinates; y-direction
        # wires are mirrored by MX and R180.
        if orient == 'R0':
            lower, upper = self._lower_unit, self._upper_unit
        elif orient == 'MX':
            if is_x:
                lower, upper = self._lower_unit, self._upper_unit
            else:
                lower, upper = -self._upper_unit, -self._lower_unit
        elif orient == 'MY':
            if is_x:
                lower, upper = -self._upper_unit, -self._lower_unit
            else:
                lower, upper = self._lower_unit, self._upper_unit
        elif orient == 'R180':
            lower, upper = -self._upper_unit, -self._lower_unit
        else:
            raise ValueError('Unsupported orientation: %s' % orient)

        # shift along the wire direction
        delta = loc[0] if is_x else loc[1]
        return WireArray(self.track_id.transform(grid, loc=loc, orient=orient, unit_mode=True),
                         lower + delta, upper + delta, res=res, unit_mode=True)
class Port(object):
    """A layout port.

    a port is a group of pins that represent the same net.
    The pins can be on different layers.

    Parameters
    ----------
    term_name : str
        the terminal name of the port.
    pin_dict : dict[int, list[bag.layout.routing.WireArray]]
        a dictionary from layer ID to pin geometries on that layer.
    label : str
        the port label; defaults to the terminal name if empty.
    """

    def __init__(self, term_name, pin_dict, label=''):
        self._term_name = term_name
        self._pin_dict = pin_dict
        self._label = label if label else term_name

    def __iter__(self):
        """Iterate through all pin geometries in this port.

        the iteration order is not guaranteed.
        """
        for geo_list in self._pin_dict.values():
            for geo in geo_list:
                yield geo

    def get_single_layer(self):
        # type: () -> Union[int, str]
        """Returns the layer of this port if it only has a single layer."""
        if len(self._pin_dict) > 1:
            raise ValueError('This port has more than one layer.')
        return next(iter(self._pin_dict))

    def _get_layer(self, layer):
        """Get the layer number, resolving a negative/empty layer to the single layer."""
        if isinstance(layer, numbers.Integral):
            if layer < 0:
                return self.get_single_layer()
            return layer
        # layer name: empty string means "use the single layer"
        return layer if layer else self.get_single_layer()

    @property
    def net_name(self):
        """Returns the net name of this port."""
        return self._term_name

    @property
    def label(self):
        """Returns the label of this port."""
        return self._label

    def get_pins(self, layer=-1):
        """Returns the pin geometries on the given layer.

        Parameters
        ----------
        layer : int
            the layer ID.  If Negative, check if this port is on a single layer,
            then return the result.

        Returns
        -------
        track_bus_list : Union[WireArray, BBox]
            pins on the given layer representing as WireArrays.
        """
        return self._pin_dict.get(self._get_layer(layer), [])

    def get_bounding_box(self, grid, layer=-1):
        """Calculate the overall bounding box of this port on the given layer.

        Parameters
        ----------
        grid : :class:`~bag.layout.routing.RoutingGrid`
            the RoutingGrid of this Port.
        layer : int
            the layer ID.  If Negative, check if this port is on a single layer,
            then return the result.

        Returns
        -------
        bbox : BBox
            the bounding box.
        """
        result = BBox.get_invalid_bbox()
        for geo in self._pin_dict[self._get_layer(layer)]:
            if isinstance(geo, BBox):
                result = result.merge(geo)
            else:
                result = result.merge(geo.get_bbox_array(grid).get_overall_bbox())
        return result

    def transform(self, grid, loc=(0, 0), orient='R0', unit_mode=False):
        # type: (RoutingGrid, Tuple[Union[float, int], Union[float, int]], str, bool) -> Port
        """Return a new transformed Port.

        Parameters
        ----------
        grid : RoutingGrid
            the RoutingGrid of this Port.
        loc : Tuple[Union[float, int], Union[float, int]]
            the X/Y coordinate shift.
        orient : str
            the new orientation.
        unit_mode: bool
            True if location is in resolution units.
        """
        if not unit_mode:
            res = grid.resolution
            loc = (int(round(loc[0] / res)), int(round(loc[1] / res)))
        new_pin_dict = {
            lay: [geo.transform(loc=loc, orient=orient, unit_mode=True)
                  if isinstance(geo, BBox)
                  else geo.transform(grid, loc=loc, orient=orient, unit_mode=True)
                  for geo in geo_list]
            for lay, geo_list in self._pin_dict.items()}
        return Port(self._term_name, new_pin_dict, label=self._label)
class TrackManager(object):
    """A class that makes it easy to compute track locations.

    This class provides many helper methods for computing track locations and spacing when
    each track could have variable width.  All methods in this class accepts a "track_type",
    which is either a string in the track dictionary or an integer representing the track
    width.

    Parameters
    ----------
    grid : RoutingGrid
        the RoutingGrid object.
    tr_widths : Dict[str, Dict[int, int]]
        dictionary from wire types to its width on each layer.
    tr_spaces : Dict[Union[str, Tuple[str, str]], Dict[int, Union[float, int]]]
        dictionary from wire types to its spaces on each layer.
    **kwargs :
        additional options.  Supports 'half_space' (bool): True to allow
        half-track spacing by default.
    """

    def __init__(self,
                 grid,  # type: RoutingGrid
                 tr_widths,  # type: Dict[str, Dict[int, int]]
                 tr_spaces,  # type: Dict[Union[str, Tuple[str, str]], Dict[int, Union[float, int]]]
                 **kwargs
                 ):
        # type: (...) -> None
        half_space = kwargs.get('half_space', False)

        self._grid = grid
        self._tr_widths = tr_widths
        self._tr_spaces = tr_spaces
        self._half_space = half_space

    @property
    def grid(self):
        # type: () -> RoutingGrid
        """the RoutingGrid object."""
        return self._grid

    @property
    def half_space(self):
        # type: () -> bool
        """True if half-track spacing is allowed by default."""
        return self._half_space

    def get_width(self, layer_id, track_type):
        # type: (int, Union[str, int]) -> int
        """Returns the track width.

        Parameters
        ----------
        layer_id : int
            the track layer ID.
        track_type : Union[str, int]
            the track type.  An integer is interpreted directly as the width;
            unknown string types default to width 1.
        """
        if isinstance(track_type, int):
            return track_type
        if track_type not in self._tr_widths:
            return 1
        return self._tr_widths[track_type].get(layer_id, 1)

    def get_space(self,  # type: TrackManager
                  layer_id,  # type: int
                  type_tuple,  # type: Union[str, int, Tuple[Union[str, int], Union[str, int]]]
                  **kwargs):
        # type: (...) -> Union[int, float]
        """Returns the track spacing.

        Parameters
        ----------
        layer_id : int
            the track layer ID.
        type_tuple : Union[str, int, Tuple[Union[str, int], Union[str, int]]]
            If a single track type is given, will return the minimum spacing needed around that
            track type.  If a tuple of two types are given, will return the specific spacing
            between those two track types if specified.  Otherwise, returns the maximum of all the
            valid spacing.
        **kwargs:
            optional parameters.  Supports 'half_space' (bool) and 'sp_override'
            (a spacing dictionary that takes precedence over tr_spaces).
        """
        half_space = kwargs.get('half_space', self._half_space)
        sp_override = kwargs.get('sp_override', None)

        if isinstance(type_tuple, tuple):
            # if two specific wires are given, first check if any specific rules exist
            ans = self._get_space_from_tuple(layer_id, type_tuple, sp_override)
            if ans is not None:
                return ans
            ans = self._get_space_from_tuple(layer_id, type_tuple, self._tr_spaces)
            if ans is not None:
                return ans
            # no specific rules, so return max of wire spacings.
            ans = 0
            for wtype in type_tuple:
                cur_space = self._get_space_from_type(layer_id, wtype, sp_override)
                if cur_space is None:
                    cur_space = self._get_space_from_type(layer_id, wtype, self._tr_spaces)
                if cur_space is None:
                    cur_space = 0
                cur_width = self.get_width(layer_id, wtype)
                # also respect the DRC-driven minimum space for this width
                ans = max(ans, cur_space, self._grid.get_num_space_tracks(layer_id, cur_width,
                                                                          half_space=half_space))
            return ans
        else:
            cur_space = self._get_space_from_type(layer_id, type_tuple, sp_override)
            if cur_space is None:
                cur_space = self._get_space_from_type(layer_id, type_tuple, self._tr_spaces)
            if cur_space is None:
                cur_space = 0
            cur_width = self.get_width(layer_id, type_tuple)
            return max(cur_space, self._grid.get_num_space_tracks(layer_id, cur_width,
                                                                  half_space=half_space))

    @classmethod
    def _get_space_from_tuple(cls, layer_id, ntup, sp_dict):
        """Look up the spacing for the given wire type pair, trying both orderings.

        Returns None if sp_dict is None or no spacing is specified.
        """
        if sp_dict is not None:
            if ntup in sp_dict:
                return sp_dict[ntup].get(layer_id, None)
            # spacing rules are symmetric; try the reversed pair
            ntup = (ntup[1], ntup[0])
            if ntup in sp_dict:
                return sp_dict[ntup].get(layer_id, None)
        return None

    @classmethod
    def _get_space_from_type(cls, layer_id, wtype, sp_dict):
        """Look up the spacing for a single wire type.

        Also accepts rules keyed as (wtype, '') or ('', wtype).
        Returns None if sp_dict is None or no spacing is specified.
        """
        if sp_dict is None:
            return None
        if wtype in sp_dict:
            test = sp_dict[wtype]
        else:
            key = (wtype, '')
            if key in sp_dict:
                test = sp_dict[key]
            else:
                key = ('', wtype)
                if key in sp_dict:
                    test = sp_dict[key]
                else:
                    test = None

        if test is None:
            return None
        return test.get(layer_id, None)

    def get_next_track(self,  # type: TrackManager
                       layer_id,  # type: int
                       cur_idx,  # type: Union[float, int]
                       cur_type,  # type: Union[str, int]
                       next_type,  # type: Union[str, int]
                       up=True,  # type: bool
                       **kwargs):
        # type: (...) -> Union[float, int]
        """Compute the track location of a wire next to a given one.

        Parameters
        ----------
        layer_id : int
            the layer ID.
        cur_idx : Union[float, int]
            the current wire track index.
        cur_type : Union[str, int]
            the current wire type.
        next_type : Union[str, int]
            the next wire type.
        up : bool
            True to return the next track index that is larger than cur_idx.
        **kwargs :
            optional parameters.
        Returns
        -------
        next_int : Union[float, int]
            the next track index.
        """
        cur_width = self.get_width(layer_id, cur_type)
        next_width = self.get_width(layer_id, next_type)
        space = self.get_space(layer_id, (cur_type, next_type), **kwargs)
        # compute in doubled (half-track) units to keep arithmetic exact
        if up:
            par_test = int(round(2 * cur_idx + 2 * space + cur_width + next_width))
        else:
            par_test = int(round(2 * cur_idx - 2 * space - cur_width - next_width))
        # return int for whole tracks, float for half tracks
        return par_test // 2 if par_test % 2 == 0 else par_test / 2

    def place_wires(self,  # type: TrackManager
                    layer_id,  # type: int
                    type_list,  # type: Sequence[Union[str, int]]
                    start_idx=0,  # type: Union[float, int]
                    **kwargs):
        # type: (...) -> Tuple[Union[float, int], List[Union[float, int]]]
        """Place the given wires next to each other.

        Parameters
        ----------
        layer_id : int
            the layer of the tracks.
        type_list : Sequence[Union[str, int]]
            list of wire types.
        start_idx : Union[float, int]
            the starting track index.
        **kwargs:
            optional parameters for get_num_space_tracks() method of RoutingGrid.

        Returns
        -------
        num_tracks : Union[float, int]
            number of tracks used.
        locations : List[Union[float, int]]
            the center track index of each wire.
        """
        if not type_list:
            return 0, []

        prev_type = type_list[0]
        w0 = self.get_width(layer_id, prev_type)

        # center index of the first wire, in doubled units
        par_test = int(round(2 * start_idx + w0 - 1))
        mid_idx = par_test // 2 if par_test % 2 == 0 else par_test / 2
        ans = [mid_idx]
        # place each subsequent wire next to the previous one
        for idx in range(1, len(type_list)):
            ans.append(self.get_next_track(layer_id, ans[-1], type_list[idx - 1],
                                           type_list[idx], up=True, **kwargs))

        # total span covers half of the first and last wire widths plus the
        # distance between their centers
        w1 = self.get_width(layer_id, type_list[-1])
        par_test = int(round(w0 + w1 + 2 * (ans[-1] - ans[0])))
        ntr = par_test // 2 if par_test % 2 == 0 else par_test / 2

        return ntr, ans

    @classmethod
    def _get_align_delta(cls, tot_ntr, num_used, alignment):
        """Compute the track index shift for the given alignment code."""
        if alignment == -1 or num_used == tot_ntr:
            # we already aligned to left
            return 0
        elif alignment == 0:
            # center tracks
            delta_htr = int((tot_ntr - num_used) * 2) // 2
            return delta_htr / 2 if delta_htr % 2 == 1 else delta_htr // 2
        elif alignment == 1:
            # align to right
            return tot_ntr - num_used
        else:
            raise ValueError('Unknown alignment code: %d' % alignment)

    def align_wires(self,  # type: TrackManager
                    layer_id,  # type: int
                    type_list,  # type: Sequence[Union[str, int]]
                    tot_ntr,  # type: Union[float, int]
                    alignment=0,  # type: int
                    start_idx=0,  # type: Union[float, int]
                    **kwargs):
        # type: (...) -> List[Union[float, int]]
        """Place the given wires in the given space with the specified alignment.

        Parameters
        ----------
        layer_id : int
            the layer of the tracks.
        type_list : Sequence[Union[str, int]]
            list of wire types.
        tot_ntr : Union[float, int]
            total available space in number of tracks.
        alignment : int
            If alignment == -1, will "left adjust" the wires (left is the lower index direction).
            If alignment == 0, will center the wires in the middle.
            If alignment == 1, will "right adjust" the wires.
        start_idx : Union[float, int]
            the starting track index.
        **kwargs:
            optional parameters for place_wires().

        Returns
        -------
        locations : List[Union[float, int]]
            the center track index of each wire.
        """
        num_used, idx_list = self.place_wires(layer_id, type_list, start_idx=start_idx, **kwargs)
        if num_used > tot_ntr:
            raise ValueError('Given tracks occupy more space than given.')

        delta = self._get_align_delta(tot_ntr, num_used, alignment)
        return [idx + delta for idx in idx_list]

    def spread_wires(self,  # type: TrackManager
                     layer_id,  # type: int
                     type_list,  # type: Sequence[Union[str, int]]
                     tot_ntr,  # type: Union[float, int]
                     sp_type,  # type: Union[str, int, Tuple[Union[str, int], Union[str, int]]]
                     alignment=0,  # type: int
                     start_idx=0,  # type: Union[float, int]
                     max_sp=10000,  # type: int
                     sp_override=None,
                     ):
        # type: (...) -> List[Union[float, int]]
        """Spread out the given wires in the given space.

        This method tries to spread out wires by increasing the space around the given
        wire/combination of wires.

        Parameters
        ----------
        layer_id : int
            the layer of the tracks.
        type_list : Sequence[Union[str, int]]
            list of wire types.
        tot_ntr : Union[float, int]
            total available space in number of tracks.
        sp_type : Union[str, Tuple[str, str]]
            The space to increase.
        alignment : int
            If alignment == -1, will "left adjust" the wires (left is the lower index direction).
            If alignment == 0, will center the wires in the middle.
            If alignment == 1, will "right adjust" the wires.
        start_idx : Union[float, int]
            the starting track index.
        max_sp : int
            maximum space.
        sp_override :
            tracking spacing override dictionary.

        Returns
        -------
        locations : List[Union[float, int]]
            the center track index of each wire.
        """
        # override the target spacing so place_wires() uses our candidate value
        if not sp_override:
            sp_override = {sp_type: {layer_id: 0}}
        else:
            sp_override = sp_override.copy()
            sp_override[sp_type] = {layer_id: 0}
        # binary search on the spacing, in doubled (half-track) units
        cur_sp = int(round(2 * self.get_space(layer_id, sp_type)))
        bin_iter = BinaryIterator(cur_sp, None)
        while bin_iter.has_next():
            new_sp = bin_iter.get_next()
            if new_sp > 2 * max_sp:
                break
            sp_override[sp_type][layer_id] = new_sp / 2 if new_sp % 2 == 1 else new_sp // 2
            tmp = self.place_wires(layer_id, type_list, start_idx=start_idx,
                                   sp_override=sp_override)
            if tmp[0] > tot_ntr:
                # too much space; wires no longer fit
                bin_iter.down()
            else:
                bin_iter.save_info(tmp)
                bin_iter.up()

        if bin_iter.get_last_save_info() is None:
            raise ValueError('No solution found.')

        num_used, idx_list = bin_iter.get_last_save_info()
        delta = self._get_align_delta(tot_ntr, num_used, alignment)
        return [idx + delta for idx in idx_list]
================================================
FILE: bag/layout/routing/fill.py
================================================
# -*- coding: utf-8 -*-
"""This module defines classes that provides automatic fill utility on a grid.
"""
from typing import TYPE_CHECKING, Optional, Union, List, Tuple, Any, Generator
from rtree.index import Index, Property
from ...layout.util import BBox
from ...util.search import BinaryIterator, minimize_cost_golden
if TYPE_CHECKING:
from ...layout.util import BBoxArray
from .grid import RoutingGrid
class RectIndex(object):
    """A R-tree that stores all tracks on a layer.

    Each entry stores the original rectangle plus the X/Y margins it was
    expanded by when inserted.
    """

    def __init__(self, resolution, basename=None, overwrite=False):
        # type: (float, Optional[str], bool) -> None
        self._res = resolution
        self._cnt = 0
        if basename is None:
            # in-memory index
            self._index = Index(interleaved=True)
        else:
            # file-backed index
            self._index = Index(basename, interleaved=True,
                                properties=Property(overwrite=overwrite))

    @property
    def bound_box(self):
        # type: () -> BBox
        """Bounding box enclosing everything recorded in this index."""
        xl, yb, xr, yt = self._index.bounds
        return BBox(int(xl), int(yb), int(xr), int(yt), self._res, unit_mode=True)

    def close(self):
        """Close the underlying R-tree index."""
        self._index.close()

    def record_box(self, box, dx, dy):
        # type: (BBox, int, int) -> None
        """Record the given BBox."""
        # index on the margin-expanded box, but remember the real box + margins
        expanded = box.expand(dx=dx, dy=dy, unit_mode=True)
        payload = (box.left_unit, box.bottom_unit, box.right_unit, box.top_unit, dx, dy)
        self._index.insert(self._cnt, expanded.get_bounds(unit_mode=True), obj=payload)
        self._cnt += 1

    def rect_iter(self):
        # type: () -> Generator[Tuple[BBox, int, int], None, None]
        """Iterate over all recorded (BBox, dx, dy) entries."""
        res = self._res
        for xl, yb, xr, yt, sdx, sdy in self._index.intersection(self._index.bounds,
                                                                 objects='raw'):
            yield BBox(xl, yb, xr, yt, res, unit_mode=True), sdx, sdy

    def intersection_iter(self, box, dx=0, dy=0):
        # type: (BBox, int, int) -> Generator[BBox, None, None]
        """Finds all bounding box that intersects the given box."""
        res = self._res
        test_box = box.expand(dx=dx, dy=dy, unit_mode=True)
        hit_iter = self._index.intersection(test_box.get_bounds(unit_mode=True), objects='raw')
        for xl, yb, xr, yt, sdx, sdy in hit_iter:
            cand = BBox(xl, yb, xr, yt, res, unit_mode=True)
            # real intersection if either margin-expanded box overlaps the other
            cand_sp = cand.expand(dx=sdx, dy=sdy, unit_mode=True)
            if cand_sp.overlaps(box) or test_box.overlaps(cand):
                yield cand.expand(dx=max(dx, sdx), dy=max(dy, sdy), unit_mode=True)

    def intersection_rect_iter(self, box):
        # type: (BBox) -> Generator[BBox, None, None]
        """Finds all bounding box that intersects the given box."""
        res = self._res
        hit_iter = self._index.intersection(box.get_bounds(unit_mode=True), objects='raw')
        for xl, yb, xr, yt, _, _ in hit_iter:
            yield BBox(xl, yb, xr, yt, res, unit_mode=True)
class UsedTracks(object):
    """A R-tree that stores all tracks in a template.

    Parameters
    ----------
    save_file_basename : Optional[str]
        if given, the per-layer R-tree indices are backed by files with this basename.
    overwrite : bool
        True to overwrite existing save files.
    """

    def __init__(self, save_file_basename=None, overwrite=False):
        # type: (Optional[str], bool) -> None
        self._idx_table = {}  # dictionary from layer ID to RectIndex
        self._save_file_basename = save_file_basename
        self._overwrite = overwrite

    def __iter__(self):
        """Iterate over all layer IDs with recorded tracks."""
        # BUG FIX: must return an iterator; returning dict.keys() (a view)
        # made ``for lay in used_tracks`` raise TypeError.
        return iter(self._idx_table)

    def _get_index(self, layer_id, res):
        # type: (int, float) -> RectIndex
        """Return the RectIndex for the given layer, creating it if necessary."""
        index = self._idx_table.get(layer_id, None)
        if index is None:
            if self._save_file_basename is None:
                basename = None
            else:
                basename = self._save_file_basename + ('_%d' % layer_id)
            index = self._idx_table[layer_id] = RectIndex(res, basename, self._overwrite)
        return index

    def get_track_bbox(self, layer_id):
        # type: (int) -> BBox
        """Returns the bounding box of all tracks on the given layer."""
        if layer_id not in self._idx_table:
            return BBox.get_invalid_bbox()
        return self._idx_table[layer_id].bound_box

    def track_box_iter(self):
        # type: () -> Generator[Tuple[int, BBox], None, None]
        """Iterate over (layer ID, bounding box) pairs for all layers."""
        for layer_id, rect_idx in self._idx_table.items():
            yield layer_id, rect_idx.bound_box

    def record_box(self, layer_id, box, dx, dy, res):
        # type: (int, BBox, int, int, float) -> None
        """Record the given bounding box with the given margins on the given layer."""
        self._get_index(layer_id, res).record_box(box, dx, dy)

    def close(self):
        """Close all underlying R-tree indices."""
        for index in self._idx_table.values():
            index.close()

    def record_rect(self, grid, layer_name, box_arr, dx=-1, dy=-1):
        # type: (RoutingGrid, Union[Tuple[str, str], str], BBoxArray, int, int) -> Optional[int]
        """Record the given bounding box array.  Returns the added layer ID.

        Returns None if the layer is excluded or not on the routing grid.
        Negative dx/dy means "use the technology minimum space" for that margin.
        """
        tech_info = grid.tech_info

        if isinstance(layer_name, tuple):
            # TODO: find more process-portable fix?
            if layer_name[1] == 'exclude':
                return None
            layer_name = layer_name[0]
        try:
            layer_id = tech_info.get_layer_id(layer_name)
        except ValueError:
            return None
        if layer_id not in grid:
            return None

        index = self._get_index(layer_id, grid.resolution)

        # default margins: line-end space along the wire, min space across it
        layer_type = tech_info.get_layer_type(layer_name)
        if grid.get_direction(layer_id) == 'x':
            w = box_arr.base.height_unit
            dx0 = tech_info.get_min_line_end_space(layer_type, w, unit_mode=True)
            dy0 = tech_info.get_min_space(layer_type, w, unit_mode=True, same_color=False)
        else:
            w = box_arr.base.width_unit
            dy0 = tech_info.get_min_line_end_space(layer_type, w, unit_mode=True)
            dx0 = tech_info.get_min_space(layer_type, w, unit_mode=True, same_color=False)

        if dx < 0:
            dx = dx0
        if dy < 0:
            dy = dy0
        for box in box_arr:
            index.record_box(box, dx, dy)

        return layer_id

    def all_rect_iter(self):
        # type: () -> Generator[Tuple[int, BBox, int, int], None, None]
        """Iterate over all recorded (layer ID, BBox, dx, dy) entries."""
        for layer_id, index in self._idx_table.items():
            for box, dx, dy in index.rect_iter():
                yield layer_id, box, dx, dy

    def intersection_rect_iter(self, layer_id, box):
        # type: (int, BBox) -> Generator[BBox, None, None]
        """Finds all bounding box that intersects the given box."""
        if layer_id in self._idx_table:
            yield from self._idx_table[layer_id].intersection_rect_iter(box)

    def blockage_iter(self, layer_id, test_box, spx=0, spy=0):
        # type: (int, BBox, int, int) -> Generator[BBox, None, None]
        """Iterate over all recorded blockages overlapping the given box."""
        if layer_id in self._idx_table:
            yield from self._idx_table[layer_id].intersection_iter(test_box, dx=spx, dy=spy)
def fill_symmetric_const_space(area, sp_max, n_min, n_max, offset=0):
    # type: (int, int, int, int, int) -> List[Tuple[int, int]]
    """Fill the given 1-D area given maximum space spec alone.

    The method draws the minimum number of fill blocks needed to satisfy maximum spacing spec.
    The given area is filled with the following properties:

    1. all spaces are as close to the given space as possible (differ by at most 1),
       without exceeding it.
    2. the filled area is as uniform as possible.
    3. the filled area is symmetric about the center.
    4. fill is drawn as much as possible given the above constraints.

    fill is drawn such that space blocks abuts both area boundaries.

    Parameters
    ----------
    area : int
        the 1-D area to fill.
    sp_max : int
        the maximum space.
    n_min : int
        minimum fill length.
    n_max : int
        maximum fill length
    offset : int
        the fill area starting coordinate.

    Returns
    -------
    fill_intv : List[Tuple[int, int]]
        list of fill intervals.
    """
    if n_min > n_max:
        raise ValueError('min fill length = %d > %d = max fill length' % (n_min, n_max))

    def _fill(nfill, blk, inv, on_edge):
        # thin wrapper fixing the parameters common to every helper call
        return fill_symmetric_helper(area, nfill, blk, offset=offset, inc_sp=False,
                                     invert=inv, fill_on_edge=on_edge, cyclic=False)

    # Filled area is A - (N + 1) * sp, so maximizing fill means minimizing the
    # block count N = ceil((A - sp) / (f + sp)); that count is smallest when
    # the fill length f is largest, hence start from n_max.
    num_fill = -(-(area - sp_max) // (n_max + sp_max))
    if num_fill == 0:
        # the whole area fits within one maximum space; no fill needed
        return []

    # block length if num_fill blocks are separated by exactly sp_max
    blk_len = (area - (num_fill + 1) * sp_max) // num_fill
    if blk_len >= n_min:
        # num_fill blocks of legal length satisfy the spec
        return _fill(num_fill, sp_max, False, False)[0]

    # blocks would be shorter than n_min; shrink the space between blocks
    # instead, keeping num_fill blocks of length n_min (or n_min + 1).
    sp_max, remainder = divmod(area - num_fill * n_min, num_fill + 1)
    if n_max > n_min or remainder == 0:
        # spacing divides evenly, or two distinct fill lengths are permitted
        return _fill(num_fill, sp_max, False, False)[0]

    # only a single fill length is allowed; swap the roles of fill and space
    # so the helper solves for the space blocks instead.
    sol, num_diff_sp = _fill(num_fill + 1, n_max, True, True)
    if num_diff_sp == 0:
        # single fill length achieved
        return sol
    # num_fill + 1 must be even here; num_fill + 2 guarantees a solution
    return _fill(num_fill + 2, n_max, True, True)[0]
def fill_symmetric_min_density_info(area, targ_area, n_min, n_max, sp_min,
                                    sp_max=None, fill_on_edge=True, cyclic=False):
    # type: (int, int, int, int, int, Optional[int], bool, bool) -> Tuple[Tuple[Any, ...], bool]
    """Fill the given 1-D area as little as possible.

    Compute fill location such that the given area is filled with the following properties:

    1. the area is as uniform as possible.
    2. the area is symmetric with respect to the center
    3. all fill blocks have lengths between n_min and n_max.
    4. all fill blocks are at least sp_min apart.

    Parameters
    ----------
    area : int
        total number of space we need to fill.
    targ_area : int
        target minimum fill area.  If not achievable, will do the best that we can.
    n_min : int
        minimum length of the fill block.  Must be less than or equal to n_max.
    n_max : int
        maximum length of the fill block.
    sp_min : int
        minimum space between each fill block.
    sp_max : Optional[int]
        if given, make sure space between blocks does not exceed this value.
        Must be greater than sp_min
    fill_on_edge : bool
        If True, we put fill blocks on area boundary.  Otherwise, we put space block on
        area boundary.
    cyclic : bool
        If True, we assume we're filling in a cyclic area (it wraps around).

    Returns
    -------
    info : Tuple[Any, ...]
        the fill information tuple.
    invert : bool
        True if space/fill is inverted.
    """
    # first, fill as much as possible
    max_result = fill_symmetric_max_density_info(area, targ_area, n_min, n_max, sp_min,
                                                 sp_max=sp_max, fill_on_edge=fill_on_edge,
                                                 cyclic=cyclic)
    fill_area, nfill_opt = max_result[0][:2]
    if fill_area <= targ_area:
        # we cannot/barely meet area spec; return max result
        return max_result

    # now, reduce fill by doing binary search on n_max
    # (keeping the optimal number of fill blocks fixed)
    n_max_iter = BinaryIterator(n_min, n_max)
    while n_max_iter.has_next():
        n_max_cur = n_max_iter.get_next()
        try:
            info, invert = fill_symmetric_max_num_info(area, nfill_opt, n_min, n_max_cur, sp_min,
                                                       fill_on_edge=fill_on_edge, cyclic=cyclic)
            # when inverted, info[0] counts space, not fill
            fill_area_cur = area - info[0] if invert else info[0]
            if invert:
                # inverted solution: the largest "fill" block is the largest
                # space between real fill blocks
                _, sp_cur = _get_min_max_blk_len(info)
            else:
                # NOTE(review): info[1][2] appears to count spaces using the
                # larger (sp_min + 1) value -- confirm against
                # fill_symmetric_max_num_info.
                sp_cur = sp_min if info[1][2] == 0 else sp_min + 1
            if fill_area_cur >= targ_area and (sp_max is None or sp_cur <= sp_max):
                # both specs passed
                n_max_iter.save_info((info, invert))
                n_max_iter.down()
            else:
                # reduce n_max too much
                n_max_iter.up()
        except ValueError:
            # get here if n_min == n_max and there's no solution.
            n_max_iter.up()

    last_save = n_max_iter.get_last_save_info()
    if last_save is None:
        # no solution, return max result
        return max_result

    # return new minimum solution
    info, invert = last_save
    fill_area = area - info[0] if invert else info[0]
    return (fill_area, nfill_opt, info[1]), invert
def fill_symmetric_max_density_info(area, targ_area, n_min, n_max, sp_min,
                                    sp_max=None, fill_on_edge=True, cyclic=False):
    # type: (int, int, int, int, int, Optional[int], bool, bool) -> Tuple[Tuple[Any, ...], bool]
    """Fill the given 1-D area as much as possible.

    Compute fill location such that the given area is filled with the following properties:

    1. the area is as uniform as possible.
    2. the area is symmetric with respect to the center
    3. all fill blocks have lengths between n_min and n_max.
    4. all fill blocks are at least sp_min apart.

    Parameters
    ----------
    area : int
        total number of space we need to fill.
    targ_area : int
        target minimum fill area. If not achievable, will do the best that we can.
    n_min : int
        minimum length of the fill block. Must be less than or equal to n_max.
    n_max : int
        maximum length of the fill block.
    sp_min : int
        minimum space between each fill block.
    sp_max : Optional[int]
        if given, make sure space between blocks does not exceed this value.
        Must be greater than sp_min.
    fill_on_edge : bool
        If True, we put fill blocks on area boundary. Otherwise, we put space block on
        area boundary.
    cyclic : bool
        If True, we assume we're filling in a cyclic area (it wraps around).

    Returns
    -------
    info : Tuple[Any, ...]
        the fill information tuple: (fill_area, num_fill_blocks, interval_args).
    invert : bool
        True if space/fill is inverted.

    Raises
    ------
    ValueError
        if sp_max is given but is not greater than sp_min, or if no solution
        exists for the given sp_max.
    """
    # min area test: find the smallest legal number of fill blocks.
    nfill_min = 1
    try:
        try:
            fill_symmetric_max_num_info(area, nfill_min, n_min, n_max, sp_min,
                                        fill_on_edge=fill_on_edge, cyclic=cyclic)
        except (NoFillAbutEdgeError, NoFillChoiceError):
            # we need at least 2 fills
            nfill_min = 2
            fill_symmetric_max_num_info(area, nfill_min, n_min, n_max, sp_min,
                                        fill_on_edge=fill_on_edge, cyclic=cyclic)
    except InsufficientAreaError:
        # cannot fill at all; return the trivial zero-fill solution
        info, invert = fill_symmetric_max_num_info(area, 0, n_min, n_max, sp_min,
                                                   fill_on_edge=fill_on_edge, cyclic=cyclic)
        return (0, 0, info[1]), invert

    # fill area first monotonically increases with number of fill blocks, then monotonically
    # decreases (as we start adding more space than fill). Therefore, a golden section search
    # can be done on the number of fill blocks to determine the optimum.
    def golden_fun(nfill):
        # cost function: achieved fill area for nfill blocks (0 if infeasible).
        try:
            info2, invert2 = fill_symmetric_max_num_info(area, nfill, n_min, n_max, sp_min,
                                                         fill_on_edge=fill_on_edge, cyclic=cyclic)
        except ValueError:
            return 0
        if invert2:
            # when inverted, info2[0] records the space area instead of the fill area
            return area - info2[0]
        else:
            return info2[0]

    if sp_max is not None:
        if sp_max <= sp_min:
            raise ValueError('Cannot have sp_max = %d <= %d = sp_min' % (sp_max, sp_min))

        # find minimum nfill that meets sp_max spec
        def golden_fun2(nfill):
            # cost function: negated maximum space used with nfill blocks, so the
            # golden search finds the smallest nfill with max space <= sp_max.
            try:
                info2, invert2 = fill_symmetric_max_num_info(area, nfill, n_min, n_max, sp_min,
                                                             fill_on_edge=fill_on_edge,
                                                             cyclic=cyclic)
                if invert2:
                    # inverted solution: "block" lengths are really space lengths
                    _, sp_cur = _get_min_max_blk_len(info2)
                else:
                    # spaces are sp_min everywhere, or sp_min + 1 if any space differs
                    sp_cur = sp_min if info2[1][2] == 0 else sp_min + 1
                return -sp_cur
            except ValueError:
                # infeasible; report a value that can never meet the target
                return -sp_max - 1

        min_result = minimize_cost_golden(golden_fun2, -sp_max, offset=nfill_min, maxiter=None)
        if min_result.x is None:
            # try even steps
            min_result = minimize_cost_golden(golden_fun2, -sp_max, offset=nfill_min,
                                              step=2, maxiter=None)
            nfill_min = min_result.x
            if nfill_min is None:
                # should never get here...
                raise ValueError('No solution for sp_max = %d' % sp_max)
        else:
            nfill_min = min_result.x

    min_result = minimize_cost_golden(golden_fun, targ_area, offset=nfill_min, maxiter=None)
    nfill_opt = min_result.x
    if nfill_opt is None:
        # target area unreachable; fall back to the fill-maximizing block count
        nfill_opt = min_result.xmax
    info, invert = fill_symmetric_max_num_info(area, nfill_opt, n_min, n_max, sp_min,
                                               fill_on_edge=fill_on_edge, cyclic=cyclic)
    fill_area = area - info[0] if invert else info[0]
    return (fill_area, nfill_opt, info[1]), invert
def fill_symmetric_max_density(area,  # type: int
                               targ_area,  # type: int
                               n_min,  # type: int
                               n_max,  # type: int
                               sp_min,  # type: int
                               offset=0,  # type: int
                               sp_max=None,  # type: Optional[int]
                               fill_on_edge=True,  # type: bool
                               cyclic=False,  # type: bool
                               ):
    # type: (...) -> Tuple[List[Tuple[int, int]], int]
    """Fill the given 1-D area as much as possible and return the fill intervals.

    This is a convenience wrapper around fill_symmetric_max_density_info() that
    converts the fill information tuple into a concrete [start, stop) interval
    list.  The solution is symmetric about the center, as uniform as possible,
    uses block lengths in [n_min, n_max], and keeps blocks at least sp_min
    apart (and at most sp_max apart, when given).

    Parameters
    ----------
    area : int
        total number of space we need to fill.
    targ_area : int
        target minimum fill area. If not achievable, will do the best that we can.
    n_min : int
        minimum length of the fill block. Must be less than or equal to n_max.
    n_max : int
        maximum length of the fill block.
    sp_min : int
        minimum space between each fill block.
    offset : int
        the starting coordinate of the total interval.
    sp_max : Optional[int]
        if given, make sure space between blocks does not exceed this value.
        Must be greater than sp_min.
    fill_on_edge : bool
        If True, we put fill blocks on area boundary. Otherwise, we put space block on
        area boundary.
    cyclic : bool
        If True, we assume we're filling in a cyclic area (it wraps around).

    Returns
    -------
    fill_interval : List[Tuple[int, int]]
        a list of [start, stop) intervals that needs to be filled.
    fill_area : int
        total filled area. May or may not meet minimum density requirement.
    """
    info, invert = fill_symmetric_max_density_info(area, targ_area, n_min, n_max, sp_min,
                                                   sp_max=sp_max, fill_on_edge=fill_on_edge,
                                                   cyclic=cyclic)
    fill_area, _, intv_args = info
    intv_list, _ = fill_symmetric_interval(*intv_args, offset=offset, invert=invert)
    return intv_list, fill_area
class InsufficientAreaError(ValueError):
    """Raised when the given area is too small to hold the requested fill."""
class FillTooSmallError(ValueError):
    """Raised when a fill solution would need blocks shorter than the minimum length."""
class NoFillAbutEdgeError(ValueError):
    """Raised when a single fill block cannot abut both edges of the area."""
class NoFillChoiceError(ValueError):
    """Raised when no fill solution exists with a single block length (n_min == n_max)."""
class EmptyRegionError(ValueError):
    """Raised when a zero-fill region specification is inconsistent with the space value."""
def fill_symmetric_max_num_info(tot_area, nfill, n_min, n_max, sp_min,
                                fill_on_edge=True, cyclic=False):
    # type: (int, int, int, int, int, bool, bool) -> Tuple[Tuple[Any, ...], bool]
    """Fill the given 1-D area as much as possible with given number of fill blocks.

    Compute fill location such that the given area is filled with the following properties:

    1. the area is as uniform as possible.
    2. the area is symmetric with respect to the center
    3. the area is filled as much as possible with exactly nfill blocks,
       with lengths between n_min and n_max.
    4. all fill blocks are at least sp_min apart.

    Parameters
    ----------
    tot_area : int
        total number of space we need to fill.
    nfill : int
        number of fill blocks to draw.
    n_min : int
        minimum length of the fill block. Must be less than or equal to n_max.
    n_max : int
        maximum length of the fill block.
    sp_min : int
        minimum space between each fill block.
    fill_on_edge : bool
        If True, we put fill blocks on area boundary. Otherwise, we put space block on
        area boundary.
    cyclic : bool
        If True, we assume we're filling in a cyclic area (it wraps around).

    Returns
    -------
    info : Tuple[Any, ...]
        the fill information tuple.
    invert : bool
        True if space/fill is inverted, i.e. the info tuple describes the space
        blocks instead of the fill blocks.

    Raises
    ------
    ValueError
        if nfill, n_min, or n_max is invalid.
    InsufficientAreaError
        if nfill blocks of at least n_min cannot fit in tot_area.
    FillTooSmallError
        if the unconstrained solution uses a block shorter than n_min.
    NoFillAbutEdgeError
        if a single fill block cannot abut both edges.
    NoFillChoiceError
        if n_min == n_max but no equal-length solution exists.
    """
    # error checking
    if nfill < 0:
        raise ValueError('nfill = %d < 0' % nfill)
    if n_min > n_max:
        raise ValueError('n_min = %d > %d = n_max' % (n_min, n_max))
    if n_min <= 0:
        raise ValueError('n_min = %d <= 0' % n_min)

    if nfill == 0:
        # no fill at all; express the whole area as one "space" region
        return _fill_symmetric_info(tot_area, 0, tot_area, inc_sp=False,
                                    fill_on_edge=False, cyclic=False), False

    # check no solution: nfill minimum-length blocks plus nsp minimum spaces must fit.
    # the number of spaces depends on boundary conditions.
    sp_delta = 0 if cyclic else (-1 if fill_on_edge else 1)
    nsp = nfill + sp_delta
    if n_min * nfill + nsp * sp_min > tot_area:
        raise InsufficientAreaError('Cannot draw %d fill blocks with n_min = %d' % (nfill, n_min))

    # first, try drawing nfill blocks without block length constraint.
    # may throw exception if no solution
    info = _fill_symmetric_info(tot_area, nfill, sp_min, inc_sp=True,
                                fill_on_edge=fill_on_edge, cyclic=cyclic)
    bmin, bmax = _get_min_max_blk_len(info)
    if bmin < n_min:
        # could get here if cyclic = True, fill_on_edge = True, n_min is odd
        # in this case actually no solution
        raise FillTooSmallError('Cannot draw %d fill blocks with n_min = %d' % (nfill, n_min))
    if bmax <= n_max:
        # we satisfy block length constraint, just return
        return info, False

    # we broke maximum block length constraint, so we flip
    # space and fill to have better control on fill length
    if nsp == 0 and n_max != tot_area and n_max - 1 != tot_area:
        # we get here only if nfill = 1 and fill_on_edge is True.
        # In this case there's no way to draw only one fill and abut both edges
        raise NoFillAbutEdgeError('Cannot draw only one fill abutting both edges.')
    # re-solve with roles flipped: draw nsp "blocks" of space, separated by fill
    # of length at most n_max (inc_sp=False allows n_max - 1 as well).
    info = _fill_symmetric_info(tot_area, nsp, n_max, inc_sp=False,
                                fill_on_edge=not fill_on_edge, cyclic=cyclic)
    num_diff_sp = info[1][2]
    if num_diff_sp > 0 and n_min == n_max:
        # no solution with same fill length, but we must have same fill length everywhere.
        raise NoFillChoiceError('Cannot draw %d fill blocks with '
                                'n_min = n_max = %d' % (nfill, n_min))
    return info, True
def _fill_symmetric_info(tot_area, num_blk_tot, sp, inc_sp=True, fill_on_edge=True, cyclic=False):
# type: (int, int, int, bool, bool, bool) -> Tuple[int, Tuple[Any, ...]]
"""Calculate symmetric fill information.
This method computes fill information without generating fill interval list. This makes
it fast to explore various fill settings. See fill_symmetric_helper() to see a description
of the fill algorithm.
Parameters
----------
tot_area : int
the fill area length.
num_blk_tot : int
total number of fill blocks to use.
sp : int
space between blocks. We will try our best to keep this spacing constant.
inc_sp : bool
If True, then we use sp + 1 if necessary. Otherwise, we use sp - 1
if necessary.
fill_on_edge : bool
If True, we put fill blocks on area boundary. Otherwise, we put space block on
area boundary.
cyclic : bool
If True, we assume we're filling in a cyclic area (it wraps around).
Returns
-------
fill_area : int
total filled area.
args : Tuple[Any, ...]
input arguments to _fill_symmetric_interval()
"""
# error checking
if num_blk_tot < 0:
raise ValueError('num_blk_tot = %d < 0' % num_blk_tot)
adj_sp_sgn = 1 if inc_sp else -1
if num_blk_tot == 0:
# special case, no fill at all
if sp == tot_area:
return 0, (tot_area, tot_area, 0, tot_area, 0, 0, 0, 0, -1, tot_area, False, False)
elif sp == tot_area - adj_sp_sgn:
return 0, (tot_area, tot_area, 1, tot_area, 0, 0, 0, 0, -1, tot_area, False, False)
else:
raise EmptyRegionError('Cannot have empty region = %d with sp = %d' % (tot_area, sp))
# determine the number of space blocks
if cyclic:
num_sp_tot = num_blk_tot
else:
if fill_on_edge:
num_sp_tot = num_blk_tot - 1
else:
num_sp_tot = num_blk_tot + 1
# compute total fill area
fill_area = tot_area - num_sp_tot * sp
# find minimum fill length
blk_len, num_blk1 = divmod(fill_area, num_blk_tot)
# find number of fill intervals
if cyclic and fill_on_edge:
# if cyclic and fill on edge, number of intervals = number of blocks + 1,
# because the interval on the edge double counts.
num_blk_interval = num_blk_tot + 1
else:
num_blk_interval = num_blk_tot
# find space length on edge, if applicable
num_diff_sp = 0
sp_edge = sp
if cyclic and not fill_on_edge and sp_edge % 2 == 1:
# edge space must be even. To fix, we convert space to fill
num_diff_sp += 1
sp_edge += adj_sp_sgn
num_blk1 += -adj_sp_sgn
fill_area += -adj_sp_sgn
if num_blk1 == num_blk_tot:
blk_len += 1
num_blk1 = 0
elif num_blk1 < 0:
blk_len -= 1
num_blk1 += num_blk_tot
mid_blk_len = mid_sp_len = -1
# now we have num_blk_tot blocks with length blk0. We have num_blk1 fill units
# remaining that we need to distribute to the fill blocks
if num_blk_interval % 2 == 0:
# we have even number of fill intervals, so we have a space block in the middle
mid_sp_len = sp
# test condition for cyclic and fill_on_edge is different than other cases
test_val = num_blk1 + blk_len if cyclic and fill_on_edge else num_blk1
if test_val % 2 == 1:
# we cannot distribute remaining fill units evenly, have to convert to space
num_diff_sp += 1
mid_sp_len += adj_sp_sgn
num_blk1 += -adj_sp_sgn
fill_area += -adj_sp_sgn
if num_blk1 == num_blk_tot:
blk_len += 1
num_blk1 = 0
elif num_blk1 < 0:
blk_len -= 1
num_blk1 += num_blk_tot
if num_blk1 % 2 == 1:
# the only way we get here is if cyclic and fill_on_edge is True.
# in this case, we need to add one to fill unit to account
# for edge fill double counting.
num_blk1 += 1
# get number of half fill intervals
m = num_blk_interval // 2
else:
# we have odd number of fill intervals, so we have a fill block in the middle
mid_blk_len = blk_len
if cyclic and fill_on_edge:
# special handling for this case, because edge fill block must be even
if blk_len % 2 == 0 and num_blk1 % 2 == 1:
# assign one fill unit to middle block
mid_blk_len += 1
num_blk1 -= 1
elif blk_len % 2 == 1:
# edge fill block is odd; we need odd number of fill units so we can
# correct this.
if num_blk1 % 2 == 0:
# we increment middle fill block to get odd number of fill units
mid_blk_len += 1
num_blk1 -= 1
if num_blk1 < 0:
# we get here only if num_blk1 == 0. This means middle blk
# borrow one unit from edge block. So we set num_blk1 to
# num_blk_tot - 2 to make sure rest of the blocks are one
# larger than edge block.
blk_len -= 1
num_blk1 = num_blk_tot - 2
else:
# Add one to account for edge fill double counting.
num_blk1 += 1
else:
# Add one to account for edge fill double counting.
num_blk1 += 1
elif num_blk1 % 2 == 1:
# assign one fill unit to middle block
mid_blk_len += 1
num_blk1 -= 1
m = (num_blk_interval - 1) // 2
if blk_len <= 0:
raise InsufficientAreaError('Insufficent area; cannot draw fill with length <= 0.')
# now we need to distribute the fill units evenly. We do so using cumulative modding
num_large = num_blk1 // 2
num_small = m - num_large
if cyclic and fill_on_edge:
# if cyclic and fill is on the edge, we need to make sure left-most block is even length
if blk_len % 2 == 0:
blk1, blk0 = blk_len, blk_len + 1
k = num_small
else:
blk0, blk1 = blk_len, blk_len + 1
k = num_large
else:
# make left-most fill interval be the most frequenct fill length
if num_large >= num_small:
blk0, blk1 = blk_len, blk_len + 1
k = num_large
else:
blk1, blk0 = blk_len, blk_len + 1
k = num_small
return fill_area, (tot_area, sp, num_diff_sp, sp_edge, blk0, blk1, k, m,
mid_blk_len, mid_sp_len, fill_on_edge, cyclic)
def _get_min_max_blk_len(fill_info):
"""Helper method to get minimum/maximum fill lengths used."""
blk0, blk1, blkm = fill_info[1][4], fill_info[1][5], fill_info[1][8]
if blkm < 0:
blkm = blk0
return min(blk0, blk1, blkm), max(blk0, blk1, blkm)
def fill_symmetric_interval(tot_area, sp, num_diff_sp, sp_edge, blk0, blk1, k, m, mid_blk_len,
                            mid_sp_len, fill_on_edge, cyclic, offset=0, invert=False):
    """Helper function, construct interval list from output of _fill_symmetric_info().

    Builds the first half of the (symmetric) interval list explicitly, then
    mirrors it about the center to produce the second half.

    num_diff_sp = number of space blocks that has length different than sp
    sp_edge = if cyclic and not fill on edge, the edge space length.
    m = number of half fill blocks.
    blk1 = length of left-most fill block.
    blk0 = the second possible fill block length.
    k = number of half fill blocks with length = blk1.
    mid_blk_len = if > 0, length of middle fill block. This is either blk0 or blk1.
    mid_sp_len = if mid_blk_len < 0, length of the middle space block.
    offset = starting coordinate of the area.
    invert = if True, return space intervals instead of fill intervals.

    Returns (interval_list, num_diff_sp).
    """
    ans = []
    if cyclic:
        # in cyclic mode the edge block/space straddles the area boundary,
        # so start half an edge block (or edge space) before offset.
        if fill_on_edge:
            marker = offset - blk1 // 2
        else:
            marker = offset - sp_edge // 2
    else:
        marker = offset
    # cumulative modding distributes the k longer blocks evenly among the m blocks:
    # block i is long (blk1) exactly when the running sum of k mod m does not increase.
    cur_sum = 0
    prev_sum = 1
    for fill_idx in range(m):
        # determine current fill length from cumulative modding result
        if cur_sum <= prev_sum:
            cur_len = blk1
        else:
            cur_len = blk0
        cur_sp = sp_edge if fill_idx == 0 else sp
        # record fill/space interval
        if invert:
            if fill_on_edge:
                ans.append((marker + cur_len, marker + cur_sp + cur_len))
            else:
                ans.append((marker, marker + cur_sp))
        else:
            if fill_on_edge:
                ans.append((marker, marker + cur_len))
            else:
                ans.append((marker + cur_sp, marker + cur_sp + cur_len))
        marker += cur_len + cur_sp
        prev_sum = cur_sum
        cur_sum = (cur_sum + k) % m
    # add middle fill or space
    if mid_blk_len >= 0:
        # fill in middle
        if invert:
            if not fill_on_edge:
                # we have one more space block before reaching middle block
                cur_sp = sp_edge if m == 0 else sp
                ans.append((marker, marker + cur_sp))
            half_len = len(ans)
        else:
            # we don't want to replicate middle fill, so get half length now
            half_len = len(ans)
            if fill_on_edge:
                ans.append((marker, marker + mid_blk_len))
            else:
                cur_sp = sp_edge if m == 0 else sp
                ans.append((marker + cur_sp, marker + cur_sp + mid_blk_len))
    else:
        # space in middle
        if invert:
            if fill_on_edge:
                # the last space we added is wrong, we need to remove
                del ans[-1]
                marker -= sp
            # we don't want to replicate middle space, so get half length now
            half_len = len(ans)
            ans.append((marker, marker + mid_sp_len))
        else:
            # don't need to do anything if we're recording blocks
            half_len = len(ans)
    # now add the second half of the list by mirroring the first half
    # about the center of the area (shift - x maps x to its mirror image).
    shift = tot_area + offset * 2
    for idx in range(half_len - 1, -1, -1):
        start, stop = ans[idx]
        ans.append((shift - stop, shift - start))
    return ans, num_diff_sp
def fill_symmetric_helper(tot_area, num_blk_tot, sp, offset=0, inc_sp=True, invert=False,
                          fill_on_edge=True, cyclic=False):
    # type: (int, int, int, int, bool, bool, bool, bool) -> Tuple[List[Tuple[int, int]], int]
    """Helper method for all fill symmetric methods.

    This method fills an area with given number of fill blocks such that the space between
    blocks is equal to the given space. Other fill_symmetric methods basically transpose
    the constraints into this problem, with the proper options.

    The solution has the following properties:

    1. it is symmetric about the center.
    2. it is as uniform as possible.
    3. it uses at most 3 consecutive values of fill lengths.
    4. it uses at most 2 consecutive values of space lengths. If inc_sp is True,
       we use sp and sp + 1. If inc_sp is False, we use sp - 1 and sp. In addition,
       at most two space blocks have length different than sp.

    Here are all the scenarios that affect the number of different fill/space lengths:

    1. All spaces will be equal to sp under the following condition:
       i. cyclic is False, and num_blk_tot is odd.
       ii. cyclic is True, fill_on_edge is True, and num_blk_tot is even.
       iii. cyclic is True, fill_on_edge is False, sp is even, and num_blk_tot is odd.

       In particular, this means if you must have the same space between fill blocks, you
       can change num_blk_tot by 1.
    2. The only case where at most 2 space blocks have length different than sp is
       when cyclic is True, fill_on_edge is False, sp is odd, and num_blk_tot is even.
    3. In all other cases, at most 1 space block has length different than sp.
    4. The only case where at most 3 fill lengths are used is when cyclic is True,
       fill_on_edge is True, and num_blk_tot is even.

    Parameters
    ----------
    tot_area : int
        the fill area length.
    num_blk_tot : int
        total number of fill blocks to use.
    sp : int
        space between blocks. We will try our best to keep this spacing constant.
    offset : int
        the starting coordinate of the area interval.
    inc_sp : bool
        If True, then we use sp + 1 if necessary. Otherwise, we use sp - 1
        if necessary.
    invert : bool
        If True, we return space intervals instead of fill intervals.
    fill_on_edge : bool
        If True, we put fill blocks on area boundary. Otherwise, we put space block on
        area boundary.
    cyclic : bool
        If True, we assume we're filling in a cyclic area (it wraps around).

    Returns
    -------
    ans : List[(int, int)]
        list of fill or space intervals.
    num_diff_sp : int
        number of space intervals with length different than sp. This is an integer
        between 0 and 2.
    """
    # compute the fill solution, then materialize it as an interval list
    fill_info = _fill_symmetric_info(tot_area, num_blk_tot, sp, inc_sp=inc_sp,
                                     fill_on_edge=fill_on_edge, cyclic=cyclic)
    return fill_symmetric_interval(*fill_info[1], offset=offset, invert=invert)
================================================
FILE: bag/layout/routing/grid.py
================================================
# -*- coding: utf-8 -*-
"""This module defines the RoutingGrid class.
"""
from typing import TYPE_CHECKING, Sequence, Union, Tuple, List, Optional, Dict, Any
import numpy as np
from ..util import BBox
from ...util.search import BinaryIterator
from ...math import lcm
if TYPE_CHECKING:
from ...layout.core import TechInfo
class RoutingGrid(object):
"""A class that represents the routing grid.
This class provides various methods to convert between Cartesian coordinates and
routing tracks. This class assumes the lower-left coordinate is (0, 0)
the track numbers are at half-track pitch. That is, even track numbers corresponds
to physical tracks, and odd track numbers corresponds to middle between two tracks.
This convention is chosen so it is easy to locate a via for 2-track wide wires, for
example.
Assumptions:
1. the pitch of all layers evenly divides the largest pitch.
Parameters
----------
tech_info : bag.layout.core.TechInfo
the TechInfo instance used to create metals and vias.
layers : list[int]
list of available routing layers. Must be in increasing order.
spaces : list[float]
list of track spacings for each layer.
widths : list[float]
list of minimum track widths for each layer.
bot_dir : str
the direction of the bottom-most layer. Either 'x' for horizontal tracks or 'y' for
vertical tracks.
max_num_tr : int or list[int]
maximum track width in number of tracks. Can be given as an integer (which applies to
        all layers), or a list to specify maximum width per layer.
"""
def __init__(self, # type: RoutingGrid
tech_info, # type: TechInfo
layers, # type: Sequence[int]
spaces, # type: Sequence[float]
widths, # type: Sequence[float]
bot_dir, # type: str
max_num_tr=1000, # type: Union[int, Sequence[int]]
width_override=None, # type: Dict[int, Dict[int, float]]
):
# type: (...) -> None
# error checking
num_layer = len(layers)
if len(spaces) != num_layer:
raise ValueError('spaces length = %d != %d' % (len(spaces), num_layer))
if len(widths) != num_layer:
raise ValueError('spaces length = %d != %d' % (len(widths), num_layer))
if isinstance(max_num_tr, int):
max_num_tr = [max_num_tr] * num_layer
elif len(max_num_tr) != num_layer:
raise ValueError('max_num_tr length = %d != %d' % (len(max_num_tr), num_layer))
self._tech_info = tech_info
self._resolution = tech_info.resolution
self._layout_unit = tech_info.layout_unit
self._flip_parity = {}
self._ignore_layers = set()
self.layers = []
self.sp_tracks = {}
self.w_tracks = {}
self.offset_tracks = {}
self.dir_tracks = {}
self.max_num_tr_tracks = {}
self.block_pitch = {}
self.w_override = {}
self.private_layers = []
cur_dir = bot_dir
for lay, sp, w, max_num in zip(layers, spaces, widths, max_num_tr):
self.add_new_layer(lay, sp, w, cur_dir, max_num_tr=max_num, is_private=False)
# alternate track direction
cur_dir = 'y' if cur_dir == 'x' else 'x'
self.update_block_pitch()
# add width overrides
if width_override is not None:
for layer_id, w_info in width_override.items():
for width_ntr, tr_w in w_info.items():
self.add_width_override(layer_id, width_ntr, tr_w)
def __contains__(self, layer):
# type: (int) -> bool
"""Returns True if this RoutingGrid contains the given layer. """
return layer in self.sp_tracks
@classmethod
def get_middle_track(cls, tr1, tr2, round_up=False):
# type: (Union[float, int], Union[float, int], bool) -> Union[float, int]
test = int(round((tr1 + tr2) * 2))
if test % 4 == 0:
return test // 4
if test % 4 == 1:
return (test + 1) / 4 if round_up else (test - 1) // 4
if test % 4 == 2:
return test / 4
return (test + 1) // 4 if round_up else (test - 1) / 4
def _get_track_offset(self, layer_id):
# type: (int) -> int
"""Returns the track offset in resolution units on the given layer."""
track_pitch = self.get_track_pitch(layer_id, unit_mode=True)
return self.offset_tracks.get(layer_id, track_pitch // 2)
def get_flip_parity(self):
# type: () -> Dict[int, Tuple[int, int]]
"""Returns a copy of the flip parity dictionary."""
return self._flip_parity.copy()
def get_bot_common_layer(self, inst_grid, inst_top_layer):
# type: (RoutingGrid, int) -> int
"""Given an instance's RoutingGrid, return the bottom common layer ID.
Parameters
----------
inst_grid : RoutingGrid
the instance's RoutingGrid object.
inst_top_layer : int
the instance top layer ID.
Returns
-------
bot_layer : int
the bottom common layer ID.
"""
my_bot_layer = self.layers[0]
for bot_layer in range(inst_top_layer, my_bot_layer - 1, -1):
has_bot = (bot_layer in self.layers)
inst_has_bot = (bot_layer in inst_grid.layers)
if has_bot and inst_has_bot:
w_par, sp_par = self.get_track_info(bot_layer, unit_mode=True)
w_inst, sp_inst = inst_grid.get_track_info(bot_layer, unit_mode=True)
if w_par != w_inst or sp_par != sp_inst or \
self.get_direction(bot_layer) != inst_grid.get_direction(bot_layer):
return bot_layer + 1
elif has_bot != inst_has_bot:
return bot_layer + 1
return my_bot_layer
def get_flip_parity_at(self, # type: RoutingGrid
bot_layer, # type: int
top_layer, # type: int
loc, # type: Tuple[Union[int, float], Union[int, float]]
orient, # type: str
unit_mode=False, # type: bool
):
# type: (...) -> Dict[int, Tuple[int, int]]
"""Compute the flip parity dictionary for an instance placed at the given location.
Parameters
----------
bot_layer : int
the bottom layer ID, inclusive.
top_layer : int
the top layer ID, inclusive.
loc : Tuple[Union[int, float], Union[int, float]]
the instance origin location.
orient : str
the instance orientation.
unit_mode : bool
True if loc is given in resolution units.
Returns
-------
flip_parity : Dict[int, Tuple[int, int]]
the flip_parity dictionary.
"""
if unit_mode:
xo, yo = loc
else:
res = self._resolution
xo, yo = int(round(loc[0] / res)), int(round(loc[1] / res))
if orient == 'R0':
xscale, yscale = 1, 1
elif orient == 'MX':
xscale, yscale = -1, 1
elif orient == 'MY':
xscale, yscale = 1, -1
elif orient == 'R180':
xscale, yscale = -1, -1
else:
raise ValueError('Unknown orientation: %s' % orient)
flip_par = {}
for lay in range(bot_layer, top_layer + 1):
if lay in self.layers:
tdir = self.dir_tracks[lay]
# find the track in top level that corresponds to the track at instance origin
if tdir == 'y':
coord, scale = xo, yscale
else:
coord, scale = yo, xscale
tr_idx = self.coord_to_track(lay, coord, unit_mode=True)
offset_htr = int(round(tr_idx * 2 + 1))
cur_scale, cur_offset = self._flip_parity.get(lay, (1, 0))
new_scale = cur_scale * scale
new_offset = (cur_scale * offset_htr + cur_offset) % 4
flip_par[lay] = (new_scale, new_offset)
return flip_par
def set_flip_parity(self, fp):
# type: (Dict[int, Tuple[int, int]]) -> None
"""set the flip track parity dictionary."""
for lay in fp:
self._flip_parity[lay] = fp[lay]
@property
def tech_info(self):
# type: () -> TechInfo
"""The TechInfo technology object."""
return self._tech_info
@property
def resolution(self):
# type: () -> float
"""Returns the grid resolution."""
return self._resolution
@property
def layout_unit(self):
# type: () -> float
"""Returns the layout unit length, in meters."""
return self._layout_unit
@property
def top_private_layer(self):
# type: () -> int
"""Returns the top private layer ID."""
return -99 if not self.private_layers else self.private_layers[-1]
def update_block_pitch(self):
# type: () -> None
"""Update block pitch."""
self.block_pitch.clear()
top_private_layer = self.top_private_layer
# update private block pitches
lay_list = [lay for lay in self.layers
if lay <= top_private_layer and lay not in self._ignore_layers]
self._update_block_pitch_helper(lay_list)
# update public block pitches
lay_list = [lay for lay in self.layers
if lay > top_private_layer and lay not in self._ignore_layers]
self._update_block_pitch_helper(lay_list)
def _update_block_pitch_helper(self, lay_list):
# type: (Sequence[int]) -> None
"""helper method for updating block pitch."""
pitch_list = []
for lay in lay_list:
cur_bp = self.get_track_pitch(lay, unit_mode=True)
cur_bp2 = cur_bp // 2
cur_dir = self.dir_tracks[lay]
if pitch_list:
# the pitch of each layer = LCM of all layers below with same direction
for play, (bp, bp2) in zip(lay_list, pitch_list):
if self.dir_tracks[play] == cur_dir:
cur_bp = lcm([cur_bp, bp])
cur_bp2 = lcm([cur_bp2, bp2])
result = (cur_bp, cur_bp2)
pitch_list.append(result)
self.block_pitch[lay] = result
def get_direction(self, layer_id):
# type: (int) -> str
"""Returns the track direction of the given layer.
Parameters
----------
layer_id : int
the layer ID.
Returns
-------
tdir : str
'x' for horizontal tracks, 'y' for vertical tracks.
"""
return self.dir_tracks[layer_id]
def get_track_pitch(self, layer_id, unit_mode=False):
# type: (int, bool) -> Union[float, int]
"""Returns the routing track pitch on the given layer.
Parameters
----------
layer_id : int
the routing layer ID.
unit_mode : bool
True to return block pitch in resolution units.
Returns
-------
track_pitch : Union[float, int]
the track pitch in layout units.
"""
pitch = self.w_tracks[layer_id] + self.sp_tracks[layer_id]
return pitch if unit_mode else pitch * self._resolution
def get_track_width(self, layer_id, width_ntr, unit_mode=False):
# type: (int, int, bool) -> Union[float, int]
"""Calculate track width in layout units from number of tracks.
Parameters
----------
layer_id : int
the track layer ID
width_ntr : int
the track width in number of tracks.
unit_mode : bool
True to return track width in resolution units.
Returns
-------
width : Union[float, int]
the track width in layout units.
"""
w = self.w_tracks[layer_id]
sp = self.sp_tracks[layer_id]
w_unit = width_ntr * (w + sp) - sp
w_unit = self.w_override[layer_id].get(width_ntr, w_unit)
if unit_mode:
return w_unit
return w_unit * self._resolution
def get_track_width_inverse(self, layer_id, width, mode=-1, unit_mode=False):
# type: (int, Union[float, int], int, bool) -> int
"""Given track width in layout/resolution units, compute equivalent number of tracks.
This is the inverse function of get_track_width().
Parameters
----------
layer_id : int
the track layer ID
width : Union[float, int]
the track width in layout or resolution units.
mode : int
If negative, the result wire will have width less than or equal to the given width.
If positive, the result wire will have width greater than or equal to the given width.
unit_mode : bool
True if width is specified in resolution units.
Returns
-------
width_ntr : int
number of tracks needed to achieve the given width.
"""
if not unit_mode:
width = int(round(width / self.resolution))
# use binary search to find the minimum track width
bin_iter = BinaryIterator(1, None)
while bin_iter.has_next():
ntr = bin_iter.get_next()
w_test = self.get_track_width(layer_id, ntr, unit_mode=True)
if w_test == width:
return ntr
elif w_test < width:
if mode < 0:
bin_iter.save()
bin_iter.up()
else:
if mode > 0:
bin_iter.save()
bin_iter.down()
ans = bin_iter.get_last_save()
if ans is None:
return 0
return ans
def get_num_tracks(self, size, layer_id):
# type: (Tuple[int, Union[int, float], Union[int, float]], int) -> Union[int, float]
"""Returns the number of tracks on the given layer for a block with the given size.
Parameters
----------
size : Tuple[int, Union[int, float], Union[int, float]]
the block size tuple.
layer_id : int
the layer ID.
Returns
-------
num_tracks : Union[int, float]
number of tracks on that given layer.
"""
tr_dir = self.get_direction(layer_id)
blk_w, blk_h = self.get_size_dimension(size, unit_mode=True)
tr_half_pitch = self.get_track_pitch(layer_id, unit_mode=True) // 2
if tr_dir == 'x':
val = blk_h // tr_half_pitch
else:
val = blk_w // tr_half_pitch
if val % 2 == 0:
return val // 2
return val / 2
def get_min_length(self, layer_id, width_ntr, unit_mode=False):
# type: (int, int, bool) -> Union[float, int]
"""Returns the minimum length for the given track.
Parameters
----------
layer_id : int
the track layer ID
width_ntr : int
the track width in number of tracks.
unit_mode : bool
True to return the minimum length in resolution units.
Returns
-------
min_length : Union[float, int]
the minimum length.
"""
layer_name = self.tech_info.get_layer_name(layer_id)
if isinstance(layer_name, tuple):
layer_name = layer_name[0]
layer_type = self.tech_info.get_layer_type(layer_name)
width = self.get_track_width(layer_id, width_ntr)
min_length = self.tech_info.get_min_length(layer_type, width)
if unit_mode:
return int(round(min_length / self._resolution))
else:
return min_length
def get_space(self, layer_id, width_ntr, same_color=False, unit_mode=False):
    # type: (int, int, bool, bool) -> Union[int, float]
    """Returns the space needed around a track, in layout/resolution units.

    Parameters
    ----------
    layer_id : int
        the track layer ID
    width_ntr : int
        the track width in number of tracks.
    same_color : bool
        True to use same-color spacing.
    unit_mode : bool
        True to return resolution units.

    Returns
    -------
    sp : Union[int, float]
        minimum space needed around the given track in layout/resolution units.
    """
    lay = self.tech_info.get_layer_name(layer_id)
    if isinstance(lay, tuple):
        # double patterning layer; any color works for the rule lookup
        lay = lay[0]
    lay_type = self.tech_info.get_layer_type(lay)
    wire_width = self.get_track_width(layer_id, width_ntr, unit_mode=True)
    sp_unit = self.tech_info.get_min_space(lay_type, wire_width, unit_mode=True,
                                           same_color=same_color)
    return sp_unit if unit_mode else sp_unit * self._resolution
def get_num_space_tracks(self, layer_id, width_ntr, half_space=False, same_color=False):
    # type: (int, int, bool, bool) -> Union[int, float]
    """Returns the number of tracks needed for space around a track of the given width.

    In advanced technologies, metal spacing is often a function of the metal width, so for
    a wide track we may need to reserve empty tracks next to this.  This method computes
    the minimum number of empty tracks needed.

    Parameters
    ----------
    layer_id : int
        the track layer ID
    width_ntr : int
        the track width in number of tracks.
    half_space : bool
        True to allow half-integer spacing.
    same_color : bool
        True to use same-color spacing.

    Returns
    -------
    num_sp_tracks : Union[int, float]
        minimum space needed around the given track in number of tracks.
    """
    # drawn wire width and DRC-required space, both in resolution units
    width = self.get_track_width(layer_id, width_ntr, unit_mode=True)
    sp_min_unit = self.get_space(layer_id, width_ntr, same_color=same_color, unit_mode=True)
    # unit track width/space on this layer
    w_unit = self.w_tracks[layer_id]
    sp_unit = self.sp_tracks[layer_id]
    # if this width is overridden, we may have extra space
    width_normal = w_unit * width_ntr + sp_unit * (width_ntr - 1)
    extra_space = (width_normal - width) // 2
    half_pitch = (w_unit + sp_unit) // 2
    # ceiling division: half-pitches needed to cover the space not already
    # provided by the default track gap plus the width override surplus
    num_half_pitch = -(-(sp_min_unit - sp_unit - extra_space) // half_pitch)
    if num_half_pitch % 2 == 0:
        return num_half_pitch // 2
    elif half_space:
        return num_half_pitch / 2.0
    else:
        # half tracks not allowed; round up to a whole track
        return (num_half_pitch + 1) // 2
def get_line_end_space(self, layer_id, width_ntr, unit_mode=False):
    # type: (int, int, bool) -> Union[float, int]
    """Returns the minimum line end spacing for the given wire.

    Parameters
    ----------
    layer_id : int
        wire layer ID.
    width_ntr : int
        wire width, in number of tracks.
    unit_mode : bool
        True to return line-end space in resolution units.

    Returns
    -------
    space : Union[float, int]
        the line-end spacing.
    """
    lay = self.tech_info.get_layer_name(layer_id)
    if isinstance(lay, tuple):
        # double patterning layer; any color works for the rule lookup
        lay = lay[0]
    lay_type = self.tech_info.get_layer_type(lay)
    wire_width = self.get_track_width(layer_id, width_ntr, unit_mode=True)
    sp_unit = self.tech_info.get_min_line_end_space(lay_type, wire_width, unit_mode=True)
    if unit_mode:
        return sp_unit
    return sp_unit * self._resolution
def get_line_end_space_tracks(self, wire_layer, space_layer, width_ntr, half_space=False):
    # type: (int, int, int, bool) -> Union[float, int]
    """Returns the minimum line end spacing in number of space tracks.

    Parameters
    ----------
    wire_layer : int
        line-end wire layer ID.
    space_layer : int
        the layer used to measure line-end space.  Must be adjacent to wire_layer,
        and its direction must be orthogonal to the wire layer.
    width_ntr : int
        wire width, in number of tracks.
    half_space : bool
        True to allow half-track spacing.

    Returns
    -------
    space_ntr : Union[float, int]
        number of tracks needed to reserve as space.
    """
    # via extension of a unit wire on space_layer connecting to the wire_layer wire
    if space_layer == wire_layer - 1:
        _, conn_ext = self.get_via_extensions(space_layer, 1, width_ntr, unit_mode=True)
    elif space_layer == wire_layer + 1:
        conn_ext, _ = self.get_via_extensions(wire_layer, width_ntr, 1, unit_mode=True)
    else:
        raise ValueError('space_layer must be adjacent to wire_layer')
    if self.get_direction(space_layer) == self.get_direction(wire_layer):
        raise ValueError('space_layer must be orthogonal to wire_layer.')
    wire_sp = self.get_line_end_space(wire_layer, width_ntr, unit_mode=True)
    # total keep-out: via extension on both sides plus the line-end space itself
    margin = 2 * conn_ext + wire_sp
    w, sp = self.get_track_info(space_layer, unit_mode=True)
    half_pitch = (w + sp) // 2
    # ceiling division: half-tracks needed beyond the default track gap
    space_ntr = max(-(-(margin - sp) // half_pitch), 0)
    if space_ntr % 2 == 0:
        return space_ntr // 2
    elif half_space:
        return space_ntr / 2
    else:
        # half tracks not allowed; round up to a whole track
        return (space_ntr + 1) // 2
def get_max_track_width(self, layer_id, num_tracks, tot_space, half_end_space=False):
    # type: (int, int, int, bool) -> int
    """Compute maximum track width and space that satisfies DRC rule.

    Given available number of tracks and numbers of tracks needed, returns
    the maximum possible track width and spacing.

    Parameters
    ----------
    layer_id : int
        the track layer ID.
    num_tracks : int
        number of tracks to draw.
    tot_space : int
        available number of tracks.
    half_end_space : bool
        True if end spaces can be half of minimum spacing.  This is true if
        these tracks will be repeated, or there are no adjacent tracks.

    Returns
    -------
    tr_w : int
        track width.
    """
    # with full end spaces there is one more gap than there are tracks
    num_space = num_tracks + (0 if half_end_space else 1)
    # binary search on the track width; save every feasible candidate
    bin_iter = BinaryIterator(1, None)
    while bin_iter.has_next():
        cur_w = bin_iter.get_next()
        cur_sp = self.get_num_space_tracks(layer_id, cur_w, half_space=False)
        if cur_w * num_tracks + cur_sp * num_space <= tot_space:
            bin_iter.save()
            bin_iter.up()
        else:
            bin_iter.down()
    return bin_iter.get_last_save()
@staticmethod
def get_evenly_spaced_tracks(num_tracks, tot_space, track_width, half_end_space=False):
# type: (int, int, int, bool) -> List[Union[float, int]]
"""Evenly space given number of tracks in the available space.
Currently this method may return half-integer tracks.
Parameters
----------
num_tracks : int
number of tracks to draw.
tot_space : int
avilable number of tracks.
track_width : int
track width in number of tracks.
half_end_space : bool
True if end spaces can be half of minimum spacing. This is true if you're
these tracks will be repeated, or there are no adjacent tracks.
Returns
-------
idx_list : List[float]
list of track indices. 0 is the left-most track.
"""
if half_end_space:
tot_space_htr = 2 * tot_space
scale = 2 * tot_space_htr
offset = tot_space_htr + num_tracks
den = 2 * num_tracks
else:
tot_space_htr = 2 * tot_space
width_htr = 2 * track_width - 2
# magic math. You can work it out
scale = 2 * (tot_space_htr + width_htr)
offset = 2 * tot_space_htr - width_htr * (num_tracks - 1) + (num_tracks + 1)
den = 2 * (num_tracks + 1)
hidx_arr = (scale * np.arange(num_tracks, dtype=int) + offset) // den
# convert from half indices to actual indices
idx_list = ((hidx_arr - 1) / 2.0).tolist() # type: List[float]
return idx_list
def get_block_size(self, layer_id, unit_mode=False, include_private=False,
                   half_blk_x=True, half_blk_y=True):
    # type: (int, bool, bool, bool, bool) -> Tuple[Union[float, int], Union[float, int]]
    """Returns unit block size given the top routing layer.

    Parameters
    ----------
    layer_id : int
        the routing layer ID.
    unit_mode : bool
        True to return block dimension in resolution units.
    include_private : bool
        True to include private layers in block size calculation.
    half_blk_x : bool
        True to allow half-block widths.
    half_blk_y : bool
        True to allow half-block heights.

    Returns
    -------
    block_width : Union[float, int]
        the block width in layout units.
    block_height : Union[float, int]
        the block height in layout units.
    """
    top_private_layer = self.top_private_layer
    top_dir = self.dir_tracks[layer_id]
    # get bottom layer that has different direction
    bot_layer = layer_id - 1
    while bot_layer in self.block_pitch and self.dir_tracks[bot_layer] == top_dir:
        bot_layer -= 1
    if bot_layer not in self.block_pitch:
        # no quantized orthogonal layer below; (2, 1) is presumably the
        # trivial (full, half) pitch pair -- TODO confirm
        bot_pitch = (2, 1)
    else:
        bot_pitch = self.block_pitch[bot_layer]
    top_pitch = self.block_pitch[layer_id]
    if layer_id > top_private_layer >= bot_layer and not include_private:
        # if top layer not private but bottom layer is, then bottom is not quantized.
        bot_pitch = (2, 1)
    # pitch tuples are indexed below as [0] = full-block, [1] = half-block
    if top_dir == 'y':
        w_pitch, h_pitch = top_pitch, bot_pitch
    else:
        w_pitch, h_pitch = bot_pitch, top_pitch
    w_pitch = w_pitch[1] if half_blk_x else w_pitch[0]
    h_pitch = h_pitch[1] if half_blk_y else h_pitch[0]
    if unit_mode:
        return w_pitch, h_pitch
    else:
        return w_pitch * self.resolution, h_pitch * self.resolution
def get_fill_size(self,  # type: RoutingGrid
                  top_layer,  # type: int
                  fill_config,  # type: Dict[int, Tuple[int, int, int, int]]
                  unit_mode=False,  # type: bool
                  include_private=False,  # type: bool
                  half_blk_x=True,  # type: bool
                  half_blk_y=True,  # type: bool
                  ):
    # type: (...) -> Tuple[Union[float, int], Union[float, int]]
    """Returns unit block size given the top routing layer and power fill configuration.

    Parameters
    ----------
    top_layer : int
        the top layer ID.
    fill_config : Dict[int, Tuple[int, int, int, int]]
        the fill configuration dictionary.
    unit_mode : bool
        True to return block dimension in resolution units.
    include_private : bool
        True to include private layers in block size calculation.
    half_blk_x : bool
        True to allow half-block widths.
    half_blk_y : bool
        True to allow half-block heights.

    Returns
    -------
    block_width : Union[float, int]
        the block width in layout units.
    block_height : Union[float, int]
        the block height in layout units.
    """
    base_w, base_h = self.get_block_size(top_layer, unit_mode=True,
                                         include_private=include_private,
                                         half_blk_x=half_blk_x, half_blk_y=half_blk_y)
    w_list = [base_w]
    h_list = [base_h]
    # fill repeats every (tr_w + tr_sp) tracks; the block must be a multiple
    # of every such fill period on layers at or below top_layer
    for lay, fill_info in fill_config.items():
        if lay > top_layer:
            continue
        tr_w, tr_sp = fill_info[0], fill_info[1]
        period = 2 * (tr_w + tr_sp) * self.get_track_pitch(lay, unit_mode=True)
        target = h_list if self.get_direction(lay) == 'x' else w_list
        target.append(period)
    tot_w = lcm(w_list)
    tot_h = lcm(h_list)
    if not unit_mode:
        return tot_w * self._resolution, tot_h * self._resolution
    return tot_w, tot_h
def size_defined(self, layer_id):
    # type: (int) -> bool
    """Returns True if size is defined on the given layer.

    A layer has a well-defined size tuple only if it is at least two
    layers above the top private layer.
    """
    threshold = self.top_private_layer + 2
    return layer_id >= threshold
def get_size_pitch(self, layer_id, unit_mode=False):
    # type: (int, bool) -> Tuple[Union[float, int], Union[float, int]]
    """Returns the horizontal/vertical pitch that defines template size.

    Parameters
    ----------
    layer_id : int
        the size layer.
    unit_mode : bool
        True to return pitches in resolution units.

    Returns
    -------
    w_pitch : Union[float, int]
        the width pitch.
    h_pitch : Union[float, int]
        the height pitch.
    """
    if not self.size_defined(layer_id):
        raise ValueError('Size tuple is undefined for layer = %d' % layer_id)
    top_dir = self.dir_tracks[layer_id]
    # walk down to the first layer routed in the orthogonal direction
    bot_layer = layer_id - 1
    while self.dir_tracks.get(bot_layer) == top_dir:
        bot_layer -= 1
    pitch_top = self.get_track_pitch(layer_id, unit_mode=unit_mode)
    pitch_bot = self.get_track_pitch(bot_layer, unit_mode=unit_mode)
    # the top layer's pitch quantizes the dimension perpendicular to it
    if top_dir == 'y':
        return pitch_top, pitch_bot
    return pitch_bot, pitch_top
def get_size_tuple(self,  # type: RoutingGrid
                   layer_id,  # type: int
                   width,  # type: Union[float, int]
                   height,  # type: Union[float, int]
                   round_up=False,  # type: bool
                   unit_mode=False,  # type: bool
                   half_blk_x=True,  # type: bool
                   half_blk_y=True,  # type: bool
                   ):
    # type: (...) -> Tuple[int, Union[float, int], Union[float, int]]
    """Compute the size tuple corresponding to the given width and height from block pitch.

    Parameters
    ----------
    layer_id : int
        the layer ID.
    width : Union[float, int]
        width of the block, in layout units.
    height : Union[float, int]
        height of the block, in layout units.
    round_up : bool
        True to round up instead of raising an error if the given width and height
        are not on pitch.
    unit_mode : bool
        True if the given layout dimensions are in resolution units.
    half_blk_x : bool
        True to allow half-block widths.
    half_blk_y : bool
        True to allow half-block heights.

    Returns
    -------
    size : Tuple[int, int, int]
        the size tuple.  the first element is the top layer ID, second element is the width in
        number of vertical tracks, and third element is the height in number of
        horizontal tracks.
    """
    if not unit_mode:
        res = self._resolution
        width = int(round(width / res))
        height = int(round(height / res))
    w_pitch, h_pitch = self.get_size_pitch(layer_id, unit_mode=True)
    wblk, hblk = self.get_block_size(layer_id, unit_mode=True,
                                     half_blk_x=half_blk_x, half_blk_y=half_blk_y)
    if width % wblk != 0:
        if round_up:
            # ceiling to the next multiple of the block width
            width = -(-width // wblk) * wblk
        else:
            raise ValueError('width = %d not on block pitch (%d)' % (width, wblk))
    if height % hblk != 0:
        if round_up:
            # ceiling to the next multiple of the block height
            height = -(-height // hblk) * hblk
        else:
            raise ValueError('height = %d not on block pitch (%d)' % (height, hblk))
    # express dimensions in number of pitches; fall back to a float
    # (half-integer track count) when not a whole number of pitches
    w_size = width // w_pitch if width % w_pitch == 0 else width / w_pitch
    h_size = height // h_pitch if height % h_pitch == 0 else height / h_pitch
    return layer_id, w_size, h_size
def get_size_dimension(self,  # type: RoutingGrid
                       size,  # type: Tuple[int, Union[float, int], Union[float, int]]
                       unit_mode=False,  # type: bool
                       ):
    # type: (...) -> Tuple[Union[float, int], Union[float, int]]
    """Compute width and height from given size.

    Parameters
    ----------
    size : Tuple[int, Union[float, int], Union[float, int]]
        size of a block.
    unit_mode : bool
        True to return width/height in resolution units.

    Returns
    -------
    width : Union[float, int]
        the width in layout units.
    height : Union[float, int]
        the height in layout units.
    """
    lay, num_w, num_h = size
    w_pitch, h_pitch = self.get_size_pitch(lay, unit_mode=True)
    # track counts may be half-integers; convert through half-track units
    w_unit = (int(round(2 * num_w)) * w_pitch) // 2
    h_unit = (int(round(2 * num_h)) * h_pitch) // 2
    if not unit_mode:
        return w_unit * self.resolution, h_unit * self.resolution
    return w_unit, h_unit
def convert_size(self, size, new_top_layer):
    # type: (Tuple[int, Union[float, int], Union[float, int]], int) -> Tuple[int, int, int]
    """Convert the given size to a new top layer.

    Parameters
    ----------
    size : Tuple[int, Union[float, int], Union[float, int]]
        size of a block.
    new_top_layer : int
        the new top level layer ID.

    Returns
    -------
    new_size : Tuple[int, int, int]
        the new size tuple.
    """
    # convert to physical dimensions, then re-quantize on the new top layer
    w_unit, h_unit = self.get_size_dimension(size, unit_mode=True)
    return self.get_size_tuple(new_top_layer, w_unit, h_unit, unit_mode=True)
def get_track_info(self, layer_id, unit_mode=False):
    # type: (int, bool) -> Tuple[Union[float, int], Union[float, int]]
    """Returns the routing track width and spacing on the given layer.

    Parameters
    ----------
    layer_id : int
        the routing layer ID.
    unit_mode : bool
        True to return track width/spacing in resolution units.

    Returns
    -------
    track_width : Union[float, int]
        the track width in layout/resolution units.
    track_spacing : Union[float, int]
        the track spacing in layout/resolution units
    """
    width = self.w_tracks[layer_id]
    space = self.sp_tracks[layer_id]
    if not unit_mode:
        res = self._resolution
        return width * res, space * res
    return width, space
def get_track_parity(self, layer_id, tr_idx):
    # type: (int, Union[float, int]) -> int
    """Returns the parity of the given track.

    Parameters
    ----------
    layer_id : int
        the layer ID.
    tr_idx : Union[float, int]
        the track index.

    Returns
    -------
    parity : int
        the track parity, either 0 or 1.
    """
    scale, offset = self._flip_parity[layer_id]
    # work in half-track units; the +1 shift makes negative tracks
    # colored correctly as well
    htr = int(round(2 * tr_idx + 1))
    return 0 if (scale * htr + offset) % 4 < 2 else 1
def get_layer_name(self, layer_id, tr_idx):
    # type: (int, Union[float, int]) -> str
    """Returns the layer name of the given track.

    Parameters
    ----------
    layer_id : int
        the layer ID.
    tr_idx : Union[float, int]
        the track index.

    Returns
    -------
    layer_name : str
        the layer name.
    """
    name = self.tech_info.get_layer_name(layer_id)
    if not isinstance(name, tuple):
        return name
    # double patterning layer: pick the color matching the track parity
    return name[self.get_track_parity(layer_id, tr_idx)]
def get_wire_bounds(self, layer_id, tr_idx, width=1, unit_mode=False):
    # type: (int, Union[int, float], int, bool) -> Tuple[Union[float, int], Union[float, int]]
    """Calculate the wire bounds coordinate.

    Parameters
    ----------
    layer_id : int
        the layer ID.
    tr_idx : Union[int, float]
        the center track index.
    width : int
        width of wire in number of tracks.
    unit_mode : bool
        True to return coordinates in resolution units.

    Returns
    -------
    lower : Union[float, int]
        the lower bound coordinate perpendicular to wire direction.
    upper : Union[float, int]
        the upper bound coordinate perpendicular to wire direction.
    """
    half_w = self.get_track_width(layer_id, width, unit_mode=True) // 2
    center = self.track_to_coord(layer_id, tr_idx, unit_mode=True)
    lower = center - half_w
    upper = center + half_w
    if not unit_mode:
        res = self._resolution
        return lower * res, upper * res
    return lower, upper
def get_bbox(self, layer_id, tr_idx, lower, upper, width=1, unit_mode=False):
    # type: (int, Union[int, float], Union[int, float], Union[int, float], int, bool) -> BBox
    """Compute bounding box for the given wire.

    Parameters
    ----------
    layer_id : int
        the layer ID.
    tr_idx : Union[int, float]
        the center track index.
    lower : Union[int, float]
        the lower coordinate along track direction.
    upper : Union[int, float]
        the upper coordinate along track direction.
    width : int
        width of wire in number of tracks.
    unit_mode : bool
        True if lower and upper are specified in resolution units.

    Returns
    -------
    bbox : bag.layout.util.BBox
        the bounding box.
    """
    res = self._resolution
    if not unit_mode:
        lower = int(round(lower / res))
        upper = int(round(upper / res))
    # wire edges perpendicular to the track direction
    cl, cu = self.get_wire_bounds(layer_id, tr_idx, width=width, unit_mode=True)
    if self.get_direction(layer_id) == 'x':
        return BBox(lower, cl, upper, cu, res, unit_mode=True)
    return BBox(cl, lower, cu, upper, res, unit_mode=True)
def get_min_track_width(self, layer_id, idc=0, iac_rms=0, iac_peak=0, l=-1,
                        bot_w=-1, top_w=-1, unit_mode=False, **kwargs):
    # type: (int, float, float, float, float, float, float, bool, **Any) -> int
    """Returns the minimum track width required for the given EM specs.

    Parameters
    ----------
    layer_id : int
        the layer ID.
    idc : float
        the DC current spec.
    iac_rms : float
        the AC RMS current spec.
    iac_peak : float
        the AC peak current spec.
    l : float
        the length of the wire in layout units.  Use negative length
        to disable length enhancement factor.
    bot_w : float
        the bottom layer track width in layout units.  If given, will make sure
        that the via between the two tracks meet EM specs too.
    top_w : float
        the top layer track width in layout units.  If given, will make sure
        that the via between the two tracks meet EM specs too.
    unit_mode : bool
        True if l/bot_w/top_w are given in resolution units.
    **kwargs : Any
        override default EM spec parameters.

    Returns
    -------
    track_width : int
        the minimum track width in number of tracks.
    """
    res = self._resolution
    if not unit_mode:
        # convert to resolution units; negative sentinels are left untouched
        if l > 0:
            l = int(round(l / res))
        if bot_w > 0:
            bot_w = int(round(bot_w / res))
        if top_w > 0:
            top_w = int(round(top_w / res))
    # if double patterning layer, just use any name.
    layer_name = self.tech_info.get_layer_name(layer_id)
    if isinstance(layer_name, tuple):
        layer_name = layer_name[0]
    # resolve adjacent layer names only when via EM checks are requested
    if bot_w > 0:
        bot_layer_name = self.tech_info.get_layer_name(layer_id - 1)
        if isinstance(bot_layer_name, tuple):
            bot_layer_name = bot_layer_name[0]
    else:
        bot_layer_name = None
    if top_w > 0:
        top_layer_name = self.tech_info.get_layer_name(layer_id + 1)
        if isinstance(top_layer_name, tuple):
            top_layer_name = top_layer_name[0]
    else:
        top_layer_name = None
    # use binary search to find the minimum track width
    bin_iter = BinaryIterator(1, None)
    tr_dir = self.dir_tracks[layer_id]
    alt_dir = 'x' if tr_dir == 'y' else 'y'
    # default adjacent-layer directions to orthogonal if unknown
    bot_dir = self.dir_tracks.get(layer_id - 1, alt_dir)
    top_dir = self.dir_tracks.get(layer_id + 1, alt_dir)
    while bin_iter.has_next():
        ntr = bin_iter.get_next()
        width = self.get_track_width(layer_id, ntr, unit_mode=True)
        idc_max, irms_max, ipeak_max = self.tech_info.get_metal_em_specs(layer_name,
                                                                         width * res,
                                                                         l=l * res, **kwargs)
        if idc > idc_max or iac_rms > irms_max or iac_peak > ipeak_max:
            # check metal satisfies EM spec
            bin_iter.up()
            continue
        # check the via to the bottom layer (skip if layers are parallel)
        if bot_w > 0 and bot_dir != tr_dir:
            if tr_dir == 'x':
                bbox = BBox(0, 0, bot_w, width, res, unit_mode=True)
            else:
                bbox = BBox(0, 0, width, bot_w, res, unit_mode=True)
            vinfo = self.tech_info.get_via_info(bbox, bot_layer_name, layer_name,
                                                bot_dir, **kwargs)
            if (vinfo is None or idc > vinfo['idc'] or iac_rms > vinfo['iac_rms'] or
                    iac_peak > vinfo['iac_peak']):
                bin_iter.up()
                continue
        # check the via to the top layer (skip if layers are parallel)
        if top_w > 0 and top_dir != tr_dir:
            if tr_dir == 'x':
                bbox = BBox(0, 0, top_w, width, res, unit_mode=True)
            else:
                bbox = BBox(0, 0, width, top_w, res, unit_mode=True)
            vinfo = self.tech_info.get_via_info(bbox, layer_name, top_layer_name,
                                                tr_dir, **kwargs)
            if (vinfo is None or idc > vinfo['idc'] or iac_rms > vinfo['iac_rms'] or
                    iac_peak > vinfo['iac_peak']):
                bin_iter.up()
                continue
        # we got here, so all EM specs passed
        bin_iter.save()
        bin_iter.down()
    return bin_iter.get_last_save()
def get_min_track_width_for_via(self,
                                bot_layer: int,
                                next_ntr: int = 1,
                                **kwargs: Any,
                                ) -> int:
    """Returns the minimum track width required to fit a via to the next layer.

    Parameters
    ----------
    bot_layer : int
        the layer ID.
    next_ntr : int
        the width of the track on the next layer, in number of tracks.
    **kwargs : Any
        Override the default EM specs and pass additional arguments that are
        accepted by get_min_track_width.

    Returns
    -------
    track_width : int
        the minimum track width in number of tracks
    """
    # width of the connecting wire one layer up, in resolution units
    top_w_unit = self.get_track_width(layer_id=bot_layer + 1, width_ntr=next_ntr,
                                      unit_mode=True)
    return self.get_min_track_width(layer_id=bot_layer, top_w=top_w_unit,
                                    unit_mode=True, **kwargs)
def get_track_index_range(self,  # type: RoutingGrid
                          layer_id,  # type: int
                          lower,  # type: Union[float, int]
                          upper,  # type: Union[float, int]
                          num_space=0,  # type: Union[float, int]
                          edge_margin=0,  # type: Union[float, int]
                          half_track=False,  # type: bool
                          unit_mode=False  # type: bool
                          ):
    # type: (...) -> Tuple[Optional[Union[float, int]], Optional[Union[float, int]]]
    """ Returns the first and last track index strictly in the given range.

    Parameters
    ----------
    layer_id : int
        the layer ID.
    lower : Union[float, int]
        the lower coordinate.
    upper : Union[float, int]
        the upper coordinate.
    num_space : Union[float, int]
        number of space tracks to the tracks right outside of the given range.
    edge_margin : Union[float, int]
        minimum space from outer tracks to given range.
    half_track : bool
        True to allow half-integer tracks.
    unit_mode : bool
        True if lower/upper/edge_margin are given in resolution units.

    Returns
    -------
    start_track : Optional[Union[float, int]]
        the first track index.  None if no solution.
    end_track : Optional[Union[float, int]]
        the last track index.  None if no solution.
    """
    if not unit_mode:
        lower = int(round(lower / self._resolution))
        upper = int(round(upper / self._resolution))
        edge_margin = int(round(edge_margin / self._resolution))
    tr_w = self.get_track_width(layer_id, 1, unit_mode=True)
    tr_ph = self.get_track_pitch(layer_id, unit_mode=True) // 2
    tr_wh = tr_w // 2
    # get start track half index
    lower_bnd = self.coord_to_nearest_track(layer_id, lower, half_track=True,
                                            mode=-1, unit_mode=True)
    start_track = self.coord_to_nearest_track(layer_id, lower + edge_margin, half_track=True,
                                              mode=2, unit_mode=True)
    # honor both the edge margin and the num_space offset from the boundary track
    hstart_track = int(round(2 * max(start_track, lower_bnd + num_space) + 1))
    # check strictly in range
    if hstart_track * tr_ph - tr_wh < lower + edge_margin:
        hstart_track += 1
    # check if half track is allowed
    if not half_track and hstart_track % 2 == 0:
        hstart_track += 1
    # get end track half index
    upper_bnd = self.coord_to_nearest_track(layer_id, upper, half_track=True,
                                            mode=1, unit_mode=True)
    end_track = self.coord_to_nearest_track(layer_id, upper - edge_margin, half_track=True,
                                            mode=-2, unit_mode=True)
    hend_track = int(round(2 * min(end_track, upper_bnd - num_space) + 1))
    # check strictly in range
    if hend_track * tr_ph + tr_wh > upper - edge_margin:
        hend_track -= 1
    # check if half track is allowed
    if not half_track and hend_track % 2 == 0:
        hend_track -= 1
    if hend_track < hstart_track:
        # no solution
        return None, None
    # convert to track; odd half-index maps to an integer track,
    # even half-index maps to a half-integer (float) track
    if hstart_track % 2 == 1:
        start_track = (hstart_track - 1) // 2
    else:
        start_track = (hstart_track - 1) / 2
    if hend_track % 2 == 1:
        end_track = (hend_track - 1) // 2
    else:
        end_track = (hend_track - 1) / 2
    return start_track, end_track
def get_overlap_tracks(self,  # type: RoutingGrid
                       layer_id,  # type: int
                       lower,  # type: Union[float, int]
                       upper,  # type: Union[float, int]
                       half_track=False,  # type: bool
                       unit_mode=False  # type: bool
                       ):
    # type: (...) -> Tuple[Optional[Union[float, int]], Optional[Union[float, int]]]
    """ Returns the first and last track index that overlaps with the given range.

    Parameters
    ----------
    layer_id : int
        the layer ID.
    lower : Union[float, int]
        the lower coordinate.
    upper : Union[float, int]
        the upper coordinate.
    half_track : bool
        True to allow half-integer tracks.
    unit_mode : bool
        True if lower/upper are given in resolution units.

    Returns
    -------
    start_track : Optional[Union[float, int]]
        the first track index.  None if no solution.
    end_track : Optional[Union[float, int]]
        the last track index.  None if no solution.
    """
    if not unit_mode:
        res = self._resolution
        lower = int(round(lower / res))
        upper = int(round(upper / res))
    wire_w = self.w_tracks[layer_id]
    # a track overlaps [lower, upper] iff its center lies within the range
    # widened by one wire width on each side
    first_tr = self.find_next_track(layer_id, lower - wire_w, half_track=half_track,
                                    mode=1, unit_mode=True)
    last_tr = self.find_next_track(layer_id, upper + wire_w, half_track=half_track,
                                   mode=-1, unit_mode=True)
    return first_tr, last_tr
def get_via_extensions_dim(self,  # type: RoutingGrid
                           bot_layer_id,  # type: int
                           bot_dim,  # type: Union[float, int]
                           top_dim,  # type: Union[float, int]
                           unit_mode=False,  # type: bool
                           ):
    # type: (...) -> Tuple[Union[float, int], Union[float, int]]
    """Returns the via extension.

    Parameters
    ----------
    bot_layer_id : int
        the via bottom layer ID.
    bot_dim : Union[float, int]
        the bottom track width in layout/resolution units.
    top_dim : Union[float, int]
        the top track width in layout/resolution units.
    unit_mode : bool
        True if given widths are in resolution units.

    Returns
    -------
    bot_ext : Union[float, int]
        via extension on the bottom layer.
    top_ext : Union[float, int]
        via extension on the top layer.
    """
    res = self._resolution
    if not unit_mode:
        bot_dim = int(round(bot_dim / res))
        top_dim = int(round(top_dim / res))
    bot_lay_name = self.get_layer_name(bot_layer_id, 0)
    top_lay_name = self.get_layer_name(bot_layer_id + 1, 0)
    bot_dir = self.get_direction(bot_layer_id)
    top_dir = self.get_direction(bot_layer_id + 1)
    if top_dir == bot_dir:
        raise ValueError('This method only works if top and bottom layers are orthogonal.')
    # the via enclosure box: each wire's width sets the dimension
    # perpendicular to that wire's direction
    if bot_dir == 'x':
        vbox = BBox(0, 0, top_dim, bot_dim, res, unit_mode=True)
    else:
        vbox = BBox(0, 0, bot_dim, top_dim, res, unit_mode=True)
    vinfo = self._tech_info.get_via_info(vbox, bot_lay_name, top_lay_name, bot_dir)
    if vinfo is None:
        raise ValueError('Cannot create via')
    bot_box = vinfo['bot_box']
    top_box = vinfo['top_box']
    # extension is how far the metal enclosure sticks out past the
    # crossing wire, split evenly between the two sides
    if bot_dir == 'x':
        bot_ext = (bot_box.width_unit - top_dim) // 2
        top_ext = (top_box.height_unit - bot_dim) // 2
    else:
        bot_ext = (bot_box.height_unit - top_dim) // 2
        top_ext = (top_box.width_unit - bot_dim) // 2
    if not unit_mode:
        return bot_ext * res, top_ext * res
    return bot_ext, top_ext
def get_via_extensions(self, bot_layer_id, bot_width, top_width, unit_mode=False):
    # type: (int, int, int, bool) -> Tuple[Union[float, int], Union[float, int]]
    """Returns the via extension.

    Parameters
    ----------
    bot_layer_id : int
        the via bottom layer ID.
    bot_width : int
        the bottom track width in number of tracks.
    top_width : int
        the top track width in number of tracks.
    unit_mode : bool
        True to return extensions in resolution units.

    Returns
    -------
    bot_ext : Union[float, int]
        via extension on the bottom layer.
    top_ext : Union[float, int]
        via extension on the top layer.
    """
    # convert track counts to physical wire widths, then delegate
    bot_w = self.get_track_width(bot_layer_id, bot_width, unit_mode=unit_mode)
    top_w = self.get_track_width(bot_layer_id + 1, top_width, unit_mode=unit_mode)
    return self.get_via_extensions_dim(bot_layer_id, bot_w, top_w, unit_mode=unit_mode)
def coord_to_track(self, layer_id, coord, unit_mode=False):
    # type: (int, Union[float, int], bool) -> Union[float, int]
    """Convert given coordinate to track number.

    Parameters
    ----------
    layer_id : int
        the layer number.
    coord : Union[float, int]
        the coordinate perpendicular to the track direction.
    unit_mode : bool
        True if coordinate is given in resolution units.

    Returns
    -------
    track : float or int
        the track number
    """
    if not unit_mode:
        coord = int(round(coord / self._resolution))
    pitch = self.get_track_pitch(layer_id, unit_mode=True)
    ntr, rem = divmod(coord - self._get_track_offset(layer_id), pitch)
    # on a whole track
    if rem == 0:
        return ntr
    # exactly between two tracks: a half-integer track
    if rem == pitch // 2:
        return ntr + 0.5
    raise ValueError('coordinate %.4g is not on track.' % coord)
def find_next_track(self, layer_id, coord, tr_width=1, half_track=False,
                    mode=1, unit_mode=False):
    # type: (int, Union[float, int], int, bool, int, bool) -> Union[float, int]
    """Find the track such that its edges are on the same side w.r.t. the given coordinate.

    Parameters
    ----------
    layer_id : int
        the layer number.
    coord : float
        the coordinate perpendicular to the track direction.
    tr_width : int
        the track width, in number of tracks.
    half_track : bool
        True to allow half integer track center numbers.
    mode : int
        1 to find track with both edge coordinates larger than or equal to the given one,
        -1 to find track with both edge coordinates less than or equal to the given one.
    unit_mode : bool
        True if coordinate is given in resolution units.

    Returns
    -------
    tr_idx : int or float
        the center track index.
    """
    if not unit_mode:
        coord = int(round(coord / self._resolution))
    half_w = self.get_track_width(layer_id, tr_width, unit_mode=True) // 2
    # shift the coordinate by half the wire width in the search direction,
    # then snap to the nearest track on that side
    target = coord + half_w if mode > 0 else coord - half_w
    return self.coord_to_nearest_track(layer_id, target, half_track=half_track,
                                       mode=mode, unit_mode=True)
def coord_to_nearest_track(self, layer_id, coord, half_track=False, mode=0,
                           unit_mode=False):
    # type: (int, Union[float, int], bool, int, bool) -> Union[float, int]
    """Returns the track number closest to the given coordinate.

    Parameters
    ----------
    layer_id : int
        the layer number.
    coord : Union[float, int]
        the coordinate perpendicular to the track direction.
    half_track : bool
        if True, allow half integer track numbers.
    mode : int
        the "rounding" mode.

        If mode == 0, return the nearest track (default).

        If mode == -1, return the nearest track with coordinate less
        than or equal to coord.

        If mode == -2, return the nearest track with coordinate less
        than coord.

        If mode == 1, return the nearest track with coordinate greater
        than or equal to coord.

        If mode == 2, return the nearest track with coordinate greater
        than coord.
    unit_mode : bool
        True if the given coordinate is in resolution units.

    Returns
    -------
    track : Union[float, int]
        the track number
    """
    if not unit_mode:
        coord = int(round(coord / self._resolution))
    pitch = self.get_track_pitch(layer_id, unit_mode=True)
    # with half tracks the effective grid is twice as fine
    if half_track:
        pitch //= 2
    q, r = divmod(coord - self._get_track_offset(layer_id), pitch)
    if r == 0:
        # exactly on track
        if mode == -2:
            # move to lower track
            q -= 1
        elif mode == 2:
            # move to upper track
            q += 1
    else:
        # not on track
        if mode > 0 or (mode == 0 and r >= pitch / 2):
            # round up
            q += 1
    # in half-track mode q counts half-pitches: convert back to a
    # whole (int) or half-integer (float) track index
    if not half_track:
        return q
    elif q % 2 == 0:
        return q // 2
    else:
        return q / 2
    def coord_to_nearest_fill_track(self, layer_id, coord, fill_config, mode=0,
                                    unit_mode=False):
        # type: (int, Union[float, int], Dict[int, Any], int, bool) -> Union[float, int]
        """Returns the routing track index closest to the given coordinate that
        lies on the fill grid of the given layer.

        The rounding behavior of ``mode`` mirrors ``coord_to_nearest_track``:
        0 rounds to nearest, +/-1 round up/down allowing an exact match, and
        +/-2 round strictly up/down.

        Returns the result as a regular track index (via ``coord_to_track``),
        not as a fill-grid index.
        """
        if not unit_mode:
            coord = int(round(coord / self._resolution))
        # fill_config[layer_id] = (track width, track space, ...); width/space
        # are in number of tracks, so (tr_w + tr_sp) is the fill period in tracks
        tr_w, tr_sp, _, _ = fill_config[layer_id]
        num_htr = int(round(2 * (tr_w + tr_sp)))
        # fill pitch in resolution units; fill tracks sit at offset fill_pitch2
        fill_pitch = num_htr * self.get_track_pitch(layer_id, unit_mode=True) // 2
        fill_pitch2 = fill_pitch // 2
        fill_q, fill_r = divmod(coord - fill_pitch2, fill_pitch)
        if fill_r == 0:
            # exactly on track
            if mode == -2:
                # move to lower track
                fill_q -= 1
            elif mode == 2:
                # move to upper track
                fill_q += 1
        else:
            # not on track
            if mode > 0 or (mode == 0 and fill_r >= fill_pitch2):
                # round up
                fill_q += 1
        # convert the chosen fill-grid coordinate back to a track index
        return self.coord_to_track(layer_id, fill_q * fill_pitch + fill_pitch2, unit_mode=True)
    def transform_track(self,  # type: RoutingGrid
                        layer_id,  # type: int
                        track_idx,  # type: Union[float, int]
                        dx=0,  # type: Union[float, int]
                        dy=0,  # type: Union[float, int]
                        orient='R0',  # type: str
                        unit_mode=False,  # type: bool
                        ):
        # type: (...) -> Union[float, int]
        """Transform the given track index.

        Computes the track index after shifting by (dx, dy) and applying the
        given orientation.  Internally tracks are manipulated as half-track
        indices (hidx = 2 * idx + 1) so that half-integer track indices map to
        integers and mirroring is a simple sign flip.

        Parameters
        ----------
        layer_id : int
            the layer ID.
        track_idx : Union[float, int]
            the track index.
        dx : Union[float, int]
            X shift.
        dy : Union[float, int]
            Y shift.
        orient : str
            orientation.
        unit_mode : bool
            True if dx/dy are given in resolution units.

        Returns
        -------
        new_track_idx : Union[float, int]
            the transformed track index.
        """
        if not unit_mode:
            dx = int(round(dx / self._resolution))
            dy = int(round(dy / self._resolution))
        is_x = self.get_direction(layer_id) == 'x'
        # only the shift perpendicular to the track direction matters:
        # dy for horizontal tracks, dx for vertical tracks
        if is_x:
            hidx_shift = int(2 * self.coord_to_track(layer_id, dy, unit_mode=True)) + 1
        else:
            hidx_shift = int(2 * self.coord_to_track(layer_id, dx, unit_mode=True)) + 1
        # mirror (negate half-index) only when the orientation flips the
        # axis perpendicular to this layer's track direction
        if orient == 'R0':
            hidx_scale = 1
        elif orient == 'R180':
            hidx_scale = -1
        elif orient == 'MX':
            hidx_scale = -1 if is_x else 1
        elif orient == 'MY':
            hidx_scale = 1 if is_x else -1
        else:
            raise ValueError('Unsupported orientation: %s' % orient)
        old_hidx = int(track_idx * 2 + 1)
        new_hidx = old_hidx * hidx_scale + hidx_shift
        # odd half-index -> integer track; even half-index -> half-integer track
        if new_hidx % 2 == 1:
            return (new_hidx - 1) // 2
        else:
            return (new_hidx - 1) / 2
def track_to_coord(self, layer_id, track_idx, unit_mode=False):
# type: (int, Union[float, int], bool) -> Union[float, int]
"""Convert given track number to coordinate.
Parameters
----------
layer_id : int
the layer number.
track_idx : Union[float, int]
the track number.
unit_mode : bool
True to return coordinate in resolution units.
Returns
-------
coord : Union[float, int]
the coordinate perpendicular to track direction.
"""
pitch = self.get_track_pitch(layer_id, unit_mode=True)
coord_unit = int(pitch * track_idx + self._get_track_offset(layer_id))
if unit_mode:
return coord_unit
return coord_unit * self._resolution
def interval_to_track(self, # type: RoutingGrid
layer_id, # type: int
intv, # type: Tuple[Union[float, int], Union[float, int]]
unit_mode=False, # type: bool
):
# type: (...) -> Tuple[Union[float, int], int]
"""Convert given coordinates to track number and width.
Parameters
----------
layer_id : int
the layer number.
intv : Tuple[Union[float, int], Union[float, int]]
lower and upper coordinates perpendicular to the track direction.
unit_mode : bool
True if dimensions are given in resolution units.
Returns
-------
track : Union[float, int]
the track number
width : int
the track width, in number of tracks.
"""
res = self._resolution
start, stop = intv
if not unit_mode:
start = int(round(start / res))
stop = int(round(stop / res))
track = self.coord_to_track(layer_id, (start + stop) // 2, unit_mode=True)
width = stop - start
# binary search to take width override into account
bin_iter = BinaryIterator(1, None)
while bin_iter.has_next():
cur_ntr = bin_iter.get_next()
cur_w = self.get_track_width(layer_id, cur_ntr, unit_mode=True)
if cur_w == width:
return track, cur_ntr
elif cur_w > width:
bin_iter.down()
else:
bin_iter.up()
# never found solution; width is not quantized.
raise ValueError('Interval {} on layer {} width not quantized'.format(intv, layer_id))
def copy(self):
# type: () -> RoutingGrid
"""Returns a deep copy of this RoutingGrid."""
cls = self.__class__
result = cls.__new__(cls)
attrs = result.__dict__
attrs['_tech_info'] = self._tech_info
attrs['_resolution'] = self._resolution
attrs['_layout_unit'] = self._layout_unit
attrs['_flip_parity'] = self._flip_parity.copy()
attrs['_ignore_layers'] = self._ignore_layers.copy()
attrs['layers'] = list(self.layers)
attrs['sp_tracks'] = self.sp_tracks.copy()
attrs['dir_tracks'] = self.dir_tracks.copy()
attrs['offset_tracks'] = {}
attrs['w_tracks'] = self.w_tracks.copy()
attrs['max_num_tr_tracks'] = self.max_num_tr_tracks.copy()
attrs['block_pitch'] = self.block_pitch.copy()
attrs['w_override'] = self.w_override.copy()
attrs['private_layers'] = list(self.private_layers)
for lay in self.layers:
attrs['w_override'][lay] = self.w_override[lay].copy()
return result
def ignore_layers_under(self, layer_id):
# type: (int) -> None
"""Ignore all layers under the given layer (inclusive) when calculating block pitches.
Parameters
----------
layer_id : int
ignore this layer and below.
"""
for lay in self.layers:
if lay > layer_id:
break
self._ignore_layers.add(lay)
def add_new_layer(self, layer_id, tr_space, tr_width, direction,
max_num_tr=100, override=False, unit_mode=False, is_private=True):
# type: (int, float, float, str, int, bool, bool, bool) -> None
"""Add a new private layer to this RoutingGrid.
This method is used to add customized routing grid per template on lower level layers.
The new layers doesn't necessarily need to follow alternating track direction, however,
if you do this you cannot connect to adjacent level metals.
Note: do not use this method to add/modify top level layers, as it does not calculate
block pitch.
Parameters
----------
layer_id : int
the new layer ID.
tr_space : float
the track spacing, in layout units.
tr_width : float
the track width, in layout units.
direction : str
track direction. 'x' for horizontal, 'y' for vertical.
max_num_tr : int
maximum track width in number of tracks.
override : bool
True to override existing layers if they already exist.
unit_mode : bool
True if given lengths are in resolution units
is_private : bool
True if this is a private layer.
"""
self._ignore_layers.discard(layer_id)
if not unit_mode:
sp_unit = 2 * int(round(tr_space / (2 * self.resolution)))
w_unit = 2 * int(round(tr_width / (2 * self.resolution)))
else:
sp_unit = -(-tr_space // 2) * 2
w_unit = -(-tr_width // 2) * 2
if layer_id in self.sp_tracks:
# double check to see if we actually need to modify layer
w_cur = self.w_tracks[layer_id]
sp_cur = self.sp_tracks[layer_id]
dir_cur = self.dir_tracks[layer_id]
if w_cur == w_unit and sp_cur == sp_unit and dir_cur == direction:
# everything is the same, just return
return
if not override:
raise ValueError('Layer %d already on routing grid.' % layer_id)
else:
self.layers.append(layer_id)
self.layers.sort()
if is_private and layer_id not in self.private_layers:
self.private_layers.append(layer_id)
self.private_layers.sort()
self.sp_tracks[layer_id] = sp_unit
self.w_tracks[layer_id] = w_unit
self.dir_tracks[layer_id] = direction
self.w_override[layer_id] = {}
self.max_num_tr_tracks[layer_id] = max_num_tr
if layer_id not in self._flip_parity:
self._flip_parity[layer_id] = (1, 0)
def set_track_offset(self, layer_id, offset, unit_mode=False):
# type: (int, Union[float, int], bool) -> None
"""Set track offset for this RoutingGrid.
Parameters
----------
layer_id : int
the routing layer ID.
offset : Union[float, int]
the track offset.
unit_mode : bool
True if the track offset is specified in resolution units.
"""
if not unit_mode:
offset = int(round(offset / self.resolution))
self.offset_tracks[layer_id] = offset
def add_width_override(self, layer_id, width_ntr, tr_width, unit_mode=False):
# type: (int, int, Union[int, float], bool) -> None
"""Add width override.
NOTE: call this method only directly after you construct the RoutingGrid. Do not
use this to modify an existing grid.
Parameters
----------
layer_id : int
the new layer ID.
width_ntr : int
the width in number of tracks.
tr_width : Union[int, float]
the actual width in layout units.
unit_mode : bool
True if tr_width is in resolution units.
"""
if width_ntr == 1:
raise ValueError('Cannot override width_ntr=1.')
if not unit_mode:
tr_width = int(round(tr_width / self.resolution))
if layer_id not in self.w_override:
self.w_override[layer_id] = {width_ntr: tr_width}
else:
self.w_override[layer_id][width_ntr] = tr_width
================================================
FILE: bag/layout/tech.py
================================================
# -*- coding: utf-8 -*-
from typing import List, Tuple, Union, Optional, Callable, TYPE_CHECKING
import abc
from .core import TechInfo
if TYPE_CHECKING:
from ..layout.util import BBox
from ..layout.template import TemplateBase
class TechInfoConfig(TechInfo, metaclass=abc.ABCMeta):
    """An implementation of TechInfo that implements most methods with a technology file.

    Most query methods here are simple lookups into the ``config`` dictionary
    loaded from the process technology configuration file; subclasses implement
    the remaining process-specific abstract methods.

    Parameters
    ----------
    config : dict
        the technology configuration dictionary.
    tech_params : dict
        the technology parameters dictionary; EM specs are read from
        ``tech_params['layout']['em']``.
    mos_entry_name : str
        name of the transistor entry in the configuration dictionary.
    """
    def __init__(self, config, tech_params, mos_entry_name='mos'):
        TechInfo.__init__(self, config['resolution'], config['layout_unit'],
                          config['tech_lib'], tech_params)
        self.config = config
        self._mos_entry_name = mos_entry_name
        # reference temperature for DC EM ratings and RMS delta-T spec
        self.idc_temp = tech_params['layout']['em']['dc_temp']
        self.irms_dt = tech_params['layout']['em']['rms_dt']
    @abc.abstractmethod
    def get_metal_em_specs(self, layer_name, w, l=-1, vertical=False, **kwargs):
        """Return (idc, irms, ipeak) EM current limits for the given metal wire.

        The default body returns infinity for all three, i.e. no EM limit.
        """
        return float('inf'), float('inf'), float('inf')
    @abc.abstractmethod
    def get_via_em_specs(self, via_name, bm_layer, tm_layer, via_type='square',
                         bm_dim=(-1, -1), tm_dim=(-1, -1), array=False, **kwargs):
        """Return (idc, irms, ipeak) EM current limits for the given via.

        The default body returns infinity for all three, i.e. no EM limit.
        """
        return float('inf'), float('inf'), float('inf')
    @abc.abstractmethod
    def get_res_em_specs(self, res_type, w, l=-1, **kwargs):
        """Return (idc, irms, ipeak) EM current limits for the given resistor.

        The default body returns infinity for all three, i.e. no EM limit.
        """
        return float('inf'), float('inf'), float('inf')
    @abc.abstractmethod
    def add_cell_boundary(self, template, box):
        # type: (TemplateBase, BBox) -> None
        """Add a cell boundary object to the given template covering the given box."""
        pass
    @abc.abstractmethod
    def draw_device_blockage(self, template):
        # type: (TemplateBase) -> None
        """Draw blockage objects over devices in the given template."""
        pass
    @abc.abstractmethod
    def get_via_arr_enc(self, vname, vtype, mtype, mw_unit, is_bot):
        # type: (...) -> Tuple[Optional[List[Tuple[int, int]]], Optional[Callable[[int, int], bool]]]
        """Return via array enclosure rules as an (enclosure list, test function) tuple.

        The test function takes (nrow, ncol) of the via array; either element
        may be None when no array enclosure rule applies.
        """
        return None, None
    @property
    def pin_purpose(self):
        # purpose name to use for pin objects; defaults to 'pin'
        return self.config.get('pin_purpose', 'pin')
    def get_via_types(self, bmtype, tmtype):
        """Return a list of (via type, weight) tuples to try for the given
        bottom/top metal type pair, in order of preference."""
        default = [('square', 1), ('vrect', 2), ('hrect', 2)]
        if 'via_type_order' in self.config:
            table = self.config['via_type_order']
            return table.get((bmtype, tmtype), default)
        return default
    def get_well_layers(self, sub_type):
        # type: (str) -> List[Tuple[str, str]]
        """Return the well (layer, purpose) pairs for the given substrate type."""
        return self.config['well_layers'][sub_type]
    def get_implant_layers(self, mos_type, res_type=None):
        # type: (str, Optional[str]) -> List[Tuple[str, str]]
        """Return implant (layer, purpose) pairs for the given device type.

        When res_type is given, the resistor table is used instead of the
        transistor table.
        """
        if res_type is None:
            table = self.config[self._mos_entry_name]
        else:
            table = self.config['resistor']
        return list(table['imp_layers'][mos_type].keys())
    def get_threshold_layers(self, mos_type, threshold, res_type=None):
        # type: (str, str, Optional[str]) -> List[Tuple[str, str]]
        """Return threshold (layer, purpose) pairs for the given device type and flavor.

        When res_type is given, the resistor table is used instead of the
        transistor table.
        """
        if res_type is None:
            table = self.config[self._mos_entry_name]
        else:
            table = self.config['resistor']
        return list(table['thres_layers'][mos_type][threshold].keys())
    def get_exclude_layer(self, layer_id):
        # type: (int) -> Tuple[str, str]
        """Returns the metal exclude layer"""
        return self.config['metal_exclude_table'][layer_id]
    def get_dnw_margin_unit(self, dnw_mode):
        # type: (str) -> int
        """Return the deep N-well margin, in resolution units, for the given mode."""
        return self.config['dnw_margins'][dnw_mode]
    def get_dnw_layers(self):
        # type: () -> List[Tuple[str, str]]
        """Return the deep N-well (layer, purpose) pairs."""
        return self.config[self._mos_entry_name]['dnw_layers']
    def get_res_metal_layers(self, layer_id):
        # type: (int) -> List[Tuple[str, str]]
        """Return the metal-resistor marker (layer, purpose) pairs for the given layer."""
        return self.config['res_metal_layer_table'][layer_id]
    def get_metal_dummy_layers(self, layer_id):
        # type: (int) -> List[Tuple[str, str]]
        """Return the dummy-metal (layer, purpose) pairs for the given layer."""
        return self.config['metal_dummy_table'][layer_id]
    def use_flip_parity(self):
        # type: () -> bool
        """Return True if this technology uses flip parity (track coloring)."""
        return self.config['use_flip_parity']
    def get_layer_name(self, layer_id):
        # type: (int) -> str
        """Return the layer name for the given routing layer ID."""
        name_dict = self.config['layer_name']
        return name_dict[layer_id]
    def get_layer_id(self, layer_name):
        # type: (str) -> int
        """Return the routing layer ID for the given layer name (reverse lookup)."""
        for key, val in self.config['layer_name'].items():
            if val == layer_name:
                return key
        raise ValueError('Unknown layer: %s' % layer_name)
    def get_layer_type(self, layer_name):
        # type: (str) -> str
        """Return the layer type (DRC rule grouping) for the given layer name."""
        type_dict = self.config['layer_type']
        return type_dict[layer_name]
    def get_idc_scale_factor(self, temp, mtype, is_res=False):
        # type: (float, str, bool) -> float
        """Return the DC EM spec scale factor at the given temperature.

        Uses the per-metal-type table (or 'res'/'default') and returns the
        scale of the first bin temperature >= temp; temperatures above the
        table use the last entry.
        """
        if is_res:
            mtype = 'res'
        idc_em_scale = self.config['idc_em_scale']
        if mtype in idc_em_scale:
            idc_params = idc_em_scale[mtype]
        else:
            idc_params = idc_em_scale['default']
        temp_list = idc_params['temp']
        scale_list = idc_params['scale']
        for temp_test, scale in zip(temp_list, scale_list):
            if temp <= temp_test:
                return scale
        return scale_list[-1]
    def get_via_name(self, bot_layer_id):
        # type: (int) -> str
        """Return the via name for the via above the given bottom layer ID."""
        return self.config['via_name'][bot_layer_id]
    def get_via_id(self, bot_layer, top_layer):
        # type: (str, str) -> str
        """Return the via ID for the given bottom/top layer name pair."""
        return self.config['via_id'][(bot_layer, top_layer)]
    def get_via_drc_info(self, vname, vtype, mtype, mw_unit, is_bot):
        """Return DRC information for the given via.

        Returns (sp, sp2_list, sp3_list, sp6_list, dim, enc, arr_enc, arr_test):
        via spacing(s), via dimensions, the metal enclosure rule selected for
        the given metal width, and optional array enclosure rules.  All
        geometry is transposed when a 'vrect' via is emulated by rotating
        the 'hrect' entry.
        """
        via_config = self.config['via']
        if vname not in via_config:
            raise ValueError('Unsupported vname %s' % vname)
        via_config = via_config[vname]
        if vtype.startswith('vrect') and vtype not in via_config:
            # trying vertical rectangle via, but it does not exist,
            # so try rotating horizontal rectangle instead
            rotate = True
            vtype2 = 'hrect' + vtype[5:]
        else:
            rotate = False
            vtype2 = vtype
        if vtype2 not in via_config:
            raise ValueError('Unsupported vtype %s' % vtype2)
        via_config = via_config[vtype2]
        dim = via_config['dim']
        sp = via_config['sp']
        # optional additional spacing rules (presumably for 2/3/6-via groups
        # in this technology's DRC deck — confirm against the config file)
        sp2_list = via_config.get('sp2', None)
        sp3_list = via_config.get('sp3', None)
        sp6_list = via_config.get('sp6', None)
        if not is_bot or via_config['bot_enc'] is None:
            enc_data = via_config['top_enc']
        else:
            enc_data = via_config['bot_enc']
        enc_w_list = enc_data['w_list']
        enc_list = enc_data['enc_list']
        # pick the enclosure rule of the first width bin that fits mw_unit
        enc_cur = []
        for mw_max, enc in zip(enc_w_list, enc_list):
            if mw_unit <= mw_max:
                enc_cur = enc
                break
        arr_enc, arr_test_tmp = self.get_via_arr_enc(vname, vtype, mtype, mw_unit, is_bot)
        arr_test = arr_test_tmp
        if rotate:
            # transpose every X/Y rule to emulate the rotated via
            sp = sp[1], sp[0]
            dim = dim[1], dim[0]
            enc_cur = [(yv, xv) for xv, yv in enc_cur]
            if sp2_list is not None:
                sp2_list = [(spy, spx) for spx, spy in sp2_list]
            if sp3_list is not None:
                sp3_list = [(spy, spx) for spx, spy in sp3_list]
            if sp6_list is not None:
                sp6_list = [(spy, spx) for spx, spy in sp6_list]
            if arr_enc is not None:
                arr_enc = [(yv, xv) for xv, yv in arr_enc]
            if arr_test_tmp is not None:
                def arr_test(nrow, ncol):
                    # swap row/column for the rotated via
                    return arr_test_tmp(ncol, nrow)
        return sp, sp2_list, sp3_list, sp6_list, dim, enc_cur, arr_enc, arr_test
    def _space_helper(self, config_name, layer_type, width):
        """Look up the minimum spacing for the given width in the named spacing table.

        NOTE(review): returns None when width exceeds every entry in w_list;
        callers that scale the result would then fail — confirm each table
        ends with a catch-all width bin.
        """
        sp_min_config = self.config[config_name]
        if layer_type not in sp_min_config:
            raise ValueError('Unsupported layer type: %s' % layer_type)
        sp_min_config = sp_min_config[layer_type]
        w_list = sp_min_config['w_list']
        sp_list = sp_min_config['sp_list']
        for w, sp in zip(w_list, sp_list):
            if width <= w:
                return sp
        return None
    def get_min_space_unit(self, layer_type, w_unit, same_color=False):
        # type: (str, int, bool) -> int
        """Return minimum wire spacing in resolution units.

        Uses the same-color spacing table only when requested and defined.
        """
        if not same_color or 'sp_sc_min' not in self.config:
            config_name = 'sp_min'
        else:
            config_name = 'sp_sc_min'
        return self._space_helper(config_name, layer_type, w_unit)
    def get_min_line_end_space_unit(self, layer_type, w_unit):
        """Return minimum line-end spacing in resolution units."""
        return self._space_helper('sp_le_min', layer_type, w_unit)
    def get_min_space(self, layer_type, width, unit_mode=False, same_color=False):
        # type: (str, float, bool, bool) -> Union[float, int]
        """Return minimum wire spacing; layout units unless unit_mode is True."""
        res = self.config['resolution']
        if not unit_mode:
            width = int(round(width / res))
        ans = self.get_min_space_unit(layer_type, width, same_color=same_color)
        if unit_mode:
            return ans
        return ans * res
    def get_min_line_end_space(self, layer_type, width, unit_mode=False):
        # type: (str, float, bool) -> Union[float, int]
        """Return minimum line-end spacing; layout units unless unit_mode is True."""
        res = self.config['resolution']
        if not unit_mode:
            width = int(round(width / res))
        ans = self.get_min_line_end_space_unit(layer_type, width)
        if unit_mode:
            return ans
        return ans * res
    def layer_id_to_type(self, layer_id):
        """Return the layer type for the given routing layer ID."""
        name_dict = self.config['layer_name']
        type_dict = self.config['layer_type']
        return type_dict[name_dict[layer_id]]
    def get_min_length_unit(self, layer_type, w_unit):
        """Return the minimum wire length for the given layer type/width,
        in resolution units, rounded up to an even number."""
        len_min_config = self.config['len_min']
        if layer_type not in len_min_config:
            raise ValueError('Unsupported layer type: %s' % layer_type)
        w_list = len_min_config[layer_type]['w_list']
        w_al_list = len_min_config[layer_type]['w_al_list']
        md_list = len_min_config[layer_type]['md_list']
        md_al_list = len_min_config[layer_type]['md_al_list']
        # get minimum length from width spec
        l_unit = 0
        for w, (area, len_min) in zip(w_list, w_al_list):
            if w_unit <= w:
                # -(-area // w_unit) is ceiling division: length needed to meet min area
                l_unit = max(len_min, -(-area // w_unit))
                break
        # check maximum dimension spec
        for max_dim, (area, len_min) in zip(reversed(md_list), reversed(md_al_list)):
            if max(w_unit, l_unit) > max_dim:
                return l_unit
            l_unit = max(l_unit, len_min, -(-area // w_unit))
        # round up to even
        return -(-l_unit // 2) * 2
    def get_min_length(self, layer_type, width):
        """Return the minimum wire length in layout units for the given width."""
        res = self.resolution
        w_unit = int(round(width / res))
        return res * self.get_min_length_unit(layer_type, w_unit)
    def get_res_rsquare(self, res_type):
        """Return sheet resistance (ohms per square) for the given resistor type."""
        return self.config['resistor']['info'][res_type]['rsq']
    def get_res_width_bounds(self, res_type):
        """Return (min, max) resistor width bounds for the given resistor type."""
        return self.config['resistor']['info'][res_type]['w_bounds']
    def get_res_length_bounds(self, res_type):
        """Return (min, max) resistor length bounds for the given resistor type."""
        return self.config['resistor']['info'][res_type]['l_bounds']
    def get_res_min_nsquare(self, res_type):
        """Return the minimum number of squares for the given resistor type."""
        return self.config['resistor']['info'][res_type]['min_nsq']
================================================
FILE: bag/layout/template.py
================================================
# -*- coding: utf-8 -*-
"""This module defines layout template classes.
"""
from typing import TYPE_CHECKING, Union, Dict, Any, List, Set, TypeVar, Type, \
Optional, Tuple, Iterable, Sequence, Callable, Generator, cast
import os
import abc
import copy
import time
import bisect
import pickle
from itertools import islice, product, chain
import math
import yaml
import shapely.ops as shops
import shapely.geometry as shgeo
from ..util.cache import DesignMaster, MasterDB
from ..util.interval import IntervalSet
from .core import BagLayout
from .util import BBox, BBoxArray, tuple2_to_int, tuple2_to_float_int
from ..io import get_encoding, open_file
from .routing import Port, TrackID, WireArray
from .routing.fill import UsedTracks, fill_symmetric_max_num_info, fill_symmetric_interval, \
NoFillChoiceError
from .objects import Instance, Rect, Via, Path, Polygon
if TYPE_CHECKING:
from bag.core import BagProject
from .objects import Polygon, Blockage, Boundary
from .objects import InstanceInfo, ViaInfo, PinInfo
from .routing import RoutingGrid
# try to import optional modules
try:
import cybagoa
except ImportError:
cybagoa = None
try:
# noinspection PyPackageRequirements
import gdspy
except ImportError:
gdspy = None
TemplateType = TypeVar('TemplateType', bound='TemplateBase')
class TemplateDB(MasterDB):
    """A database of all templates.

    This class is responsible for keeping track of template libraries and
    creating new templates.

    Parameters
    ----------
    lib_defs : str
        path to the template library definition file.
    routing_grid : RoutingGrid
        the default RoutingGrid object.
    lib_name : str
        the cadence library to put all generated templates in.
    prj : Optional[BagProject]
        the BagProject instance.
    name_prefix : str
        generated layout name prefix.
    name_suffix : str
        generated layout name suffix.
    use_cybagoa : bool
        True to use cybagoa module to accelerate layout.
    gds_lay_file : str
        The GDS layer/purpose mapping file.
    flatten : bool
        True to compute flattened layout.
    **kwargs :
        additional arguments.
    """
    def __init__(self,  # type: TemplateDB
                 lib_defs,  # type: str
                 routing_grid,  # type: RoutingGrid
                 lib_name,  # type: str
                 prj=None,  # type: Optional[BagProject]
                 name_prefix='',  # type: str
                 name_suffix='',  # type: str
                 use_cybagoa=False,  # type: bool
                 gds_lay_file='',  # type: str
                 flatten=False,  # type: bool
                 **kwargs):
        # type: (...) -> None
        MasterDB.__init__(self, lib_name, lib_defs=lib_defs,
                          name_prefix=name_prefix, name_suffix=name_suffix)
        pure_oa = kwargs.get('pure_oa', False)
        cache_dir = kwargs.get('cache_dir', '')
        if gds_lay_file:
            if gdspy is None:
                raise ValueError('gdspy module not found; cannot export GDS.')
            # GDS export takes precedence over other output options
            use_cybagoa = pure_oa = False
        if pure_oa:
            if cybagoa is None:
                raise ValueError('Cannot use pure OA mode when cybagoa is not found.')
            # pure OA implies cybagoa
            use_cybagoa = True
        self._prj = prj
        self._grid = routing_grid
        self._use_cybagoa = use_cybagoa and cybagoa is not None
        self._gds_lay_file = gds_lay_file
        self._flatten = flatten
        self._pure_oa = pure_oa
        if cache_dir and os.path.isdir(cache_dir):
            # pre-populate the database with masters previously written by
            # save_to_cache(); db_mapping.pickle maps master key -> cache file
            print('loading template cache...')
            start = time.time()
            cache_dir = os.path.realpath(cache_dir)
            with open(os.path.join(cache_dir, 'db_mapping.pickle'), 'rb') as f:
                info = pickle.load(f)
            for key, fname in info.items():
                params = dict(cache_fname=fname)
                master = CachedTemplate(self, lib_name, params, self.used_cell_names,
                                        use_cybagoa=self._use_cybagoa)
                master.finalize()
                # registered under both the saved key and the master's own
                # computed key; presumably these can differ for cached
                # masters — verify before changing
                self.register_master(key, master)
                self.register_master(master.key, master)
            end = time.time()
            print('cache loading took %.5g seconds.' % (end - start))
    def create_master_instance(self, gen_cls, lib_name, params, used_cell_names, **kwargs):
        # type: (Type[TemplateType], str, Dict[str, Any], Set[str], **Any) -> TemplateType
        """Create a new non-finalized master instance.

        This instance is used to determine if we created this instance before.

        Parameters
        ----------
        gen_cls : Type[TemplateType]
            the generator Python class.
        lib_name : str
            generated instance library name.
        params : Dict[str, Any]
            instance parameters dictionary.
        used_cell_names : Set[str]
            a set of all used cell names.
        **kwargs: Any
            optional arguments for the generator.

        Returns
        -------
        master : TemplateType
            the non-finalized generated instance.
        """
        # noinspection PyCallingNonCallable
        return gen_cls(self, lib_name, params, used_cell_names, **kwargs)
    def create_masters_in_db(self, lib_name, content_list, debug=False):
        # type: (str, Sequence[Any], bool) -> None
        """Create the masters in the design database.

        Parameters
        ----------
        lib_name : str
            library to create the designs in.
        content_list : Sequence[Any]
            a list of the master contents.  Must be created in this order.
        debug : bool
            True to print debug messages
        """
        if self._prj is None:
            raise ValueError('BagProject is not defined.')
        # dispatch on output mode: GDS file, cybagoa OA layouts, or skill
        if self._gds_lay_file:
            self._create_gds(lib_name, content_list, debug=debug)
        elif self._use_cybagoa:
            # remove write locks from old layouts
            cell_view_list = [(item[0], 'layout') for item in content_list]
            if self._pure_oa:
                # pure OA mode: no library creation / lock release through the
                # BagProject server connection — presumably cybagoa handles it
                pass
            else:
                # create library if it does not exist
                self._prj.create_library(self._lib_name)
                self._prj.release_write_locks(self._lib_name, cell_view_list)
            if debug:
                print('Instantiating layout')
            # create OALayouts
            start = time.time()
            if 'CDSLIBPATH' in os.environ:
                cds_lib_path = os.path.abspath(os.path.join(os.environ['CDSLIBPATH'], 'cds.lib'))
            else:
                cds_lib_path = os.path.abspath('./cds.lib')
            with cybagoa.PyOALayoutLibrary(cds_lib_path, self._lib_name, self._prj.default_lib_path,
                                           self._prj.tech_info.via_tech_name,
                                           get_encoding()) as lib:
                # register the layer/purpose numbers BAG uses beyond the tech defaults
                lib.add_layer('prBoundary', 235)
                lib.add_purpose('label', 237)
                lib.add_purpose('drawing1', 241)
                lib.add_purpose('drawing2', 242)
                lib.add_purpose('drawing3', 243)
                lib.add_purpose('drawing4', 244)
                lib.add_purpose('drawing5', 245)
                lib.add_purpose('drawing6', 246)
                lib.add_purpose('drawing7', 247)
                lib.add_purpose('drawing8', 248)
                lib.add_purpose('drawing9', 249)
                lib.add_purpose('boundary', 250)
                lib.add_purpose('pin', 251)
                for cell_name, oa_layout in content_list:
                    lib.create_layout(cell_name, 'layout', oa_layout)
            end = time.time()
            if debug:
                print('layout instantiation took %.4g seconds' % (end - start))
        else:
            # create library if it does not exist
            self._prj.create_library(self._lib_name)
            if debug:
                print('Instantiating layout')
            via_tech_name = self._grid.tech_info.via_tech_name
            start = time.time()
            self._prj.instantiate_layout(self._lib_name, 'layout', via_tech_name, content_list)
            end = time.time()
            if debug:
                print('layout instantiation took %.4g seconds' % (end - start))
    @property
    def grid(self):
        # type: () -> RoutingGrid
        """Returns the default routing grid instance."""
        return self._grid
    def new_template(self, lib_name='', temp_name='', params=None, temp_cls=None, debug=False,
                     **kwargs):
        # type: (str, str, Dict[str, Any], Type[TemplateType], bool, **Any) -> TemplateType
        """Create a new template.

        Parameters
        ----------
        lib_name : str
            template library name.
        temp_name : str
            template name
        params : Dict[str, Any]
            the parameter dictionary.
        temp_cls : Type[TemplateType]
            the template class to instantiate.
        debug : bool
            True to print debug messages.
        **kwargs : Any
            optional template parameters.

        Returns
        -------
        template : TemplateType
            the new template instance.
        """
        # propagate the database's cybagoa setting to the new master
        kwargs['use_cybagoa'] = self._use_cybagoa
        master = self.new_master(lib_name=lib_name, cell_name=temp_name, params=params,
                                 gen_cls=temp_cls, debug=debug, **kwargs)
        return master
    def instantiate_layout(self, prj, template, top_cell_name=None, debug=False, rename_dict=None):
        # type: (BagProject, TemplateBase, Optional[str], bool, Optional[Dict[str, str]]) -> None
        """Instantiate the layout of the given :class:`~bag.layout.template.TemplateBase`.

        Parameters
        ----------
        prj : BagProject
            the :class:`~bag.BagProject` instance used to create layout.
        template : TemplateBase
            the :class:`~bag.layout.template.TemplateBase` to instantiate.
        top_cell_name : Optional[str]
            name of the top level cell.  If None, a default name is used.
        debug : bool
            True to print debugging messages
        rename_dict : Optional[Dict[str, str]]
            optional master cell renaming dictionary.
        """
        # single-template convenience wrapper around batch_layout()
        self.batch_layout(prj, [template], [top_cell_name], debug=debug, rename_dict=rename_dict)
    def batch_layout(self,
                     prj,  # type: BagProject
                     template_list,  # type: Sequence[TemplateBase]
                     name_list=None,  # type: Optional[Sequence[Optional[str]]]
                     lib_name='',  # type: str
                     debug=False,  # type: bool
                     rename_dict=None,  # type: Optional[Dict[str, str]]
                     ):
        # type: (...) -> None
        """Instantiate all given templates.

        Parameters
        ----------
        prj : BagProject
            the :class:`~bag.BagProject` instance used to create layout.
        template_list : Sequence[TemplateBase]
            list of templates to instantiate.
        name_list : Optional[Sequence[Optional[str]]]
            list of template layout names.  If not given, default names will be used.
        lib_name : str
            Library to create the masters in.  If empty or None, use default library.
        debug : bool
            True to print debugging messages
        rename_dict : Optional[Dict[str, str]]
            optional master cell renaming dictionary.
        """
        # remember the project so create_masters_in_db can use it
        self._prj = prj
        self.instantiate_masters(template_list, name_list=name_list, lib_name=lib_name,
                                 debug=debug, rename_dict=rename_dict)
    def save_to_cache(self, temp_list, dir_name, debug=False):
        """Write the given finalized templates to a cache directory.

        Each unique master is written to a numbered file in dir_name, and a
        db_mapping.pickle file records master key -> file name so __init__
        can reload the cache later.
        """
        os.makedirs(dir_name, exist_ok=True)
        info = {}
        cnt = 0
        for master in temp_list:
            fname = os.path.join(dir_name, str(cnt))
            key = master.key
            if key not in info:
                master.write_to_disk(fname, self.lib_name, master.cell_name, debug=debug)
                info[key] = fname
            cnt += 1
        with open(os.path.join(dir_name, 'db_mapping.pickle'), 'wb') as f:
            pickle.dump(info, f, protocol=-1)
    def _create_gds(self, lib_name, content_list, debug=False):
        # type: (str, Sequence[Any], bool) -> None
        """Create a GDS file containing the given layouts

        Parameters
        ----------
        lib_name : str
            library to create the designs in.
        content_list : Sequence[Any]
            a list of the master contents.  Must be created in this order.
        debug : bool
            True to print debug messages
        """
        tech_info = self.grid.tech_info
        lay_unit = tech_info.layout_unit
        res = tech_info.resolution
        # NOTE(review): yaml.load without an explicit Loader is deprecated and
        # unsafe on untrusted input; consider yaml.safe_load for the layer map
        with open(self._gds_lay_file, 'r') as f:
            lay_info = yaml.load(f)
            lay_map = lay_info['layer_map']
            via_info = lay_info['via_info']
        out_fname = '%s.gds' % lib_name
        gds_lib = gdspy.GdsLibrary(name=lib_name, unit=lay_unit, precision=res * lay_unit)
        cell_dict = gds_lib.cell_dict
        if debug:
            print('Instantiating layout')
        start = time.time()
        for content in content_list:
            (cell_name, inst_tot_list, rect_list, via_list, pin_list,
             path_list, blockage_list, boundary_list, polygon_list) = content
            gds_cell = gdspy.Cell(cell_name, exclude_from_current=True)
            gds_lib.add(gds_cell)
            # add instances
            for inst_info in inst_tot_list:  # type: InstanceInfo
                if inst_info.params is not None:
                    raise ValueError('Cannot instantiate PCells in GDS.')
                num_rows = inst_info.num_rows
                num_cols = inst_info.num_cols
                angle, reflect = inst_info.angle_reflect
                if num_rows > 1 or num_cols > 1:
                    cur_inst = gdspy.CellArray(cell_dict[inst_info.cell], num_cols, num_rows,
                                               (inst_info.sp_cols, inst_info.sp_rows),
                                               origin=inst_info.loc, rotation=angle,
                                               x_reflection=reflect)
                else:
                    cur_inst = gdspy.CellReference(cell_dict[inst_info.cell], origin=inst_info.loc,
                                                   rotation=angle, x_reflection=reflect)
                gds_cell.add(cur_inst)
            # add rectangles
            for rect in rect_list:
                nx, ny = rect.get('arr_nx', 1), rect.get('arr_ny', 1)
                (x0, y0), (x1, y1) = rect['bbox']
                lay_id, purp_id = lay_map[tuple(rect['layer'])]
                if nx > 1 or ny > 1:
                    # GDS has no rectangle arrays; expand the array explicitly
                    spx, spy = rect['arr_spx'], rect['arr_spy']
                    for xidx in range(nx):
                        dx = xidx * spx
                        for yidx in range(ny):
                            dy = yidx * spy
                            cur_rect = gdspy.Rectangle((x0 + dx, y0 + dy), (x1 + dx, y1 + dy),
                                                       layer=lay_id, datatype=purp_id)
                            gds_cell.add(cur_rect)
                else:
                    cur_rect = gdspy.Rectangle((x0, y0), (x1, y1), layer=lay_id, datatype=purp_id)
                    gds_cell.add(cur_rect)
            # add vias
            for via in via_list:  # type: ViaInfo
                via_lay_info = via_info[via.id]
                nx, ny = via.arr_nx, via.arr_ny
                x0, y0 = via.loc
                if nx > 1 or ny > 1:
                    spx, spy = via.arr_spx, via.arr_spy
                    for xidx in range(nx):
                        xc = x0 + xidx * spx
                        for yidx in range(ny):
                            yc = y0 + yidx * spy
                            self._add_gds_via(gds_cell, via, lay_map, via_lay_info, xc, yc)
                else:
                    self._add_gds_via(gds_cell, via, lay_map, via_lay_info, x0, y0)
            # add pins
            for pin in pin_list:  # type: PinInfo
                lay_id, purp_id = lay_map[pin.layer]
                bbox = pin.bbox
                label = pin.label
                if pin.make_rect:
                    cur_rect = gdspy.Rectangle((bbox.left, bbox.bottom), (bbox.right, bbox.top),
                                               layer=lay_id, datatype=purp_id)
                    gds_cell.add(cur_rect)
                # rotate the label for tall-and-narrow pins
                angle = 90 if bbox.height_unit > bbox.width_unit else 0
                cur_lbl = gdspy.Label(label, (bbox.xc, bbox.yc), rotation=angle,
                                      layer=lay_id, texttype=purp_id)
                gds_cell.add(cur_lbl)
            # paths, blockages, and boundaries are not exported to GDS
            for path in path_list:
                pass
            for blockage in blockage_list:
                pass
            for boundary in boundary_list:
                pass
            for polygon in polygon_list:
                lay_id, purp_id = lay_map[polygon['layer']]
                cur_poly = gdspy.Polygon(polygon['points'], layer=lay_id, datatype=purp_id,
                                         verbose=False)
                # fracture to satisfy GDS vertex-count limits
                gds_cell.add(cur_poly.fracture(precision=res))
        gds_lib.write_gds(out_fname)
        end = time.time()
        if debug:
            print('layout instantiation took %.4g seconds' % (end - start))
    def _add_gds_via(self, gds_cell, via, lay_map, via_lay_info, x0, y0):
        """Add one via (cut array plus bottom/top metal enclosures) to the
        given GDS cell, centered at (x0, y0) in layout units."""
        blay, bpurp = lay_map[via_lay_info['bot_layer']]
        tlay, tpurp = lay_map[via_lay_info['top_layer']]
        vlay, vpurp = lay_map[via_lay_info['via_layer']]
        cw, ch = via.cut_width, via.cut_height
        # negative cut dimensions mean "use the technology default"
        if cw < 0:
            cw = via_lay_info['cut_width']
        if ch < 0:
            ch = via_lay_info['cut_height']
        num_cols, num_rows = via.num_cols, via.num_rows
        sp_cols, sp_rows = via.sp_cols, via.sp_rows
        w_arr = num_cols * cw + (num_cols - 1) * sp_cols
        h_arr = num_rows * ch + (num_rows - 1) * sp_rows
        # shift from center to lower-left corner of the cut array
        x0 -= w_arr / 2
        y0 -= h_arr / 2
        # If the via array is odd dimension, prevent off-grid points
        if int(round(w_arr / self.grid.resolution)) % 2 == 1:
            x0 -= 0.5 * self.grid.resolution
        if int(round(h_arr / self.grid.resolution)) % 2 == 1:
            y0 -= 0.5 * self.grid.resolution
        # enc1/enc2 = (left, right, top, bottom) enclosures of bottom/top metal
        bl, br, bt, bb = via.enc1
        tl, tr, tt, tb = via.enc2
        bot_p0, bot_p1 = (x0 - bl, y0 - bb), (x0 + w_arr + br, y0 + h_arr + bt)
        top_p0, top_p1 = (x0 - tl, y0 - tb), (x0 + w_arr + tr, y0 + h_arr + tt)
        cur_rect = gdspy.Rectangle(bot_p0, bot_p1, layer=blay, datatype=bpurp)
        gds_cell.add(cur_rect)
        cur_rect = gdspy.Rectangle(top_p0, top_p1, layer=tlay, datatype=tpurp)
        gds_cell.add(cur_rect)
        # draw the individual via cuts
        for xidx in range(num_cols):
            dx = xidx * (cw + sp_cols)
            for yidx in range(num_rows):
                dy = yidx * (ch + sp_rows)
                cur_rect = gdspy.Rectangle((x0 + dx, y0 + dy), (x0 + cw + dx, y0 + ch + dy),
                                           layer=vlay, datatype=vpurp)
                gds_cell.add(cur_rect)
class TemplateBase(DesignMaster, metaclass=abc.ABCMeta):
"""The base template class.
Parameters
----------
temp_db : TemplateDB
the template database.
lib_name : str
the layout library name.
params : Dict[str, Any]
the parameter values.
used_names : Set[str]
a set of already used cell names.
**kwargs
dictionary of the following optional parameters:
grid : RoutingGrid
the routing grid to use for this template.
use_cybagoa : bool
True to use cybagoa module to accelerate layout.
Attributes
----------
pins : dict
the pins dictionary.
children : List[str]
a list of template cells this template uses.
params : Dict[str, Any]
the parameter values of this template.
"""
    def __init__(self, temp_db, lib_name, params, used_names, **kwargs):
        # type: (TemplateDB, str, Dict[str, Any], Set[str], **Any) -> None
        use_cybagoa = kwargs.get('use_cybagoa', False)
        # initialize template attributes
        # keep the parent grid so derived templates (new_template_with) can reuse
        # it; work on a copy because flip_parity may be modified per-template
        self._parent_grid = kwargs.get('grid', temp_db.grid)
        self._grid = self._parent_grid.copy()
        self._layout = BagLayout(self._grid, use_cybagoa=use_cybagoa)
        self._size = None  # type: Optional[Tuple[int, int, int]]
        self._ports = {}  # type: Dict[str, Port]
        self._port_params = {}  # type: Dict[str, dict]
        self._prim_ports = {}  # type: Dict[str, Port]
        self._prim_port_params = {}  # type: Dict[str, dict]
        self._array_box = None  # type: Optional[BBox]
        self._fill_box = None  # type: Optional[BBox]
        self.prim_top_layer = None  # type: Optional[int]
        self.prim_bound_box = None  # type: Optional[BBox]
        self._used_tracks = UsedTracks()
        self._track_boxes = {}  # type: Dict[int, BBox]
        self._merge_used_tracks = False
        # add hidden parameters
        if 'hidden_params' in kwargs:
            hidden_params = kwargs['hidden_params'].copy()
        else:
            hidden_params = {}
        # flip_parity is always a hidden parameter so each template can be
        # regenerated with the correct track parity at its placement location
        hidden_params['flip_parity'] = None
        DesignMaster.__init__(self, temp_db, lib_name, params, used_names,
                              hidden_params=hidden_params)
        # update RoutingGrid with the flip_parity information, if given
        fp_dict = self.params['flip_parity']
        if fp_dict is not None:
            self._grid.set_flip_parity(fp_dict)
    @abc.abstractmethod
    def draw_layout(self):
        # type: () -> None
        """Draw the layout of this template.

        Override this method to create the layout.

        WARNING: you should never call this method yourself; it is invoked by
        finalize() exactly once during template creation.
        """
        pass
    def populate_params(self, table, params_info, default_params, **kwargs):
        # type: (Dict[str, Any], Dict[str, str], Dict[str, Any], **Any) -> None
        """Fill params dictionary with values from table and default_params"""
        DesignMaster.populate_params(self, table, params_info, default_params, **kwargs)
        # add hidden parameters; table values take precedence over the
        # hidden-parameter defaults
        hidden_params = kwargs.get('hidden_params', {})
        for name, value in hidden_params.items():
            self.params[name] = table.get(name, value)
        # always add flip_parity parameter
        if 'flip_parity' not in self.params:
            self.params['flip_parity'] = table.get('flip_parity', None)
        # update RoutingGrid with the flip_parity information, if given
        fp_dict = self.params['flip_parity']
        if fp_dict is not None:
            self._grid.set_flip_parity(fp_dict)
def get_master_basename(self):
# type: () -> str
"""Returns the base name to use for this instance.
Returns
-------
basename : str
the base name for this instance.
"""
return self.get_layout_basename()
def get_layout_basename(self):
# type: () -> str
"""Returns the base name for this template.
Returns
-------
base_name : str
the base name of this template.
"""
return self.__class__.__name__
def get_content(self, lib_name, rename_fun):
# type: (str, Callable[[str], str]) -> Union[List[Any], Tuple[str, 'cybagoa.PyOALayout']]
"""Returns the content of this master instance.
Parameters
----------
lib_name : str
the library to create the design masters in.
rename_fun : Callable[[str], str]
a function that renames design masters.
Returns
-------
content : Union[List[Any], Tuple[str, 'cybagoa.PyOALayout']]
a list describing this layout, or PyOALayout if cybagoa is enabled.
"""
if not self.finalized:
raise ValueError('This template is not finalized yet')
return self._layout.get_content(lib_name, self.cell_name, rename_fun)
    def finalize(self):
        # type: () -> None
        """Finalize this master instance.

        Draws the layout, constructs Port objects from the recorded pin
        geometries, and computes per-layer track bounding boxes.
        """
        # create layout
        self.draw_layout()
        # finalize this template
        self.grid.tech_info.finalize_template(self)
        # update track parities of all instances
        if self.grid.tech_info.use_flip_parity():
            self._update_flip_parity()
        # construct port objects
        for net_name, port_params in self._port_params.items():
            pin_dict = port_params['pins']
            label = port_params['label']
            if port_params['show']:
                label = port_params['label']
                # draw pin geometries in the layout only when show is True
                for wire_arr_list in pin_dict.values():
                    for wire_arr in wire_arr_list:  # type: WireArray
                        for layer_name, bbox in wire_arr.wire_iter(self.grid):
                            self._layout.add_pin(net_name, layer_name, bbox, label=label)
            self._ports[net_name] = Port(net_name, pin_dict, label=label)
        # construct primitive port objects
        for net_name, port_params in self._prim_port_params.items():
            pin_dict = port_params['pins']
            label = port_params['label']
            if port_params['show']:
                label = port_params['label']
                for layer, box_list in pin_dict.items():
                    for box in box_list:
                        self._layout.add_pin(net_name, layer, box, label=label)
            self._ports[net_name] = Port(net_name, pin_dict, label=label)
        # finalize layout
        self._layout.finalize()
        # get set of children keys
        self.children = self._layout.get_masters_set()
        # record per-layer track bounding boxes from this template's own tracks
        for layer_id, bbox in self._used_tracks.track_box_iter():
            self._track_boxes[layer_id] = bbox
        # fold in instance track boxes unless they were already merged in
        if not self._merge_used_tracks:
            for inst in self._layout.inst_iter():
                for layer_id, bbox in inst.track_bbox_iter():
                    if layer_id not in self._track_boxes:
                        self._track_boxes[layer_id] = bbox
                    else:
                        self._track_boxes[layer_id] = bbox.merge(self._track_boxes[layer_id])
        # call super finalize routine
        DesignMaster.finalize(self)
@classmethod
def get_cache_properties(cls):
# type: () -> List[str]
"""Returns a list of properties to cache."""
return []
@property
def template_db(self):
# type: () -> TemplateDB
"""Returns the template database object"""
# noinspection PyTypeChecker
return self.master_db
@property
def is_empty(self):
# type: () -> bool
"""Returns True if this template is empty."""
return self._layout.is_empty
@property
def grid(self):
# type: () -> RoutingGrid
"""Returns the RoutingGrid object"""
return self._grid
@grid.setter
def grid(self, new_grid):
# type: (RoutingGrid) -> None
"""Change the RoutingGrid of this template."""
if not self._finalized:
self._grid = new_grid
else:
raise RuntimeError('Template already finalized.')
@property
def array_box(self):
# type: () -> Optional[BBox]
"""Returns the array/abutment bounding box of this template."""
return self._array_box
@array_box.setter
def array_box(self, new_array_box):
# type: (BBox) -> None
"""Sets the array/abutment bound box of this template."""
if not self._finalized:
self._array_box = new_array_box
else:
raise RuntimeError('Template already finalized.')
@property
def fill_box(self):
# type: () -> Optional[BBox]
"""Returns the dummy fill bounding box of this template."""
return self._fill_box
@fill_box.setter
def fill_box(self, new_box):
# type: (BBox) -> None
"""Sets the array/abutment bound box of this template."""
if not self._finalized:
self._fill_box = new_box
else:
raise RuntimeError('Template already finalized.')
@property
def top_layer(self):
# type: () -> int
"""Returns the top layer used in this template."""
if self.size is None:
if self.prim_top_layer is None:
raise Exception('Both size and prim_top_layer are unset.')
return self.prim_top_layer
return self.size[0]
@property
def size(self):
# type: () -> Optional[Tuple[int, int, int]]
"""The size of this template, in (layer, num_x_block, num_y_block) format."""
return self._size
@property
def bound_box(self):
# type: () -> Optional[BBox]
"""Returns the BBox with the size of this template. None if size not set yet."""
mysize = self.size
if mysize is None:
if self.prim_bound_box is None:
raise ValueError('Both size and prim_bound_box are unset.')
return self.prim_bound_box
wblk, hblk = self.grid.get_size_dimension(mysize, unit_mode=True)
return BBox(0, 0, wblk, hblk, self.grid.resolution, unit_mode=True)
@size.setter
def size(self, new_size):
# type: (Tuple[int, int, int]) -> None
"""Sets the size of this template."""
if not self._finalized:
self._size = new_size
else:
raise RuntimeError('Template already finalized.')
@property
def used_tracks(self):
# type: () -> UsedTracks
return self._used_tracks
    def _update_flip_parity(self):
        # type: () -> None
        """Update all instances in this template to have the correct track parity.
        """
        for inst in self._layout.inst_iter():
            # compute the flip-parity dictionary at this instance's location and
            # orientation, then regenerate the instance master with it
            top_layer = inst.master.top_layer
            bot_layer = self.grid.get_bot_common_layer(inst.master.grid, top_layer)
            loc = inst.location_unit
            fp_dict = self.grid.get_flip_parity_at(bot_layer, top_layer, loc,
                                                   inst.orientation, unit_mode=True)
            inst.new_master_with(flip_parity=fp_dict)
def instance_iter(self):
return self._layout.inst_iter()
def blockage_iter(self, layer_id, test_box, spx=0, spy=0):
# type: (int, BBox, int, int) -> Generator[BBox, None, None]
"""Returns all block intersecting the given rectangle."""
yield from self._used_tracks.blockage_iter(layer_id, test_box, spx=spx, spy=spy)
if not self._merge_used_tracks:
for inst in self._layout.inst_iter():
yield from inst.blockage_iter(layer_id, test_box, spx=spx, spy=spy)
def all_rect_iter(self):
# type: () -> Generator[Tuple[int, BBox, int, int], None, None]
"""Returns all rectangle objects in this """
yield from self._used_tracks.all_rect_iter()
if not self._merge_used_tracks:
for inst in self._layout.inst_iter():
yield from inst.all_rect_iter()
def intersection_rect_iter(self, layer_id, box):
# type: (int, BBox) -> Generator[BBox, None, None]
yield from self._used_tracks.intersection_rect_iter(layer_id, box)
if not self._merge_used_tracks:
for inst in self._layout.inst_iter():
yield from inst.intersection_rect_iter(layer_id, box)
    def open_interval_iter(self,  # type: TemplateBase
                           track_id,  # type: TrackID
                           lower,  # type: int
                           upper,  # type: int
                           sp=0,  # type: int
                           sp_le=0,  # type: int
                           min_len=0,  # type: int
                           ):
        # type: (...) -> Generator[Tuple[int, int], None, None]
        """Iterate over unblocked sub-intervals of [lower, upper] on the given track.

        Blockages are expanded by the larger of the given spacings and the
        technology minimum spacings; only intervals at least min_len long are
        yielded.
        """
        res = self.grid.resolution
        layer_id = track_id.layer_id
        width = track_id.width
        intv_dir = self.grid.get_direction(layer_id)
        # bounding box of the candidate wire segment
        warr = WireArray(track_id, lower, upper, res=res, unit_mode=True)
        test_box = warr.get_bbox_array(self.grid).base
        # never use less than the technology minimum spacings
        sp = max(sp, int(self.grid.get_space(layer_id, width, unit_mode=True)))
        sp_le = max(sp_le, int(self.grid.get_line_end_space(layer_id, width, unit_mode=True)))
        # line-end spacing applies along the track direction
        if intv_dir == 'x':
            spx, spy = sp_le, sp
        else:
            spx, spy = sp, sp_le
        # collect blocked intervals, clipped to [lower, upper]
        intv_set = IntervalSet()
        for box in self.blockage_iter(layer_id, test_box, spx=spx, spy=spy):
            bl, bu = tuple2_to_int(box.get_interval(intv_dir, unit_mode=True))
            intv_set.add((max(bl, lower), min(bu, upper)), merge=True, abut=True)
        # the open intervals are the complement of the blocked intervals
        for intv in intv_set.complement_iter((lower, upper)):
            if intv[1] - intv[0] >= min_len:
                yield intv
def is_track_available(self, # type: TemplateBase
layer_id, # type: int
tr_idx, # type: Union[float, int]
lower, # type: Union[float, int]
upper, # type: Union[float, int]
width=1, # type: int
sp=0, # type: Union[float, int]
sp_le=0, # type: Union[float, int]
unit_mode=False, # type: bool
):
"""Returns True if the given track is available."""
res = self.grid.resolution
if not unit_mode:
lower = int(round(lower / res))
upper = int(round(upper / res))
sp = int(round(sp / res))
sp_le = int(round(sp_le / res))
else:
lower = int(lower)
upper = int(upper)
sp = int(sp)
sp_le = int(sp_le)
intv_dir = self.grid.get_direction(layer_id)
track_id = TrackID(layer_id, tr_idx, width=width)
warr = WireArray(track_id, lower, upper, res=res, unit_mode=True)
test_box = warr.get_bbox_array(self.grid).base
sp = max(sp, int(self.grid.get_space(layer_id, width, unit_mode=True)))
sp_le = max(sp_le, int(self.grid.get_line_end_space(layer_id, width, unit_mode=True)))
if intv_dir == 'x':
spx, spy = sp_le, sp
else:
spx, spy = sp, sp_le
try:
next(self.blockage_iter(layer_id, test_box, spx=spx, spy=spy))
except StopIteration:
return True
return False
def get_rect_bbox(self, layer):
# type: (Union[str, Tuple[str, str]]) -> BBox
"""Returns the overall bounding box of all rectangles on the given layer.
Note: currently this does not check primitive instances or vias.
Parameters
----------
layer : Union[str, Tuple[str, str]]
the layer name.
Returns
-------
box : BBox
the overall bounding box of the given layer.
"""
return self._layout.get_rect_bbox(layer)
def get_track_bbox(self, layer_id):
"""Returns the bounding box of all tracks on the given layer."""
if not self.finalized:
raise ValueError('This method only works after being finalized.')
if layer_id in self._track_boxes:
return self._track_boxes[layer_id]
return BBox.get_invalid_bbox()
def track_bbox_iter(self):
"""Returns the bounding box of all tracks on the given layer."""
if not self.finalized:
raise ValueError('This method only works after being finalized.')
return self._track_boxes.items()
def new_template_with(self, **kwargs):
# type: (Any) -> TemplateBase
"""Create a new template with the given parameters.
This method will update the parameter values with the given dictionary,
then create a new template with those parameters and return it.
Parameters
----------
**kwargs
a dictionary of new parameter values.
"""
# get new parameter dictionary.
new_params = copy.deepcopy(self.params)
for key, val in kwargs.items():
if key in new_params:
new_params[key] = val
return self.template_db.new_template(params=new_params, temp_cls=self.__class__,
grid=self._parent_grid)
def set_size_from_bound_box(self, top_layer_id, bbox, round_up=False,
half_blk_x=True, half_blk_y=True):
# type: (int, BBox, bool, bool, bool) -> None
"""Compute the size from overall bounding box.
Parameters
----------
top_layer_id : int
the top level routing layer ID that array box is calculated with.
bbox : BBox
the overall bounding box
round_up: bool
True to round up bounding box if not quantized properly
half_blk_x : bool
True to allow half-block widths.
half_blk_y : bool
True to allow half-block heights.
"""
grid = self.grid
if bbox.left_unit != 0 or bbox.bottom_unit != 0:
raise ValueError('lower-left corner of overall bounding box must be (0, 0).')
self.size = grid.get_size_tuple(top_layer_id, bbox.width_unit, bbox.height_unit,
round_up=round_up, unit_mode=True, half_blk_x=half_blk_x,
half_blk_y=half_blk_y)
def set_size_from_array_box(self, top_layer_id):
# type: (int) -> None
"""Automatically compute the size from array_box.
Assumes the array box is exactly in the center of the template.
Parameters
----------
top_layer_id : int
the top level routing layer ID that array box is calculated with.
"""
grid = self.grid
array_box = self.array_box
if array_box is None:
raise ValueError("array_box is not set")
dx = array_box.left_unit
dy = array_box.bottom_unit
if dx < 0 or dy < 0:
raise ValueError('lower-left corner of array box must be in first quadrant.')
self.size = grid.get_size_tuple(top_layer_id, 2 * dx + array_box.width_unit,
2 * dy + array_box.height_unit, unit_mode=True)
def write_summary_file(self, fname, lib_name, cell_name):
# type: (str, str, str) -> None
"""Create a summary file for this template layout."""
# get all pin information
pin_dict = {}
for port_name in self.port_names_iter():
pin_cnt = 0
port = self.get_port(port_name)
for pin_warr in port:
for layer_name, bbox in pin_warr.wire_iter(self.grid):
if pin_cnt == 0:
pin_name = port_name
else:
pin_name = '%s_%d' % (port_name, pin_cnt)
pin_cnt += 1
pin_dict[pin_name] = dict(
layer=[layer_name, self._layout.pin_purpose],
netname=port_name,
xy0=[bbox.left, bbox.bottom],
xy1=[bbox.right, bbox.top],
)
# get size information
bnd_box = self.bound_box
if bnd_box is None:
raise ValueError("bound_box is not set")
info = {
lib_name: {
cell_name: dict(
pins=pin_dict,
xy0=[0.0, 0.0],
xy1=[bnd_box.width, bnd_box.height],
),
},
}
with open_file(fname, 'w') as f:
yaml.dump(info, f)
    def write_to_disk(self, fname, lib_name, cell_name, debug=False):
        # type: (str, str, str, bool) -> None
        """Create a cache file for this template.

        Saves the flattened used-track database to ``fname`` and a pickled
        info dictionary (size, ports, bounding boxes, cached properties) to
        ``fname + '_info.pickle'``.

        Parameters
        ----------
        fname : str
            base file name of the cache files.
        lib_name : str
            library name recorded in the cache.
        cell_name : str
            cell name recorded in the cache.
        debug : bool
            True to print timing information.
        """
        if not self.finalized:
            raise ValueError('Cannot write non-final template to disk.')
        if debug:
            print('Writing %s to disk...' % self.__class__.__name__)
        start = time.time()
        # gather subclass-declared cached properties
        prop_dict = {key: getattr(self, key) for key in self.get_cache_properties()}
        res = self.grid.resolution
        # dump all used-track rectangles (including instance rectangles) to disk
        save_tracks = UsedTracks(fname, overwrite=True)
        for layer_id, box, dx, dy in self.all_rect_iter():
            save_tracks.record_box(layer_id, box, dx, dy, res)
        save_tracks.close()
        template_info = dict(
            lib_name=lib_name,
            cell_name=cell_name,
            size=self._size,
            port_params=self._port_params,
            prim_top_layer=self.prim_top_layer,
            prim_bound_box=self.prim_bound_box,
            array_box=self.array_box,
            properties=prop_dict,
        )
        # protocol=-1 selects the highest available pickle protocol
        with open(fname + '_info.pickle', 'wb') as f:
            pickle.dump(template_info, f, protocol=-1)
        stop = time.time()
        if debug:
            print('Writing to disk took %.4g seconds.' % (stop - start))
def merge_inst_tracks(self):
# type: () -> None
"""Flatten all rectangles from instances into the UsedTracks data structure."""
if not self._merge_used_tracks:
self._merge_used_tracks = True
res = self.grid.resolution
for inst in self._layout.inst_iter():
for layer_id, box, dx, dy in inst.all_rect_iter():
self._used_tracks.record_box(layer_id, box, dx, dy, res)
def get_pin_name(self, name):
# type: (str) -> str
"""Get the actual name of the given pin from the renaming dictionary.
Given a pin name, If this Template has a parameter called 'rename_dict',
return the actual pin name from the renaming dictionary.
Parameters
----------
name : str
the pin name.
Returns
-------
actual_name : str
the renamed pin name.
"""
rename_dict = self.params.get('rename_dict', {})
return rename_dict.get(name, name)
def get_port(self, name=''):
# type: (str) -> Port
"""Returns the port object with the given name.
Parameters
----------
name : str
the port terminal name. If None or empty, check if this template has only one port,
then return it.
Returns
-------
port : Port
the port object.
"""
if not name:
if len(self._ports) != 1:
raise ValueError('Template has %d ports != 1.' % len(self._ports))
name = next(iter(self._ports))
return self._ports[name]
def has_port(self, port_name):
# type: (str) -> bool
"""Returns True if this template has the given port."""
return port_name in self._ports
def port_names_iter(self):
# type: () -> Iterable[str]
"""Iterates over port names in this template.
Yields
------
port_name : string
name of a port in this template.
"""
return self._ports.keys()
def get_prim_port(self, name=''):
# type: (str) -> Port
"""Returns the primitive port object with the given name.
Parameters
----------
name : str
the port terminal name. If None or empty, check if this template has only one port,
then return it.
Returns
-------
port : Port
the primitive port object.
"""
if not name:
if len(self._prim_ports) != 1:
raise ValueError('Template has %d ports != 1.' % len(self._prim_ports))
name = next(iter(self._ports))
return self._prim_ports[name]
def has_prim_port(self, port_name):
# type: (str) -> bool
"""Returns True if this template has the given primitive port."""
return port_name in self._prim_ports
def prim_port_names_iter(self):
# type: () -> Iterable[str]
"""Iterates over primitive port names in this template.
Yields
------
port_name : str
name of a primitive port in this template.
"""
return self._prim_ports.keys()
def new_template(self, params=None, temp_cls=None, debug=False, **kwargs):
# type: (Dict[str, Any], Type[TemplateType], bool, **Any) -> TemplateType
"""Create a new template.
Parameters
----------
params : Dict[str, Any]
the parameter dictionary.
temp_cls : Type[TemplateType]
the template class to instantiate.
debug : bool
True to print debug messages.
**kwargs : Any
optional template parameters.
Returns
-------
template : TemplateType
the new template instance.
"""
kwargs['grid'] = self.grid
return self.template_db.new_template(params=params, temp_cls=temp_cls, debug=debug,
**kwargs)
def move_all_by(self, dx=0.0, dy=0.0, unit_mode=False):
# type: (Union[float, int], Union[float, int], bool) -> None
"""Move all layout objects Except pins in this layout by the given amount.
primitive pins will be moved, but pins on routing grid will not.
Parameters
----------
dx : Union[float, int]
the X shift.
dy : Union[float, int]
the Y shift.
unit_mode : bool
true if given shift values are in resolution units.
"""
print("WARNING: USING THIS BREAKS POWER FILL ALGORITHM.")
self._layout.move_all_by(dx=dx, dy=dy, unit_mode=unit_mode)
def add_instance(self, # type: TemplateBase
master, # type: TemplateBase
inst_name=None, # type: Optional[str]
loc=(0, 0), # type: Tuple[Union[float, int], Union[float, int]]
orient="R0", # type: str
nx=1, # type: int
ny=1, # type: int
spx=0, # type: Union[float, int]
spy=0, # type: Union[float, int]
unit_mode=False, # type: bool
):
# type: (...) -> Instance
"""Adds a new (arrayed) instance to layout.
Parameters
----------
master : TemplateBase
the master template object.
inst_name : Optional[str]
instance name. If None or an instance with this name already exists,
a generated unique name is used.
loc : Tuple[Union[float, int], Union[float, int]]
instance location.
orient : str
instance orientation. Defaults to "R0"
nx : int
number of columns. Must be positive integer.
ny : int
number of rows. Must be positive integer.
spx : Union[float, int]
column pitch. Used for arraying given instance.
spy : Union[float, int]
row pitch. Used for arraying given instance.
unit_mode : bool
True if dimensions are given in resolution units.
Returns
-------
inst : Instance
the added instance.
"""
res = self.grid.resolution
if not unit_mode:
loc = int(round(loc[0] / res)), int(round(loc[1] / res))
spx = int(round(spx / res))
spy = int(round(spy / res))
inst = Instance(self.grid, self._lib_name, master, loc=loc, orient=orient,
name=inst_name, nx=nx, ny=ny, spx=spx, spy=spy, unit_mode=True)
self._layout.add_instance(inst)
return inst
def add_instance_primitive(self, # type: TemplateBase
lib_name, # type: str
cell_name, # type: str
loc, # type: Tuple[Union[float, int], Union[float, int]]
view_name='layout', # type: str
inst_name=None, # type: Optional[str]
orient="R0", # type: str
nx=1, # type: int
ny=1, # type: int
spx=0, # type: Union[float, int]
spy=0, # type: Union[float, int]
params=None, # type: Optional[Dict[str, Any]]
unit_mode=False, # type: bool
**kwargs
):
# type: (...) -> None
"""Adds a new (arrayed) primitive instance to layout.
Parameters
----------
lib_name : str
instance library name.
cell_name : str
instance cell name.
loc : Tuple[Union[float, int], Union[float, int]]
instance location.
view_name : str
instance view name. Defaults to 'layout'.
inst_name : Optional[str]
instance name. If None or an instance with this name already exists,
a generated unique name is used.
orient : str
instance orientation. Defaults to "R0"
nx : int
number of columns. Must be positive integer.
ny : int
number of rows. Must be positive integer.
spx : Union[float, int]
column pitch. Used for arraying given instance.
spy : Union[float, int]
row pitch. Used for arraying given instance.
params : Optional[Dict[str, Any]]
the parameter dictionary. Used for adding pcell instance.
unit_mode : bool
True if distances are specified in resolution units.
**kwargs
additional arguments. Usually implementation specific.
"""
self._layout.add_instance_primitive(lib_name, cell_name, loc,
view_name=view_name, inst_name=inst_name,
orient=orient, num_rows=ny, num_cols=nx,
sp_rows=spy, sp_cols=spx,
params=params, unit_mode=unit_mode, **kwargs)
def add_rect(self, # type: TemplateBase
layer, # type: Union[str, Tuple[str, str]]
bbox, # type: Union[BBox, BBoxArray]
nx=1, # type: int
ny=1, # type: int
spx=0, # type: Union[float, int]
spy=0, # type: Union[float, int]
unit_mode=False, # type: bool
):
# type: (...) -> Rect
"""Add a new (arrayed) rectangle.
Parameters
----------
layer: Union[str, Tuple[str, str]]
the layer name, or the (layer, purpose) pair.
bbox : Union[BBox, BBoxArray]
the rectangle bounding box. If BBoxArray is given, its arraying parameters will
be used instead.
nx : int
number of columns.
ny : int
number of rows.
spx : Union[float, int]
column pitch.
spy : Union[float, int]
row pitch.
unit_mode : bool
True if spx and spy are given in resolution units.
Returns
-------
rect : Rect
the added rectangle.
"""
rect = Rect(layer, bbox, nx=nx, ny=ny, spx=spx, spy=spy, unit_mode=unit_mode)
self._layout.add_rect(rect)
self._used_tracks.record_rect(self.grid, layer, rect.bbox_array)
return rect
def add_res_metal(self, layer_id, bbox, **kwargs):
# type: (int, Union[BBox, BBoxArray], **Any) -> List[Rect]
"""Add a new metal resistor.
Parameters
----------
layer_id : int
the metal layer ID.
bbox : Union[BBox, BBoxArray]
the resistor bounding box. If BBoxArray is given, its arraying parameters will
be used instead.
**kwargs : Any
optional arguments to add_rect()
Returns
-------
rect_list : List[Rect]
list of rectangles defining the metal resistor.
"""
rect_list = []
rect_layers = self.grid.tech_info.get_res_metal_layers(layer_id)
for lay in rect_layers:
rect_list.append(self.add_rect(lay, bbox, **kwargs))
return rect_list
def add_path(self, path):
# type: (Path) -> Path
"""Add a new path.
Parameters
----------
path : Path
the path to add.
Returns
-------
path : Path
the added path object.
"""
self._layout.add_path(path)
lay_id = self.grid.tech_info.get_layer_id(path.layer[0])
res = self.grid.resolution
# record it as used tracks
points_list = path.points_unit
for pidx, [x0, y0] in enumerate(points_list[:-1]):
[x1, y1] = points_list[pidx + 1]
y_low, y_high = min(y0, y1), max(y0, y1)
x_low, x_high = min(x0, x1), max(x0, x1)
if x_low == x0:
y_xlow, y_xhigh = y0, y1
else:
y_xlow, y_xhigh = y1, y0
width_unit = int(path.width / self.grid.resolution)
w2 = math.ceil(width_unit // 2)
wr2 = math.ceil(width_unit // math.sqrt(2))
if x0 == x1:
# 1. 90 degree cases
bbox = BBox(x0 - w2, y_low - w2, x0 + w2, y_high + w2, res, unit_mode=True)
self._used_tracks.record_box(lay_id, bbox, dx=0, dy=0, res=res)
# rect = Rect(path.layer, bbox)
# self._layout.add_rect(rect)
elif y0 == y1:
# 2. 0 degree cases
bbox = BBox(x_low - w2, y0 - w2, x_high + w2, y0 + w2, res, unit_mode=True)
self._used_tracks.record_box(lay_id, bbox, dx=0, dy=0, res=res)
# rect = Rect(path.layer, bbox)
# self._layout.add_rect(rect)
elif y_xlow == y_low:
# 3. 45 degree case
x_start, x_stop = x_low - wr2, x_high + wr2
y_start, y_stop = y_low - wr2, y_high + wr2
while True:
bbox = BBox(x_start, y_start, x_start + wr2, y_start + wr2, res, unit_mode=True)
self._used_tracks.record_box(lay_id, bbox, dx=0, dy=0, res=res)
# rect = Rect(path.layer, bbox)
# self._layout.add_rect(rect)
if x_start + wr2 >= x_stop:
break
bbox = BBox(x_start, y_start + wr2, x_start + wr2, y_start + 2 * wr2, res, unit_mode=True)
self._used_tracks.record_box(lay_id, bbox, dx=0, dy=0, res=res)
# rect = Rect(path.layer, bbox)
# self._layout.add_rect(rect)
bbox = BBox(x_start + wr2, y_start, x_start + 2 * wr2, y_start + wr2, res, unit_mode=True)
self._used_tracks.record_box(lay_id, bbox, dx=0, dy=0, res=res)
# rect = Rect(path.layer, bbox)
# self._layout.add_rect(rect)
x_start += wr2
y_start += wr2
else:
# 4. 135 degree case
x_start, x_stop = x_low - wr2, x_high + wr2
y_start, y_stop = y_high + wr2, y_low - wr2
while True:
bbox = BBox(x_start, y_start - wr2, x_start + wr2, y_start, res, unit_mode=True)
self._used_tracks.record_box(lay_id, bbox, dx=0, dy=0, res=res)
# rect = Rect(path.layer, bbox)
# self._layout.add_rect(rect)
if x_start + wr2 >= x_stop:
break
bbox = BBox(x_start, y_start - 2 * wr2, x_start + wr2, y_start - wr2, res, unit_mode=True)
self._used_tracks.record_box(lay_id, bbox, dx=0, dy=0, res=res)
# rect = Rect(path.layer, bbox)
# self._layout.add_rect(rect)
bbox = BBox(x_start + wr2, y_start - wr2, x_start + 2 * wr2, y_start, res, unit_mode=True)
self._used_tracks.record_box(lay_id, bbox, dx=0, dy=0, res=res)
# rect = Rect(path.layer, bbox)
# self._layout.add_rect(rect)
x_start += wr2
y_start -= wr2
return path
def add_polygon(self, polygon):
# type: (Polygon) -> Polygon
"""Add a new polygon.
Parameters
----------
polygon : Polygon
the blockage to add.
Returns
-------
polygon : Polygon
the added blockage object.
"""
self._layout.add_polygon(polygon)
return polygon
def add_blockage(self, blockage):
# type: (Blockage) -> Blockage
"""Add a new blockage.
Parameters
----------
blockage : Blockage
the blockage to add.
Returns
-------
blockage : Blockage
the added blockage object.
"""
self._layout.add_blockage(blockage)
return blockage
def add_cell_boundary(self, box):
# type: (BBox) -> None
"""Adds a cell boundary object to the this template.
This is usually the PR boundary.
Parameters
----------
box : BBox
the cell boundary bounding box.
"""
self._grid.tech_info.add_cell_boundary(self, box)
def add_boundary(self, boundary):
# type: (Boundary) -> Boundary
"""Add a new boundary.
Parameters
----------
boundary : Boundary
the boundary to add.
Returns
-------
boundary : Boundary
the added boundary object.
"""
self._layout.add_boundary(boundary)
return boundary
def reexport(self, port, net_name='', label='', show=True):
# type: (Port, str, str, bool) -> None
"""Re-export the given port object.
Add all geometries in the given port as pins with optional new name
and label.
Parameters
----------
port : Port
the Port object to re-export.
net_name : str
the new net name. If not given, use the port's current net name.
label : str
the label. If not given, use net_name.
show : bool
True to draw the pin in layout.
"""
net_name = net_name or port.net_name
if not label:
if net_name != port.net_name:
label = net_name
else:
label = port.label
if net_name not in self._port_params:
self._port_params[net_name] = dict(label=label, pins={}, show=show)
port_params = self._port_params[net_name]
# check labels is consistent.
if port_params['label'] != label:
msg = 'Current port label = %s != specified label = %s'
raise ValueError(msg % (port_params['label'], label))
if port_params['show'] != show:
raise ValueError('Conflicting show port specification.')
# export all port geometries
port_pins = port_params['pins']
for wire_arr in port:
layer_id = wire_arr.layer_id
if layer_id not in port_pins:
port_pins[layer_id] = [wire_arr]
else:
port_pins[layer_id].append(wire_arr)
def add_pin_primitive(self, net_name, layer, bbox, label='', show=True):
# type: (str, str, BBox, str, bool) -> None
"""Add a primitive pin to the layout.
Parameters
----------
net_name : str
the net name associated with the pin.
layer : str
the pin layer name.
bbox : BBox
the pin bounding box.
label : str
the label of this pin. If None or empty, defaults to be the net_name.
this argument is used if you need the label to be different than net name
for LVS purposes. For example, unconnected pins usually need a colon after
the name to indicate that LVS should short those pins together.
show : bool
True to draw the pin in layout.
"""
label = label or net_name
if net_name in self._prim_port_params:
port_params = self._prim_port_params[net_name]
else:
port_params = self._prim_port_params[net_name] = dict(label=label, pins={}, show=show)
# check labels is consistent.
if port_params['label'] != label:
msg = 'Current port label = %s != specified label = %s'
raise ValueError(msg % (port_params['label'], label))
if port_params['show'] != show:
raise ValueError('Conflicting show port specification.')
port_pins = port_params['pins']
if layer in port_pins:
port_pins[layer].append(bbox)
else:
port_pins[layer] = [bbox]
def add_label(self, label, layer, bbox):
# type: (str, Union[str, Tuple[str, str]], BBox) -> None
"""Adds a label to the layout.
This is mainly used to add voltage text labels.
Parameters
----------
label : str
the label text.
layer : Union[str, Tuple[str, str]]
the pin layer name.
bbox : BBox
the pin bounding box.
"""
self._layout.add_label(label, layer, bbox)
def add_pin(self, net_name, wire_arr_list, label='', show=True, edge_mode=0):
    # type: (str, Union[WireArray, List[WireArray]], str, bool, int) -> None
    """Add new pin to the layout.

    If one or more pins with the same net name already exists,
    they'll be grouped under the same port.

    Parameters
    ----------
    net_name : str
        the net name associated with the pin.
    wire_arr_list : Union[WireArray, List[WireArray]]
        WireArrays representing the pin geometry.
    label : str
        the label of this pin.  If None or empty, defaults to be the net_name.
        this argument is used if you need the label to be different than net name
        for LVS purposes.  For example, unconnected pins usually need a colon after
        the name to indicate that LVS should short those pins together.
    show : bool
        if True, draw the pin in layout.
    edge_mode : int
        If <0, draw the pin on the lower end of the WireArray.  If >0, draw the pin
        on the upper end.  If 0, draw the pin on the entire WireArray.

    Raises
    ------
    ValueError
        if the given label or show flag conflicts with an existing port entry
        for the same net name.
    """
    if isinstance(wire_arr_list, WireArray):
        # normalize single WireArray to a list (dead `else: pass` removed)
        wire_arr_list = [wire_arr_list]

    label = label or net_name

    if net_name not in self._port_params:
        self._port_params[net_name] = dict(label=label, pins={}, show=show)

    port_params = self._port_params[net_name]

    # check labels is consistent.
    if port_params['label'] != label:
        msg = 'Current port label = %s != specified label = %s'
        raise ValueError(msg % (port_params['label'], label))
    if port_params['show'] != show:
        raise ValueError('Conflicting show port specification.')

    port_pins = port_params['pins']
    for warr in wire_arr_list:
        layer_id = warr.track_id.layer_id
        if edge_mode != 0:
            # draw only a short stub at the requested end of the wire;
            # stub length is capped at 6x the track width and the wire length
            cur_w = self.grid.get_track_width(layer_id, warr.track_id.width, unit_mode=True)
            wl = warr.lower_unit
            wu = warr.upper_unit
            pin_len = min(cur_w * 6, wu - wl)
            if edge_mode < 0:
                wu = wl + pin_len
            else:
                wl = wu - pin_len
            warr = WireArray(warr.track_id, wl, wu, res=self.grid.resolution, unit_mode=True)

        # group pin WireArrays by layer ID
        port_pins.setdefault(layer_id, []).append(warr)
def add_via(self,  # type: TemplateBase
            bbox,  # type: BBox
            bot_layer,  # type: Union[str, Tuple[str, str]]
            top_layer,  # type: Union[str, Tuple[str, str]]
            bot_dir,  # type: str
            nx=1,  # type: int
            ny=1,  # type: int
            spx=0.0,  # type: Union[float, int]
            spy=0.0,  # type: Union[float, int]
            extend=True,  # type: bool
            top_dir=None,  # type: Optional[str]
            unit_mode=False,  # type: bool
            ):
    # type: (...) -> Via
    """Adds a (arrayed) via object to the layout.

    Parameters
    ----------
    bbox : BBox
        the via bounding box, not including extensions.
    bot_layer : Union[str, Tuple[str, str]]
        the bottom layer name, or a tuple of layer name and purpose name.
        If purpose name not given, defaults to 'drawing'.
    top_layer : Union[str, Tuple[str, str]]
        the top layer name, or a tuple of layer name and purpose name.
        If purpose name not given, defaults to 'drawing'.
    bot_dir : str
        the bottom layer extension direction.  Either 'x' or 'y'.
    nx : int
        number of columns.
    ny : int
        number of rows.
    spx : Union[float, int]
        column pitch.
    spy : Union[float, int]
        row pitch.
    extend : bool
        True if via extension can be drawn outside of the box.
    top_dir : Optional[str]
        top layer extension direction.  Can force to extend in same direction as bottom.
    unit_mode : bool
        True if spx/spy are specified in resolution units.

    Returns
    -------
    via : Via
        the created via object.
    """
    # build the via from the technology rules, then register it with the layout
    result = Via(self.grid.tech_info, bbox, bot_layer, top_layer, bot_dir,
                 nx=nx, ny=ny, spx=spx, spy=spy, extend=extend, top_dir=top_dir,
                 unit_mode=unit_mode)
    self._layout.add_via(result)
    return result
def add_via_primitive(self, via_type,  # type: str
                      loc,  # type: Tuple[float, float]
                      num_rows=1,  # type: int
                      num_cols=1,  # type: int
                      sp_rows=0.0,  # type: float
                      sp_cols=0.0,  # type: float
                      enc1=None,  # type: Optional[List[float]]
                      enc2=None,  # type: Optional[List[float]]
                      orient='R0',  # type: str
                      cut_width=None,  # type: Optional[float]
                      cut_height=None,  # type: Optional[float]
                      nx=1,  # type: int
                      ny=1,  # type: int
                      spx=0.0,  # type: float
                      spy=0.0,  # type: float
                      unit_mode=False,  # type: bool
                      ):
    # type: (...) -> None
    """Adds a via by specifying all parameters.

    Parameters
    ----------
    via_type : str
        the via type name.
    loc : Tuple[float, float]
        the via location as a two-element tuple.
    num_rows : int
        number of via cut rows.
    num_cols : int
        number of via cut columns.
    sp_rows : float
        spacing between via cut rows.
    sp_cols : float
        spacing between via cut columns.
    enc1 : Optional[List[float]]
        a list of left, right, top, and bottom enclosure values on bottom layer.
        Defaults to all 0.
    enc2 : Optional[List[float]]
        a list of left, right, top, and bottom enclosure values on top layer.
        Defaults to all 0.
    orient : str
        orientation of the via.
    cut_width : Optional[float]
        via cut width.  This is used to create rectangle via.
    cut_height : Optional[float]
        via cut height.  This is used to create rectangle via.
    nx : int
        number of columns.
    ny : int
        number of rows.
    spx : float
        column pitch.
    spy : float
        row pitch.
    unit_mode : bool
        True if all given dimensions are in resolution units.
    """
    if unit_mode:
        # scale every dimension from resolution units to layout units
        res = self.grid.resolution
        loc = (loc[0] * res, loc[1] * res)
        sp_rows *= res
        sp_cols *= res
        enc1 = None if enc1 is None else [v * res for v in enc1]
        enc2 = None if enc2 is None else [v * res for v in enc2]
        if cut_width is not None:
            cut_width *= res
        if cut_height is not None:
            cut_height *= res
        spx *= res
        spy *= res

    # note: array parameters are renamed arr_* at the layout interface
    self._layout.add_via_primitive(via_type, loc, num_rows=num_rows, num_cols=num_cols,
                                   sp_rows=sp_rows, sp_cols=sp_cols,
                                   enc1=enc1, enc2=enc2, orient=orient,
                                   cut_width=cut_width, cut_height=cut_height,
                                   arr_nx=nx, arr_ny=ny, arr_spx=spx, arr_spy=spy)
def add_via_on_grid(self, bot_layer_id, bot_track, top_track, bot_width=1, top_width=1):
    # type: (int, Union[float, int], Union[float, int], int, int) -> Via
    """Add a via on the routing grid.

    The via box is the intersection of the bottom wire (on bot_layer_id) and
    the top wire (on bot_layer_id + 1).

    Parameters
    ----------
    bot_layer_id : int
        the bottom layer ID.
    bot_track : Union[float, int]
        the bottom track index.
    top_track : Union[float, int]
        the top track index.
    bot_width : int
        the bottom track width.
    top_width : int
        the top track width.

    Returns
    -------
    via : Via
        the created via object.
    """
    grid = self.grid
    top_layer_id = bot_layer_id + 1
    # perpendicular extents of the two wires, in resolution units
    bl, bu = tuple2_to_int(
        grid.get_wire_bounds(bot_layer_id, bot_track, width=bot_width, unit_mode=True))
    tl, tu = tuple2_to_int(
        grid.get_wire_bounds(top_layer_id, top_track, width=top_width, unit_mode=True))
    bot_dir = grid.get_direction(bot_layer_id)
    if bot_dir == 'x':
        # bottom wire horizontal: top wire bounds give x, bottom wire bounds give y
        xl, yb, xr, yt = tl, bl, tu, bu
    else:
        xl, yb, xr, yt = bl, tl, bu, tu
    via_box = BBox(xl, yb, xr, yt, grid.resolution, unit_mode=True)
    return self.add_via(via_box,
                        grid.get_layer_name(bot_layer_id, bot_track),
                        grid.get_layer_name(top_layer_id, top_track),
                        bot_dir)
def extend_wires(self,  # type: TemplateBase
                 warr_list,  # type: Union[WireArray, List[Optional[WireArray]]]
                 lower=None,  # type: Optional[Union[float, int]]
                 upper=None,  # type: Optional[Union[float, int]]
                 unit_mode=False,  # type: bool
                 min_len_mode=None,  # type: Optional[int]
                 ):
    # type: (...) -> List[Optional[WireArray]]
    """Extend the given wires to the given coordinates.

    Wires are only extended, never shrunk; a bound that falls inside the
    existing wire is ignored.

    Parameters
    ----------
    warr_list : Union[WireArray, List[Optional[WireArray]]]
        the wires to extend.
    lower : Optional[Union[float, int]]
        the wire lower coordinate.
    upper : Optional[Union[float, int]]
        the wire upper coordinate.
    unit_mode: bool
        True if lower/upper is given in resolution units.
    min_len_mode : Optional[int]
        If not None, will extend track so it satisfy minimum length requirement.
        Use -1 to extend lower bound, 1 to extend upper bound, 0 to extend both equally.

    Returns
    -------
    warr_list : List[Optional[WireArray]]
        list of added wire arrays.
        If any elements in warr_list were None, they will be None in the return.
    """
    if isinstance(warr_list, WireArray):
        # normalize single WireArray to a list (dead `else: pass` removed)
        warr_list = [warr_list]

    res = self.grid.resolution
    if not unit_mode:
        # convert bounds to resolution units
        if lower is not None:
            lower = int(round(lower / res))
        if upper is not None:
            upper = int(round(upper / res))

    new_warr_list = []  # type: List[Optional[WireArray]]
    for warr in warr_list:
        if warr is None:
            # preserve None placeholders so the output aligns with the input
            new_warr_list.append(None)
            continue

        # only extend; never shrink
        cur_lower = warr.lower_unit if lower is None else min(lower, warr.lower_unit)
        cur_upper = warr.upper_unit if upper is None else max(upper, warr.upper_unit)

        if min_len_mode is not None:
            # extend track to meet minimum length
            min_len = self.grid.get_min_length(warr.layer_id, warr.track_id.width,
                                               unit_mode=True)
            # make sure minimum length is even so that middle coordinate exists
            min_len = -(-min_len // 2) * 2
            tr_len = cur_upper - cur_lower
            if min_len > tr_len:
                ext = min_len - tr_len
                if min_len_mode < 0:
                    cur_lower -= ext
                elif min_len_mode > 0:
                    cur_upper += ext
                else:
                    # extend both ends equally (odd remainder goes to the upper end)
                    cur_lower -= ext // 2
                    cur_upper = cur_lower + min_len

        new_warr = WireArray(warr.track_id, cur_lower, cur_upper, res=res, unit_mode=True)
        # draw the extended geometry
        for layer_name, bbox_arr in new_warr.wire_arr_iter(self.grid):
            self.add_rect(layer_name, bbox_arr)

        new_warr_list.append(new_warr)

    return new_warr_list
def add_wires(self,  # type: TemplateBase
              layer_id,  # type: int
              track_idx,  # type: Union[float, int]
              lower,  # type: Union[float, int]
              upper,  # type: Union[float, int]
              width=1,  # type: int
              num=1,  # type: int
              pitch=0,  # type: Union[float, int]
              unit_mode=False  # type: bool
              ):
    # type: (...) -> WireArray
    """Add the given wire(s) to this layout.

    Parameters
    ----------
    layer_id : int
        the wire layer ID.
    track_idx : Union[float, int]
        the smallest wire track index.
    lower : Union[float, int]
        the wire lower coordinate.
    upper : Union[float, int]
        the wire upper coordinate.
    width : int
        the wire width in number of tracks.
    num : int
        number of wires.
    pitch : Union[float, int]
        the wire pitch.
    unit_mode: bool
        True if lower/upper is given in resolution units.

    Returns
    -------
    warr : WireArray
        the added WireArray object.
    """
    res = self.grid.resolution
    if not unit_mode:
        # convert physical coordinates to resolution units
        lower, upper = int(round(lower / res)), int(round(upper / res))
    track = TrackID(layer_id, track_idx, width=width, num=num, pitch=pitch)
    result = WireArray(track, lower, upper, res=res, unit_mode=True)
    # draw the actual wire rectangles
    for lay, barr in result.wire_arr_iter(self.grid):
        self.add_rect(lay, barr)
    return result
def add_res_metal_warr(self,  # type: TemplateBase
                       layer_id,  # type: int
                       track_idx,  # type: Union[float, int]
                       lower,  # type: Union[float, int]
                       upper,  # type: Union[float, int]
                       **kwargs):
    # type: (...) -> WireArray
    """Add metal resistor as WireArray to this layout.

    Parameters
    ----------
    layer_id : int
        the wire layer ID.
    track_idx : Union[float, int]
        the smallest wire track index.
    lower : Union[float, int]
        the wire lower coordinate.
    upper : Union[float, int]
        the wire upper coordinate.
    **kwargs :
        optional arguments to add_wires()

    Returns
    -------
    warr : WireArray
        the added WireArray object.
    """
    # draw the wire geometry first, then mark every segment as a metal resistor
    wire = self.add_wires(layer_id, track_idx, lower, upper, **kwargs)
    for _, box_arr in wire.wire_arr_iter(self.grid):
        self.add_res_metal(layer_id, box_arr)
    return wire
def add_mom_cap(self,  # type: TemplateBase
                cap_box,  # type: BBox
                bot_layer,  # type: int
                num_layer,  # type: int
                port_widths=1,  # type: Union[int, List[int], Dict[int, int]]
                port_parity=None,
                # type: Optional[Union[Tuple[int, int], Dict[int, Tuple[int, int]]]]
                array=False,  # type: bool
                **kwargs
                ):
    # type: (...) -> Any
    """Draw mom cap in the defined bounding box.

    Parameters
    ----------
    cap_box : BBox
        the MOM cap bounding box.
    bot_layer : int
        the bottom routing layer ID.
    num_layer : int
        number of layers the cap spans; must be at least 2.
    port_widths : Union[int, List[int], Dict[int, int]]
        port track width per layer.  A single int applies to every layer; a
        list/tuple must have num_layer entries; a dict maps layer ID to width
        (missing layers fall back to the technology default, then 1).
    port_parity : Optional[Union[Tuple[int, int], Dict[int, Tuple[int, int]]]]
        (lower, upper) port parity per layer; 0/1 select which side is the
        positive terminal.  Defaults to (0, 1) on every layer.
    array : bool
        True to place port tracks directly from the cap box edges (for arrayed
        caps) instead of snapping inward past the via extensions.
    **kwargs :
        'return_cap_wires' (bool): also return the cap wire rectangles.
        'cap_type' (str): technology cap flavor key, defaults to 'standard'.

    Returns
    -------
    ports : the port dictionary mapping layer ID to (positive, negative)
        WireArray lists; if return_cap_wires is True, a (port_dict, rect_list)
        tuple instead.
    """
    return_rect = kwargs.get('return_cap_wires', False)
    cap_type = kwargs.get('cap_type', 'standard')

    if num_layer <= 1:
        raise ValueError('Must have at least 2 layers for MOM cap.')

    res = self.grid.resolution
    tech_info = self.grid.tech_info

    # technology-specific MOM cap parameters
    mom_cap_dict = tech_info.tech_params['layout']['mom_cap'][cap_type]
    cap_margins = mom_cap_dict['margins']
    cap_info = mom_cap_dict['width_space']
    num_ports_on_edge = mom_cap_dict.get('num_ports_on_edge', {})
    port_widths_default = mom_cap_dict.get('port_widths_default', {})
    port_sp_min = mom_cap_dict.get('port_sp_min', {})

    top_layer = bot_layer + num_layer - 1

    # normalize port_widths to a per-layer dict
    if isinstance(port_widths, int):
        port_widths = {lay: port_widths for lay in range(bot_layer, top_layer + 1)}
    elif isinstance(port_widths, list) or isinstance(port_widths, tuple):
        if len(port_widths) != num_layer:
            raise ValueError('port_widths length != %d' % num_layer)
        port_widths = dict(zip(range(bot_layer, top_layer + 1), port_widths))
    else:
        port_widths = {lay: port_widths.get(lay, port_widths_default.get(lay, 1))
                       for lay in range(bot_layer, top_layer + 1)}

    # normalize port_parity to a per-layer dict
    if port_parity is None:
        port_parity = {lay: (0, 1) for lay in range(bot_layer, top_layer + 1)}
    elif isinstance(port_parity, tuple) or isinstance(port_parity, list):
        if len(port_parity) != 2:
            raise ValueError('port parity should be a tuple/list of 2 elements.')
        port_parity = {lay: port_parity for lay in range(bot_layer, top_layer + 1)}
    else:
        port_parity = {lay: port_parity.get(lay, (0, 1)) for lay in
                       range(bot_layer, top_layer + 1)}

    via_ext_dict = {lay: 0 for lay in range(bot_layer, top_layer + 1)}  # type: Dict[int, int]
    # get via extensions on each layer
    for vbot_layer in range(bot_layer, top_layer):
        vtop_layer = vbot_layer + 1
        bport_w = int(
            self.grid.get_track_width(vbot_layer, port_widths[vbot_layer], unit_mode=True))
        tport_w = int(
            self.grid.get_track_width(vtop_layer, port_widths[vtop_layer], unit_mode=True))
        bcap_w = int(round(cap_info[vbot_layer][0] / res))
        tcap_w = int(round(cap_info[vtop_layer][0] / res))

        # port-to-port via
        vbext1, vtext1 = tuple2_to_int(
            self.grid.get_via_extensions_dim(vbot_layer, bport_w, tport_w,
                                             unit_mode=True))
        # cap-to-port via
        vbext2 = int(self.grid.get_via_extensions_dim(vbot_layer, bcap_w, tport_w,
                                                      unit_mode=True)[0])
        # port-to-cap via
        vtext2 = int(self.grid.get_via_extensions_dim(vbot_layer, bport_w, tcap_w,
                                                      unit_mode=True)[1])

        # record extension due to via (worst case over the via flavors above)
        via_ext_dict[vbot_layer] = max(via_ext_dict[vbot_layer], vbext1, vbext2)
        via_ext_dict[vtop_layer] = max(via_ext_dict[vtop_layer], vtext1, vtext2)

    # find port locations and cap boundaries.
    port_tracks = {}
    cap_bounds = {}
    cap_exts = {}
    for cur_layer in range(bot_layer, top_layer + 1):
        # mark bounding box as used.
        self.mark_bbox_used(cur_layer, cap_box)

        cur_num_ports = num_ports_on_edge.get(cur_layer, 1)
        cur_port_width = port_widths[cur_layer]
        cur_port_space = self.grid.get_num_space_tracks(cur_layer, cur_port_width,
                                                        half_space=True)
        # pick the coordinate axis perpendicular to this layer's track direction
        if self.grid.get_direction(cur_layer) == 'x':
            cur_lower, cur_upper = cap_box.bottom_unit, cap_box.top_unit
        else:
            cur_lower, cur_upper = cap_box.left_unit, cap_box.right_unit
        # make sure adjacent layer via extension will not extend outside of cap bounding box.
        adj_via_ext = 0
        if cur_layer != bot_layer:
            adj_via_ext = via_ext_dict[cur_layer - 1]
        if cur_layer != top_layer:
            adj_via_ext = max(adj_via_ext, via_ext_dict[cur_layer + 1])
        # find track indices
        if array:
            tr_lower = self.grid.coord_to_track(cur_layer, cur_lower, unit_mode=True)
            tr_upper = self.grid.coord_to_track(cur_layer, cur_upper, unit_mode=True)
        else:
            tr_lower = self.grid.find_next_track(cur_layer, cur_lower + adj_via_ext,
                                                 tr_width=cur_port_width,
                                                 half_track=True, mode=1, unit_mode=True)
            tr_upper = self.grid.find_next_track(cur_layer, cur_upper - adj_via_ext,
                                                 tr_width=cur_port_width,
                                                 half_track=True, mode=-1, unit_mode=True)

        port_delta = cur_port_width + max(port_sp_min.get(cur_layer, 0), cur_port_space)
        if tr_lower + 2 * (cur_num_ports - 1) * port_delta >= tr_upper:
            raise ValueError('Cannot draw MOM cap; area too small.')

        # physical bounds of the innermost/outermost port wires on each edge
        ll0, lu0 = tuple2_to_int(
            self.grid.get_wire_bounds(cur_layer, tr_lower, width=cur_port_width,
                                      unit_mode=True))
        tmp = self.grid.get_wire_bounds(cur_layer,
                                        tr_lower + (cur_num_ports - 1) * port_delta,
                                        width=cur_port_width,
                                        unit_mode=True)
        ll1, lu1 = tuple2_to_int(tmp)
        tmp = self.grid.get_wire_bounds(cur_layer,
                                        tr_upper - (cur_num_ports - 1) * port_delta,
                                        width=cur_port_width,
                                        unit_mode=True)
        ul0, uu0 = tuple2_to_int(tmp)
        ul1, uu1 = tuple2_to_int(self.grid.get_wire_bounds(cur_layer, tr_upper,
                                                           width=cur_port_width,
                                                           unit_mode=True))

        # compute space from MOM cap wires to port wires
        port_w = lu0 - ll0
        lay_name = tech_info.get_layer_name(cur_layer)
        if isinstance(lay_name, tuple) or isinstance(lay_name, list):
            lay_name = lay_name[0]
        lay_type = tech_info.get_layer_type(lay_name)
        cur_margin = int(round(cap_margins[cur_layer] / res))
        cur_margin = max(cur_margin, tech_info.get_min_space(lay_type, port_w, unit_mode=True))

        lower_tracks = [tr_lower + idx * port_delta for idx in range(cur_num_ports)]
        upper_tracks = [tr_upper - idx * port_delta for idx in range(cur_num_ports - 1, -1, -1)]

        port_tracks[cur_layer] = (lower_tracks, upper_tracks)
        cap_bounds[cur_layer] = (lu1 + cur_margin, ul0 - cur_margin)
        cap_exts[cur_layer] = (ll0, uu1)

    port_dict = {}
    cap_wire_dict = {}
    # draw ports/wires
    for cur_layer in range(bot_layer, top_layer + 1):
        cur_port_width = port_widths[cur_layer]
        # find port/cap wires lower/upper coordinates from the adjacent layers
        lower, upper = None, None
        if cur_layer != top_layer:
            lower, upper = cap_exts[cur_layer + 1]
        if cur_layer != bot_layer:
            tmpl, tmpu = cap_exts[cur_layer - 1]
            lower = tmpl if lower is None else min(lower, tmpl)
            upper = tmpu if upper is None else max(upper, tmpu)
        assert lower is not None and upper is not None, \
            ('cur_layer is iterating and should never be equal '
             'to both bot_layer and top_layer at the same time')

        via_ext = via_ext_dict[cur_layer]
        lower -= via_ext
        upper += via_ext

        # draw lower and upper ports
        lower_tracks, upper_tracks = port_tracks[cur_layer]
        lower_warrs = [self.add_wires(cur_layer, tr_idx, lower, upper, width=cur_port_width,
                                      unit_mode=True)
                       for tr_idx in lower_tracks]
        upper_warrs = [self.add_wires(cur_layer, tr_idx, lower, upper, width=cur_port_width,
                                      unit_mode=True)
                       for tr_idx in upper_tracks]

        # assign port wires to positive/negative terminals
        lpar, upar = port_parity[cur_layer]
        if lpar == upar:
            raise ValueError('Port parity must be different.')
        elif lpar == 0:
            plist = upper_warrs
            nlist = lower_warrs
        else:
            plist = lower_warrs
            nlist = upper_warrs

        port_dict[cur_layer] = plist, nlist
        if cur_layer != bot_layer:
            # connect ports to layer below
            for clist, blist in zip((plist, nlist), port_dict[cur_layer - 1]):
                # pairwise when counts match, otherwise all-to-all
                if len(clist) == len(blist):
                    iter_list = zip(clist, blist)
                else:
                    iter_list = product(clist, blist)

                for cur_warr, bot_warr in iter_list:
                    cur_tid = cur_warr.track_id.base_index
                    cur_w = cur_warr.track_id.width
                    bot_tid = bot_warr.track_id.base_index
                    bot_w = bot_warr.track_id.width
                    self.add_via_on_grid(cur_layer - 1, bot_tid, cur_tid, bot_width=bot_w,
                                         top_width=cur_w)

        # draw cap wires
        cap_lower, cap_upper = cap_bounds[cur_layer]
        cap_tot_space = cap_upper - cap_lower
        cap_w, cap_sp = cap_info[cur_layer]
        cap_w = int(round(cap_w / res))
        cap_sp = int(round(cap_sp / res))
        cap_pitch = cap_w + cap_sp
        num_cap_wires = cap_tot_space // cap_pitch
        # center the cap wires within the available space
        cap_lower += (cap_tot_space - (num_cap_wires * cap_pitch - cap_sp)) // 2

        is_horizontal = (self.grid.get_direction(cur_layer) == 'x')
        if is_horizontal:
            wbox = BBox(lower, cap_lower, upper, cap_lower + cap_w, res, unit_mode=True)
        else:
            wbox = BBox(cap_lower, lower, cap_lower + cap_w, upper, res, unit_mode=True)

        lay_name_list = tech_info.get_layer_name(cur_layer)
        if isinstance(lay_name_list, str):
            lay_name_list = [lay_name_list]

        # save cap wire information
        cur_rect_box = wbox
        cap_wire_dict[cur_layer] = (lpar, lay_name_list, cur_rect_box, num_cap_wires, cap_pitch)

    # draw cap wires and connect to port
    rect_list = []
    for cur_layer in range(bot_layer, top_layer + 1):
        cur_rect_list = []
        lpar, lay_name_list, cap_base_box, num_cap_wires, cap_pitch = cap_wire_dict[cur_layer]
        if cur_layer == bot_layer:
            prev_plist = prev_nlist = None
        else:
            prev_plist, prev_nlist = port_dict[cur_layer - 1]
        if cur_layer == top_layer:
            next_plist = next_nlist = None
        else:
            next_plist, next_nlist = port_dict[cur_layer + 1]

        cur_dir = self.grid.get_direction(cur_layer)
        is_horizontal = (cur_dir == 'x')
        next_dir = 'y' if is_horizontal else 'x'
        num_lay_names = len(lay_name_list)
        p_lists = (prev_plist, next_plist)
        n_lists = (prev_nlist, next_nlist)

        for idx in range(num_cap_wires):
            # figure out the port wire to connect this cap wire to
            # (alternating wires go to alternating terminals, offset by parity)
            if idx % 2 == 0 and lpar == 0 or idx % 2 == 1 and lpar == 1:
                ports_list = p_lists
            else:
                ports_list = n_lists

            # draw the cap wire
            cap_lay_name = lay_name_list[idx % num_lay_names]
            if is_horizontal:
                cap_box = cap_base_box.move_by(dy=cap_pitch * idx, unit_mode=True)
            else:
                cap_box = cap_base_box.move_by(dx=cap_pitch * idx, unit_mode=True)
            rect = self.add_rect(cap_lay_name, cap_box)
            cur_rect_list.append(rect)

            # connect cap wire to port
            for pidx, port in enumerate(ports_list):
                if port is not None:
                    port_warr = port[(idx // 2) % len(port)]
                    port_lay_name = self.grid.get_layer_name(port_warr.layer_id,
                                                             port_warr.track_id.base_index)
                    vbox = cap_box.intersect(port_warr.get_bbox_array(self.grid).base)
                    # pidx == 1 means the port is on the layer above this cap wire
                    if pidx == 1:
                        self.add_via(vbox, cap_lay_name, port_lay_name, cur_dir)
                    else:
                        self.add_via(vbox, port_lay_name, cap_lay_name, next_dir)

        rect_list.append(cur_rect_list)

    if return_rect:
        return port_dict, rect_list
    else:
        return port_dict
def reserve_tracks(self,  # type: TemplateBase
                   layer_id,  # type: int
                   track_idx,  # type: Union[float, int]
                   width=1,  # type: int
                   num=1,  # type: int
                   pitch=0,  # type: Union[float, int]
                   ):
    # type: (...) -> None
    """Reserve the given routing tracks so that power fill will not fill these tracks.

    Note: the size of this template should be set before calling this method.

    Parameters
    ----------
    layer_id : int
        the wire layer ID.
    track_idx : Union[float, int]
        the smallest wire track index.
    width : int
        the wire width in number of tracks.
    num : int
        number of wires.
    pitch : Union[float, int]
        the wire pitch.
    """
    box = self.bound_box
    if box is None:
        raise ValueError("bound_box is not set")

    # span the full template extent along the track direction
    is_horiz = self.grid.get_direction(layer_id) == 'x'
    span = box.width_unit if is_horiz else box.height_unit

    tid = TrackID(layer_id, track_idx, width=width, num=num, pitch=pitch)
    reserved = WireArray(tid, 0, span, res=self.grid.resolution, unit_mode=True)
    lay_name = self.grid.get_layer_name(layer_id, track_idx)
    # record the geometry as used without drawing anything
    self._used_tracks.record_rect(self.grid, lay_name, reserved.get_bbox_array(self.grid))
def connect_wires(self,  # type: TemplateBase
                  wire_arr_list,  # type: Union[WireArray, List[WireArray]]
                  lower=None,  # type: Optional[Union[int, float]]
                  upper=None,  # type: Optional[Union[int, float]]
                  debug=False,  # type: bool
                  unit_mode=False,  # type: bool
                  ):
    # type: (...) -> List[WireArray]
    """Connect all given WireArrays together.

    all WireArrays must be on the same layer.

    Parameters
    ----------
    wire_arr_list : Union[WireArr, List[WireArr]]
        WireArrays to connect together.
    lower : Optional[Union[int, float]]
        if given, extend connection wires to this lower coordinate.
    upper : Optional[Union[int, float]]
        if given, extend connection wires to this upper coordinate.
    debug : bool
        True to print debug messages.
    unit_mode: bool
        True if lower/upper is given in resolution units.

    Returns
    -------
    conn_list : List[WireArray]
        list of connection wires created.
    """
    grid = self.grid
    res = grid.resolution
    # normalize lower/upper to integer resolution units
    if not unit_mode:
        if lower is not None:
            lower = int(round(lower / res))
        if upper is not None:
            upper = int(round(upper / res))
    else:
        if lower is not None:
            lower = int(lower)
        if upper is not None:
            upper = int(upper)
    if isinstance(wire_arr_list, WireArray):
        wire_arr_list = [wire_arr_list]
    else:
        pass

    if not wire_arr_list:
        # do nothing
        return []

    # record all wire ranges
    a = wire_arr_list[0]
    layer_id = a.layer_id
    direction = grid.get_direction(layer_id)
    is_horiz = direction == 'x'
    perp_dir = 'y' if direction == 'x' else 'x'
    # half track pitch, so half-track pitches stay integral
    htr_pitch = int(grid.get_track_pitch(layer_id, unit_mode=True)) // 2
    # maps perpendicular wire interval -> IntervalSet of coordinate ranges
    intv_set = IntervalSet()
    for wire_arr in wire_arr_list:
        if wire_arr.layer_id != layer_id:
            raise ValueError('WireArray layer ID != %d' % layer_id)

        cur_range = wire_arr.lower_unit, wire_arr.upper_unit
        box_arr = wire_arr.get_bbox_array(grid)
        for box in box_arr:
            intv = tuple2_to_int(box.get_interval(perp_dir, unit_mode=True))
            intv_rang_item = intv_set.get_first_overlap_item(intv)
            if intv_rang_item is None:
                # new track: start a fresh range set for it
                range_set = IntervalSet()
                range_set.add(cur_range)
                intv_set.add(intv, val=range_set)
            elif intv_rang_item[0] == intv:
                # same track: merge/abut this wire's range into the set
                intv_rang_item[1].add(cur_range, merge=True, abut=True)
            else:
                # partial overlap on different-width wires is not supported
                raise ValueError('wire interval {} overlap existing wires.'.format(intv))

    # draw wires, group into arrays
    new_warr_list = []
    # state for grouping consecutive identical wires into one WireArray:
    base_start = None  # type: Optional[int]
    base_end = None  # type: Optional[int]
    base_intv = None  # type: Optional[Tuple[int, int]]
    base_width = None  # type: Optional[int]
    count = 0
    hpitch = 0
    last_lower = 0
    for intv, range_set in intv_set.items():
        cur_start = range_set.get_start()  # type: int
        cur_end = range_set.get_end()  # type: int
        # draw a new rectangle only if wires were merged or extended
        add = len(range_set) > 1
        if lower is not None and lower < cur_start:
            cur_start = lower
            add = True
        if upper is not None and upper > cur_end:
            cur_end = upper
            add = True

        cur_lower, cur_upper = intv
        if add:
            tr_id = grid.coord_to_track(layer_id, (cur_lower + cur_upper) // 2, unit_mode=True)
            layer_name = grid.get_layer_name(layer_id, tr_id)
            if is_horiz:
                box = BBox(cur_start, cur_lower, cur_end, cur_upper, res, unit_mode=True)
            else:
                box = BBox(cur_lower, cur_start, cur_upper, cur_end, res, unit_mode=True)
            self.add_rect(layer_name, box)

        if debug:
            print('wires intv: %s, range: (%d, %d)' % (intv, cur_start, cur_end))
        cur_width = cur_upper - cur_lower
        if count == 0:
            # first wire of a new group
            base_intv = intv
            base_start = cur_start
            base_end = cur_end
            base_width = cur_upper - cur_lower
            count += 1
            hpitch = 0
        else:
            assert base_intv is not None, "count == 0 should have set base_intv"
            assert base_width is not None, "count == 0 should have set base_width"
            assert base_start is not None, "count == 0 should have set base_start"
            assert base_end is not None, "count == 0 should have set base_end"
            if cur_start == base_start and cur_end == base_end and base_width == cur_width:
                # length and width matches
                cur_hpitch = (cur_lower - last_lower) // htr_pitch
                if count == 1:
                    # second wire, set half pitch
                    hpitch = cur_hpitch
                    count += 1
                elif hpitch == cur_hpitch:
                    # pitch matches
                    count += 1
                else:
                    # pitch does not match, add current wires and start anew
                    tr_idx, tr_width = tuple2_to_float_int(
                        grid.interval_to_track(layer_id, base_intv,
                                               unit_mode=True))
                    track_id = TrackID(layer_id, tr_idx, width=tr_width,
                                       num=count, pitch=hpitch / 2)
                    warr = WireArray(track_id, base_start, base_end, res=res, unit_mode=True)
                    new_warr_list.append(warr)
                    base_intv = intv
                    count = 1
                    hpitch = 0
            else:
                # length/width does not match, add cumulated wires and start anew
                tr_idx, tr_width = tuple2_to_float_int(
                    grid.interval_to_track(layer_id, base_intv, unit_mode=True))
                track_id = TrackID(layer_id, tr_idx, width=tr_width,
                                   num=count, pitch=hpitch / 2)
                warr = WireArray(track_id, base_start, base_end, res=res, unit_mode=True)
                new_warr_list.append(warr)
                base_start = cur_start
                base_end = cur_end
                base_intv = intv
                base_width = cur_width
                count = 1
                hpitch = 0

        # update last lower coordinate
        last_lower = cur_lower

    assert base_intv is not None, "count == 0 should have set base_intv"
    assert base_start is not None, "count == 0 should have set base_start"
    assert base_end is not None, "count == 0 should have set base_end"

    # add last wires
    tr_idx, tr_width = tuple2_to_float_int(
        grid.interval_to_track(layer_id, base_intv, unit_mode=True))
    track_id = TrackID(layer_id, tr_idx, tr_width, num=count, pitch=hpitch / 2)
    warr = WireArray(track_id, base_start, base_end, res=res, unit_mode=True)
    new_warr_list.append(warr)
    return new_warr_list
def _draw_via_on_track(self, wlayer, box_arr, track_id, tl_unit=None,
                       tu_unit=None):
    # type: (str, BBoxArray, TrackID, Optional[float], Optional[float]) -> Tuple[float, float]
    """Helper method.  Draw vias on the intersection of the BBoxArray and TrackID.

    Parameters
    ----------
    wlayer : str
        the primitive wire layer name.
    box_arr : BBoxArray
        the primitive wire geometry.
    track_id : TrackID
        the track(s) to drop vias onto.
    tl_unit : Optional[float]
        if given, seed value for the returned lower track coordinate.
    tu_unit : Optional[float]
        if given, seed value for the returned upper track coordinate.

    Returns
    -------
    tl_unit : the lower track coordinate covering all via extensions.
    tu_unit : the upper track coordinate covering all via extensions.
    """
    grid = self.grid
    res = grid.resolution

    tr_layer_id = track_id.layer_id
    tr_width = track_id.width
    tr_dir = grid.get_direction(tr_layer_id)
    tr_pitch = grid.get_track_pitch(tr_layer_id)
    w_layer_id = grid.tech_info.get_layer_id(wlayer)
    w_dir = 'x' if tr_dir == 'y' else 'y'
    wbase = box_arr.base
    for sub_track_id in track_id.sub_tracks_iter(grid):
        base_idx = sub_track_id.base_index
        # orient via bottom/top by which layer is lower in the stack
        if w_layer_id > tr_layer_id:
            bot_layer = grid.get_layer_name(tr_layer_id, base_idx)
            top_layer = wlayer
            bot_dir = tr_dir
        else:
            bot_layer = wlayer
            top_layer = grid.get_layer_name(tr_layer_id, base_idx)
            bot_dir = w_dir
        # compute via bounding box
        tl, tu = tuple2_to_int(
            grid.get_wire_bounds(tr_layer_id, base_idx, width=tr_width, unit_mode=True))
        if tr_dir == 'x':
            # horizontal track: wire sets x extent, track sets y extent
            via_box = BBox(wbase.left_unit, tl, wbase.right_unit, tu, res, unit_mode=True)
            nx, ny = box_arr.nx, sub_track_id.num
            spx, spy = box_arr.spx, sub_track_id.pitch * tr_pitch
            via = self.add_via(via_box, bot_layer, top_layer, bot_dir,
                               nx=nx, ny=ny, spx=spx, spy=spy)
            # box of the via metal on the track layer (including extension)
            vtbox = via.bottom_box if w_layer_id > tr_layer_id else via.top_box
            if tl_unit is None:
                tl_unit = vtbox.left_unit
            else:
                tl_unit = min(tl_unit, vtbox.left_unit)
            if tu_unit is None:
                tu_unit = vtbox.right_unit + (nx - 1) * box_arr.spx_unit
            else:
                tu_unit = max(tu_unit, vtbox.right_unit + (nx - 1) * box_arr.spx_unit)
        else:
            # vertical track: track sets x extent, wire sets y extent
            via_box = BBox(tl, wbase.bottom_unit, tu, wbase.top_unit, res, unit_mode=True)
            nx, ny = sub_track_id.num, box_arr.ny
            spx, spy = sub_track_id.pitch * tr_pitch, box_arr.spy
            via = self.add_via(via_box, bot_layer, top_layer, bot_dir,
                               nx=nx, ny=ny, spx=spx, spy=spy)
            vtbox = via.bottom_box if w_layer_id > tr_layer_id else via.top_box
            if tl_unit is None:
                tl_unit = vtbox.bottom_unit
            else:
                tl_unit = min(tl_unit, vtbox.bottom_unit)
            if tu_unit is None:
                tu_unit = vtbox.top_unit + (ny - 1) * box_arr.spy_unit
            else:
                tu_unit = max(tu_unit, vtbox.top_unit + (ny - 1) * box_arr.spy_unit)

    assert tl_unit is not None and tu_unit is not None, \
        "for loop should have assigned tl_unit and tu_unit"
    return tl_unit, tu_unit
def connect_bbox_to_tracks(self,  # type: TemplateBase
                           layer_name,  # type: str
                           box_arr,  # type: Union[BBox, BBoxArray]
                           track_id,  # type: TrackID
                           track_lower=None,  # type: Optional[Union[int, float]]
                           track_upper=None,  # type: Optional[Union[int, float]]
                           unit_mode=False,  # type: bool
                           min_len_mode=None,  # type: Optional[int]
                           wire_lower=None,  # type: Optional[Union[float, int]]
                           wire_upper=None,  # type: Optional[Union[float, int]]
                           ):
    # type: (...) -> WireArray
    """Connect the given primitive wire to given tracks.

    Parameters
    ----------
    layer_name : str
        the primitive wire layer name.
    box_arr : Union[BBox, BBoxArray]
        bounding box of the wire(s) to connect to tracks.
    track_id : TrackID
        TrackID that specifies the track(s) to connect the given wires to.
    track_lower : Optional[Union[int, float]]
        if given, extend track(s) to this lower coordinate.
    track_upper : Optional[Union[int, float]]
        if given, extend track(s) to this upper coordinate.
    unit_mode: bool
        True if track_lower/track_upper/wire_lower/wire_upper is given in
        resolution units.
    min_len_mode : Optional[int]
        If not None, will extend track so it satisfy minimum length requirement.
        Use -1 to extend lower bound, 1 to extend upper bound, 0 to extend both equally.
    wire_lower : Optional[Union[float, int]]
        if given, extend wire(s) to this lower coordinate.
    wire_upper : Optional[Union[float, int]]
        if given, extend wire(s) to this upper coordinate.

    Returns
    -------
    wire_arr : WireArray
        WireArray representing the tracks created.
    """
    if isinstance(box_arr, BBox):
        # normalize to a 1x1 BBoxArray (dead `else: pass` removed)
        box_arr = BBoxArray(box_arr)

    grid = self.grid
    res = grid.resolution
    if not unit_mode:
        # convert all optional coordinates to resolution units
        if track_lower is not None:
            track_lower = int(round(track_lower / res))
        if track_upper is not None:
            track_upper = int(round(track_upper / res))
        if wire_lower is not None:
            wire_lower = int(round(wire_lower / res))
        if wire_upper is not None:
            wire_upper = int(round(wire_upper / res))

    # extend bounding boxes to tracks
    tl, tu = track_id.get_bounds(grid, unit_mode=True)
    if wire_lower is not None:
        tl = min(wire_lower, tl)
    if wire_upper is not None:
        tu = max(wire_upper, tu)

    tr_layer = track_id.layer_id
    tr_dir = grid.get_direction(tr_layer)
    base = box_arr.base
    if tr_dir == 'x':
        self.add_rect(layer_name,
                      base.extend(y=tl, unit_mode=True).extend(y=tu, unit_mode=True),
                      nx=box_arr.nx, ny=box_arr.ny, spx=box_arr.spx, spy=box_arr.spy)
    else:
        self.add_rect(layer_name,
                      base.extend(x=tl, unit_mode=True).extend(x=tu, unit_mode=True),
                      nx=box_arr.nx, ny=box_arr.ny, spx=box_arr.spx, spy=box_arr.spy)

    # draw vias
    tl_unit, tu_unit = self._draw_via_on_track(layer_name, box_arr, track_id,
                                               tl_unit=track_lower, tu_unit=track_upper)

    # draw tracks
    if min_len_mode is not None:
        # extend track to meet minimum length
        min_len = grid.get_min_length(tr_layer, track_id.width, unit_mode=True)
        # make sure minimum length is even so that middle coordinate exists
        min_len = -(-min_len // 2) * 2
        tr_len = tu_unit - tl_unit
        if min_len > tr_len:
            ext = min_len - tr_len
            if min_len_mode < 0:
                tl_unit -= ext
            elif min_len_mode > 0:
                tu_unit += ext
            else:
                tl_unit -= ext // 2
                tu_unit = tl_unit + min_len

    result = WireArray(track_id, tl_unit, tu_unit, res=res, unit_mode=True)
    # renamed loop variable: the original shadowed the layer_name parameter
    for track_lay, bbox_arr in result.wire_arr_iter(grid):
        self.add_rect(track_lay, bbox_arr)
    return result
def connect_bbox_to_differential_tracks(self, # type: TemplateBase
layer_name, # type: str
pbox, # type: Union[BBox, BBoxArray]
nbox, # type: Union[BBox, BBoxArray]
tr_layer_id, # type: int
ptr_idx, # type: Union[int, float]
ntr_idx, # type: Union[int, float]
width=1, # type: int
track_lower=None, # type: Optional[Union[float, int]]
track_upper=None, # type: Optional[Union[float, int]]
unit_mode=False, # type: bool
):
# type: (...) -> Tuple[Optional[WireArray], Optional[WireArray]]
"""Connect the given differential primitive wires to two tracks symmetrically.
This method makes sure the connections are symmetric and have identical parasitics.
Parameters
----------
layer_name : str
the primitive wire layer name.
pbox : Union[BBox, BBoxArray]
positive signal wires to connect.
nbox : Union[BBox, BBoxArray]
negative signal wires to connect.
tr_layer_id : int
track layer ID.
ptr_idx : Union[int, float]
positive track index.
ntr_idx : Union[int, float]
negative track index.
width : int
track width in number of tracks.
track_lower : Optional[Union[float, int]]
if given, extend track(s) to this lower coordinate.
track_upper : Optional[Union[float, int]]
if given, extend track(s) to this upper coordinate.
unit_mode: bool
True if track_lower/track_upper/fill_margin is given in resolution units.
Returns
-------
p_track : Optional[WireArray]
the positive track.
n_track : Optional[WireArray]
the negative track.
"""
track_list = self.connect_bbox_to_matching_tracks(layer_name, [pbox, nbox], tr_layer_id,
[ptr_idx, ntr_idx], width=width,
track_lower=track_lower,
track_upper=track_upper,
unit_mode=unit_mode)
return track_list[0], track_list[1]
    def connect_bbox_to_matching_tracks(self,  # type: TemplateBase
                                        layer_name,  # type: str
                                        box_arr_list,  # type: List[Union[BBox, BBoxArray]]
                                        tr_layer_id,  # type: int
                                        tr_idx_list,  # type: List[Union[int, float]]
                                        width=1,  # type: int
                                        track_lower=None,  # type: Optional[Union[int, float]]
                                        track_upper=None,  # type: Optional[Union[int, float]]
                                        unit_mode=False  # type: bool
                                        ):
        # type: (...) -> List[Optional[WireArray]]
        """Connect the given primitive wires to the given tracks.

        All primitive wires are extended to a common bound so that the
        connections match, then one track is drawn per track index.

        Parameters
        ----------
        layer_name : str
            the primitive wire layer name.  Must be on a layer adjacent to the
            track layer.
        box_arr_list : List[Union[BBox, BBoxArray]]
            bounding box of the wire(s) to connect to tracks, one per track index.
        tr_layer_id : int
            track layer ID.
        tr_idx_list : List[Union[int, float]]
            list of track indices.
        width : int
            track width in number of tracks.
        track_lower : Optional[Union[int, float]]
            if given, extend track(s) to this lower coordinate.
        track_upper : Optional[Union[int, float]]
            if given, extend track(s) to this upper coordinate.
        unit_mode: bool
            True if track_lower/track_upper is given in resolution units.

        Returns
        -------
        track_list : List[Optional[WireArray]]
            list of WireArrays representing the tracks created, one per track index.

        Raises
        ------
        ValueError
            if the two input lists have mismatched lengths, are empty, or the
            primitive wire layer is not adjacent to the track layer.
        """
        grid = self.grid
        res = grid.resolution
        # normalize optional coordinates to resolution units
        if not unit_mode:
            if track_lower is not None:
                track_lower = int(round(track_lower / res))
            if track_upper is not None:
                track_upper = int(round(track_upper / res))
        # input validation
        num_tracks = len(tr_idx_list)
        if num_tracks != len(box_arr_list):
            raise ValueError('wire list length and track index list length mismatch.')
        if num_tracks == 0:
            raise ValueError('No tracks given')
        w_layer_id = grid.tech_info.get_layer_id(layer_name)
        if abs(w_layer_id - tr_layer_id) != 1:
            raise ValueError('Given primitive wires not adjacent to given track layer.')
        bot_layer_id = min(w_layer_id, tr_layer_id)
        # compute wire_lower/upper without via extension:
        # union of the wire bounds of all given track indices.
        w_lower, w_upper = tuple2_to_int(
            grid.get_wire_bounds(tr_layer_id, tr_idx_list[0], width=width,
                                 unit_mode=True))
        for tr_idx in islice(tr_idx_list, 1, None):
            cur_low, cur_up = tuple2_to_int(grid.get_wire_bounds(tr_layer_id, tr_idx, width=width,
                                                                 unit_mode=True))
            w_lower = min(w_lower, cur_low)
            w_upper = max(w_upper, cur_up)
        # separate wire arrays into bottom/top tracks, compute wire/track lower/upper coordinates
        tr_width = grid.get_track_width(tr_layer_id, width, unit_mode=True)
        tr_dir = grid.get_direction(tr_layer_id)
        tr_horizontal = tr_dir == 'x'
        # running union of primitive-wire bounds including via extensions
        bbox_bounds = (None, None)  # type: Tuple[Optional[int], Optional[int]]
        for idx, box_arr in enumerate(box_arr_list):
            # convert to WireArray list
            if isinstance(box_arr, BBox):
                box_arr = BBoxArray(box_arr)
            else:
                pass
            base = box_arr.base
            # get via extensions; argument order of get_via_extensions_dim is
            # (bottom, top), so the unpack order depends on which layer is lower.
            if w_layer_id < tr_layer_id:
                bot_dim = base.width_unit if tr_horizontal else base.height_unit
                top_dim = tr_width
                w_ext, tr_ext = tuple2_to_int(
                    grid.get_via_extensions_dim(bot_layer_id, bot_dim, top_dim,
                                                unit_mode=True))
            else:
                bot_dim = tr_width
                top_dim = base.width_unit if tr_horizontal else base.height_unit
                tr_ext, w_ext = tuple2_to_int(
                    grid.get_via_extensions_dim(bot_layer_id, bot_dim, top_dim,
                                                unit_mode=True))
            # grow the common primitive-wire bounds by the via extension
            if bbox_bounds[0] is None:
                bbox_bounds = (w_lower - w_ext, w_upper + w_ext)
            else:
                bbox_bounds = (
                    min(bbox_bounds[0], w_lower - w_ext), max(bbox_bounds[1], w_upper + w_ext))
            # compute track lower/upper including via extension
            tr_bounds = tuple2_to_int(
                box_arr.get_overall_bbox().get_interval(tr_dir, unit_mode=True))
            if track_lower is None:
                track_lower = tr_bounds[0] - tr_ext
            else:
                track_lower = min(track_lower, tr_bounds[0] - tr_ext)
            if track_upper is None:
                track_upper = tr_bounds[1] + tr_ext
            else:
                track_upper = max(track_upper, tr_bounds[1] + tr_ext)
        assert track_lower is not None and track_upper is not None, \
            "track_lower/track_upper should be set above"
        # draw tracks, then connect each primitive wire to its track with the
        # shared bbox_bounds so all connections are identical.
        track_list = []  # type: List[Optional[WireArray]]
        for box_arr, tr_idx in zip(box_arr_list, tr_idx_list):
            track_list.append(self.add_wires(tr_layer_id, tr_idx, track_lower, track_upper,
                                             width=width, unit_mode=True))
            tr_id = TrackID(tr_layer_id, tr_idx, width=width)
            self.connect_bbox_to_tracks(layer_name, box_arr, tr_id, wire_lower=bbox_bounds[0],
                                        wire_upper=bbox_bounds[1], unit_mode=True)
        return track_list
    def connect_to_tracks(self,  # type: TemplateBase
                          wire_arr_list,  # type: Union[WireArray, List[WireArray]]
                          track_id,  # type: TrackID
                          wire_lower=None,  # type: Optional[Union[float, int]]
                          wire_upper=None,  # type: Optional[Union[float, int]]
                          track_lower=None,  # type: Optional[Union[float, int]]
                          track_upper=None,  # type: Optional[Union[float, int]]
                          unit_mode=False,  # type: bool
                          min_len_mode=None,  # type: Optional[int]
                          return_wires=False,  # type: bool
                          debug=False,  # type: bool
                          ):
        # type: (...) -> Union[Optional[WireArray], Tuple[Optional[WireArray], List[WireArray]]]
        """Connect all given WireArrays to the given track(s).

        All given wires should be on adjacent layers of the track.

        Parameters
        ----------
        wire_arr_list : Union[WireArray, List[WireArray]]
            list of WireArrays to connect to track.
        track_id : TrackID
            TrackID that specifies the track(s) to connect the given wires to.
        wire_lower : Optional[Union[float, int]]
            if given, extend wire(s) to this lower coordinate.
        wire_upper : Optional[Union[float, int]]
            if given, extend wire(s) to this upper coordinate.
        track_lower : Optional[Union[float, int]]
            if given, extend track(s) to this lower coordinate.
        track_upper : Optional[Union[float, int]]
            if given, extend track(s) to this upper coordinate.
        unit_mode : bool
            True if wire_lower/wire_upper/track_lower/track_upper is given in
            resolution units.
        min_len_mode : Optional[int]
            If not None, will extend track so it satisfy minimum length requirement.
            Use -1 to extend lower bound, 1 to extend upper bound, 0 to extend both equally.
        return_wires : bool
            True to also return the extended wires.
        debug : bool
            True to print debug messages.

        Returns
        -------
        wire_arr : Union[Optional[WireArray], Tuple[Optional[WireArray], List[WireArray]]]
            WireArray representing the tracks/wires created.
            If return_wires is True, returns a Tuple[Optional[WireArray], List[WireArray]].
            If there was nothing to do, the first argument will be None.
            Otherwise, returns a WireArray.

        Raises
        ------
        ValueError
            if any given wire is not on a layer adjacent to the track layer.
        """
        if isinstance(wire_arr_list, WireArray):
            # convert to list.
            wire_arr_list = [wire_arr_list]
        else:
            pass
        if not wire_arr_list:
            # do nothing
            if return_wires:
                return None, []
            return None
        grid = self.grid
        res = grid.resolution
        # normalize optional track coordinates to integer resolution units
        if track_upper is not None:
            if not unit_mode:
                track_upper = int(round(track_upper / res))
            else:
                track_upper = int(track_upper)
        if track_lower is not None:
            if not unit_mode:
                track_lower = int(round(track_lower / res))
            else:
                track_lower = int(track_lower)
        # find min/max track Y coordinates
        tr_layer_id = track_id.layer_id
        wl, wu = tuple2_to_int(track_id.get_bounds(grid, unit_mode=True))
        # the wires must at least span the track bounds; user-supplied
        # wire_lower/wire_upper can only grow this interval.
        if wire_lower is not None:
            if not unit_mode:
                wire_lower = int(round(wire_lower / res))
            else:
                wire_lower = int(wire_lower)
            wl = min(wire_lower, wl)
        if wire_upper is not None:
            if not unit_mode:
                wire_upper = int(round(wire_upper / res))
            else:
                wire_upper = int(wire_upper)
            wu = max(wire_upper, wu)
        # get top wire and bottom wire list
        top_list = []
        bot_list = []
        for wire_arr in wire_arr_list:
            cur_layer_id = wire_arr.layer_id
            if cur_layer_id == tr_layer_id + 1:
                top_list.append(wire_arr)
            elif cur_layer_id == tr_layer_id - 1:
                bot_list.append(wire_arr)
            else:
                raise ValueError(
                    'WireArray layer %d cannot connect to layer %d' % (cur_layer_id, tr_layer_id))
        # connect wires together
        top_wire_list = self.connect_wires(top_list, lower=wl, upper=wu, unit_mode=True,
                                           debug=debug)
        bot_wire_list = self.connect_wires(bot_list, lower=wl, upper=wu, unit_mode=True,
                                           debug=debug)
        # draw vias; _draw_via_on_track also grows track_lower/track_upper to
        # cover the via extensions.
        for w_layer_id, wire_list in ((tr_layer_id + 1, top_wire_list),
                                      (tr_layer_id - 1, bot_wire_list)):
            for wire_arr in wire_list:
                for wlayer, box_arr in wire_arr.wire_arr_iter(grid):
                    track_lower, track_upper = self._draw_via_on_track(wlayer, box_arr, track_id,
                                                                       tl_unit=track_lower,
                                                                       tu_unit=track_upper)
        assert_msg = "track_lower/track_upper should have been set just above"
        assert track_lower is not None and track_upper is not None, assert_msg
        if min_len_mode is not None:
            # extend track to meet minimum length
            min_len = int(grid.get_min_length(tr_layer_id, track_id.width, unit_mode=True))
            # make sure minimum length is even so that middle coordinate exists
            min_len = -(-min_len // 2) * 2
            tr_len = track_upper - track_lower
            if min_len > tr_len:
                ext = min_len - tr_len
                if min_len_mode < 0:
                    track_lower -= ext
                elif min_len_mode > 0:
                    track_upper += ext
                else:
                    # extend both sides; keep total length exactly min_len
                    track_lower -= ext // 2
                    track_upper = track_lower + min_len
        # draw tracks
        result = WireArray(track_id, track_lower, track_upper, res=res, unit_mode=True)
        for layer_name, bbox_arr in result.wire_arr_iter(grid):
            self.add_rect(layer_name, bbox_arr)
        if return_wires:
            top_wire_list.extend(bot_wire_list)
            return result, top_wire_list
        else:
            return result
def connect_to_track_wires(self, # type: TemplateBase
wire_arr_list, # type: Union[WireArray, List[WireArray]]
track_wires, # type: Union[WireArray, List[WireArray]]
min_len_mode=None, # type: Optional[int]
debug=False, # type: bool
):
# type: (...) -> Union[WireArray, List[WireArray]]
"""Connect all given WireArrays to the given WireArrays on adjacent layer.
Parameters
----------
wire_arr_list : Union[WireArray, List[WireArray]]
list of WireArrays to connect to track.
track_wires : Union[WireArray, List[WireArray]]
list of tracks as WireArrays.
min_len_mode : Optional[int]
If not None, will extend track so it satisfy minimum length requirement.
Use -1 to extend lower bound, 1 to extend upper bound, 0 to extend both equally.
debug : bool
True to print debug messages.
Returns
-------
wire_arr : Union[WireArray, List[WireArray]]
WireArray representing the tracks created. None if nothing to do.
"""
res = self.grid.resolution
ans = [] # type: List[WireArray]
if isinstance(track_wires, WireArray):
ans_is_list = False
track_wires = [track_wires]
else:
ans_is_list = True
for warr in track_wires:
track_lower = int(round(warr.lower / res))
track_upper = int(round(warr.upper / res))
tr = self.connect_to_tracks(wire_arr_list, warr.track_id,
track_lower=track_lower, track_upper=track_upper,
unit_mode=True, min_len_mode=min_len_mode, debug=debug,
return_wires=False)
assert tr is not None, "connect_to_tracks did nothing"
assert isinstance(tr, WireArray), "return_wires=False should return a WireArray"
ans.append(tr)
if not ans_is_list:
return ans[0]
return ans
    def connect_with_via_stack(self,  # type: TemplateBase
                               wire_array,  # type: Union[WireArray, List[WireArray]]
                               track_id,  # type: TrackID
                               tr_w_list=None,  # type: Optional[List[int]]
                               tr_mode_list=None,  # type: Optional[Union[int, List[int]]]
                               min_len_mode_list=None,  # type: Optional[Union[int, List[int]]]
                               debug=False,  # type: bool
                               ):
        # type: (...) -> List[WireArray]
        """Connect a single wire to the given track by using a via stack.

        This is a convenience function that draws via connections through several layers
        at once. With optional parameters to control the track widths on each
        intermediate layers.

        Parameters
        ----------
        wire_array : Union[WireArray, List[WireArray]]
            the starting WireArray.  A one-element list is also accepted.
        track_id : TrackID
            the TrackID to connect to.  Must contain a single track.
        tr_w_list : Optional[List[int]]
            the track widths to use on each layer. If not specified, will compute automatically.
        tr_mode_list : Optional[Union[int, List[int]]]
            If tracks on intermediate layers do not line up nicely,
            the track mode flags determine whether to pick upper or lower tracks
        min_len_mode_list : Optional[Union[int, List[int]]]
            minimum length mode flags on each layer.
        debug : bool
            True to print debug messages.

        Returns
        -------
        warr_list : List[WireArray]
            List of created WireArrays, ordered from the wire layer towards the
            track layer.

        Raises
        ------
        ValueError
            if the wire/track are not single wires, are on the same layer, or an
            option list has the wrong length.
        """
        if not isinstance(wire_array, WireArray):
            # error checking
            if len(wire_array) != 1:
                raise ValueError('connect_with_via_stack() only works on WireArray '
                                 'and TrackID with a single wire.')
            # convert to WireArray.
            wire_array = wire_array[0]
        # error checking
        warr_tid = wire_array.track_id
        warr_layer = warr_tid.layer_id
        tr_layer = track_id.layer_id
        tr_index = track_id.base_index
        if warr_tid.num != 1 or track_id.num != 1:
            raise ValueError('connect_with_via_stack() only works on WireArray '
                             'and TrackID with a single wire.')
        if tr_layer == warr_layer:
            raise ValueError('Cannot connect wire to track on the same layer.')
        # one connection per layer boundary crossed
        num_connections = abs(tr_layer - warr_layer)
        # set default values
        if tr_w_list is None:
            tr_w_list = [-1] * num_connections
        elif len(tr_w_list) == num_connections - 1:
            # user might be inclined to not list the last track width, as it is included in
            # TrackID. Allow for this exception
            tr_w_list = tr_w_list + [-1]
        elif len(tr_w_list) != num_connections:
            raise ValueError('tr_w_list must have exactly %d elements.' % num_connections)
        else:
            # create a copy of the given list, as this list may be modified later.
            tr_w_list = list(tr_w_list)
        if tr_mode_list is None:
            tr_mode_list = [0] * num_connections
        elif isinstance(tr_mode_list, int):
            tr_mode_list = [tr_mode_list] * num_connections
        elif len(tr_mode_list) != num_connections:
            raise ValueError('tr_mode_list must have exactly %d elements.' % num_connections)
        if min_len_mode_list is None:
            min_len_mode_list_resolved = [None] * num_connections  # type: List[Optional[int]]
        elif isinstance(min_len_mode_list, int):
            min_len_mode_list_resolved = [min_len_mode_list] * num_connections
        elif len(min_len_mode_list) != num_connections:
            raise ValueError('min_len_mode_list must have exactly %d elements.' % num_connections)
        else:
            min_len_mode_list_resolved = min_len_mode_list
        # determine via location: the point (x0, y0) where the stack is placed.
        grid = self.grid
        w_dir = grid.get_direction(warr_layer)
        t_dir = grid.get_direction(tr_layer)
        w_coord = grid.track_to_coord(warr_layer, warr_tid.base_index, unit_mode=True)
        t_coord = grid.track_to_coord(tr_layer, tr_index, unit_mode=True)
        if w_dir != t_dir:
            # perpendicular layers: the intersection is well defined
            x0, y0 = (w_coord, t_coord) if w_dir == 'y' else (t_coord, w_coord)
        else:
            # parallel layers: use the middle of the starting wire
            w_mid = int(round(wire_array.middle / grid.resolution))
            x0, y0 = (w_coord, w_mid) if w_dir == 'y' else (w_mid, w_coord)
        # determine track width on each layer, propagating physical width from
        # the wider end (target track if going up, starting wire if going down).
        tr_w_list[num_connections - 1] = track_id.width
        if tr_layer > warr_layer:
            layer_dir = 1
            tr_w_prev = grid.get_track_width(tr_layer, tr_w_list[num_connections - 1],
                                             unit_mode=True)
            tr_w_idx_iter = range(num_connections - 2, -1, -1)
        else:
            layer_dir = -1
            tr_w_prev = grid.get_track_width(warr_layer, warr_tid.width, unit_mode=True)
            tr_w_idx_iter = range(0, num_connections - 1)
        for idx in tr_w_idx_iter:
            cur_layer = warr_layer + layer_dir * (idx + 1)
            if tr_w_list[idx] < 0:
                # auto-compute: match the previous layer's physical width
                tr_w_list[idx] = max(1, grid.get_track_width_inverse(cur_layer, tr_w_prev,
                                                                     unit_mode=True))
            tr_w_prev = grid.get_track_width(cur_layer, tr_w_list[idx], unit_mode=True)
        # draw via stacks one layer at a time
        results = []  # type: List[WireArray]
        targ_layer = warr_layer
        for tr_w, tr_mode, min_len_mode in zip(tr_w_list, tr_mode_list, min_len_mode_list_resolved):
            targ_layer += layer_dir
            # determine track index to connect to
            if targ_layer == tr_layer:
                targ_index = tr_index
            else:
                # intermediate layer: snap to the nearest track through (x0, y0)
                targ_dir = grid.get_direction(targ_layer)
                coord = x0 if targ_dir == 'y' else y0
                targ_index = grid.coord_to_nearest_track(targ_layer, coord, half_track=True,
                                                         mode=tr_mode, unit_mode=True)
            targ_tid = TrackID(targ_layer, targ_index, width=tr_w)
            warr = self.connect_to_tracks(wire_array, targ_tid, min_len_mode=min_len_mode,
                                          unit_mode=True, debug=debug, return_wires=False)
            assert warr is not None, "connect_to_tracks did nothing"
            assert isinstance(warr, WireArray), "return_wires=False should return a WireArray"
            results.append(warr)
            # the new wire becomes the starting point for the next layer
            wire_array = warr
        return results
def strap_wires(self, # type: TemplateBase
warr, # type: WireArray
targ_layer, # type: int
tr_w_list=None, # type: Optional[List[int]]
min_len_mode_list=None, # type: Optional[List[int]]
):
# type: (...) -> WireArray
"""Strap the given WireArrays to the target routing layer.
This method is used to connects wires on adjacent layers that has the same direction.
The track locations must be valid on all routing layers for this method to work.
Parameters
----------
warr : WireArray
the WireArrays to strap.
targ_layer : int
the final routing layer ID.
tr_w_list : Optional[List[int]]
the track widths to use on each layer. If not specified, will determine automatically.
min_len_mode_list : Optional[List[int]]
minimum length mode flags on each layer.
Returns
-------
wire_arr : WireArray
WireArray representing the tracks created. None if nothing to do.
"""
warr_layer = warr.layer_id
if targ_layer == warr_layer:
# no need to do anything
return warr
num_connections = abs(targ_layer - warr_layer) # type: int
# set default values
if tr_w_list is None:
tr_w_list = [-1] * num_connections
elif len(tr_w_list) != num_connections:
raise ValueError('tr_w_list must have exactly %d elements.' % num_connections)
else:
# create a copy of the given list, as this list may be modified later.
tr_w_list = list(tr_w_list)
if min_len_mode_list is None:
min_len_mode_list_resolved = ([None] * num_connections) # type: List[Optional[int]]
else:
# List[int] is a List[Optional[int]]
min_len_mode_list_resolved = cast(List[Optional[int]], min_len_mode_list)
if len(min_len_mode_list_resolved) != num_connections:
raise ValueError('min_len_mode_list must have exactly %d elements.' % num_connections)
layer_dir = 1 if targ_layer > warr_layer else -1
for tr_w, mlen_mode in zip(tr_w_list, min_len_mode_list_resolved):
warr = self._strap_wires_helper(warr, warr.layer_id + layer_dir, tr_w, mlen_mode)
return warr
def _strap_wires_helper(self, # type: TemplateBase
warr, # type: WireArray
targ_layer, # type: int
tr_w, # type: int
mlen_mode, # type: Optional[int]
):
# type: (...) -> WireArray
"""Helper method for strap_wires(). Connect one layer at a time."""
wire_tid = warr.track_id
wire_layer = wire_tid.layer_id
res = self.grid.resolution
lower = int(round(warr.lower / res))
upper = int(round(warr.upper / res))
# error checking
wdir = self.grid.get_direction(wire_layer)
if wdir != self.grid.get_direction(targ_layer):
raise ValueError('Cannot strap wires with different directions.')
# convert base track index
base_coord = int(self.grid.track_to_coord(wire_layer, wire_tid.base_index, unit_mode=True))
base_tid = int(self.grid.coord_to_track(targ_layer, base_coord, unit_mode=True))
# convert pitch
wire_pitch = int(self.grid.get_track_pitch(wire_layer, unit_mode=True))
targ_pitch = int(self.grid.get_track_pitch(targ_layer, unit_mode=True))
targ_pitch_half = targ_pitch // 2
pitch_unit = int(round(wire_pitch * wire_tid.pitch))
if pitch_unit % targ_pitch_half != 0:
raise ValueError('Cannot strap wires on layers with mismatched pitch ')
num_pitch_2 = pitch_unit // targ_pitch_half
if num_pitch_2 % 2 == 0:
num_pitch = num_pitch_2 // 2 # type: Union[float, int]
else:
num_pitch = num_pitch_2 / 2 # type: Union[float, int]
# convert width
if tr_w < 0:
width_unit = int(self.grid.get_track_width(wire_layer, wire_tid.width, unit_mode=True))
tr_w = max(1, self.grid.get_track_width_inverse(targ_layer, width_unit, mode=-1,
unit_mode=True))
# draw vias. Update WireArray lower/upper
new_lower = lower # type: int
new_upper = upper # type: int
w_lower = lower # type: int
w_upper = upper # type: int
for tid in wire_tid:
coord = int(self.grid.track_to_coord(wire_layer, tid, unit_mode=True))
tid2 = int(self.grid.coord_to_track(targ_layer, coord, unit_mode=True))
w_name = self.grid.get_layer_name(wire_layer, tid)
t_name = self.grid.get_layer_name(targ_layer, tid2)
w_yb, w_yt = tuple2_to_int(
self.grid.get_wire_bounds(wire_layer, tid, wire_tid.width, unit_mode=True))
t_yb, t_yt = tuple2_to_int(
self.grid.get_wire_bounds(targ_layer, tid2, tr_w, unit_mode=True))
vbox = BBox(lower, max(w_yb, t_yb), upper, min(w_yt, t_yt), res, unit_mode=True)
if wdir == 'y':
vbox = vbox.flip_xy()
if wire_layer < targ_layer:
via = self.add_via(vbox, w_name, t_name, wdir, extend=True, top_dir=wdir)
tbox, wbox = via.top_box, via.bottom_box
else:
via = self.add_via(vbox, t_name, w_name, wdir, extend=True, top_dir=wdir)
tbox, wbox = via.bottom_box, via.top_box
if wdir == 'y':
new_lower = min(new_lower, tbox.bottom_unit)
new_upper = max(new_upper, tbox.top_unit)
w_lower = min(w_lower, wbox.bottom_unit)
w_upper = max(w_upper, wbox.top_unit)
else:
new_lower = min(new_lower, tbox.left_unit)
new_upper = max(new_upper, tbox.right_unit)
w_lower = min(w_lower, wbox.left_unit)
w_upper = max(w_upper, wbox.top_unit)
# handle minimum length DRC rule
min_len = int(self.grid.get_min_length(targ_layer, tr_w, unit_mode=True))
ext = min_len - (new_upper - new_lower)
if mlen_mode is not None and ext > 0:
if mlen_mode < 0:
new_lower -= ext
elif mlen_mode > 0:
new_upper += ext
else:
new_lower -= ext // 2
new_upper += (ext - ext // 2)
# add wires
self.add_wires(wire_layer, wire_tid.base_index, w_lower, w_upper, width=wire_tid.width,
num=wire_tid.num, pitch=wire_tid.pitch, unit_mode=True)
return self.add_wires(targ_layer, base_tid, new_lower, new_upper, width=tr_w,
num=wire_tid.num, pitch=num_pitch, unit_mode=True)
def connect_differential_tracks(self, # type: TemplateBase
pwarr_list, # type: Union[WireArray, List[WireArray]]
nwarr_list, # type: Union[WireArray, List[WireArray]]
tr_layer_id, # type: int
ptr_idx, # type: Union[int, float]
ntr_idx, # type: Union[int, float]
width=1, # type: int
track_lower=None, # type: Optional[Union[float, int]]
track_upper=None, # type: Optional[Union[float, int]]
unit_mode=False, # type: bool
debug=False # type: bool
):
# type: (...) -> Tuple[Optional[WireArray], Optional[WireArray]]
"""Connect the given differential wires to two tracks symmetrically.
This method makes sure the connections are symmetric and have identical parasitics.
Parameters
----------
pwarr_list : Union[WireArray, List[WireArray]]
positive signal wires to connect.
nwarr_list : Union[WireArray, List[WireArray]]
negative signal wires to connect.
tr_layer_id : int
track layer ID.
ptr_idx : Union[int, float]
positive track index.
ntr_idx : Union[int, float]
negative track index.
width : int
track width in number of tracks.
track_lower : Optional[Union[float, int]]
if given, extend track(s) to this lower coordinate.
track_upper : Optional[Union[float, int]]
if given, extend track(s) to this upper coordinate.
unit_mode: bool
True if track_lower/track_upper is given in resolution units.
debug : bool
True to print debug messages.
Returns
-------
p_track : Optional[WireArray]
the positive track.
n_track : Optional[WireArray]
the negative track.
"""
track_list = self.connect_matching_tracks([pwarr_list, nwarr_list], tr_layer_id,
[ptr_idx, ntr_idx], width=width,
track_lower=track_lower,
track_upper=track_upper,
unit_mode=unit_mode,
debug=debug)
return track_list[0], track_list[1]
def connect_differential_wires(self, # type: TemplateBase
pin_warrs, # type: Union[WireArray, List[WireArray]]
nin_warrs, # type: Union[WireArray, List[WireArray]]
pout_warr, # type: WireArray
nout_warr, # type: WireArray
track_lower=None, # type: Optional[Union[float, int]]
track_upper=None, # type: Optional[Union[float, int]]
unit_mode=False, # type: bool
debug=False # type: bool
):
# type: (...) -> Tuple[Optional[WireArray], Optional[WireArray]]
if not unit_mode:
res = self.grid.resolution
if track_lower is not None:
track_lower = int(round(track_lower / res))
if track_upper is not None:
track_upper = int(round(track_upper / res))
p_tid = pout_warr.track_id
lay_id = p_tid.layer_id
pidx = p_tid.base_index
nidx = nout_warr.track_id.base_index
width = p_tid.width
if track_lower is None:
tr_lower = pout_warr.lower_unit
else:
tr_lower = min(track_lower, pout_warr.lower_unit)
if track_upper is None:
tr_upper = pout_warr.upper_unit
else:
tr_upper = max(track_upper, pout_warr.upper_unit)
return self.connect_differential_tracks(pin_warrs, nin_warrs, lay_id, pidx, nidx,
width=width, track_lower=tr_lower,
track_upper=tr_upper, unit_mode=True, debug=debug)
    def connect_matching_tracks(self,  # type: TemplateBase
                                warr_list_list,  # type: List[Union[WireArray, List[WireArray]]]
                                tr_layer_id,  # type: int
                                tr_idx_list,  # type: List[Union[int, float]]
                                width=1,  # type: int
                                track_lower=None,  # type: Optional[Union[float, int]]
                                track_upper=None,  # type: Optional[Union[float, int]]
                                unit_mode=False,  # type: bool
                                debug=False  # type: bool
                                ):
        # type: (...) -> List[Optional[WireArray]]
        """Connect wires to tracks with optimal matching.

        This method connects the wires to tracks in a way that minimizes the parasitic
        mismatches: all wires are extended to common bounds before the connections
        are made.

        Parameters
        ----------
        warr_list_list : List[Union[WireArray, List[WireArray]]]
            list of signal wires to connect, one entry per track index.
        tr_layer_id : int
            track layer ID.
        tr_idx_list : List[Union[int, float]]
            list of track indices.
        width : int
            track width in number of tracks.
        track_lower : Optional[Union[float, int]]
            if given, extend track(s) to this lower coordinate.
        track_upper : Optional[Union[float, int]]
            if given, extend track(s) to this upper coordinate.
        unit_mode: bool
            True if track_lower/track_upper is given in resolution units.
        debug : bool
            True to print debug messages.

        Returns
        -------
        track_list : List[Optional[WireArray]]
            list of created tracks, one per track index.

        Raises
        ------
        ValueError
            if the input lists have mismatched lengths, are empty, or a wire is
            not on a layer adjacent to the track layer.
        """
        grid = self.grid
        res = grid.resolution
        # normalize optional coordinates to resolution units
        if not unit_mode:
            if track_lower is not None:
                track_lower = int(round(track_lower / res))
            if track_upper is not None:
                track_upper = int(round(track_upper / res))
        # simple error checking
        num_tracks = len(tr_idx_list)  # type: int
        if num_tracks != len(warr_list_list):
            raise ValueError('wire list length and track index list length mismatch.')
        if num_tracks == 0:
            raise ValueError('No tracks given')
        # compute wire_lower/upper without via extension:
        # union of the wire bounds of all given track indices.
        w_lower, w_upper = tuple2_to_int(
            grid.get_wire_bounds(tr_layer_id, tr_idx_list[0], width=width,
                                 unit_mode=True))
        for tr_idx in islice(tr_idx_list, 1, None):
            cur_low, cur_up = tuple2_to_int(
                grid.get_wire_bounds(tr_layer_id, tr_idx, width=width, unit_mode=True))
            w_lower = min(w_lower, cur_low)
            w_upper = max(w_upper, cur_up)
        # separate wire arrays into bottom/top tracks, compute wire/track lower/upper coordinates
        bot_warrs = [[] for _ in range(num_tracks)]  # type: List[List[WireArray]]
        top_warrs = [[] for _ in range(num_tracks)]  # type: List[List[WireArray]]
        # shared wire bounds (with via extension) for the layers below/above the track layer
        bot_bounds = [None, None]  # type: List[Optional[Union[float, int]]]
        top_bounds = [None, None]  # type: List[Optional[Union[float, int]]]
        for idx, warr_list in enumerate(warr_list_list):
            # convert to WireArray list
            if isinstance(warr_list, WireArray):
                warr_list = [warr_list]
            else:
                pass
            if not warr_list:
                raise ValueError('No wires found for track index %d' % idx)
            for warr in warr_list:
                warr_tid = warr.track_id
                cur_layer_id = warr_tid.layer_id
                cur_width = warr_tid.width
                # classify the wire as above or below the track layer; note
                # get_via_extensions takes (bottom layer, bottom width, top width),
                # so the unpack order of the extensions differs per case.
                if cur_layer_id == tr_layer_id + 1:
                    tr_w_ext = grid.get_via_extensions(tr_layer_id, width, cur_width,
                                                       unit_mode=True)
                    top_warrs[idx].append(warr)
                    cur_bounds = top_bounds
                    tr_ext, w_ext = tuple2_to_int(tr_w_ext)
                elif cur_layer_id == tr_layer_id - 1:
                    tr_w_ext = grid.get_via_extensions(cur_layer_id, cur_width, width,
                                                       unit_mode=True)
                    bot_warrs[idx].append(warr)
                    cur_bounds = bot_bounds
                    w_ext, tr_ext = tuple2_to_int(tr_w_ext)
                else:
                    raise ValueError('Cannot connect wire on layer %d '
                                     'to track on layer %d' % (cur_layer_id, tr_layer_id))
                # compute wire lower/upper including via extension
                # (cur_bounds aliases bot_bounds or top_bounds, so this mutates
                # the shared list in place)
                if cur_bounds[0] is None:
                    cur_bounds[0] = w_lower - w_ext
                    cur_bounds[1] = w_upper + w_ext
                else:
                    cur_bounds[0] = min(cur_bounds[0], w_lower - w_ext)
                    cur_bounds[1] = max(cur_bounds[1], w_upper + w_ext)
                # compute track lower/upper including via extension
                warr_bounds = warr_tid.get_bounds(grid, unit_mode=True)
                if track_lower is None:
                    track_lower = warr_bounds[0] - tr_ext
                else:
                    track_lower = min(track_lower, warr_bounds[0] - tr_ext)
                if track_upper is None:
                    track_upper = warr_bounds[1] + tr_ext
                else:
                    track_upper = max(track_upper, warr_bounds[1] + tr_ext)
        assert track_lower is not None and track_upper is not None, \
            "track_lower/track_upper should have been set above"
        # draw tracks, connecting each group of wires with the shared bounds so
        # every connection is identical.
        track_list = []  # type: List[Optional[WireArray]]
        for bwarr_list, twarr_list, tr_idx in zip(bot_warrs, top_warrs, tr_idx_list):
            track_list.append(self.add_wires(tr_layer_id, tr_idx, track_lower, track_upper,
                                             width=width, unit_mode=True))
            tr_id = TrackID(tr_layer_id, tr_idx, width=width)
            self.connect_to_tracks(bwarr_list, tr_id, wire_lower=bot_bounds[0],
                                   wire_upper=bot_bounds[1], unit_mode=True,
                                   min_len_mode=None, debug=debug)
            self.connect_to_tracks(twarr_list, tr_id, wire_lower=top_bounds[0],
                                   wire_upper=top_bounds[1], unit_mode=True,
                                   min_len_mode=None, debug=debug)
        return track_list
def draw_vias_on_intersections(self, bot_warr_list, top_warr_list):
# type: (Union[WireArray, List[WireArray]], Union[WireArray, List[WireArray]]) -> List[bool]
"""Draw vias on all intersections of the two given wire groups.
Parameters
----------
bot_warr_list : Union[WireArray, List[WireArray]]
the bottom wires.
top_warr_list : Union[WireArray, List[WireArray]]
the top wires.
"""
if isinstance(bot_warr_list, WireArray):
bot_warr_list = [bot_warr_list]
else:
pass
if isinstance(top_warr_list, WireArray):
top_warr_list = [top_warr_list]
else:
pass
grid = self.grid
res = grid.resolution
bwarr_conn_made_list = [False for _ in bot_warr_list]
for bwarr_indx, bwarr in enumerate(bot_warr_list):
bot_tl = bwarr.lower_unit
bot_tu = bwarr.upper_unit
bot_track_idx = bwarr.track_id
bot_layer_id = bot_track_idx.layer_id
top_layer_id = bot_layer_id + 1
bot_width = bot_track_idx.width
bot_dir = self.grid.get_direction(bot_layer_id)
bot_horizontal = (bot_dir == 'x')
for bot_index in bot_track_idx:
bot_lay_name = self.grid.get_layer_name(bot_layer_id, bot_index)
btl, btu = tuple2_to_int(
grid.get_wire_bounds(bot_layer_id, bot_index, width=bot_width,
unit_mode=True))
for twarr in top_warr_list:
top_tl = twarr.lower_unit
top_tu = twarr.upper_unit
top_track_idx = twarr.track_id
top_width = top_track_idx.width
if top_tu >= btu and top_tl <= btl:
# top wire cuts bottom wire, possible intersection
for top_index in top_track_idx:
ttl, ttu = tuple2_to_int(grid.get_wire_bounds(top_layer_id, top_index,
width=top_width,
unit_mode=True))
if bot_tu >= ttu and bot_tl <= ttl:
# bottom wire cuts top wire, we have intersection. Make bbox
if bot_horizontal:
box = BBox(ttl, btl, ttu, btu, res, unit_mode=True)
else:
box = BBox(btl, ttl, btu, ttu, res, unit_mode=True)
top_lay_name = self.grid.get_layer_name(top_layer_id, top_index)
self.add_via(box, bot_lay_name, top_lay_name, bot_dir)
bwarr_conn_made_list[bwarr_indx] = True
return bwarr_conn_made_list
def mark_bbox_used(self, layer_id, bbox):
# type: (int, BBox) -> None
"""Marks the given bounding-box region as used in this Template."""
layer_name = self.grid.get_layer_name(layer_id, 0)
self._used_tracks.record_rect(self.grid, layer_name, BBoxArray(bbox, unit_mode=True),
dx=0, dy=0)
def get_available_tracks(self, # type: TemplateBase
layer_id, # type: int
tr_idx_list, # type: List[int]
lower, # type: Union[float, int]
upper, # type: Union[float, int]
width=1, # type: int
margin=0, # type: Union[float, int]
unit_mode=False, # type: bool
):
# type: (...) -> List[int]
"""Returns empty tracks"""
if not unit_mode:
res = self.grid.resolution
lower = int(round(lower / res))
upper = int(round(upper / res))
margin = int(round(margin / res))
return [tr_idx for tr_idx in tr_idx_list
if self.is_track_available(layer_id, tr_idx, lower, upper, width=width,
sp=margin, sp_le=margin, unit_mode=True)]
    def do_power_fill(self,  # type: TemplateBase
                      layer_id,  # type: int
                      space,  # type: Union[float, int]
                      space_le,  # type: Union[float, int]
                      vdd_warrs=None,  # type: Optional[Union[WireArray, List[WireArray]]]
                      vss_warrs=None,  # type: Optional[Union[WireArray, List[WireArray]]]
                      bound_box=None,  # type: Optional[BBox]
                      fill_width=1,  # type: int
                      fill_space=0,  # type: int
                      x_margin=0,  # type: Union[float, int]
                      y_margin=0,  # type: Union[float, int]
                      tr_offset=0,  # type: Union[float, int]
                      min_len=0,  # type: Union[float, int]
                      flip=False,  # type: bool
                      unit_mode=False,  # type: bool
                      sup_type='both',  # type: str
                      vss_only=False,  # type: bool
                      vdd_only=False,  # type: bool
                      ):
        # type: (...) -> Tuple[List[WireArray], List[WireArray]]
        """Draw power fill on the given layer.

        Draws fill wires on evenly-pitched tracks inside the bounding box,
        assigning each track to VDD or VSS (alternating by default), and
        connects them with vias to the supplied VDD/VSS wires.

        Returns
        -------
        top_vdd : List[WireArray]
            the VDD fill wires drawn.
        top_vss : List[WireArray]
            the VSS fill wires drawn.
        """
        res = self.grid.resolution
        # normalize all dimension arguments to integer resolution units
        if not unit_mode:
            space = int(round(space / res))
            space_le = int(round(space_le / res))
            x_margin = int(round(x_margin / res))
            y_margin = int(round(y_margin / res))
            tr_offset = int(round(tr_offset / res))
            min_len = int(round(min_len / res))
        else:
            space = int(space)
            space_le = int(space_le)
            x_margin = int(x_margin)
            y_margin = int(y_margin)
            tr_offset = int(tr_offset)
            min_len = int(min_len)
        # fill wires can never be shorter than the process minimum length
        min_len = max(min_len, int(self.grid.get_min_length(layer_id, fill_width, unit_mode=True)))
        if bound_box is None:
            if self.bound_box is None:
                raise ValueError("bound_box is not set")
            bound_box = self.bound_box
        # shrink the fill region by the requested margins
        bound_box = bound_box.expand(dx=-x_margin, dy=-y_margin, unit_mode=True)
        tr_off = self.grid.coord_to_track(layer_id, tr_offset, unit_mode=True)
        # half-track index of the first fill track and the fill pitch
        # (in half-track units)
        htr0 = int(tr_off * 2) + 1 + fill_width + fill_space
        htr_pitch = 2 * (fill_width + fill_space)
        is_horizontal = (self.grid.get_direction(layer_id) == 'x')
        # (cl, cu): bounds transverse to the wires; (lower, upper): bounds
        # along the wire direction
        if is_horizontal:
            cl, cu = bound_box.bottom_unit, bound_box.top_unit
            lower, upper = bound_box.left_unit, bound_box.right_unit
        else:
            cl, cu = bound_box.left_unit, bound_box.right_unit
            lower, upper = bound_box.bottom_unit, bound_box.top_unit
        # first and last tracks that fit fully inside the fill region
        tr_bot = int(self.grid.find_next_track(layer_id, cl, tr_width=fill_width, half_track=True,
                                               mode=1, unit_mode=True))
        tr_top = int(self.grid.find_next_track(layer_id, cu, tr_width=fill_width, half_track=True,
                                               mode=-1, unit_mode=True))
        # n0/n1: ceiling/floor of the fill-track indices relative to htr0
        n0 = - (-(int(tr_bot * 2) + 1 - htr0) // htr_pitch)
        n1 = (int(tr_top * 2) + 1 - htr0) // htr_pitch
        top_vdd = []  # type: List[WireArray]
        top_vss = []  # type: List[WireArray]
        for ncur in range(n0, n1 + 1):
            # float division intentionally produces half-integer track indices
            tr_idx = (htr0 + ncur * htr_pitch - 1) / 2
            tid = TrackID(layer_id, tr_idx, width=fill_width)
            # Two options for legacy support
            if vss_only or vdd_only:
                if vss_only and vdd_only:
                    raise ValueError("only one of 'vss_only' and 'vdd_only' could be True.")
                cur_list = top_vss if vss_only else top_vdd
            else:
                if sup_type.lower() == 'vss':
                    cur_list = top_vss
                elif sup_type.lower() == 'vdd':
                    cur_list = top_vdd
                elif sup_type.lower() == 'both':
                    # alternate VSS/VDD on every other track; flip swaps them
                    cur_list = top_vss if (ncur % 2 == 0) != flip else top_vdd
                else:
                    raise ValueError('sup_type has to be "VDD" or "VSS" or "both"(default)')
            # fill every open (unused) interval on this track that fits
            for tl, tu in self.open_interval_iter(tid, lower, upper, sp=space, sp_le=space_le,
                                                  min_len=min_len):
                cur_list.append(WireArray(tid, tl, tu, res=res, unit_mode=True))
        # draw the fill wire geometry
        for warr in chain(top_vdd, top_vss):
            for lay, box_arr in warr.wire_arr_iter(self.grid):
                self.add_rect(lay, box_arr)
        # connect the fill wires to the given supply wires with vias
        if vdd_warrs:
            self.draw_vias_on_intersections(vdd_warrs, top_vdd)
        if vss_warrs:
            self.draw_vias_on_intersections(vss_warrs, top_vss)
        return top_vdd, top_vss
def do_max_space_fill2(self, # type: TemplateBase
layer_id, # type: int
bound_box=None, # type: Optional[BBox]
):
# type: (...) -> None
"""Draw density fill on the given layer."""
grid = self.grid
tech_info = grid.tech_info
fill_config = tech_info.tech_params['layout']['dummy_fill'][layer_id]
density = fill_config['density']
sp_max = fill_config['sp_max']
sp_le_max = fill_config['sp_le_max']
ip_margin = fill_config['margin']
ip_margin_le = fill_config['margin_le']
sp_max2 = sp_max // 2
sp_le_max2 = sp_le_max // 2
margin = sp_max2 // 2
margin_le = sp_le_max2 // 2
if bound_box is None:
if self.bound_box is None:
raise ValueError("bound_box is not set")
bound_box = self.bound_box
# get tracks information
long_dir = grid.get_direction(layer_id)
if long_dir == 'y':
tran_dir = 'x'
spx = sp_max2
spy = sp_le_max2
else:
tran_dir = 'y'
spx = sp_le_max2
spy = sp_max2
dim_tran0, dim_tran1 = tuple2_to_int(bound_box.get_interval(tran_dir, unit_mode=True))
dim_long0, dim_long1 = tuple2_to_int(bound_box.get_interval(long_dir, unit_mode=True))
dim_tranl = min(dim_tran1, dim_tran0 + sp_max2)
dim_tranu = max(dim_tran0, dim_tran1 - sp_max2)
dim_longl = min(dim_long1, dim_long0 + sp_le_max2)
dim_longu = max(dim_long0, dim_long1 - sp_le_max2)
dim_tran = dim_tran1 - dim_tran0
dim_long = dim_long1 - dim_long0
# self.add_rect(tech_info.get_exclude_layer(layer_id), bound_box)
if dim_tran <= ip_margin or dim_long <= ip_margin_le:
return
min_len = int(grid.get_min_length(layer_id, 1, unit_mode=True))
htr0 = int(self.grid.coord_to_nearest_track(layer_id, dim_tranl, half_track=True,
mode=-1, unit_mode=True))
htr1 = int(self.grid.coord_to_nearest_track(layer_id, dim_tranu, half_track=True,
mode=1, unit_mode=True))
htr0 = int(round(htr0 * 2 + 1))
htr1 = int(round(htr1 * 2 + 1))
num_htr_tot = htr1 - htr0 + 1
# calculate track pitch based on density/max space
tr_w, tr_sp = tuple2_to_int(grid.get_track_info(layer_id, unit_mode=True))
sp_le = int(grid.get_line_end_space(layer_id, 1, unit_mode=True))
tr_pitch2 = int(grid.get_track_pitch(layer_id, unit_mode=True)) // 2
num_tracks = int(round(-(-(dim_tran * density) // tr_w)))
num_tracks = min(max(num_tracks, -(-num_htr_tot // ((sp_max - tr_sp) // tr_pitch2 + 2))),
num_htr_tot // 2)
fill_info = None
invert = False
for _ in range(100):
try:
fill_info, invert = fill_symmetric_max_num_info(num_htr_tot, num_tracks, 1, 1, 1,
fill_on_edge=True, cyclic=False)
except NoFillChoiceError:
num_tracks -= 1
if fill_info is None:
raise ValueError('no fill solution.')
intv_list = fill_symmetric_interval(*fill_info[1], offset=htr0, invert=invert)[0]
# create interval sets
intv_tran0 = IntervalSet()
intv_tran1 = IntervalSet()
htr_list = [intv[0] for intv in intv_list]
num_htr = len(htr_list)
set_long0 = set(htr_list)
set_long1 = set_long0.copy()
intv_list = [IntervalSet() for _ in range(num_htr)]
# handle blockages
for blk_box in self.blockage_iter(layer_id, bound_box, spx=spx, spy=spy):
b_tran0, b_tran1 = tuple2_to_int(blk_box.get_interval(tran_dir, unit_mode=True))
b_long0, b_long1 = tuple2_to_int(blk_box.get_interval(long_dir, unit_mode=True))
b_long0_lim = max(b_long0, dim_longl)
b_long1_lim = min(b_long1, dim_longu)
blk_intv = (b_long0_lim, b_long1_lim)
if b_long0_lim < b_long1_lim:
# handle lower/upper transverse edges
if b_tran0 <= dim_tran0 and dim_tranl <= b_tran1:
intv_tran0.add(blk_intv, merge=True, abut=True)
if b_tran0 <= dim_tranu and dim_tran1 <= b_tran1:
intv_tran1.add(blk_intv, merge=True, abut=True)
cur_htr0 = int(self.grid.find_next_track(layer_id, b_tran0, half_track=True, mode=1,
unit_mode=True))
cur_htr1 = int(self.grid.find_next_track(layer_id, b_tran1, half_track=True, mode=-1,
unit_mode=True))
cur_htr0 = max(htr0, int(round(cur_htr0 * 2 + 1)))
cur_htr1 = min(htr1, int(round(cur_htr1 * 2 + 1)))
htr_idx0 = bisect.bisect_left(htr_list, cur_htr0)
if htr_idx0 < num_htr and htr_list[htr_idx0] <= cur_htr1:
htr_idx1 = min(num_htr - 1, bisect.bisect_right(htr_list, cur_htr1, lo=htr_idx0))
for htr_idx in range(htr_idx0, htr_idx1 + 1):
htr = htr_list[htr_idx]
# handle lower/upper longitudinal edges
if b_long0 <= dim_long0 and dim_longl <= b_long1:
set_long0.discard(htr)
if b_long0 <= dim_longu and dim_long1 <= b_long1:
set_long1.discard(htr)
if b_long0_lim < b_long1_lim:
intv_list[htr_idx].add(blk_intv, merge=True, abut=True)
# add fill in edges on transverse sides
trl = int(self.grid.coord_to_nearest_track(layer_id, dim_tran0 + margin, half_track=True,
mode=-1, unit_mode=True))
trr = int(self.grid.coord_to_nearest_track(layer_id, dim_tran1 - margin, half_track=True,
mode=1, unit_mode=True))
if trr < trl + 1:
# handle cases where the given bounding box is small
dim_mid = (dim_tran0 + dim_tran1) // 2
trl = int(self.grid.coord_to_nearest_track(layer_id, dim_mid, half_track=True,
mode=0, unit_mode=True))
tran_edge_iter = ((intv_tran0, trl),)
else:
tran_edge_iter = ((intv_tran0, trl), (intv_tran1, trr))
intv_long = (dim_longl, dim_longu)
for intv_set, tidx in tran_edge_iter:
for long0, long1 in intv_set.complement_iter(intv_long):
if long1 - long0 < min_len:
long0 = (long0 + long1 - min_len) // 2
long1 = long0 + min_len
self.add_wires(layer_id, tidx, long0, long1, unit_mode=True)
# add fill in edges on longitude sides
if dim_long0 + 2 * (margin_le + min_len) + sp_le > dim_long1:
# handle cases where the giving bounding box is small
long_lower = min(dim_long0 + margin_le, (dim_long0 + dim_long1 - min_len) // 2)
long_upper = max(dim_long1 - margin_le, long_lower + min_len)
long_edge_iter = ((set_long0, long_lower, long_upper),)
else:
long_lower = dim_long0 + margin_le - min_len // 2
long_upper = dim_long1 - margin_le + min_len // 2
long_edge_iter = ((set_long0, long_lower, long_lower + min_len),
(set_long1, long_upper - min_len, long_upper))
for set_long_edge, lower, upper in long_edge_iter:
intv_mark = (max(dim_longl, lower - sp_le_max2), min(dim_longu, upper + sp_le_max2))
for htr in set_long_edge:
htr_idx = bisect.bisect_left(htr_list, htr)
intv_list[htr_idx].add(intv_mark, merge=True, abut=True)
self.add_wires(layer_id, (htr - 1) / 2, lower, upper, unit_mode=True)
# add rest of fill
for htr, intv_set in zip(htr_list, intv_list):
tidx = (htr - 1) / 2
for long0, long1 in intv_set.complement_iter(intv_long):
if long1 - long0 < min_len:
long0 = (long0 + long1 - min_len) // 2
long1 = long0 + min_len
self.add_wires(layer_id, tidx, long0, long1, unit_mode=True)
    def do_max_space_fill(self,  # type: TemplateBase
                          layer_id,  # type: int
                          bound_box=None,  # type: Optional[BBox]
                          fill_pitch=1,  # type: Union[float, int]
                          ):
        # type: (...) -> None
        """Draw density fill on the given layer.

        Uses shapely geometry to find all regions of the bounding box that
        are farther than half the maximum space from existing wires, then
        fills the region edges first and the interior last.

        Parameters
        ----------
        layer_id : int
            the layer to fill.
        bound_box : Optional[BBox]
            the region to fill.  Defaults to this template's bounding box.
        fill_pitch : Union[float, int]
            pitch between interior fill wires, in number of tracks.
        """
        grid = self.grid
        tech_info = grid.tech_info
        # maximum-space fill design rules for this layer
        fill_config = tech_info.tech_params['layout']['dummy_fill'][layer_id]
        sp_max = fill_config['sp_max']
        sp_le_max = fill_config['sp_le_max']
        ip_margin = fill_config['margin']
        ip_margin_le = fill_config['margin_le']
        # half of the maximum space / line-end space, and quarter-space margins
        sp_max2 = sp_max // 2
        sp_le_max2 = sp_le_max // 2
        margin = sp_max2 // 2
        margin_le = sp_le_max2 // 2
        min_len = grid.get_min_length(layer_id, 1, unit_mode=True)
        long_dir = grid.get_direction(layer_id)
        is_horiz = (long_dir == 'x')
        if bound_box is None:
            if self.bound_box is None:
                raise ValueError("bound_box_resolved is not set")
            bound_box_resolved = self.bound_box  # type: BBox
        else:
            bound_box_resolved = bound_box
        xl = bound_box_resolved.left_unit
        xr = bound_box_resolved.right_unit
        yb = bound_box_resolved.bottom_unit
        yt = bound_box_resolved.top_unit
        # tran_box/long_box: strips along the lower transverse/longitudinal
        # edges, used to detect empty regions on the region boundary.
        # (tran, long) = (transverse to, along) the wire direction.
        if is_horiz:
            tran_box = shgeo.box(xl + margin_le, yb, xr - margin_le, yb + sp_max2)
            long_box = shgeo.box(xl, yb + margin_le, xl + sp_le_max2, yt - margin_le)
            dim_tran0 = yb
            dim_tran1 = yt
            dim_long0 = xl
            dim_long1 = xr
        else:
            tran_box = shgeo.box(xl, yb + margin_le, xl + sp_max2, yt - margin_le)
            long_box = shgeo.box(xl + margin_le, yb, xr - margin_le, yb + sp_le_max2)
            dim_tran0 = xl
            dim_tran1 = xr
            dim_long0 = yb
            dim_long1 = yt
        dim_tran = dim_tran1 - dim_tran0
        dim_long = dim_long1 - dim_long0
        # draw the fill exclude layer over the whole region — presumably
        # blocks automatic foundry fill here; confirm against tech plugin
        self.add_rect(tech_info.get_exclude_layer(layer_id), bound_box_resolved)
        if dim_tran <= ip_margin or dim_long <= ip_margin_le:
            # region too small to require fill
            return
        # union of all existing wires intersecting the region, grown by half
        # the max space; anything outside this union violates max spacing.
        box_list = [shgeo.box(*box.get_bounds(unit_mode=True))
                    for box in self.intersection_rect_iter(layer_id, bound_box_resolved)]
        # NOTE(review): cascaded_union() is deprecated in newer shapely
        # versions in favor of unary_union().
        tot_geo = shops.cascaded_union(box_list)  # type: shgeo.Polygon
        tot_geo = tot_geo.buffer(sp_max2, cap_style=2, join_style=2)
        # fill transverse edges
        new_polys = []  # type: List[shgeo.Polygon]
        if sp_max2 * 2 >= dim_tran:
            # region too narrow for two edge rows; use one middle track
            tr = grid.coord_to_nearest_track(layer_id, (dim_tran0 + dim_tran1) // 2,
                                             half_track=True, unit_mode=True)
            do_upper = False
        else:
            tr = grid.coord_to_nearest_track(layer_id, dim_tran0 + margin, half_track=True,
                                             mode=-1, unit_mode=True)
            do_upper = True
        self._fill_tran_edge_helper(layer_id, grid, tot_geo, tran_box, tr, is_horiz,
                                    min_len, sp_max2, new_polys)
        if do_upper:
            # repeat for the upper transverse edge
            tr = grid.coord_to_nearest_track(layer_id, dim_tran1 - margin, half_track=True,
                                             mode=1, unit_mode=True)
            if is_horiz:
                tran_box = shgeo.box(xl + margin_le, yt - sp_max2, xr - margin_le, yt)
            else:
                tran_box = shgeo.box(xr - sp_max2, yb + margin_le, xr, yt - margin_le)
            self._fill_tran_edge_helper(layer_id, grid, tot_geo, tran_box, tr, is_horiz,
                                        min_len, sp_max2, new_polys)
        # merge the new edge wires into the covered-geometry union
        new_polys.append(tot_geo)
        tot_geo = shops.cascaded_union(new_polys)
        # fill longitudinal edges
        new_polys.clear()
        if sp_le_max2 * 2 >= dim_long:
            coord_mid = (dim_long1 + dim_long0) // 2
            do_upper = False
        else:
            coord_mid = dim_long0 + margin_le
            do_upper = True
        self._fill_long_edge_helper(layer_id, grid, tot_geo, long_box, coord_mid, is_horiz,
                                    min_len, sp_max2, new_polys, mode=-1 if do_upper else 0)
        if do_upper:
            # repeat for the upper longitudinal edge
            coord_mid = dim_long1 - margin_le
            if is_horiz:
                long_box = shgeo.box(xr - sp_le_max2, yb + margin_le, xr, yt - margin_le)
            else:
                long_box = shgeo.box(xl + margin_le, yt - sp_le_max2, xr - margin_le, yt)
            self._fill_long_edge_helper(layer_id, grid, tot_geo, long_box, coord_mid, is_horiz,
                                        min_len, sp_max2, new_polys, mode=1)
        new_polys.append(tot_geo)
        tot_geo = shops.cascaded_union(new_polys)
        # fill interior
        min_len2 = -(-min_len // 2)
        # everything not yet covered is farther than sp_max/2 from metal
        tot_box = shgeo.box(*bound_box_resolved.get_bounds(unit_mode=True))
        geo = tot_box.difference(tot_geo)
        for poly in self._get_flat_poly_iter(geo):
            if not poly.is_empty:
                self._fill_poly_bounds(poly, layer_id, is_horiz, min_len2, fill_pitch)
    def _fill_poly_bounds(self, poly, layer_id, is_horiz, min_len2, fill_pitch):
        """Draw interior fill wires covering the given empty polygon.

        Intersects a "comb" of wire-shaped boxes (one every fill_pitch
        tracks) with the polygon, then draws a wire over each resulting
        piece, extended to at least 2 * min_len2 around its center.

        Parameters
        ----------
        poly : shapely polygon
            the empty region to fill.
        layer_id : int
            the fill layer ID.
        is_horiz : bool
            True if the fill layer routes horizontally.
        min_len2 : int
            half of the minimum wire length, in resolution units.
        fill_pitch : Union[float, int]
            pitch between fill wires, in number of tracks.
        """
        grid = self.grid
        bounds = poly.bounds
        # shapely bounds are floats; snap back to integer resolution units
        xl = int(round(bounds[0]))
        yb = int(round(bounds[1]))
        xr = int(round(bounds[2]))
        yt = int(round(bounds[3]))
        tr_p2 = grid.get_track_pitch(layer_id, unit_mode=True) // 2
        # fill pitch converted to half-track units
        fill_htr = int(round(2 * fill_pitch))
        if is_horiz:
            tr0 = grid.coord_to_nearest_track(layer_id, yb, half_track=True,
                                              mode=-1, unit_mode=True)
            tr1 = grid.coord_to_nearest_track(layer_id, yt, half_track=True,
                                              mode=1, unit_mode=True)
            wl, wu = tuple2_to_int(grid.get_wire_bounds(layer_id, tr0, width=1, unit_mode=True))
            # comb of horizontal wire-shaped boxes, one every fill_htr
            # half-tracks, covering the polygon's bounding box
            comb = shgeo.MultiPolygon([shgeo.box(xl, wl + tr_p2 * idx, xr, wu + tr_p2 * idx)
                                       for idx in range(0, int(round(2 * (tr1 - tr0))) + 2,
                                                        fill_htr)])
        else:
            tr0 = grid.coord_to_nearest_track(layer_id, xl, half_track=True,
                                              mode=-1, unit_mode=True)
            tr1 = grid.coord_to_nearest_track(layer_id, xr, half_track=True,
                                              mode=1, unit_mode=True)
            wl, wu = tuple2_to_int(grid.get_wire_bounds(layer_id, tr0, width=1, unit_mode=True))
            # comb of vertical wire-shaped boxes, one every fill_htr half-tracks
            comb = shgeo.MultiPolygon([shgeo.box(wl + tr_p2 * idx, yb, wu + tr_p2 * idx, yt)
                                       for idx in range(0, int(round(2 * (tr1 - tr0))) + 2,
                                                        fill_htr)])
        # half-track index of the first comb wire
        htr0 = int(round(tr0 * 2)) + 1
        pitch = fill_htr * tr_p2
        for p in self._get_flat_poly_iter(poly.intersection(comb)):
            p_bnds = p.bounds
            if p_bnds:
                # recover the track index from the piece's transverse offset
                if is_horiz:
                    htr = htr0 + (int(round(p_bnds[1])) - wl) // pitch * fill_htr
                    pl = int(round(p_bnds[0]))
                    pu = int(round(p_bnds[2]))
                else:
                    htr = htr0 + (int(round(p_bnds[0])) - wl) // pitch * fill_htr
                    pl = int(round(p_bnds[1]))
                    pu = int(round(p_bnds[3]))
                # extend symmetrically about the center to the minimum length;
                # (htr - 1) / 2 intentionally yields half-integer track indices
                pc = (pl + pu) // 2
                self.add_wires(layer_id, (htr - 1) / 2, min(pl, pc - min_len2),
                               max(pu, pc + min_len2), unit_mode=True)
@classmethod
def _get_flat_poly_iter(cls, poly):
if (isinstance(poly, shgeo.MultiPolygon) or
isinstance(poly, shgeo.MultiLineString) or
isinstance(poly, shgeo.GeometryCollection)):
yield from poly
else:
yield poly
    def _fill_long_edge_helper(self, layer_id, grid, tot_geo, long_box, coord_mid, is_horiz,
                               min_len, sp_max2, new_polys, mode=0):
        """Add fill wires along a longitudinal edge of the fill region.

        For every empty region of ``long_box`` not covered by ``tot_geo``,
        draws a min_len-long wire on each whole track crossing that region,
        anchored at ``coord_mid``.

        Parameters
        ----------
        layer_id : int
            the fill layer ID.
        grid : RoutingGrid
            the routing grid.
        tot_geo : shapely geometry
            union of geometry already considered covered.
        long_box : shapely polygon
            the edge strip to scan for empty regions.
        coord_mid : int
            anchor coordinate of the new wires.
        is_horiz : bool
            True if the fill layer routes horizontally.
        min_len : int
            the wire length, in resolution units.
        sp_max2 : int
            half the maximum space; new wire boxes are buffered by this.
        new_polys : List[shapely polygon]
            output list; buffered boxes of new wires are appended here.
        mode : int
            -1 to grow the wire upward from coord_mid, 0 to center on it,
            1 to grow downward.
        """
        # compute the wire span from the anchor coordinate and mode
        if mode < 0:
            clower = coord_mid
        elif mode == 0:
            clower = coord_mid - min_len // 2
        else:
            clower = coord_mid - min_len
        cupper = clower + min_len
        # empty regions of the edge strip
        geo = long_box.difference(tot_geo)
        if isinstance(geo, shgeo.Polygon):
            geo = [geo]
        # NOTE(review): direct iteration over multi-part geometries was
        # removed in shapely 2.0; this assumes shapely < 2.0.
        for poly in geo:
            poly_bnds = poly.bounds
            if poly_bnds:
                # extent of the empty region transverse to the wire direction
                if is_horiz:
                    lower = poly_bnds[1]
                    upper = poly_bnds[3]
                else:
                    lower = poly_bnds[0]
                    upper = poly_bnds[2]
                htr0 = grid.coord_to_nearest_track(layer_id, lower, half_track=True, mode=-1,
                                                   unit_mode=True)
                htr1 = grid.coord_to_nearest_track(layer_id, upper, half_track=True, mode=1,
                                                   unit_mode=True)
                htr0 = int(round(htr0 * 2 + 1))
                htr1 = int(round(htr1 * 2 + 1))
                # draw a wire on every whole track crossing the empty region
                for htr in range(htr0, htr1 + 1, 2):
                    warr = self.add_wires(layer_id, (htr - 1) / 2, clower, cupper, unit_mode=True)
                    wbox = shgeo.box(*warr.get_bbox_array(grid).base.get_bounds(unit_mode=True))
                    new_polys.append(wbox.buffer(sp_max2, cap_style=2, join_style=2))
    def _fill_tran_edge_helper(self, layer_id, grid, tot_geo, tran_box, tr, is_horiz, min_len,
                               sp_max2, new_polys):
        """Add fill wires along a transverse edge of the fill region.

        For every empty region of ``tran_box`` not covered by ``tot_geo``,
        draws one wire on track ``tr`` spanning that region, extended to at
        least ``min_len``.  Buffered boxes of new wires are appended to
        ``new_polys``; ``sp_max2`` is the buffer amount.
        """
        # empty regions of the edge strip
        geo = tran_box.difference(tot_geo)
        if isinstance(geo, shgeo.Polygon):
            geo = [geo]
        # NOTE(review): direct iteration over multi-part geometries was
        # removed in shapely 2.0; this assumes shapely < 2.0.
        for poly in geo:
            poly_bnds = poly.bounds
            if poly_bnds:
                # span of the empty region along the wire direction
                if is_horiz:
                    lower = int(round(poly_bnds[0]))
                    upper = int(round(poly_bnds[2]))
                else:
                    lower = int(round(poly_bnds[1]))
                    upper = int(round(poly_bnds[3]))
                # center-extend the wire to min_len if the region is too short
                lower = min(lower, (lower + upper - min_len) // 2)
                upper = max(upper, lower + min_len)
                warr = self.add_wires(layer_id, tr, lower, upper, unit_mode=True)
                wbox = shgeo.box(*warr.get_bbox_array(grid).base.get_bounds(unit_mode=True))
                new_polys.append(wbox.buffer(sp_max2, cap_style=2, join_style=2))
class CachedTemplate(TemplateBase):
    """A template that loads its layout information from a cache file."""

    def __init__(self, temp_db, lib_name, params, used_names, **kwargs):
        # type: (TemplateDB, str, Dict[str, Any], Set[str], **Any) -> None
        TemplateBase.__init__(self, temp_db, lib_name, params, used_names, **kwargs)

    @classmethod
    def get_params_info(cls):
        # type: () -> Dict[str, str]
        return dict(
            cache_fname='the cache file name.',
        )

    def draw_layout(self):
        # type: () -> None
        cache_fname = self.params['cache_fname']
        # NOTE(review): pickle.load() can execute arbitrary code; only load
        # cache files produced by a trusted BAG run.
        with open(cache_fname + '_info.pickle', 'rb') as fp:
            info = pickle.load(fp)
        # restore size/port/box information recorded by the original template
        self._size = info['size']
        self._port_params = info['port_params']
        self.prim_top_layer = info['prim_top_layer']
        self.prim_bound_box = info['prim_bound_box']
        self.array_box = info['array_box']
        # reuse the cached used-track database instead of recomputing it
        self._merge_used_tracks = True
        self._used_tracks = UsedTracks(cache_fname, overwrite=False)
        # restore any extra attributes saved with the template
        for attr_name, attr_val in info['properties'].items():
            setattr(self, attr_name, attr_val)
        # instantiate the cached layout cell itself
        self.add_instance_primitive(info['lib_name'], info['cell_name'], (0, 0),
                                    inst_name='X0', unit_mode=True)
class BlackBoxTemplate(TemplateBase):
    """A black box template.

    Wraps an existing layout cell, registering its pins and blocking all
    routing layers over its bounding box.
    """

    def __init__(self, temp_db, lib_name, params, used_names, **kwargs):
        # type: (TemplateDB, str, Dict[str, Any], Set[str], **Any) -> None
        TemplateBase.__init__(self, temp_db, lib_name, params, used_names, **kwargs)
        self._sch_params = {}  # type: Dict[str, Any]

    @property
    def sch_params(self):
        # type: () -> Dict[str, Any]
        """Schematic parameters computed by draw_layout()."""
        return self._sch_params

    @classmethod
    def get_params_info(cls):
        # type: () -> Dict[str, str]
        return dict(
            lib_name='The library name.',
            cell_name='The layout cell name.',
            top_layer='The top level layer.',
            size='The width/height of the cell, in resolution units.',
            ports='The port information dictionary.',
            show_pins='True to show pins.',
        )

    def get_layout_basename(self):
        return self.params['cell_name']

    def draw_layout(self):
        # type: () -> None
        lib_name = self.params['lib_name']
        cell_name = self.params['cell_name']
        top_layer = self.params['top_layer']
        size = self.params['size']
        show_pins = self.params['show_pins']
        grid = self.grid
        res = grid.resolution
        tech_info = grid.tech_info
        # register every pin of the black box
        for term_name, pin_dict in self.params['ports'].items():
            for lay_name, bbox_list in pin_dict.items():
                lay_id = tech_info.get_layer_id(lay_name)
                for xl, yb, xr, yt in bbox_list:
                    pin_box = BBox(xl, yb, xr, yt, res, unit_mode=True)
                    self._register_pin(lay_id, lay_name, term_name, pin_box, show_pins)
        # instantiate the hand-drawn layout cell and record its size
        self.add_instance_primitive(lib_name, cell_name, (0, 0), unit_mode=True)
        self.prim_top_layer = top_layer
        self.prim_bound_box = BBox(0, 0, size[0], size[1], res, unit_mode=True)
        # block routing over the entire cell on all routing layers
        for lay in range(1, top_layer + 1):
            self.mark_bbox_used(lay, self.prim_bound_box)
        self._sch_params = dict(
            lib_name=lib_name,
            cell_name=cell_name,
        )

    def _register_pin(self, lay_id, lay_name, term_name, box, show_pins):
        """Register one pin, as a WireArray if it lands exactly on a track."""
        if lay_id is None:
            # layer not on the routing grid; use a primitive pin
            self.add_pin_primitive(term_name, lay_name, box, show=show_pins)
            return
        if self.grid.get_direction(lay_id) == 'x':
            dim, coord = box.height_unit, box.yc_unit
            lower, upper = box.left_unit, box.right_unit
        else:
            dim, coord = box.width_unit, box.xc_unit
            lower, upper = box.bottom_unit, box.top_unit
        try:
            tr_idx = self.grid.coord_to_track(lay_id, coord, unit_mode=True)
        except ValueError:
            # pin center is off-track; fall back to a primitive pin
            self.add_pin_primitive(term_name, lay_name, box, show=show_pins)
            return
        width_ntr = self.grid.get_track_width_inverse(lay_id, dim, unit_mode=True)
        if self.grid.get_track_width(lay_id, width_ntr, unit_mode=True) != dim:
            # pin width does not match any track width; fall back
            self.add_pin_primitive(term_name, lay_name, box, show=show_pins)
            return
        track_id = TrackID(lay_id, tr_idx, width=width_ntr)
        warr = WireArray(track_id, lower, upper, res=self.grid.resolution, unit_mode=True)
        self.add_pin(term_name, warr, show=show_pins)
================================================
FILE: bag/layout/util.py
================================================
# -*- coding: utf-8 -*-
"""This module contains utility classes used for layout
"""
from typing import Iterator, Union, Tuple, List, Any
import pprint
import numpy as np
__all__ = ['BBox', 'BBoxArray', 'Pin', 'transform_table', 'transform_point',
'get_inverse_transform', 'tuple2_to_int', 'tuple2_to_float_int']
# 2x2 integer rotation/mirror matrices for each supported orientation,
# applied as new_point = mat @ point.  'R0' is the identity; 'MX'/'MY'
# negate the y/x coordinate (mirror across the X/Y axis); 'R90'/'R180'/
# 'R270' are counter-clockwise rotations; 'MXR90'/'MYR90' are the
# combined mirror-then-rotate transforms.
transform_table = {'R0': np.array([[1, 0], [0, 1]], dtype=int),
                   'MX': np.array([[1, 0], [0, -1]], dtype=int),
                   'MY': np.array([[-1, 0], [0, 1]], dtype=int),
                   'R180': np.array([[-1, 0], [0, -1]], dtype=int),
                   'R90': np.array([[0, -1], [1, 0]], dtype=int),
                   'MXR90': np.array([[0, 1], [1, 0]], dtype=int),
                   'MYR90': np.array([[0, -1], [-1, 0]], dtype=int),
                   'R270': np.array([[0, 1], [-1, 0]], dtype=int),
                   }
def tuple2_to_int(input_tuple: Tuple[Any, Any]) -> Tuple[int, int]:
    """
    Cast both elements of a 2-tuple to int.

    :param input_tuple: Tuple of two int-convertible elements
    :return: Tuple of two ints
    """
    first, second = input_tuple
    return int(first), int(second)
def tuple2_to_float_int(input_tuple: Tuple[Any, Any]) -> Tuple[float, int]:
    """
    Cast a tuple of 2 elements to a (float, int) tuple.

    :param input_tuple: Tuple of two elements
    :return: first element cast to float, second element cast to int
    """
    first, second = input_tuple
    return float(first), int(second)
def transform_point(x, y, loc, orient):
    """Transform the (x, y) point using the given location and orientation.

    The point is first rotated/mirrored about the origin according to
    *orient*, then translated by *loc*.
    """
    if orient not in transform_table:
        raise ValueError('Unsupported orientation: %s' % orient)
    rotated = transform_table[orient].dot(np.array([x, y]))
    result = rotated + np.asarray(loc)
    return result.item(0), result.item(1)
def get_inverse_transform(loc, orient):
    """Returns the (loc, orient) pair of the inverse transform."""
    # R90 and R270 are each other's inverses; every other supported
    # orientation is its own inverse.
    _rot_inverse = {'R90': 'R270', 'R270': 'R90'}
    orient_inv = _rot_inverse.get(orient, orient)
    inv_mat = transform_table[orient_inv]
    shift = np.dot(inv_mat, -np.asarray(loc))
    return (shift.item(0), shift.item(1)), orient_inv
def transform_loc_orient(loc, orient, trans_loc, trans_orient):
    """Compose two transforms: apply (trans_loc, trans_orient) after (loc, orient).

    Parameters
    ----------
    loc : Tuple[int, int]
        location of the first transform.
    orient : str
        orientation of the first transform.
    trans_loc : Tuple[int, int]
        location of the second transform.
    trans_orient : str
        orientation of the second transform.

    Returns
    -------
    new_loc : Tuple[int, int]
        the location of the composed transform.
    new_orient : str
        the orientation of the composed transform.

    Raises
    ------
    ValueError
        if the composed rotation matrix matches no supported orientation.
    """
    mat1 = transform_table[orient]
    mat2 = transform_table[trans_orient]
    new_mat = np.dot(mat2, mat1)
    new_loc = np.array(trans_loc) + np.dot(mat2, np.array(loc))
    for key, val in transform_table.items():
        if np.allclose(new_mat, val):
            return (new_loc.item(0), new_loc.item(1)), key
    # BUGFIX: the original fell off the loop and implicitly returned None.
    # The 8 orientations form a closed group, so this should be unreachable;
    # fail loudly instead of propagating None to the caller.
    raise ValueError('Unsupported orientation combination: %s, %s' % (orient, trans_orient))
class PortSpec(object):
    """Specification of a port.

    Parameters
    ----------
    ntr : int
        number of tracks the port should occupy
    idc : float
        DC current the port should support, in Amperes.
    """

    def __init__(self, ntr, idc):
        self._ntr = ntr
        self._idc = idc

    @property
    def ntr(self):
        """minimum number of tracks the port should occupy"""
        return self._ntr

    @property
    def idc(self):
        """minimum DC current the port should support, in Amperes"""
        return self._idc

    def __repr__(self):
        return '%s(%d, %.4g)' % (self.__class__.__name__, self._ntr, self._idc)

    def __str__(self):
        return repr(self)
class BBox(object):
"""An immutable bounding box.
Parameters
----------
left : float or int
left coordinate.
bottom : float or int
bottom coordinate.
right : float or int
right coordinate.
top : float or int
top coordinate.
resolution : float
the coordinate resolution
unit_mode : bool
True if the given coordinates are in layout units already.
"""
def __init__(self, left, bottom, right, top, resolution, unit_mode=False):
if not unit_mode:
self._left_unit = int(round(left / resolution))
self._bot_unit = int(round(bottom / resolution))
self._right_unit = int(round(right / resolution))
self._top_unit = int(round(top / resolution))
else:
self._left_unit = int(round(left))
self._bot_unit = int(round(bottom))
self._right_unit = int(round(right))
self._top_unit = int(round(top))
# self._left_unit = left
# self._bot_unit = bottom
# self._right_unit = right
# self._top_unit = top
self._res = resolution
@classmethod
def get_invalid_bbox(cls):
# type: () -> BBox
"""Returns a default invalid bounding box.
Returns
-------
box : bag.layout.util.BBox
an invalid bounding box.
"""
return cls(0, 0, -1, -1, 0.1, unit_mode=True)
    @property
    def left(self):
        """left coordinate, in physical (resolution-scaled) units."""
        return self._left_unit * self._res
    @property
    def left_unit(self):
        """left coordinate, in integer resolution units."""
        return self._left_unit
    @property
    def right(self):
        """right coordinate, in physical (resolution-scaled) units."""
        return self._right_unit * self._res
    @property
    def right_unit(self):
        """right coordinate, in integer resolution units."""
        return self._right_unit
    @property
    def bottom(self):
        """bottom coordinate, in physical (resolution-scaled) units."""
        return self._bot_unit * self._res
    @property
    def bottom_unit(self):
        """bottom coordinate, in integer resolution units."""
        return self._bot_unit
    @property
    def top(self):
        """top coordinate, in physical (resolution-scaled) units."""
        return self._top_unit * self._res
    @property
    def top_unit(self):
        """top coordinate, in integer resolution units."""
        return self._top_unit
    @property
    def resolution(self):
        """coordinate resolution."""
        return self._res
    @property
    def width(self):
        """width of this bounding box, in physical units."""
        return self.width_unit * self._res
    @property
    def width_unit(self):
        """width of this bounding box in resolution units."""
        return self._right_unit - self._left_unit
    @property
    def height(self):
        """height of this bounding box, in physical units."""
        return self.height_unit * self._res
    @property
    def height_unit(self):
        """height of this bounding box in resolution units."""
        return self._top_unit - self._bot_unit
    @property
    def xc(self):
        """The center X coordinate, rounded to nearest grid point."""
        return ((self._left_unit + self._right_unit) // 2) * self._res
    @property
    def xc_unit(self):
        """The center X coordinate in resolution units (floor of midpoint)."""
        return (self._left_unit + self._right_unit) // 2
    @property
    def yc(self):
        """The center Y coordinate, rounded to nearest grid point."""
        return ((self._bot_unit + self._top_unit) // 2) * self._res
    @property
    def yc_unit(self):
        """The center Y coordinate in resolution units (floor of midpoint)."""
        return (self._bot_unit + self._top_unit) // 2
def get_points(self, unit_mode=False):
# type: (bool) -> List[Tuple[Union[float, int], Union[float, int]]]
"""Returns this bounding box as a list of points.
Parameters
----------
unit_mode : bool
True to return points in resolution units.
Returns
-------
points : List[Tuple[Union[float, int], Union[float, int]]]
this bounding box as a list of points.
"""
if unit_mode:
return [(self._left_unit, self._bot_unit),
(self._left_unit, self._top_unit),
(self._right_unit, self._top_unit),
(self._right_unit, self._bot_unit)]
else:
return [(self.left, self.bottom),
(self.left, self.top),
(self.right, self.top),
(self.right, self.bottom)]
    def as_bbox_array(self):
        """Cast this BBox as a BBoxArray.

        Returns a BBoxArray wrapping only this single box.
        """
        return BBoxArray(self)
    def as_bbox_collection(self):
        """Cast this BBox as a BBoxCollection.

        Returns a BBoxCollection containing one single-box BBoxArray.
        """
        return BBoxCollection([BBoxArray(self)])
def merge(self, bbox):
# type: (BBox) -> BBox
"""Returns a new bounding box that's the union of this bounding box and the given one.
Parameters
----------
bbox : bag.layout.util.BBox
the bounding box to merge with.
Returns
-------
total : bag.layout.util.BBox
the merged bounding box.
"""
if not self.is_valid():
return bbox
elif not bbox.is_valid():
return self
return BBox(min(self._left_unit, bbox._left_unit),
min(self._bot_unit, bbox._bot_unit),
max(self._right_unit, bbox._right_unit),
max(self._top_unit, bbox._top_unit),
self._res, unit_mode=True)
def intersect(self, bbox):
# type: (BBox) -> BBox
"""Returns a new bounding box that's the intersection of this bounding box and the given one.
Parameters
----------
bbox : bag.layout.util.BBox
the bounding box to intersect with.
Returns
-------
intersect : bag.layout.util.BBox
the intersection bounding box.
"""
return BBox(max(self._left_unit, bbox._left_unit),
max(self._bot_unit, bbox._bot_unit),
min(self._right_unit, bbox._right_unit),
min(self._top_unit, bbox._top_unit),
self._res, unit_mode=True)
def overlaps(self, bbox):
# type: (BBox) -> bool
"""Returns True if this BBox overlaps the given BBox."""
return ((max(self._left_unit, bbox._left_unit) <
min(self._right_unit, bbox._right_unit)) and
(max(self._bot_unit, bbox._bot_unit) <
min(self._top_unit, bbox._top_unit)))
def extend(self, x=None, y=None, unit_mode=False):
# type: (Union[float, int], Union[float, int], bool) -> BBox
"""Returns an extended BBox that covers the given point.
Parameters
----------
x : float or None
if given, the X coordinate to extend to.
y : float or None
if given, the Y coordinate to extend to
unit_mode : bool
True if x and y are given in resolution units.
Returns
-------
ext_box : BBox
the extended bounding box.
"""
if x is None:
x = self._left_unit
elif not unit_mode:
x = int(round(x / self._res))
if y is None:
y = self._bot_unit
elif not unit_mode:
y = int(round(y / self._res))
return BBox(min(self._left_unit, x),
min(self._bot_unit, y),
max(self._right_unit, x),
max(self._top_unit, y), self._res, unit_mode=True)
def expand(self, dx=0, dy=0, unit_mode=False):
# type: (Union[float, int], Union[float, int], bool) -> BBox
"""Returns a BBox expanded by the given amount.
Parameters
----------
dx : Union[float, int]
if given, expand left and right edge by this amount.
dy : Union[float, int]
if given, expand top and bottom edge by this amount.
unit_mode : bool
True if x and y are given in resolution units.
Returns
-------
ext_box : BBox
the extended bounding box.
"""
if not unit_mode:
dx = int(round(dx / self._res))
dy = int(round(dy / self._res))
return BBox(self._left_unit - dx, self._bot_unit - dy, self._right_unit + dx,
self._top_unit + dy, self._res, unit_mode=True)
def transform(self, loc=(0, 0), orient='R0', unit_mode=False):
# type: (Tuple[Union[float, int], Union[float, int]], str, bool) -> BBox
"""Returns a new BBox under the given transformation.
rotates first before shift.
Parameters
----------
loc : Tuple[Union[float, int], Union[float, int]]
location of the anchor.
orient : str
the orientation of the bounding box.
unit_mode : bool
True if location is given in resolution units
Returns
-------
box : BBox
the new bounding box.
"""
if not self.is_valid():
return BBox.get_invalid_bbox()
if not unit_mode:
loc = int(round(loc[0] / self._res)), int(round(loc[1] / self._res))
p1 = transform_point(self._left_unit, self._bot_unit, loc, orient)
p2 = transform_point(self._right_unit, self._top_unit, loc, orient)
return BBox(min(p1[0], p2[0]), min(p1[1], p2[1]),
max(p1[0], p2[0]), max(p1[1], p2[1]),
self._res, unit_mode=True)
def move_by(self, dx=0, dy=0, unit_mode=False):
    # type: (Union[float, int], Union[float, int], bool) -> BBox
    """Returns a new BBox shifted by the given amount.

    Parameters
    ----------
    dx : float
        shift in X direction.
    dy : float
        shift in Y direction.
    unit_mode : bool
        True if shifts are given in resolution units

    Returns
    -------
    box : bag.layout.util.BBox
        the new bounding box.
    """
    # convert the shifts to resolution units if needed
    if unit_mode:
        xsh, ysh = dx, dy
    else:
        xsh = int(round(dx / self._res))
        ysh = int(round(dy / self._res))
    return BBox(self._left_unit + xsh, self._bot_unit + ysh,
                self._right_unit + xsh, self._top_unit + ysh,
                self._res, unit_mode=True)
def flip_xy(self):
    # type: () -> BBox
    """Returns a new BBox with X and Y coordinate swapped."""
    # swap axis roles: (left, bottom, right, top) -> (bottom, left, top, right)
    xl, yb = self._bot_unit, self._left_unit
    xr, yt = self._top_unit, self._right_unit
    return BBox(xl, yb, xr, yt, self._res, unit_mode=True)
def with_interval(self, direction, lower, upper, unit_mode=False):
    # type: (str, Union[float, int], Union[float, int], bool) -> BBox
    """Returns a new BBox with the interval along the given direction replaced.

    Parameters
    ----------
    direction : str
        the direction to replace; 'x' replaces the horizontal interval,
        any other value replaces the vertical interval.
    lower : Union[float, int]
        the new lower coordinate along the given direction.
    upper : Union[float, int]
        the new upper coordinate along the given direction.
    unit_mode : bool
        True if lower/upper are given in resolution units.

    Returns
    -------
    bbox : BBox
        the new bounding box.
    """
    if not unit_mode:
        lower = int(round(lower / self._res))
        upper = int(round(upper / self._res))
    if direction == 'x':
        return BBox(lower, self._bot_unit, upper, self._top_unit, self._res, unit_mode=True)
    else:
        return BBox(self._left_unit, lower, self._right_unit, upper, self._res, unit_mode=True)
def get_interval(self, direction, unit_mode=False):
    # type: (str, bool) -> Tuple[Union[float, int], Union[float, int]]
    """Returns the interval of this bounding box along the given direction.

    Parameters
    ----------
    direction : str
        direction along which to compute the bounding box interval. Either 'x' or 'y'.
    unit_mode : bool
        True to return dimensions in resolution units.

    Returns
    -------
    lower : float
        the lower coordinate along the given direction.
    upper : float
        the upper coordinate along the given direction.
    """
    if direction == 'x':
        ans = self._left_unit, self._right_unit
    else:
        ans = self._bot_unit, self._top_unit

    if unit_mode:
        return ans
    # scale unit coordinates back to layout distances
    return ans[0] * self._res, ans[1] * self._res
def get_bounds(self, unit_mode=False):
    # type: (bool) -> Tuple[Union[float, int], ...]
    """Returns the bounds of this bounding box.

    Parameters
    ----------
    unit_mode : bool
        True to return bounds in resolution units.

    Returns
    -------
    bounds : Tuple[Union[float, int], ...]
        a tuple of (left, bottom, right, top) coordinates.
    """
    if not unit_mode:
        return self.left, self.bottom, self.right, self.top
    return self._left_unit, self._bot_unit, self._right_unit, self._top_unit
def is_physical(self):
    # type: () -> bool
    """Returns True if this bounding box has positive area.

    Returns
    -------
    is_physical : bool
        True if this bounding box has positive area.
    """
    # both width and height must be strictly positive
    return self._top_unit > self._bot_unit and self._right_unit > self._left_unit
def is_valid(self):
    # type: () -> bool
    """Returns True if this bounding box is valid, i.e. nonnegative area.

    Returns
    -------
    is_valid : bool
        True if this bounding box has nonnegative area.
    """
    # degenerate (zero width or height) boxes are still considered valid
    return self._top_unit >= self._bot_unit and self._right_unit >= self._left_unit
def get_immutable_key(self):
    """Returns an immutable key object that can be used to uniquely identify this BBox."""
    # the class name is part of the key so different box types never collide
    return (type(self).__name__, self._left_unit, self._bot_unit,
            self._right_unit, self._top_unit, self._res)
def __str__(self):
    # str() and repr() render identically for this class
    return repr(self)
def __repr__(self):
    # number of decimal digits needed to print a coordinate exactly at this
    # resolution (at least 1); e.g. res = 0.001 -> 3 digits.
    precision = max(1, -1 * int(np.floor(np.log10(self._res))))
    # build a format string like '%s(%.3f, %.3f, %.3f, %.3f)'
    fmt_str = '%s(%.{0}f, %.{0}f, %.{0}f, %.{0}f)'.format(precision)
    return fmt_str % (self.__class__.__name__, self.left, self.bottom, self.right, self.top)
def __hash__(self):
    # hash on the same key used by __eq__ so equal boxes hash equally
    return hash(self.get_immutable_key())
def __eq__(self, other):
    """Returns True if other is a BBox with the same coordinates and resolution.

    Returns NotImplemented for non-BBox operands so that Python falls back
    to the reflected comparison (or plain inequality) instead of raising
    AttributeError on e.g. ``box == None``.
    """
    if not isinstance(other, BBox):
        return NotImplemented
    return self.get_immutable_key() == other.get_immutable_key()
class BBoxArray(object):
    """An array of bounding boxes.

    Useful for representing a bus of wires.

    Parameters
    ----------
    bbox : BBox
        the lower-left bounding box.
    nx : int
        number of columns.
    ny : int
        number of rows.
    spx : Union[float, int]
        column pitch.
    spy : Union[float, int]
        row pitch.
    unit_mode : bool
        True if layout dimensions are specified in resolution units.
    """

    def __init__(self, bbox, nx=1, ny=1, spx=0, spy=0, unit_mode=False):
        # type: (BBox, int, int, Union[float, int], Union[float, int], bool) -> None
        if not isinstance(bbox, BBox):
            raise ValueError('%s is not a BBox object' % bbox)
        if nx <= 0 or ny <= 0:
            raise ValueError('Cannot have 0 bounding boxes.')
        if spx < 0 or spy < 0:
            raise ValueError('Currently does not support negative pitches.')

        self._bbox = bbox
        self._nx = nx
        self._ny = ny
        # pitches are stored internally in resolution units
        if unit_mode:
            self._spx_unit = int(spx)  # type: int
            self._spy_unit = int(spy)  # type: int
        else:
            self._spx_unit = int(round(spx / bbox.resolution))
            self._spy_unit = int(round(spy / bbox.resolution))

    def __iter__(self):
        # type: () -> Iterator[BBox]
        """Iterates over all bounding boxes in this BBoxArray.

        traverses from left to right, then from bottom to top.
        """
        for idx in range(self._nx * self._ny):
            yield self.get_bbox(idx)

    @property
    def base(self):
        # type: () -> BBox
        """the lower-left bounding box"""
        return self._bbox

    @property
    def nx(self):
        # type: () -> int
        """number of columns"""
        return self._nx

    @property
    def ny(self):
        # type: () -> int
        """number of rows"""
        return self._ny

    @property
    def spx(self):
        # type: () -> float
        """column pitch"""
        return self._spx_unit * self._bbox.resolution

    @property
    def spx_unit(self):
        # type: () -> int
        """column pitch in resolution units."""
        return self._spx_unit

    @property
    def spy(self):
        # type: () -> float
        """row pitch"""
        return self._spy_unit * self._bbox.resolution

    @property
    def spy_unit(self):
        # type: () -> int
        """row pitch in resolution units."""
        return self._spy_unit

    @property
    def left(self):
        # type: () -> float
        """left-most edge coordinate."""
        return self._bbox.left

    @property
    def left_unit(self):
        # type: () -> int
        """left-most edge coordinate in resolution units."""
        return self._bbox.left_unit

    @property
    def right(self):
        # type: () -> float
        """right-most edge coordinate."""
        return self.right_unit * self._bbox.resolution

    @property
    def right_unit(self):
        # type: () -> int
        """right-most edge coordinate in resolution units."""
        return self._bbox.right_unit + self._spx_unit * (self._nx - 1)

    @property
    def bottom(self):
        # type: () -> float
        """bottom-most edge coordinate."""
        return self._bbox.bottom

    @property
    def bottom_unit(self):
        # type: () -> int
        """bottom-most edge coordinate in resolution units."""
        return self._bbox.bottom_unit

    @property
    def top(self):
        # type: () -> float
        """top-most edge coordinate."""
        return self.top_unit * self._bbox.resolution

    @property
    def top_unit(self):
        # type: () -> int
        """top-most edge coordinate in resolution units."""
        return self._bbox.top_unit + self._spy_unit * (self._ny - 1)

    @property
    def xc(self):
        # type: () -> float
        """center X coordinate of the overall array."""
        return self.xc_unit * self._bbox.resolution

    @property
    def xc_unit(self):
        # type: () -> int
        """center X coordinate in resolution units."""
        return (self.left_unit + self.right_unit) // 2

    @property
    def yc(self):
        # type: () -> float
        """center Y coordinate of the overall array."""
        return self.yc_unit * self._bbox.resolution

    @property
    def yc_unit(self):
        # type: () -> int
        """center Y coordinate in resolution units."""
        return (self.bottom_unit + self.top_unit) // 2

    def as_bbox_collection(self):
        # type: () -> 'BBoxCollection'
        """Cast this BBoxArray as a BBoxCollection."""
        return BBoxCollection([self])

    def get_bbox(self, idx):
        # type: (int) -> BBox
        """Returns the bounding box with the given index.

        index increases from left to right, then from bottom to top.
        lower-left box is index 0.

        Returns
        -------
        bbox : bag.layout.util.BBox
            the bounding box with the given index.
        """
        row_idx, col_idx = divmod(idx, self._nx)
        return self._bbox.transform(loc=(col_idx * self._spx_unit,
                                         row_idx * self._spy_unit), unit_mode=True)

    def get_overall_bbox(self):
        # type: () -> BBox
        """Returns the overall bounding box of this BBoxArray.

        Returns
        -------
        overall_bbox : bag.layout.util.BBox
            the overall bounding box of this BBoxArray.
        """
        return BBox(self.left_unit, self.bottom_unit, self.right_unit, self.top_unit,
                    self._bbox.resolution, unit_mode=True)

    def move_by(self, dx=0, dy=0, unit_mode=False):
        # type: (Union[float, int], Union[float, int], bool) -> BBoxArray
        """Returns a new BBoxArray shifted by the given amount.

        Parameters
        ----------
        dx : Union[float, int]
            shift in X direction.
        dy : Union[float, int]
            shift in Y direction.
        unit_mode : bool
            True if shifts are given in resolution units

        Returns
        -------
        box_arr : BBoxArray
            the new BBoxArray.
        """
        return self.transform((dx, dy), unit_mode=unit_mode)

    def transform(self, loc=(0, 0), orient='R0', unit_mode=False):
        # type: (Tuple[Union[float, int], Union[float, int]], str, bool) -> BBoxArray
        """Returns a new BBoxArray under the given transformation.

        rotates first before shift.

        Parameters
        ----------
        loc : Tuple[Union[float, int], Union[float, int]]
            location of the anchor.
        orient : str
            the orientation of the bounding box.
        unit_mode : bool
            True if location is given in resolution units

        Returns
        -------
        box_arr : BBoxArray
            the new BBoxArray.
        """
        if unit_mode:
            dx, dy = loc[0], loc[1]
        else:
            res = self._bbox.resolution
            dx = int(round(loc[0] / res))
            dy = int(round(loc[1] / res))
        if orient == 'R0':
            left = self.left_unit + dx
            bottom = self.bottom_unit + dy
        elif orient == 'MX':
            left = self.left_unit + dx
            bottom = -self.top_unit + dy
        elif orient == 'MY':
            left = -self.right_unit + dx
            bottom = self.bottom_unit + dy
        elif orient == 'R180':
            left = -self.right_unit + dx
            bottom = -self.top_unit + dy
        else:
            raise ValueError('Invalid orientation: ' + orient)

        # no 90 degree-ish rotation; width and height will not interchange
        new_base = BBox(left, bottom, left + self._bbox.width_unit,
                        bottom + self._bbox.height_unit, self._bbox.resolution,
                        unit_mode=True)
        return BBoxArray(new_base, nx=self._nx, ny=self._ny,
                         spx=self._spx_unit, spy=self._spy_unit, unit_mode=True)

    def arrayed_copies(self, nx=1, ny=1, spx=0, spy=0, unit_mode=False):
        # type: (int, int, Union[float, int], Union[float, int], bool) -> 'BBoxCollection'
        """Returns a BBoxCollection containing arrayed copies of this BBoxArray

        Parameters
        ----------
        nx : int
            number of copies in horizontal direction.
        ny : int
            number of copies in vertical direction.
        spx : Union[float, int]
            pitch in horizontal direction.
        spy : Union[float, int]
            pitch in vertical direction.
        unit_mode : bool
            True if pitches are given in resolution units

        Returns
        -------
        bcol : :class:`bag.layout.util.BBoxCollection`
            a BBoxCollection of the arrayed copies.
        """
        if not unit_mode:
            res = self._bbox.resolution
            # BUGFIX: the horizontal pitch must be computed from spx; a
            # previous version mistakenly divided spy here.
            spx = int(round(spx / res))
            spy = int(round(spy / res))
        x_info = self._array_helper(nx, spx, self.nx, self._spx_unit)
        y_info = self._array_helper(ny, spy, self.ny, self._spy_unit)

        base = self.base
        # all pitches are in resolution units at this point, so the new
        # BBoxArrays must be constructed with unit_mode=True (previously
        # omitted, which scaled the pitches by the resolution twice).
        barr_list = [BBoxArray(base.move_by(dx, dy, unit_mode=True), nx=new_nx, ny=new_ny,
                               spx=new_spx, spy=new_spy, unit_mode=True)
                     for new_nx, new_spx, dx in zip(*x_info)
                     for new_ny, new_spy, dy in zip(*y_info)]
        return BBoxCollection(barr_list)

    @staticmethod
    def _array_helper(n1, sp1, n2, sp2):
        """Combine an array-of-arrays along one dimension.

        Returns parallel lists (counts, pitches, offsets) describing the
        minimum number of 1D arrays needed to represent n1 copies (pitch sp1)
        of an n2-element array (pitch sp2).
        """
        if n1 == 1:
            return [n2], [sp2], [0]
        elif n2 == 1:
            return [n1], [sp1], [0]
        elif sp1 == sp2 * n2:
            # outer pitch exactly abuts the inner arrays; fuse into one array
            return [n1 * n2], [sp2], [0]
        elif sp2 == sp1 * n1:
            return [n1 * n2], [sp1], [0]
        else:
            # no way to express as single array
            if n1 < n2 or (n1 == n2 and sp2 < sp1):
                return [n2] * n1, [sp2] * n1, list(range(0, sp1 * n1, sp1))
            else:
                return [n1] * n2, [sp1] * n2, list(range(0, sp2 * n2, sp2))

    def __str__(self):
        # str() and repr() render identically for this class
        return repr(self)

    def __repr__(self):
        # digits needed to print a pitch exactly at this resolution
        precision = max(1, -1 * int(np.floor(np.log10(self._bbox.resolution))))
        fmt_str = '%s(%s, %d, %d, %.{0}f, %.{0}f)'.format(precision)
        return fmt_str % (self.__class__.__name__, self._bbox, self._nx,
                          self._ny, self.spx, self.spy)
class BBoxCollection(object):
    """A collection of bounding boxes.

    To support efficient computation, this class stores bounding boxes as a
    list of BBoxArray objects.

    Parameters
    ----------
    box_arr_list : list[bag.layout.util.BBoxArray]
        list of BBoxArrays in this collections.
    """

    def __init__(self, box_arr_list):
        self._box_arr_list = box_arr_list

    def __iter__(self):
        """Iterates over all BBoxArray in this collection."""
        return self._box_arr_list.__iter__()

    def __reversed__(self):
        return self._box_arr_list.__reversed__()

    def __len__(self):
        return len(self._box_arr_list)

    def as_bbox_array(self):
        """Attempt to cast this BBoxCollection into a BBoxArray.

        Returns
        -------
        bbox_arr : bag.layout.util.BBoxArray
            the BBoxArray object that's equivalent to this BBoxCollection.

        Raises
        ------
        Exception :
            if this BBoxCollection cannot be cast into a BBoxArray.
        """
        if len(self._box_arr_list) != 1:
            raise Exception('Unable to cast this BBoxCollection into a BBoxArray.')
        return self._box_arr_list[0]

    def as_bbox(self):
        """Attempt to cast this BBoxCollection into a BBox.

        Returns
        -------
        bbox : bag.layout.util.BBox
            the BBox object that's equivalent to this BBoxCollection.

        Raises
        ------
        Exception :
            if this BBoxCollection cannot be cast into a BBox.
        """
        # BUGFIX: the error messages previously said "BBoxArray" even though
        # this method casts to a BBox.
        if len(self._box_arr_list) != 1:
            raise Exception('Unable to cast this BBoxCollection into a BBox.')
        box_arr = self._box_arr_list[0]
        if box_arr.nx != 1 or box_arr.ny != 1:
            raise Exception('Unable to cast this BBoxCollection into a BBox.')
        return box_arr.base

    def get_bounding_box(self):
        """Returns the bounding box that encloses all boxes in this collection.

        Returns
        -------
        bbox : bag.layout.util.BBox
            the bounding box of this BBoxCollection.
        """
        # merge the overall extent of each BBoxArray, starting from an
        # invalid (empty) box.
        box = BBox.get_invalid_bbox()
        for box_arr in self._box_arr_list:
            all_box = BBox(box_arr.left, box_arr.bottom, box_arr.right, box_arr.top,
                           box_arr.base.resolution)
            box = box.merge(all_box)
        return box

    def transform(self, loc=(0, 0), orient='R0'):
        """Returns a new BBoxCollection under the given transformation.

        rotates first before shift.

        Parameters
        ----------
        loc : (float, float)
            location of the anchor.
        orient : str
            the orientation of the bounding box.

        Returns
        -------
        box_collection : bag.layout.util.BBoxCollection
            the new BBoxCollection.
        """
        new_list = [box_arr.transform(loc=loc, orient=orient) for box_arr in self._box_arr_list]
        return BBoxCollection(new_list)

    def __str__(self):
        return repr(self)

    def __repr__(self):
        return pprint.pformat(self._box_arr_list)
class Pin(object):
    """A layout pin.

    Multiple pins can share the same terminal name.

    Parameters
    ----------
    pin_name : str
        the pin label.
    term_name : str
        the terminal name.
    layer : str
        the pin layer name.
    bbox : bag.layout.util.BBox
        the pin bounding box.
    """

    def __init__(self, pin_name, term_name, layer, bbox):
        # reject degenerate (zero/negative area) pin geometry up front
        if not bbox.is_physical():
            raise Exception('Non-physical pin bounding box: %s' % bbox)

        self._pin_name = pin_name
        self._term_name = term_name
        self._layer = layer
        self._bbox = bbox

    @property
    def pin_name(self):
        """the pin label."""
        return self._pin_name

    @property
    def term_name(self):
        """the terminal name."""
        return self._term_name

    @property
    def layer(self):
        """the pin layer name"""
        return self._layer

    @property
    def bbox(self):
        """the pin bounding box."""
        return self._bbox

    def __str__(self):
        # str() and repr() render identically for this class
        return repr(self)

    def __repr__(self):
        fields = (self.__class__.__name__, self._pin_name,
                  self._term_name, self._layer, self._bbox)
        return '%s(%s, %s, %s, %s)' % fields
================================================
FILE: bag/math/LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2018, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: bag/math/__init__.py
================================================
# -*- coding: utf-8 -*-
"""This package defines design template classes.
"""
from typing import Iterable
import numpy as np
from . import interpolate
__all__ = ['lcm', 'gcd', 'interpolate', 'float_to_si_string', 'si_string_to_float']
si_mag = [-18, -15, -12, -9, -6, -3, 0, 3, 6, 9, 12]
si_pre = ['a', 'f', 'p', 'n', 'u', 'm', '', 'k', 'M', 'G', 'T']
def float_to_si_string(num, precision=6):
    """Converts the given floating point number to a string using SI prefix.

    Parameters
    ----------
    num : float
        the number to convert.
    precision : int
        number of significant digits, defaults to 6.

    Returns
    -------
    ans : str
        the string representation of the given number using SI suffix.
    """
    # numbers this small are treated as exactly zero
    if abs(num) < 1e-21:
        return '0'
    exp = np.log10(abs(num))

    # find the largest SI magnitude not exceeding exp; default to the
    # largest prefix for very big numbers.
    pre_idx = len(si_mag) - 1
    for idx in range(len(si_mag)):
        if exp < si_mag[idx]:
            # BUGFIX: clamp to index 0 so magnitudes below the smallest
            # prefix (1e-18) do not wrap around to the largest prefix.
            pre_idx = max(idx - 1, 0)
            break

    fmt = '%%.%dg%%s' % precision
    res = 10.0 ** (si_mag[pre_idx])
    return fmt % (num / res, si_pre[pre_idx])
def si_string_to_float(si_str):
    """Converts the given string with SI prefix to float.

    Parameters
    ----------
    si_str : str
        the string to convert

    Returns
    -------
    ans : float
        the floating point value of the given string.
    """
    suffix = si_str[-1]
    if suffix not in si_pre:
        # no SI prefix; parse as a plain float string
        return float(si_str)
    # scale the numeric part by the magnitude of the SI prefix
    mag = si_mag[si_pre.index(suffix)]
    return float(si_str[:-1]) * 10 ** mag
def gcd(a, b):
    # type: (int, int) -> int
    """Compute greatest common divisor of two positive integers.

    Parameters
    ----------
    a : int
        the first number.
    b : int
        the second number.

    Returns
    -------
    ans : int
        the greatest common divisor of the two given integers.
    """
    # iterative Euclidean algorithm
    x, y = a, b
    while y:
        x, y = y, x % y
    return x
def lcm(arr, init=1):
    # type: (Iterable[int], int) -> int
    """Compute least common multiple of all numbers in the given list.

    Parameters
    ----------
    arr : Iterable[int]
        a list of integers.
    init : int
        the initial LCM. Defaults to 1.

    Returns
    -------
    ans : int
        the least common multiple of all the given numbers.
    """
    ans = init
    for num in arr:
        # inline Euclidean GCD of the running LCM and num
        x, y = ans, num
        while y:
            x, y = y, x % y
        ans = ans * num // x
    return ans
================================================
FILE: bag/math/dfun.py
================================================
# -*- coding: utf-8 -*-
"""This module defines the differentiable function class."""
from typing import Union, List, Optional, Tuple
import abc
import numpy as np
class DiffFunction(abc.ABC):
    """An abstract class representing a differentiable scalar function.

    Supports Numpy broadcasting.  Defaults to using finite difference for
    derivative calculation.

    Parameters
    ----------
    input_ranges : List[Tuple[Optional[float], Optional[float]]]
        input ranges.
    delta_list : Optional[List[float]]
        a list of finite difference step size for each input.  If None,
        finite difference will be disabled.
    """

    def __init__(self, input_ranges, delta_list=None):
        # type: (List[Tuple[Optional[float], Optional[float]]], Optional[List[float]]) -> None
        # error checking
        self._ndim = len(input_ranges)
        if delta_list is not None and len(delta_list) != self._ndim:
            raise ValueError('finite difference list length inconsistent.')

        self._input_ranges = input_ranges
        self.delta_list = delta_list  # type: Optional[List[float]]

    @property
    def input_ranges(self):
        # type: () -> List[Tuple[Optional[float], Optional[float]]]
        return self._input_ranges

    @property
    def ndim(self):
        # type: () -> int
        """Number of input dimensions."""
        return self._ndim

    @abc.abstractmethod
    def __call__(self, xi):
        """Interpolate at the given coordinates.

        Numpy broadcasting rules apply.

        Parameters
        ----------
        xi : array_like
            The coordinates to evaluate, with shape (..., ndim)

        Returns
        -------
        val : np.multiarray.ndarray
            The interpolated values at the given coordinates.
        """
        raise NotImplementedError('Not implemented')

    def get_input_range(self, idx):
        # type: (int) -> Tuple[Optional[float], Optional[float]]
        """Returns the input range of the given dimension."""
        return self._input_ranges[idx]

    def deriv(self, xi, j):
        """Calculate the derivative at the given coordinates with respect to input j.

        Numpy broadcasting rules apply.

        Parameters
        ----------
        xi : array_like
            The coordinates to evaluate, with shape (..., ndim)
        j : int
            input index.

        Returns
        -------
        val : np.multiarray.ndarray
            The derivatives at the given coordinates.

        Raises
        ------
        ValueError
            if no finite difference step sizes were provided and this
            method is not overridden.
        """
        if self.delta_list is None:
            # previously this crashed with an obscure TypeError when
            # delta_list was None; fail with a clear message instead.
            raise ValueError('Finite difference step sizes not specified; '
                             'cannot compute derivative.')
        return self._fd(xi, j, self.delta_list[j])

    def jacobian(self, xi):
        """Calculate the Jacobian at the given coordinates.

        Numpy broadcasting rules apply.

        If finite difference step sizes are not specified,
        will call deriv() in a for loop to compute the Jacobian.

        Parameters
        ----------
        xi : array_like
            The coordinates to evaluate, with shape (..., ndim)

        Returns
        -------
        val : np.multiarray.ndarray
            The Jacobian matrices at the given coordinates.
        """
        if self.delta_list:
            return self._fd_jacobian(xi, self.delta_list)
        else:
            xi = np.asarray(xi, dtype=float)
            ans = np.empty(xi.shape)
            for n in range(self.ndim):
                ans[..., n] = self.deriv(xi, n)
            return ans

    def _fd(self, xi, idx, delta):
        """Calculate the derivative along the given index using central finite difference.

        Parameters
        ----------
        xi : array_like
            The coordinates to evaluate, with shape (..., ndim)
        idx : int
            The index to calculate the derivative on.
        delta : float
            The finite difference step size.

        Returns
        -------
        val : np.multiarray.ndarray
            The derivatives at the given coordinates.
        """
        if idx < 0 or idx >= self.ndim:
            raise ValueError('Invalid derivative index: %d' % idx)

        xi = np.asarray(xi, dtype=float)
        if xi.shape[-1] != self.ndim:
            raise ValueError("The requested sample points xi have dimension %d, "
                             "but this interpolator has dimension %d" % (xi.shape[-1], self.ndim))

        # use broadcasting to evaluate two points at once
        xtest = np.broadcast_to(xi, (2,) + xi.shape).copy()
        xtest[0, ..., idx] += delta / 2.0
        xtest[1, ..., idx] -= delta / 2.0
        val = self(xtest)
        ans = (val[0] - val[1]) / delta  # type: np.ndarray
        if ans.size == 1 and not np.isscalar(ans):
            return ans[0]
        return ans

    def _fd_jacobian(self, xi, delta_list):
        """Calculate the Jacobian matrix using central finite difference.

        Parameters
        ----------
        xi : array_like
            The coordinates to evaluate, with shape (..., ndim)
        delta_list : List[float]
            list of finite difference step sizes for each input.

        Returns
        -------
        val : np.multiarray.ndarray
            The Jacobian matrices at the given coordinates.
        """
        xi = np.asarray(xi, dtype=float)
        if xi.shape[-1] != self.ndim:
            raise ValueError("The requested sample points xi have dimension %d, "
                             "but this interpolator has dimension %d" % (xi.shape[-1], self.ndim))

        # use broadcasting to evaluate all points at once; rows 2k/2k+1 hold
        # the forward/backward samples along input k.
        xtest = np.broadcast_to(xi, (2 * self.ndim,) + xi.shape).copy()
        for idx, delta in enumerate(delta_list):
            xtest[2 * idx, ..., idx] += delta / 2.0
            xtest[2 * idx + 1, ..., idx] -= delta / 2.0

        val = self(xtest)
        ans = np.empty(xi.shape)
        for idx, delta in enumerate(delta_list):
            ans[..., idx] = (val[2 * idx, ...] - val[2 * idx + 1, ...]) / delta
        return ans

    def transform_input(self, amat, bmat):
        # type: (np.multiarray.ndarray, np.multiarray.ndarray) -> DiffFunction
        """Returns f(Ax + B), where f is this function and A, B are matrices.

        Parameters
        ----------
        amat : np.multiarray.ndarray
            the input transform matrix.
        bmat : np.multiarray.ndarray
            the input shift matrix.

        Returns
        -------
        dfun : DiffFunction
            a scalar differential function.
        """
        return InLinTransformFunction(self, amat, bmat)

    # NOTE: np.asscalar() was removed in NumPy 1.23; all operators below use
    # ndarray.item() instead, which is the documented replacement.

    def __add__(self, other):
        # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction
        if isinstance(other, DiffFunction):
            return SumDiffFunction(self, other, f2_sgn=1.0)
        elif isinstance(other, (float, int)):
            return ScaleAddFunction(self, other, 1.0)
        elif isinstance(other, np.ndarray):
            return ScaleAddFunction(self, other.item(), 1.0)
        else:
            raise NotImplementedError('Unknown type %s' % type(other))

    def __radd__(self, other):
        # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction
        return self.__add__(other)

    def __sub__(self, other):
        # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction
        if isinstance(other, DiffFunction):
            return SumDiffFunction(self, other, f2_sgn=-1.0)
        elif isinstance(other, (float, int)):
            return ScaleAddFunction(self, -other, 1.0)
        elif isinstance(other, np.ndarray):
            return ScaleAddFunction(self, -other.item(), 1.0)
        else:
            raise NotImplementedError('Unknown type %s' % type(other))

    def __rsub__(self, other):
        # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction
        if isinstance(other, DiffFunction):
            return SumDiffFunction(other, self, f2_sgn=-1.0)
        elif isinstance(other, (float, int)):
            return ScaleAddFunction(self, other, -1.0)
        elif isinstance(other, np.ndarray):
            return ScaleAddFunction(self, other.item(), -1.0)
        else:
            raise NotImplementedError('Unknown type %s' % type(other))

    def __mul__(self, other):
        # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction
        if isinstance(other, DiffFunction):
            return ProdFunction(self, other)
        elif isinstance(other, (float, int)):
            return ScaleAddFunction(self, 0.0, other)
        elif isinstance(other, np.ndarray):
            return ScaleAddFunction(self, 0.0, other.item())
        else:
            raise NotImplementedError('Unknown type %s' % type(other))

    def __rmul__(self, other):
        # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction
        return self.__mul__(other)

    def __pow__(self, other):
        # type: (Union[float, int, np.multiarray.ndarray]) -> DiffFunction
        if isinstance(other, (float, int)):
            return PwrFunction(self, other, scale=1.0)
        elif isinstance(other, np.ndarray):
            return PwrFunction(self, other.item(), scale=1.0)
        else:
            raise NotImplementedError('Unknown type %s' % type(other))

    def __div__(self, other):
        # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction
        if isinstance(other, DiffFunction):
            return DivFunction(self, other)
        elif isinstance(other, (float, int)):
            return ScaleAddFunction(self, 0.0, 1.0 / other)
        elif isinstance(other, np.ndarray):
            return ScaleAddFunction(self, 0.0, 1.0 / other.item())
        else:
            raise NotImplementedError('Unknown type %s' % type(other))

    def __truediv__(self, other):
        # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction
        return self.__div__(other)

    def __rdiv__(self, other):
        # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction
        if isinstance(other, DiffFunction):
            return DivFunction(other, self)
        elif isinstance(other, (float, int)):
            return PwrFunction(self, -1.0, scale=other)
        elif isinstance(other, np.ndarray):
            return PwrFunction(self, -1.0, scale=other.item())
        else:
            raise NotImplementedError('Unknown type %s' % type(other))

    def __rtruediv__(self, other):
        # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction
        return self.__rdiv__(other)

    def __neg__(self):
        # type: () -> DiffFunction
        return ScaleAddFunction(self, 0.0, -1.0)
class InLinTransformFunction(DiffFunction):
    """A DiffFunction where the input undergoes a linear transformation first.

    This function computes f(Ax + B), where A and B are matrices.

    Parameters
    ----------
    f1 : DiffFunction
        the parent function.
    amat : np.multiarray.ndarray
        the input transform matrix.
    bmat : np.multiarray.ndarray
        the input shift matrix.
    """
    def __init__(self, f1, amat, bmat):
        # type: (DiffFunction, np.multiarray.ndarray, np.multiarray.ndarray) -> None
        if amat.shape[0] != f1.ndim or bmat.shape[0] != f1.ndim:
            raise ValueError('amat/bmat number of rows must be %d' % f1.ndim)
        if len(bmat.shape) != 1:
            raise ValueError('bmat must be 1 dimension.')

        # domain of f(Ax+B) cannot be represented by input ranges.
        super(InLinTransformFunction, self).__init__([(None, None)] * amat.shape[1], delta_list=None)
        self._f1 = f1
        self._amat = amat
        # stored as a column vector so it broadcasts across sample columns
        self._bmat = bmat.reshape(-1, 1)

    def _get_arg(self, xi):
        """Flatten xi to a 2D sample list and map it through Ax + B.

        Returns the transformed points (one row per sample) together with
        the original shape of xi so callers can restore the output shape.
        """
        xi = np.asarray(xi)
        xi_shape = xi.shape
        my_ndim = self.ndim
        if xi_shape[-1] != my_ndim:
            raise ValueError('Last dimension must have size %d' % my_ndim)

        xi = xi.reshape(-1, my_ndim)
        # (A @ x^T + b)^T: each row is one transformed sample point
        return (self._amat.dot(xi.T) + self._bmat).T, xi_shape

    def __call__(self, xi):
        farg, xi_shape = self._get_arg(xi)
        result = self._f1(farg)
        if np.isscalar(result):
            return result
        # drop the last (input-coordinate) axis from the output shape
        return result.reshape(xi_shape[:-1])

    def deriv(self, xi, j):
        # derivative w.r.t. input j is column j of the Jacobian
        jmat = self.jacobian(xi)
        return jmat[..., 0, j]

    def jacobian(self, xi):
        # chain rule: d/dx f(Ax + B) = f'(Ax + B) . A
        farg, xi_shape = self._get_arg(xi)
        jmat = self._f1.jacobian(farg).dot(self._amat)
        shape_trunc = xi_shape[:-1]  # type: Tuple[int, ...]
        return jmat.reshape(shape_trunc + (1, self.ndim))
class ScaleAddFunction(DiffFunction):
    """A DiffFunction multiply by a scalar then added to a scalar.

    Computes f1(x) * scaler + adder.

    Parameters
    ----------
    f1 : DiffFunction
        the first function.
    adder : float
        constant to add.
    scaler : float
        constant to multiply.
    """
    def __init__(self, f1, adder, scaler):
        # type: (DiffFunction, float, float) -> None
        DiffFunction.__init__(self, f1.input_ranges, delta_list=None)
        self._fun = f1
        self._offset = adder
        self._gain = scaler

    def __call__(self, xi):
        return self._fun(xi) * self._gain + self._offset

    def deriv(self, xi, j):
        # the additive constant drops out of the derivative
        return self._fun.deriv(xi, j) * self._gain

    def jacobian(self, xi):
        return self._fun.jacobian(xi) * self._gain
def _intersection(*args):
input_ranges = []
for bound_list in zip(*args):
lmax, umin = None, None
for l, u in bound_list:
if l is None:
lmax, umin = None, None
break
else:
if lmax is None:
lmax, umin = l, u
else:
lmax = max(l, lmax)
umin = min(u, umin)
input_ranges.append((lmax, umin))
return input_ranges
class SumDiffFunction(DiffFunction):
    """Sum or Difference of two DiffFunctions

    Computes f1(x) + f2_sgn * f2(x).

    Parameters
    ----------
    f1 : DiffFunction
        the first function.
    f2 : DiffFunction
        the second function.
    f2_sgn : float
        1 if adding, -1 if subtracting.
    """
    def __init__(self, f1, f2, f2_sgn=1.0):
        # type: (DiffFunction, DiffFunction, float) -> None
        if f1.ndim != f2.ndim:
            raise ValueError('functions dimension mismatch.')

        # the combined domain is the intersection of the operand domains
        DiffFunction.__init__(self, _intersection(f1.input_ranges, f2.input_ranges),
                              delta_list=None)
        self._fa = f1
        self._fb = f2
        self._sgn = f2_sgn

    def __call__(self, xi):
        return self._fa(xi) + self._sgn * self._fb(xi)

    def deriv(self, xi, j):
        # derivative of a sum is the (signed) sum of derivatives
        return self._fa.deriv(xi, j) + self._sgn * self._fb.deriv(xi, j)

    def jacobian(self, xi):
        return self._fa.jacobian(xi) + self._sgn * self._fb.jacobian(xi)
class ProdFunction(DiffFunction):
    """product of two DiffFunctions

    Computes f1(x) * f2(x).

    Parameters
    ----------
    f1 : DiffFunction
        the first function.
    f2 : DiffFunction
        the second function.
    """
    def __init__(self, f1, f2):
        # type: (DiffFunction, DiffFunction) -> None
        if f1.ndim != f2.ndim:
            raise ValueError('functions dimension mismatch.')

        # the combined domain is the intersection of the operand domains
        DiffFunction.__init__(self, _intersection(f1.input_ranges, f2.input_ranges),
                              delta_list=None)
        self._fa = f1
        self._fb = f2

    def __call__(self, xi):
        return self._fa(xi) * self._fb(xi)

    def deriv(self, xi, j):
        # product rule: (f g)' = f' g + f g'
        return self._fa.deriv(xi, j) * self._fb(xi) + self._fa(xi) * self._fb.deriv(xi, j)

    def jacobian(self, xi):
        # broadcast function values against the last (input) axis of the Jacobians
        va = self._fa(xi)[..., np.newaxis]
        vb = self._fb(xi)[..., np.newaxis]
        ja = self._fa.jacobian(xi)
        jb = self._fb.jacobian(xi)
        return ja * vb + va * jb
class DivFunction(DiffFunction):
    """division of two DiffFunctions

    Computes f1(x) / f2(x).

    Parameters
    ----------
    f1 : DiffFunction
        the first function.
    f2 : DiffFunction
        the second function.
    """
    def __init__(self, f1, f2):
        # type: (DiffFunction, DiffFunction) -> None
        if f1.ndim != f2.ndim:
            raise ValueError('functions dimension mismatch.')

        # the combined domain is the intersection of the operand domains
        DiffFunction.__init__(self, _intersection(f1.input_ranges, f2.input_ranges),
                              delta_list=None)
        self._num = f1
        self._den = f2

    def __call__(self, xi):
        return self._num(xi) / self._den(xi)

    def deriv(self, xi, j):
        # quotient rule: (f/g)' = f'/g - f g' / g^2
        den_val = self._den(xi)
        return self._num.deriv(xi, j) / den_val - \
            (self._num(xi) * self._den.deriv(xi, j) / (den_val ** 2))

    def jacobian(self, xi):
        # broadcast function values against the last (input) axis of the Jacobians
        num_val = self._num(xi)[..., np.newaxis]
        den_val = self._den(xi)[..., np.newaxis]
        num_jac = self._num.jacobian(xi)
        den_jac = self._den.jacobian(xi)
        return num_jac / den_val - (num_val * den_jac) / (den_val ** 2)
class PwrFunction(DiffFunction):
    """A DiffFunction raised to a fixed power, with an optional scale factor.

    Computes scale * f(x)**pwr; with pwr = -1 this implements a / x.

    Parameters
    ----------
    f : DiffFunction
        the DiffFunction to raise to a power.
    pwr : float
        the exponent.
    scale : float
        multiplicative scaling factor.  Used to implement a / x.
    """
    def __init__(self, f, pwr, scale=1.0):
        # type: (DiffFunction, float, float) -> None
        # domain is identical to the wrapped function's domain
        DiffFunction.__init__(self, f.input_ranges, delta_list=None)
        self._f = f
        self._pwr = pwr
        self._scale = scale

    def __call__(self, xi):
        # scale * f(x)^p
        return (self._f(xi) ** self._pwr) * self._scale

    def deriv(self, xi, j):
        # chain rule: d/dx scale*f^p = scale * p * f^(p-1) * f'
        fv = self._f(xi)
        df = self._f.deriv(xi, j)
        return (fv ** (self._pwr - 1) * self._pwr * df) * self._scale

    def jacobian(self, xi):
        # chain rule applied row-wise; f(x) broadcasts against the jacobian
        fv = self._f(xi)[..., np.newaxis]
        jac = self._f.jacobian(xi)
        return (jac * (fv ** (self._pwr - 1) * self._pwr)) * self._scale
class VectorDiffFunction(object):
    """A differentiable vector-valued function.

    Wraps one scalar DiffFunction per output element; all of them must share
    the same input dimension.

    Parameters
    ----------
    fun_list : List[DiffFunction]
        list of interpolator functions, one for each element of the output vector.
    """
    def __init__(self, fun_list):
        # type: (List[DiffFunction]) -> None
        # error checking
        if not fun_list:
            raise ValueError('No interpolators are given.')
        self._input_ranges = _intersection(*(f.input_ranges for f in fun_list))
        self._in_dim = fun_list[0].ndim
        # every component function must take inputs of the same dimension
        if any(fun.ndim != self._in_dim for fun in fun_list):
            raise ValueError('Interpolators input dimension mismatch.')
        self._fun_list = fun_list
        self._out_dim = len(fun_list)

    @property
    def in_dim(self):
        # type: () -> int
        """Number of input dimensions."""
        return self._in_dim

    @property
    def out_dim(self):
        # type: () -> int
        """Number of output dimensions."""
        return self._out_dim

    def get_input_range(self, idx):
        # type: (int) -> Tuple[Optional[float], Optional[float]]
        """Returns the input range of the given dimension."""
        return self._input_ranges[idx]

    def __call__(self, xi):
        """Evaluate the output vector at the given coordinates.

        Parameters
        ----------
        xi : array-like
            The coordinates to evaluate, with shape (..., ndim)

        Returns
        -------
        val : numpy.array
            The interpolated values at the given coordinates.
        """
        xi = np.asarray(xi, dtype=float)
        base_shape = xi.shape[:-1]  # type: Tuple[int, ...]
        result = np.empty(base_shape + (self._out_dim, ))
        # fill one output slice per component function
        for out_idx, fun in enumerate(self._fun_list):
            result[..., out_idx] = fun(xi)
        return result

    def jacobian(self, xi):
        """Calculate the Jacobian matrices of this function at the given coordinates.

        Parameters
        ----------
        xi : array-like
            The coordinates to evaluate, with shape (..., ndim)

        Returns
        -------
        val : numpy.array
            The jacobian matrix at the given coordinates.
        """
        xi = np.asarray(xi, dtype=float)
        base_shape = xi.shape[:-1]  # type: Tuple[int, ...]
        result = np.empty(base_shape + (self._out_dim, self._in_dim))
        # each component function contributes one row of the jacobian
        for out_idx, fun in enumerate(self._fun_list):
            result[..., out_idx, :] = fun.jacobian(xi)
        return result

    def deriv(self, xi, i, j):
        """Compute the derivative of output i with respect to input j.

        Parameters
        ----------
        xi : array-like
            The coordinates to evaluate, with shape (..., ndim)
        i : int
            output index.
        j : int
            input index.

        Returns
        -------
        val : numpy.array
            The derivatives at the given coordinates.
        """
        return self._fun_list[i].deriv(xi, j)
================================================
FILE: bag/math/interpolate.py
================================================
# -*- coding: utf-8 -*-
"""This module defines various interpolation classes.
"""
from typing import List, Tuple, Union, Sequence, Optional
import numpy as np
import scipy.interpolate as interp
import scipy.ndimage.interpolation as imag_interp
from ..math.dfun import DiffFunction
__author__ = 'erichang'
__all__ = ['interpolate_grid', 'LinearInterpolator']
def _scales_to_points(scale_list, values, delta=1e-4):
    # type: (List[Tuple[float, float]], np.multiarray.ndarray, float) -> Tuple[List[np.multiarray.ndarray], List[float]]
    """Convert (offset, spacing) pairs into per-axis sample points and finite difference deltas."""
    ndim = len(values.shape)
    # error checking
    if ndim == 1:
        raise ValueError('This class only works for dimension >= 2.')
    elif ndim != len(scale_list):
        raise ValueError('input and output dimension mismatch.')
    points = []
    delta_list = []
    # reconstruct the uniform sample grid of each axis from (offset, spacing)
    for (offset, scale), num_pts in zip(scale_list, values.shape):
        if num_pts < 2:
            raise ValueError('Every dimension must have at least 2 points.')
        points.append(np.linspace(offset, (num_pts - 1) * scale + offset, num_pts))
        # finite difference step is relative to the grid spacing
        delta_list.append(scale * delta)
    return points, delta_list
def interpolate_grid(scale_list, values, method='spline',
                     extrapolate=False, delta=1e-4, num_extrapolate=3):
    # type: (List[Tuple[float, float]], np.multiarray.ndarray, str, bool, float, int) -> DiffFunction
    """Build an interpolator for multidimensional data on a regular grid.

    Dispatches to the appropriate backend based on the interpolation
    method and the dimensionality of the data.

    Parameters
    ----------
    scale_list : List[Tuple[float, float]]
        a list of (offset, spacing).
    values : np.multiarray.ndarray
        The output data in N dimensions.  The length in each dimension must
        be at least 2.
    method : str
        The interpolation method.  Either 'linear', or 'spline'.
        Defaults to 'spline'.
    extrapolate : bool
        True to extrapolate data output of given bounds.  Defaults to False.
    delta : float
        the finite difference step size.  Finite difference is only used for
        linear interpolation and spline interpolation on 3D data or greater.
        Defaults to 1e-4 of the grid spacing.
    num_extrapolate: int
        If spline interpolation is selected on 3D data or greater, we linearly
        extrapolate the given data by this many points to fix behavior near
        input boundaries.

    Returns
    -------
    fun : DiffFunction
        the interpolator function.
    """
    ndim = len(values.shape)
    # linear interpolation works for any dimension
    if method == 'linear':
        points, delta_list = _scales_to_points(scale_list, values, delta)
        return LinearInterpolator(points, values, delta_list, extrapolate=extrapolate)
    # 1D data has a dedicated spline backend
    if ndim == 1:
        return Interpolator1D(scale_list, values, method=method, extrapolate=extrapolate)
    if method == 'spline':
        if ndim == 2:
            return Spline2D(scale_list, values, extrapolate=extrapolate)
        # 3D or higher: use the map_coordinates based backend
        return MapCoordinateSpline(scale_list, values, delta=delta, extrapolate=extrapolate,
                                   num_extrapolate=num_extrapolate)
    raise ValueError('Unsupported interpolation method: %s' % method)
class LinearInterpolator(DiffFunction):
    """A linear interpolator on a regular grid for arbitrary dimensions.

    This class is backed by scipy.interpolate.RegularGridInterpolator.
    Derivatives are calculated using finite difference.

    Parameters
    ----------
    points : Sequence[np.multiarray.ndarray]
        list of points of each dimension.
    values : np.multiarray.ndarray
        The output data in N dimensions.
    delta_list : List[float]
        list of finite difference step size for each axis.
    extrapolate : bool
        True to extrapolate data output of given bounds.  Defaults to False.
    """
    def __init__(self, points, values, delta_list, extrapolate=False):
        # type: (Sequence[np.multiarray.ndarray], np.multiarray.ndarray, List[float], bool) -> None
        # the valid input range of each axis is the span of its sample points
        input_range = [(pvec[0], pvec[-1]) for pvec in points]
        DiffFunction.__init__(self, input_range, delta_list=delta_list)
        self._points = points
        self._extrapolate = extrapolate
        # fill_value=None makes RegularGridInterpolator extrapolate linearly;
        # bounds_error controls whether out-of-bounds inputs raise instead.
        self.fun = interp.RegularGridInterpolator(points, values, method='linear',
                                                  bounds_error=not extrapolate,
                                                  fill_value=None)

    def get_input_points(self, idx):
        # type: (int) -> np.multiarray.ndarray
        """Returns the input points for the given dimension."""
        return self._points[idx]

    def __call__(self, xi):
        """Interpolate at the given coordinate.

        Parameters
        ----------
        xi : numpy.array
            The coordinates to evaluate, with shape (..., ndim)

        Returns
        -------
        val : numpy.array
            The interpolated values at the given coordinates.
        """
        ans = self.fun(xi)
        # unwrap single-point evaluations into a scalar
        if ans.size == 1:
            return ans[0]
        return ans

    def integrate(self, xstart, xstop, axis=-1, logx=False, logy=False, raw=False):
        # type: (float, float, int, bool, bool, bool) -> Union[LinearInterpolator, np.ndarray]
        """Integrate away the given axis.

        if logx/logy is True, that means this LinearInterpolator is actually used
        to do linear interpolation on the logarithm of the actual data.  This method
        will returns the integral of the actual data.

        Parameters
        ----------
        xstart : float
            the X start value.
        xstop : float
            the X stop value.
        axis : int
            the axis of integration.
            If unspecified, this will be the last axis.
        logx : bool
            True if the values on the given axis are actually the logarithm of
            the real values.
        logy : bool
            True if the Y values are actually the logarithm of the real values.
        raw : bool
            True to return the raw data points instead of a LinearInterpolator object.

        Returns
        -------
        result : Union[LinearInterpolator, np.ndarray]
            float if this interpolator has only 1 dimension, otherwise a new
            LinearInterpolator is returned.
        """
        if self.delta_list is None:
            raise ValueError("Finite differences must be enabled")
        # mixed linear/log relationships are not supported
        if logx != logy:
            raise ValueError('Currently only works for linear or log-log relationship.')
        ndim = self.ndim
        # NOTE: any negative axis value is mapped to the last axis
        if axis < 0:
            axis = ndim - 1
        if axis < 0 or axis >= ndim:
            raise IndexError('index out of range.')
        if len(self._points) < ndim:
            raise ValueError("len(self._points) != ndim")

        def calculate_integ_x() -> np.ndarray:
            # Build the sample-point vector along the integration axis:
            # all grid points inside [xstart, xstop], with xstart/xstop
            # themselves prepended/appended when they fall between grid points.
            # find data points between xstart and xstop
            vec = self._points[axis]
            start_idx, stop_idx = np.searchsorted(vec, [xstart, xstop])
            cur_len = stop_idx - start_idx
            # NOTE(review): assumes xstart <= vec[-1]; otherwise vec[start_idx]
            # raises IndexError -- confirm callers guarantee this.
            if vec[start_idx] > xstart:
                # xstart is strictly before the first kept grid point
                cur_len += 1
                istart = 1
            else:
                istart = 0
            if vec[stop_idx - 1] < xstop:
                # xstop is strictly after the last kept grid point
                cur_len += 1
                istop = cur_len - 1
            else:
                istop = cur_len
            integ_x = np.empty(cur_len)
            integ_x[istart:istop] = vec[start_idx:stop_idx]
            if istart != 0:
                integ_x[0] = xstart
            if istop != cur_len:
                integ_x[cur_len - 1] = xstop
            return integ_x

        # get all input sample points we need to integrate.
        plist = []
        integ_x = calculate_integ_x()  # type: np.ndarray
        new_points = []
        new_deltas = []
        for axis_idx, vec in enumerate(self._points):
            if axis == axis_idx:
                plist.append(integ_x)
            else:
                # the non-integrated axes survive into the result
                plist.append(vec)
                new_points.append(vec)
                new_deltas.append(self.delta_list[axis_idx])
        # evaluate the interpolant on the full tensor grid of sample points
        fun_arg = np.stack(np.meshgrid(*plist, indexing='ij'), axis=-1)
        values = self.fun(fun_arg)
        if logx:
            if axis != ndim - 1:
                # transpose values so that broadcasting/slicing is easier
                new_order = [idx for idx in range(ndim) if idx != axis]
                new_order.append(axis)
                values = np.transpose(values, axes=new_order)
            # integrate given that log-log plot is piece-wise linear
            # On each segment ly = m*lx + b, i.e. y = scale * x**m with
            # scale = y1 / x1**m.  Each segment's exact integral is then
            # scale/(m+1) * (x2**(m+1) - x1**(m+1)), except when m is close
            # to -1, where the antiderivative is scale*ln(x), which equals
            # scale * (lx2 - lx1).
            ly1 = values[..., :-1]
            ly2 = values[..., 1:]
            lx1 = np.broadcast_to(integ_x[:-1], ly1.shape)
            lx2 = np.broadcast_to(integ_x[1:], ly1.shape)
            m = (ly2 - ly1) / (lx2 - lx1)
            x1 = np.exp(lx1)
            y1 = np.exp(ly1)
            scale = y1 / np.power(x1, m)
            # segments whose slope is (numerically) -1 need the log formula
            log_idx = np.abs(m + 1) < 1e-6
            log_idxb = np.invert(log_idx)
            area = np.empty(m.shape)
            area[log_idx] = scale[log_idx] * (lx2[log_idx] - lx1[log_idx])
            mp1 = m[log_idxb] + 1
            x2 = np.exp(lx2[log_idxb])
            area[log_idxb] = scale[log_idxb] / mp1 * (np.power(x2, mp1) - np.power(x1[log_idxb], mp1))
            # total integral is the sum of per-segment areas along the last axis
            new_values = np.sum(area, axis=-1)  # type: np.multiarray.ndarray
        else:
            # just use trapezoid integration
            new_values = np.trapz(values, x=integ_x, axis=axis)  # type: np.multiarray.ndarray
        if not raw and new_points:
            # wrap the reduced-dimension data in a new interpolator
            return LinearInterpolator(new_points, new_values, new_deltas, extrapolate=self._extrapolate)
        else:
            # raw=True, or the input was 1D: return the raw numbers
            return new_values
class Interpolator1D(DiffFunction):
    """An interpolator on a regular grid for 1 dimensional data.

    This class is backed by scipy.interpolate.InterpolatedUnivariateSpline.

    Parameters
    ----------
    scale_list : list[(float, float)]
        a list of (offset, spacing) for each input dimension.
    values : numpy.array
        The output data.  Must be 1 dimension.
    method : str
        extrapolation method.  Either 'linear' or 'spline'.  Defaults to spline.
    extrapolate : bool
        True to extrapolate data output of given bounds.  Defaults to False.
    """
    def __init__(self, scale_list, values, method='spline', extrapolate=False):
        # error checking
        if len(values.shape) != 1:
            raise ValueError('This class only works for 1D data.')
        elif len(scale_list) != 1:
            raise ValueError('input and output dimension mismatch.')
        # map interpolation method to spline degree
        if method == 'linear':
            k = 1
        elif method == 'spline':
            k = 3
        else:
            # fixed typo ('Unsuppoorted'); message now matches interpolate_grid().
            raise ValueError('Unsupported interpolation method: %s' % method)
        offset, scale = scale_list[0]
        num_pts = values.shape[0]
        # reconstruct the uniform sample grid from (offset, spacing)
        points = np.linspace(offset, (num_pts - 1) * scale + offset, num_pts)  # type: np.multiarray.ndarray
        DiffFunction.__init__(self, [(points[0], points[-1])], delta_list=None)
        # ext=0 extrapolates; ext=2 raises ValueError on out-of-bounds input
        ext = 0 if extrapolate else 2
        self.fun = interp.InterpolatedUnivariateSpline(points, values, k=k, ext=ext)

    def __call__(self, xi):
        """Interpolate at the given coordinate.

        Parameters
        ----------
        xi : numpy.array
            The coordinates to evaluate, with shape (..., ndim)

        Returns
        -------
        val : numpy.array
            The interpolated values at the given coordinates.
        """
        ans = self.fun(xi)
        # unwrap single-point evaluations into a scalar
        if ans.size == 1:
            return ans[0]
        return ans

    def deriv(self, xi, idx):
        """Calculate the derivative of the spline along the given index.

        Parameters
        ----------
        xi : numpy.array
            The coordinates to evaluate, with shape (..., ndim)
        idx : int
            The index to calculate the derivative on.  Must be 0 for 1D data.

        Returns
        -------
        val : numpy.array
            The derivatives at the given coordinates.
        """
        if idx != 0:
            raise ValueError('Invalid derivative index: %d' % idx)
        # second positional argument of the spline call is the derivative order
        ans = self.fun(xi, 1)
        if ans.size == 1:
            return ans[0]
        return ans
class Spline2D(DiffFunction):
    """A spline interpolator on a regular grid for 2D data.

    This class is backed by scipy.interpolate.RectBivariateSpline.

    Parameters
    ----------
    scale_list : list[(float, float)]
        a list of (offset, spacing) for each input dimension.
    values : numpy.array
        The output data.  Must be 2D.
    extrapolate : bool
        True to extrapolate data output of given bounds.  Defaults to False.
    """
    def __init__(self, scale_list, values, extrapolate=False):
        # error checking
        if len(values.shape) != 2:
            raise ValueError('This class only works for 2D data.')
        elif len(scale_list) != 2:
            raise ValueError('input and output dimension mismatch.')
        # reconstruct the uniform sample grid of each axis from (offset, spacing)
        axes = []
        for (offset, scale), npts in zip(scale_list, values.shape):
            axes.append(np.linspace(offset, (npts - 1) * scale + offset, npts))
        x, y = axes  # type: np.multiarray.ndarray, np.multiarray.ndarray
        self._min = x[0], y[0]
        self._max = x[-1], y[-1]
        DiffFunction.__init__(self, [(x[0], x[-1]), (y[0], y[-1])], delta_list=None)
        self.fun = interp.RectBivariateSpline(x, y, values)
        self._extrapolate = extrapolate

    def _get_xy(self, xi):
        """Split the coordinate array into X and Y components, checking bounds."""
        xi = np.asarray(xi, dtype=float)
        if xi.shape[-1] != 2:
            raise ValueError("The requested sample points xi have dimension %d, "
                             "but this interpolator has dimension 2" % (xi.shape[-1]))
        x = xi[..., 0]  # type: np.multiarray.ndarray
        y = xi[..., 1]  # type: np.multiarray.ndarray
        # RectBivariateSpline always extrapolates; enforce the bound check here.
        if not self._extrapolate:
            in_bounds = ((self._min[0] <= x) & (x <= self._max[0]) &
                         (self._min[1] <= y) & (y <= self._max[1]))
            if not np.all(in_bounds):
                raise ValueError('some inputs are out of bounds.')
        return x, y

    def __call__(self, xi):
        """Interpolate at the given coordinates.

        Parameters
        ----------
        xi : numpy.array
            The coordinates to evaluate, with shape (..., ndim)

        Returns
        -------
        val : numpy.array
            The interpolated values at the given coordinates.
        """
        x, y = self._get_xy(xi)
        # grid=False evaluates point-wise rather than on the outer product grid
        return self.fun(x, y, grid=False)

    def deriv(self, xi, idx):
        """Calculate the derivative of the spline along the given index.

        Parameters
        ----------
        xi : numpy.array
            The coordinates to evaluate, with shape (..., ndim)
        idx : int
            The index to calculate the derivative on.  Must be 0 or 1.

        Returns
        -------
        val : numpy.array
            The derivatives at the given coordinates.
        """
        if idx < 0 or idx > 1:
            raise ValueError('Invalid derivative index: %d' % idx)
        x, y = self._get_xy(xi)
        # select the partial-derivative order along the requested axis
        order = {'dx': 1} if idx == 0 else {'dy': 1}
        return self.fun(x, y, grid=False, **order)
class MapCoordinateSpline(DiffFunction):
    """A spline interpolator on a regular grid for multidimensional data.

    The spline interpolation is done using map_coordinate method in the
    scipy.ndimage.interpolation package.  The derivative is done using
    finite difference.

    if extrapolate is True, we use linear interpolation for values outside of
    bounds.

    Note: By default, map_coordinate uses the nearest value for all points
    outside the boundary.  This will cause undesired interpolation
    behavior near boundary points.  To solve this, we linearly
    extrapolates the given data for a fixed number of points.

    Parameters
    ----------
    scale_list : list[(float, float)]
        a list of (offset, spacing) for each input dimension.
    values : numpy.array
        The output data.
    extrapolate : bool
        True to linearly extrapolate outside of bounds.
    num_extrapolate : int
        number of points to extrapolate in each dimension in each direction.
    delta : float
        the finite difference step size.  Defaults to 1e-4 (relative to a spacing of 1).
    """
    def __init__(self, scale_list, values, extrapolate=False, num_extrapolate=3,
                 delta=1e-4):
        shape = values.shape
        ndim = len(shape)
        # error checking
        if ndim < 3:
            raise ValueError('Data must have 3 or more dimensions.')
        elif ndim != len(scale_list):
            raise ValueError('input and output dimension mismatch.')
        self._scale_list = scale_list
        # largest valid index along each axis, in extended index coordinates
        self._max = [n - 1 + num_extrapolate for n in shape]
        self._extrapolate = extrapolate
        self._ext = num_extrapolate
        # linearly extrapolate given values
        # _extfun works in index coordinates (shifted by num_extrapolate); it is
        # used both to extend the data past the boundary and to evaluate
        # out-of-bounds points at call time.
        ext_points = [np.arange(num_extrapolate, n + num_extrapolate) for n in shape]
        points, delta_list = _scales_to_points(scale_list, values, delta)
        input_ranges = [(pvec[0], pvec[-1]) for pvec in points]
        self._extfun = LinearInterpolator(ext_points, values, [delta] * ndim, extrapolate=True)
        # evaluate the linear extrapolation on the enlarged index grid
        xi_ext = np.stack(np.meshgrid(*(np.arange(0, n + 2 * num_extrapolate) for n in shape),
                                      indexing='ij', copy=False), axis=-1)
        values_ext = self._extfun(xi_ext)
        # pre-filter once so map_coordinates can run with prefilter=False
        self._filt_values = imag_interp.spline_filter(values_ext)
        DiffFunction.__init__(self, input_ranges, delta_list=delta_list)

    def _normalize_inputs(self, xi):
        """Convert physical coordinates into extended index coordinates."""
        xi = np.asarray(xi, dtype=float)
        if xi.shape[-1] != self.ndim:
            raise ValueError("The requested sample points xi have dimension %d, "
                             "but this interpolator has dimension %d" % (xi.shape[-1], self.ndim))
        # copy before the in-place offset/scale adjustment below
        xi = np.atleast_2d(xi.copy())
        for idx, (offset, scale) in enumerate(self._scale_list):
            xi[..., idx] -= offset
            xi[..., idx] /= scale
        # take extension input account.
        xi += self._ext
        return xi

    def __call__(self, xi):
        """Interpolate at the given coordinate.

        Parameters
        ----------
        xi : numpy.array
            The coordinates to evaluate, with shape (..., ndim)

        Returns
        -------
        val : numpy.array
            The interpolated values at the given coordinates.
        """
        ext = self._ext
        ndim = self.ndim
        xi = self._normalize_inputs(xi)
        ans_shape = xi.shape[:-1]
        xi = xi.reshape(-1, ndim)
        # build a boolean mask of points outside the original data region;
        # the scalar False broadcasts through the | operations below.
        ext_idx_vec = False
        for idx in range(self.ndim):
            ext_idx_vec = ext_idx_vec | (xi[:, idx] < ext) | (xi[:, idx] > self._max[idx])
        int_idx_vec = ~ext_idx_vec
        xi_ext = xi[ext_idx_vec, :]
        xi_int = xi[int_idx_vec, :]
        ans = np.empty(xi.shape[0])
        # in-bounds points: spline evaluation on the pre-filtered data
        ans[int_idx_vec] = imag_interp.map_coordinates(self._filt_values, xi_int.T, mode='nearest', prefilter=False)
        if xi_ext.size > 0:
            if not self._extrapolate:
                raise ValueError('some inputs are out of bounds.')
            # out-of-bounds points fall back to linear extrapolation
            ans[ext_idx_vec] = self._extfun(xi_ext)
        if ans.size == 1:
            return ans[0]
        return ans.reshape(ans_shape)
================================================
FILE: bag/mdao/LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2018, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: bag/mdao/__init__.py
================================================
# -*- coding: utf-8 -*-
"""This package contains various openmdao related modules.
"""
================================================
FILE: bag/mdao/components.py
================================================
# -*- coding: utf-8 -*-
"""This module defines various OpenMDAO component classes.
"""
import numpy as np
import openmdao.api as omdao
class VecFunComponent(omdao.Component):
    """An OpenMDAO component that wraps a list of differentiable functions.

    Evaluates one function per output element on the given inputs and returns
    the results as a 1D array.  Each input may be a scalar or a vector of the
    same length as the output; for a vector input, function k receives the
    k-th element.

    Parameters
    ----------
    output_name : str
        output name.
    fun_list : list[bag.math.dfun.DiffFunction]
        list of interpolator functions, one for each dimension.
    params : list[str]
        list of parameter names.  Parameter names may repeat, in which case the
        same parameter will be used for multiple arguments of the function.
    vector_params : set[str]
        set of parameters that are vector instead of scalar.  If a parameter
        is a vector, it will be the same size as the output, and each function
        only takes in the corresponding element of the parameter.
    """
    def __init__(self, output_name, fun_list, params,
                 vector_params=None):
        omdao.Component.__init__(self)
        vector_params = vector_params or set()
        self._output = output_name
        self._out_dim = len(fun_list)
        self._in_dim = len(params)
        self._params = params
        self._unique_params = {}
        self._fun_list = fun_list
        # register each distinct parameter exactly once
        for par in params:
            is_vec = par in vector_params
            par_shape = self._out_dim if is_vec else 1
            if par not in self._unique_params:
                # linear check, but small list so should be fine.
                self.add_param(par, val=np.zeros(par_shape))
                self._unique_params[par] = len(self._unique_params), is_vec
        # chain-rule jacobian: maps unique parameters to function arguments
        self._chain_jacobian = np.zeros((self._in_dim, len(self._unique_params)))
        for arg_idx, par in enumerate(params):
            self._chain_jacobian[arg_idx, self._unique_params[par][0]] = 1
        self.add_output(output_name, val=np.zeros(self._out_dim))

    def __call__(self, **kwargs):
        """Evaluate on the given inputs.

        Parameters
        ----------
        kwargs : dict[str, np.array or float]
            the inputs as a dictionary.

        Returns
        -------
        out : np.array
            the output array.
        """
        result = {}
        self.solve_nonlinear(kwargs, result)
        return result[self._output]

    def _get_inputs(self, params):
        """Given parameter values, construct inputs for functions.

        Parameters
        ----------
        params : VecWrapper, optional
            VecWrapper containing parameters. (p)

        Returns
        -------
        ans : list[list[float]]
            input lists.
        """
        # row k holds the argument vector for function k; scalar parameters
        # broadcast down the column, vector parameters fill it element-wise.
        xi_mat = np.empty((self._out_dim, self._in_dim))
        for arg_idx, name in enumerate(self._params):
            xi_mat[:, arg_idx] = params[name]
        return xi_mat

    def solve_nonlinear(self, params, unknowns, resids=None):
        """Compute the output parameter.

        Parameters
        ----------
        params : VecWrapper, optional
            VecWrapper containing parameters. (p)
        unknowns : VecWrapper, optional
            VecWrapper containing outputs and states. (u)
        resids : VecWrapper, optional
            VecWrapper containing residuals. (r)
        """
        xi_mat = self._get_inputs(params)
        out_vec = np.empty(self._out_dim)
        for row, fun in enumerate(self._fun_list):
            out_vec[row] = fun(xi_mat[row, :])
        unknowns[self._output] = out_vec

    def linearize(self, params, unknowns=None, resids=None):
        """Compute the Jacobian of the parameter.

        Parameters
        ----------
        params : VecWrapper, optional
            VecWrapper containing parameters. (p)
        unknowns : VecWrapper, optional
            VecWrapper containing outputs and states. (u)
        resids : VecWrapper, optional
            VecWrapper containing residuals. (r)
        """
        xi_mat = self._get_inputs(params)
        # per-function jacobians, stacked row-wise
        jf = np.empty((self._out_dim, self._in_dim))
        for row, fun in enumerate(self._fun_list):
            jf[row, :] = fun.jacobian(xi_mat[row, :])
        # fold repeated parameters together via the chain-rule matrix
        jmat = np.dot(jf, self._chain_jacobian)
        jdict = {}
        for par, (col, is_vec) in self._unique_params.items():
            col_val = jmat[:, col]
            if is_vec:
                # vector parameter: element k only affects output k
                col_val = np.diag(col_val)
            jdict[self._output, par] = col_val
        return jdict
================================================
FILE: bag/mdao/core.py
================================================
# -*- coding: utf-8 -*-
"""This module defines core BAG openmdao classes."""
import numpy as np
import networkx as nx
import openmdao.api as omdao
import bag.util.parse
from .components import VecFunComponent
class GroupBuilder(object):
    """A class that builds new OpenMDAO groups.

    This class provides a simple interface to define new variables as function of
    other variables, and it tracks the variable dependencies using a directed
    acyclic graph.
    """
    def __init__(self):
        # NOTE(review): uses the networkx 1.x API (nodes_iter, G.node,
        # predecessors_iter) -- verify against the installed networkx version.
        self._g = nx.DiGraph()  # dependency graph; edge parent -> child
        self._input_vars = set()  # variables with no dependencies (graph sources)

    def _add_node(self, name, ndim, **kwargs):
        """Helper method to add a node and keep track of input variables."""
        self._g.add_node(name, ndim=ndim, **kwargs)
        # a new node has no incoming edges, so it starts out as an input
        self._input_vars.add(name)

    def _add_edge(self, parent, child):
        """Helper method to add an edge and update input variables."""
        self._g.add_edge(parent, child)
        # the child now has a dependency, so it is no longer an input
        try:
            self._input_vars.remove(child)
        except KeyError:
            pass

    def get_inputs(self):
        """Returns a set of current input variable names.

        Returns
        -------
        input_vars : set[str]
            a set of input variable names.
        """
        return self._input_vars.copy()

    def get_variables(self):
        """Returns a list of variables.

        Returns
        -------
        var_list : list[str]
            a list of variables.
        """
        return list(self._g.nodes_iter())

    def get_variable_info(self, name):
        """Returns a copy of the attribute dictionary of the given variable.

        The dictionary contains at least 'ndim', and may contain 'min', 'max',
        'equals', 'expr', 'fun_list', 'params', and 'vec_params' depending on
        how the variable was defined.

        Parameters
        ----------
        name : str
            variable name.

        Returns
        -------
        attrs : dict
            a copy of the variable's node attribute dictionary.
        """
        nattr = self._g.node[name]
        return nattr.copy()

    def add_fun(self, var_name, fun_list, params, param_ranges, vector_params=None):
        """Add a new variable defined by the given list of functions.

        Parameters
        ----------
        var_name : str
            variable name.
        fun_list : list[bag.math.interpolate.Interpolator]
            list of functions, one for each dimension.
        params : list[str]
            list of parameter names.  Parameter names may repeat, in which case the
            same parameter will be used for multiple arguments of the function.
        param_ranges : dict[str, (float, float)]
            a dictionary of parameter valid range.
        vector_params : set[str]
            set of parameters that are vector instead of scalar.  If a parameter
            is a vector, it will be the same size as the output, and each function
            only takes in the corresponding element of the parameter.
        """
        vector_params = vector_params or set()
        ndim = len(fun_list)
        # error checking
        for par in params:
            if par not in param_ranges:
                raise ValueError('Valid range of %s not specified.' % par)
        # add inputs
        for par, (par_min, par_max) in param_ranges.items():
            par_dim = ndim if par in vector_params else 1
            if par not in self._g:
                # add input to graph if it's not in there.
                self._add_node(par, par_dim)
            nattrs = self._g.node[par]
            if nattrs['ndim'] != par_dim:
                # error checking.
                raise ValueError('Variable %s has dimension mismatch.' % par)
            # update input range
            # tighten to the intersection of all ranges declared so far
            nattrs['min'] = max(par_min, nattrs.get('min', par_min))
            nattrs['max'] = min(par_max, nattrs.get('max', par_max))
        # add current variable
        if var_name not in self._g:
            self._add_node(var_name, ndim)
        nattrs = self._g.node[var_name]
        # error checking.
        if nattrs['ndim'] != ndim:
            raise ValueError('Variable %s has dimension mismatch.' % var_name)
        if self._g.in_degree(var_name) > 0:
            raise Exception('Variable %s already has other dependencies.' % var_name)
        # record how this variable is computed, then wire up its dependencies
        nattrs['fun_list'] = fun_list
        nattrs['params'] = params
        nattrs['vec_params'] = vector_params
        for parent in param_ranges.keys():
            self._add_edge(parent, var_name)

    def add_var(self, variable, vmin, vmax, ndim=1):
        """Adds a new independent variable.

        Parameters
        ----------
        variable : str
            the variable to add
        vmin : float
            the minimum allowable value.
        vmax : float
            the maximum allowable value.
        ndim : int
            the dimension of the variable.  Defaults to 1.
        """
        if variable in self._g:
            raise Exception('Variable %s already exists.' % variable)
        self._add_node(variable, ndim, min=vmin, max=vmax)

    def set_input_limit(self, var, equals=None, lower=None, upper=None):
        """Sets the limit on the given input variable.

        NOTE(review): if var is not in the graph this silently does nothing --
        confirm that is the intended behavior.

        Parameters
        ----------
        var : str
            name of the variable.
        equals : float or None
            if given, the equality value.
        lower : float or None
            if given, the minimum.
        upper : float or None
            if given, the maximum.
        """
        if var in self._g:
            if self._g.in_degree(var) > 0:
                raise Exception('Variable %s is not an input variable' % var)
            nattr = self._g.node[var]
            if equals is not None:
                # an equality constraint pins both bounds
                nattr['equals'] = equals
                lower = upper = equals
            # NOTE(review): debug prints left in -- confirm they are intentional
            print(var, lower, upper)
            if lower is not None:
                nattr['min'] = max(nattr.get('min', lower), lower)
            if upper is not None:
                nattr['max'] = min(nattr.get('max', upper), upper)
            print(var, nattr['min'], nattr['max'])

    def add_expr(self, eqn, ndim):
        """Adds a new variable with the given expression.

        Parameters
        ----------
        eqn : str
            An equation of the form "<var> = <expr>", where <var>
            is the output variable name, and <expr> is the expression.
            All variables in <expr> must be already added.
        ndim : int
            the dimension of the output variable.
        """
        # split on the first '=' only, so '==' inside expr would misbehave;
        # expressions are assumed to be simple arithmetic.
        variable, expr = eqn.split('=', 1)
        variable = variable.strip()
        expr = expr.strip()
        if variable not in self._g:
            self._add_node(variable, ndim)
        nattrs = self._g.node[variable]
        if nattrs['ndim'] != ndim:
            raise Exception('Dimension mismatch for %s' % variable)
        if self._g.in_degree(variable) > 0:
            raise Exception('%s already depends on other variables' % variable)
        # every variable referenced by the expression must already exist
        invars = bag.util.parse.get_variables(expr)
        for parent in invars:
            if parent not in self._g:
                raise Exception('Variable %s is not defined.' % parent)
            self._add_edge(parent, variable)
        nattrs['expr'] = expr

    def build(self, debug=False):
        """Returns a OpenMDAO Group from the variable graph.

        Parameters
        ----------
        debug : bool
            True to print debug messages.

        Returns
        -------
        grp : omdao.Group
            the OpenMDAO group that computes all variables.
        input_bounds : dict[str, any]
            a dictionary from input variable name to (min, max, ndim) tuple.
        """
        input_bounds = {}
        ndim_dict = {}
        if not nx.is_directed_acyclic_graph(self._g):
            raise Exception('Dependency loop detected')
        grp = omdao.Group()
        # promote all component variables to the group level
        prom = ['*']
        # topological order guarantees parents are processed before children
        for var in nx.topological_sort(self._g):
            nattrs = self._g.node[var]
            ndim = nattrs['ndim']
            ndim_dict[var] = ndim
            if self._g.in_degree(var) == 0:
                if debug:
                    # input variable
                    print('Input variable: %s' % var)
                # range checking
                vmin, vmax = nattrs['min'], nattrs['max']
                veq = nattrs.get('equals', None)
                if vmin > vmax:
                    raise Exception('Variable %s input range not valid.' % var)
                input_bounds[var] = veq, vmin, vmax, ndim
            else:
                # derived variable: build the component that computes it
                init_vals = {par: np.zeros(ndim_dict[par]) for par in self._g.predecessors_iter(var)}
                comp_name = 'comp__%s' % var
                if 'expr' in nattrs:
                    # expression-defined variable -> ExecComp
                    eqn = '{}={}'.format(var, nattrs['expr'])
                    init_vals[var] = np.zeros(ndim)
                    # noinspection PyTypeChecker
                    grp.add(comp_name, omdao.ExecComp(eqn, **init_vals), promotes=prom)
                elif 'fun_list' in nattrs:
                    # function-defined variable -> VecFunComponent
                    params = nattrs['params']
                    fun_list = nattrs['fun_list']
                    vec_params = nattrs['vec_params']
                    comp = VecFunComponent(var, fun_list, params, vector_params=vec_params)
                    # noinspection PyTypeChecker
                    grp.add(comp_name, comp, promotes=prom)
                else:
                    raise Exception('Unknown attributes: {}'.format(nattrs))
        return grp, input_bounds
================================================
FILE: bag/simulation/LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2018, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: bag/simulation/__init__.py
================================================
# -*- coding: utf-8 -*-
"""This package defines various utility classes for running simulations and data post-processing.
"""
================================================
FILE: bag/simulation/core.py
================================================
# -*- coding: utf-8 -*-
from typing import TYPE_CHECKING, Optional, Dict, Any, Tuple, List, Iterable, Sequence
import abc
import importlib
import itertools
import os
import yaml
from bag import float_to_si_string
from bag.io import read_yaml, open_file, load_sim_results, save_sim_results, load_sim_file
from bag.layout import RoutingGrid, TemplateDB
from bag.concurrent.core import batch_async_task
from bag import BagProject
if TYPE_CHECKING:
import numpy as np
from bag.core import Testbench
class TestbenchManager(abc.ABC):
    """A class that creates and sets up a testbench for simulation, then saves the result.

    This class is used by MeasurementManager to run simulations.

    Parameters
    ----------
    data_fname : str
        Simulation data file name.
    tb_name : str
        testbench name.
    impl_lib : str
        implementation library name.
    specs : Dict[str, Any]
        testbench specs.
    sim_view_list : Sequence[Tuple[str, str]]
        simulation view list.
    env_list : Sequence[str]
        simulation environments list.
    """

    def __init__(self,
                 data_fname,  # type: str
                 tb_name,  # type: str
                 impl_lib,  # type: str
                 specs,  # type: Dict[str, Any]
                 sim_view_list,  # type: Sequence[Tuple[str, str]]
                 env_list,  # type: Sequence[str]
                 ):
        # type: (...) -> None
        # store an absolute path so results are saved to a fixed location
        # regardless of the working directory at simulation time.
        self.data_fname = os.path.abspath(data_fname)
        self.tb_name = tb_name
        self.impl_lib = impl_lib
        self.specs = specs
        self.sim_view_list = sim_view_list
        self.env_list = env_list

    @abc.abstractmethod
    def setup_testbench(self, tb):
        # type: (Testbench) -> None
        """Configure the simulation state of the given testbench.

        No need to call update_testbench(), set_simulation_environments(), and
        set_simulation_view().  These are called for you.

        Parameters
        ----------
        tb : Testbench
            the simulation Testbench instance.
        """
        pass

    async def setup_and_simulate(self, prj: 'BagProject',
                                 sch_params: Dict[str, Any]) -> Dict[str, Any]:
        """Create or load the testbench, configure it, simulate, and save results.

        Parameters
        ----------
        prj : BagProject
            the BagProject instance.
        sch_params : Dict[str, Any]
            testbench schematic parameters.  If None, the testbench is assumed
            to already exist in impl_lib and is loaded instead of created.

        Returns
        -------
        results : Dict[str, Any]
            the raw simulation results dictionary.
        """
        if sch_params is None:
            print('loading testbench %s' % self.tb_name)
            tb = prj.load_testbench(self.impl_lib, self.tb_name)
        else:
            print('Creating testbench %s' % self.tb_name)
            tb = self._create_tb_schematic(prj, sch_params)

        print('Configuring testbench %s' % self.tb_name)
        # configuration order: environments first, then subclass-specific
        # setup, then per-cell simulation views, and finally commit the
        # changes with update_testbench().
        tb.set_simulation_environments(self.env_list)
        self.setup_testbench(tb)
        for cell_name, view_name in self.sim_view_list:
            tb.set_simulation_view(self.impl_lib, cell_name, view_name)
        tb.update_testbench()

        # run simulation and save/return raw result
        print('Simulating %s' % self.tb_name)
        save_dir = await tb.async_run_simulation()
        print('Finished simulating %s' % self.tb_name)
        results = load_sim_results(save_dir)
        save_sim_results(results, self.data_fname)
        return results

    @classmethod
    def record_array(cls, output_dict, data_dict, arr, arr_name, sweep_params):
        # type: (Dict[str, Any], Dict[str, Any], np.ndarray, str, List[str]) -> None
        """Add the given numpy array into BAG's data structure dictionary.

        This method adds the given numpy array to output_dict, and makes sure
        sweep parameter information is treated properly.

        Parameters
        ----------
        output_dict : Dict[str, Any]
            the output dictionary.
        data_dict : Dict[str, Any]
            the raw simulation data dictionary.
        arr : np.ndarray
            the numpy array to record.
        arr_name : str
            name of the given numpy array.
        sweep_params : List[str]
            a list of sweep parameters for the given array.
        """
        # create the sweep parameter table on first use
        swp_info = output_dict.setdefault('sweep_params', {})

        # copy sweep parameter values from raw data, without overwriting
        # entries recorded by a previous call.
        for var in sweep_params:
            if var not in output_dict:
                output_dict[var] = data_dict[var]

        swp_info[arr_name] = sweep_params
        output_dict[arr_name] = arr

    def _create_tb_schematic(self, prj, sch_params):
        # type: (BagProject, Dict[str, Any]) -> Testbench
        """Helper method to create a testbench schematic.

        Parameters
        ----------
        prj : BagProject
            the BagProject instance.
        sch_params : Dict[str, Any]
            the testbench schematic parameters dictionary.

        Returns
        -------
        tb : Testbench
            the simulation Testbench instance.
        """
        tb_lib = self.specs['tb_lib']
        tb_cell = self.specs['tb_cell']
        tb_sch = prj.create_design_module(tb_lib, tb_cell)
        tb_sch.design(**sch_params)
        tb_sch.implement_design(self.impl_lib, top_cell_name=self.tb_name)
        return prj.configure_testbench(self.impl_lib, self.tb_name)
class MeasurementManager(object, metaclass=abc.ABCMeta):
    """A class that handles circuit performance measurement.

    This class handles all the steps needed to measure a specific performance
    metric of the device-under-test.  This may involve creating and simulating
    multiple different testbenches, where configuration of successive testbenches
    depends on previous simulation results.  This class reduces the potentially
    complex measurement tasks into a few simple abstract methods that designers
    simply have to implement.

    Parameters
    ----------
    data_dir : str
        Simulation data directory.
    meas_name : str
        measurement setup name.
    impl_lib : str
        implementation library name.
    specs : Dict[str, Any]
        the measurement specification dictionary.
    wrapper_lookup : Dict[str, str]
        the DUT wrapper cell name lookup table.
    sim_view_list : Sequence[Tuple[str, str]]
        simulation view list
    env_list : Sequence[str]
        simulation environments list.
    """

    def __init__(self,  # type: MeasurementManager
                 data_dir,  # type: str
                 meas_name,  # type: str
                 impl_lib,  # type: str
                 specs,  # type: Dict[str, Any]
                 wrapper_lookup,  # type: Dict[str, str]
                 sim_view_list,  # type: Sequence[Tuple[str, str]]
                 env_list,  # type: Sequence[str]
                 ):
        # type: (...) -> None
        self.data_dir = os.path.abspath(data_dir)
        self.impl_lib = impl_lib
        self.meas_name = meas_name
        self.specs = specs
        self.wrapper_lookup = wrapper_lookup
        self.sim_view_list = sim_view_list
        self.env_list = env_list
        # make sure the data directory exists before any state files are written
        os.makedirs(self.data_dir, exist_ok=True)

    @abc.abstractmethod
    def get_initial_state(self):
        # type: () -> str
        """Returns the initial FSM state."""
        return ''

    # noinspection PyUnusedLocal
    def get_testbench_info(self,  # type: MeasurementManager
                           state,  # type: str
                           prev_output,  # type: Optional[Dict[str, Any]]
                           ):
        # type: (...) -> Tuple[str, str, Dict[str, Any], Optional[Dict[str, Any]]]
        """Get information about the next testbench.

        Override this method to perform more complex operations.

        Parameters
        ----------
        state : str
            the current FSM state.
        prev_output : Optional[Dict[str, Any]]
            the previous post-processing output.

        Returns
        -------
        tb_name : str
            cell name of the next testbench.  Should incorporate self.meas_name to avoid
            collision with testbench for other designs.
        tb_type : str
            the next testbench type.
        tb_specs : Dict[str, Any]
            the testbench specification dictionary.
        tb_params : Optional[Dict[str, Any]]
            the next testbench schematic parameters.  If we are reusing an existing
            testbench, this should be None.
        """
        # default behavior: the FSM state name doubles as the testbench type
        tb_type = state
        tb_name = self.get_testbench_name(tb_type)
        tb_specs = self.get_testbench_specs(tb_type).copy()
        tb_params = self.get_default_tb_sch_params(tb_type)

        return tb_name, tb_type, tb_specs, tb_params

    @abc.abstractmethod
    def process_output(self, state, data, tb_manager):
        # type: (str, Dict[str, Any], TestbenchManager) -> Tuple[bool, str, Dict[str, Any]]
        """Process simulation output data.

        Parameters
        ----------
        state : str
            the current FSM state
        data : Dict[str, Any]
            simulation data dictionary.
        tb_manager : TestbenchManager
            the testbench manager object.

        Returns
        -------
        done : bool
            True if this measurement is finished.
        next_state : str
            the next FSM state.
        output : Dict[str, Any]
            a dictionary containing post-processed data.
        """
        return False, '', {}

    def get_testbench_name(self, tb_type):
        # type: (str) -> str
        """Returns a default testbench name given testbench type."""
        return '%s_TB_%s' % (self.meas_name, tb_type)

    async def async_measure_performance(self,
                                        prj: 'BagProject',
                                        load_from_file: bool = False) -> Dict[str, Any]:
        """A coroutine that performs measurement.

        The measurement is done like a FSM.  On each iteration, depending on the current
        state, it creates a new testbench (or reuse an existing one) and simulate it.
        It then post-process the simulation data to determine the next FSM state, or
        if the measurement is done.

        Parameters
        ----------
        prj : BagProject
            the BagProject instance.
        load_from_file : bool
            If True, then load existing simulation data instead of running actual simulation.

        Returns
        -------
        output : Dict[str, Any]
            the last dictionary returned by process_output().
        """
        cur_state = self.get_initial_state()
        prev_output = None
        done = False

        while not done:
            # create and setup testbench
            tb_name, tb_type, tb_specs, tb_sch_params = self.get_testbench_info(cur_state,
                                                                                prev_output)

            # dynamically import the TestbenchManager class named in the specs
            tb_package = tb_specs['tb_package']
            tb_cls_name = tb_specs['tb_class']
            tb_module = importlib.import_module(tb_package)
            tb_cls = getattr(tb_module, tb_cls_name)
            # raw simulation data is keyed by FSM state, one HDF5 file per state
            raw_data_fname = os.path.join(self.data_dir, '%s.hdf5' % cur_state)

            tb_manager = tb_cls(raw_data_fname, tb_name, self.impl_lib, tb_specs,
                                self.sim_view_list, self.env_list)

            if load_from_file:
                print('Measurement %s in state %s, '
                      'load sim data from file.' % (self.meas_name, cur_state))
                if os.path.isfile(raw_data_fname):
                    cur_results = load_sim_file(raw_data_fname)
                else:
                    # fall back to actual simulation when cached data is missing
                    print('Cannot find data file, simulating...')
                    cur_results = await tb_manager.setup_and_simulate(prj, tb_sch_params)
            else:
                cur_results = await tb_manager.setup_and_simulate(prj, tb_sch_params)

            # process and save simulation data
            print('Measurement %s in state %s, '
                  'processing data from %s' % (self.meas_name, cur_state, tb_name))
            done, next_state, prev_output = self.process_output(cur_state, cur_results, tb_manager)
            # persist each state's post-processed output for later retrieval
            # via get_state_output()
            with open_file(os.path.join(self.data_dir, '%s.yaml' % cur_state), 'w') as f:
                yaml.dump(prev_output, f)

            cur_state = next_state

        return prev_output

    def get_state_output(self, state):
        # type: (str) -> Dict[str, Any]
        """Get the post-processed output of the given state."""
        file_name = os.path.join(self.data_dir, '%s.yaml' % state)
        return read_yaml(file_name)

    def get_testbench_specs(self, tb_type):
        # type: (str) -> Dict[str, Any]
        """Helper method to get testbench specifications."""
        return self.specs['testbenches'][tb_type]

    def get_default_tb_sch_params(self, tb_type):
        # type: (str) -> Dict[str, Any]
        """Helper method to return a default testbench schematic parameters dictionary.

        This method loads default values from the specification file, then fills in
        dut_lib and dut_cell for you.

        Parameters
        ----------
        tb_type : str
            the testbench type.

        Returns
        -------
        sch_params : Dict[str, Any]
            the default schematic parameters dictionary.
        """
        tb_specs = self.get_testbench_specs(tb_type)
        wrapper_type = tb_specs['wrapper_type']

        # copy so callers can freely modify the returned dictionary
        if 'sch_params' in tb_specs:
            tb_params = tb_specs['sch_params'].copy()
        else:
            tb_params = {}

        tb_params['dut_lib'] = self.impl_lib
        tb_params['dut_cell'] = self.wrapper_lookup[wrapper_type]
        return tb_params
class DesignManager(object):
    """A class that manages instantiating design instances and running simulations.

    This class provides various methods to allow you to sweep design parameters
    and generate multiple instances at once.  It also provides methods for running
    simulations and helps you interface with TestbenchManager instances.

    Parameters
    ----------
    prj : Optional[BagProject]
        The BagProject instance.
    spec_file : str
        the specification file name or the data directory.
    """

    def __init__(self, prj, spec_file):
        # type: (Optional[BagProject], str) -> None
        self.prj = prj
        self._specs = None

        # spec_file may be either the YAML spec file itself, or a data
        # directory containing a 'specs.yaml' saved by a previous run.
        if os.path.isfile(spec_file):
            self._specs = read_yaml(spec_file)
            self._root_dir = os.path.abspath(self._specs['root_dir'])
        elif os.path.isdir(spec_file):
            self._root_dir = os.path.abspath(spec_file)
            self._specs = read_yaml(os.path.join(self._root_dir, 'specs.yaml'))
        else:
            raise ValueError('%s is neither data directory or specification file.' % spec_file)

        # sort sweep variable names so design name generation is deterministic
        self._swp_var_list = tuple(sorted(self._specs['sweep_params'].keys()))

    @classmethod
    def load_state(cls, prj, root_dir):
        # type: (BagProject, str) -> DesignManager
        """Create the DesignManager instance corresponding to data in the given directory."""
        return cls(prj, root_dir)

    @classmethod
    def get_measurement_name(cls, dsn_name, meas_type):
        # type: (str, str) -> str
        """Returns the measurement name.

        Parameters
        ----------
        dsn_name : str
            design cell name.
        meas_type : str
            measurement type.

        Returns
        -------
        meas_name : str
            measurement name
        """
        return '%s_MEAS_%s' % (dsn_name, meas_type)

    @classmethod
    def get_wrapper_name(cls, dut_name, wrapper_name):
        # type: (str, str) -> str
        """Returns the wrapper cell name corresponding to the given DUT."""
        return '%s_WRAPPER_%s' % (dut_name, wrapper_name)

    @property
    def specs(self):
        # type: () -> Dict[str, Any]
        """Return the specification dictionary."""
        return self._specs

    @property
    def swp_var_list(self):
        # type: () -> Tuple[str, ...]
        """Sorted tuple of sweep variable names."""
        return self._swp_var_list

    async def extract_design(self, lib_name: str, dsn_name: str,
                             rcx_params: Optional[Dict[str, Any]]) -> None:
        """A coroutine that runs LVS/RCX on a given design.

        Parameters
        ----------
        lib_name : str
            library name.
        dsn_name : str
            design cell name.
        rcx_params : Optional[Dict[str, Any]]
            extraction parameters dictionary.

        Raises
        ------
        ValueError
            if LVS or RCX fails; the message contains the log file path.
        """
        print('Running LVS on %s' % dsn_name)
        lvs_passed, lvs_log = await self.prj.async_run_lvs(lib_name, dsn_name)
        if not lvs_passed:
            raise ValueError('LVS failed for %s.  Log file: %s' % (dsn_name, lvs_log))

        print('LVS passed on %s' % dsn_name)
        print('Running RCX on %s' % dsn_name)
        rcx_passed, rcx_log = await self.prj.async_run_rcx(lib_name, dsn_name,
                                                           rcx_params=rcx_params)
        if not rcx_passed:
            raise ValueError('RCX failed for %s.  Log file: %s' % (dsn_name, rcx_log))
        print('RCX passed on %s' % dsn_name)

    async def verify_design(self, lib_name: str, dsn_name: str,
                            load_from_file: bool = False) -> None:
        """Run all measurements on the given design.

        Parameters
        ----------
        lib_name : str
            library name.
        dsn_name : str
            design cell name.
        load_from_file : bool
            If True, then load existing simulation data instead of running actual simulation.
        """
        meas_list = self.specs['measurements']
        summary_fname = self.specs['summary_fname']
        view_name = self.specs['view_name']
        env_list = self.specs['env_list']
        wrapper_list = self.specs['dut_wrappers']

        # build wrapper-type -> cell-name lookup; '' maps to the bare DUT
        wrapper_lookup = {'': dsn_name}
        for wrapper_config in wrapper_list:
            wrapper_type = wrapper_config['name']
            wrapper_lookup[wrapper_type] = self.get_wrapper_name(dsn_name, wrapper_type)

        result_summary = {}
        dsn_data_dir = os.path.join(self._root_dir, dsn_name)
        for meas_specs in meas_list:
            meas_type = meas_specs['meas_type']
            meas_package = meas_specs['meas_package']
            meas_cls_name = meas_specs['meas_class']
            out_fname = meas_specs['out_fname']
            meas_name = self.get_measurement_name(dsn_name, meas_type)
            data_dir = self.get_measurement_directory(dsn_name, meas_type)

            # dynamically import the MeasurementManager class named in the specs
            meas_module = importlib.import_module(meas_package)
            meas_cls = getattr(meas_module, meas_cls_name)

            meas_manager = meas_cls(data_dir, meas_name, lib_name, meas_specs,
                                    wrapper_lookup, [(dsn_name, view_name)], env_list)
            print('Performing measurement %s on %s' % (meas_name, dsn_name))
            meas_res = await meas_manager.async_measure_performance(self.prj,
                                                                    load_from_file=load_from_file)
            print('Measurement %s finished on %s' % (meas_name, dsn_name))

            # save each measurement result individually, then add it to summary
            with open_file(os.path.join(data_dir, out_fname), 'w') as f:
                yaml.dump(meas_res, f)
            result_summary[meas_type] = meas_res

        with open_file(os.path.join(dsn_data_dir, summary_fname), 'w') as f:
            yaml.dump(result_summary, f)

    async def main_task(self, lib_name: str, dsn_name: str,
                        rcx_params: Optional[Dict[str, Any]],
                        extract: bool = True,
                        measure: bool = True,
                        load_from_file: bool = False) -> None:
        """The main coroutine: optionally extract, then optionally measure one design."""
        if extract:
            await self.extract_design(lib_name, dsn_name, rcx_params)
        if measure:
            await self.verify_design(lib_name, dsn_name, load_from_file=load_from_file)

    def characterize_designs(self, generate=True, measure=True, load_from_file=False):
        # type: (bool, bool, bool) -> None
        """Sweep all designs and characterize them.

        Parameters
        ----------
        generate : bool
            If True, create schematic/layout and run LVS/RCX.
        measure : bool
            If True, run all measurements.
        load_from_file : bool
            If True, measurements will load existing simulation data
            instead of running simulations.
        """
        if generate:
            # only extract when simulating a non-schematic (extracted) view
            extract = self.specs['view_name'] != 'schematic'
            self.create_designs(extract)
        else:
            extract = False

        rcx_params = self.specs.get('rcx_params', None)
        impl_lib = self.specs['impl_lib']
        dsn_name_list = [self.get_design_name(combo_list)
                         for combo_list in self.get_combinations_iter()]

        # run extraction/measurement of all swept designs concurrently
        coro_list = [self.main_task(impl_lib, dsn_name, rcx_params, extract=extract,
                                    measure=measure, load_from_file=load_from_file)
                     for dsn_name in dsn_name_list]

        results = batch_async_task(coro_list)
        if results is not None:
            # re-raise the first exception captured by any coroutine
            for val in results:
                if isinstance(val, Exception):
                    raise val

    def get_result(self, dsn_name):
        # type: (str) -> Dict[str, Any]
        """Returns the measurement result summary dictionary.

        Parameters
        ----------
        dsn_name : str
            the design name.

        Returns
        -------
        result : Dict[str, Any]
            the result dictionary.
        """
        fname = os.path.join(self._root_dir, dsn_name, self.specs['summary_fname'])
        summary = read_yaml(fname)
        return summary

    def test_layout(self, gen_sch=True):
        # type: (bool) -> None
        """Create a test schematic and layout for debugging purposes"""
        sweep_params = self.specs['sweep_params']
        dsn_name = self.specs['dsn_basename'] + '_TEST'

        # use the first value of every sweep variable for the test instance
        val_list = tuple((sweep_params[key][0] for key in self.swp_var_list))
        lay_params = self.get_layout_params(val_list)

        temp_db = self.make_tdb()
        print('create test layout')
        sch_params_list = self.create_dut_layouts([lay_params], [dsn_name], temp_db)

        if gen_sch:
            print('create test schematic')
            self.create_dut_schematics(sch_params_list, [dsn_name], gen_wrappers=False)
        print('done')

    def create_designs(self, create_layout):
        # type: (bool) -> None
        """Create DUT schematics/layouts.
        """
        if self.prj is None:
            raise ValueError('BagProject instance is not given.')

        temp_db = self.make_tdb()

        # make layouts
        dsn_name_list, lay_params_list, combo_list_list = [], [], []
        for combo_list in self.get_combinations_iter():
            dsn_name = self.get_design_name(combo_list)
            lay_params = self.get_layout_params(combo_list)
            dsn_name_list.append(dsn_name)
            lay_params_list.append(lay_params)
            combo_list_list.append(combo_list)

        if create_layout:
            print('creating all layouts.')
            # layout generation also produces the schematic parameters
            sch_params_list = self.create_dut_layouts(lay_params_list, dsn_name_list, temp_db)
        else:
            print('schematic simulation, skipping layouts.')
            sch_params_list = [self.get_schematic_params(combo_list)
                               for combo_list in self.get_combinations_iter()]

        print('creating all schematics.')
        self.create_dut_schematics(sch_params_list, dsn_name_list, gen_wrappers=True)

        print('design generation done.')

    def get_swp_var_values(self, var):
        # type: (str) -> List[Any]
        """Returns a list of valid sweep variable values.

        Parameters
        ----------
        var : str
            the sweep variable name.

        Returns
        -------
        val_list : List[Any]
            the sweep values of the given variable.
        """
        return self.specs['sweep_params'][var]

    def get_combinations_iter(self):
        # type: () -> Iterable[Tuple[Any, ...]]
        """Returns an iterator of schematic parameter combinations we sweep over.

        Returns
        -------
        combo_iter : Iterable[Tuple[Any, ...]]
            an iterator of tuples of schematic parameters values that we sweep over.
        """
        swp_par_dict = self.specs['sweep_params']
        # cartesian product over all sweep variables, in swp_var_list order
        return itertools.product(*(swp_par_dict[var] for var in self.swp_var_list))

    def get_dsn_name_iter(self):
        # type: () -> Iterable[str]
        """Returns an iterator over design names.

        Returns
        -------
        dsn_name_iter : Iterable[str]
            an iterator of design names.
        """
        return (self.get_design_name(combo_list) for combo_list in self.get_combinations_iter())

    def get_measurement_directory(self, dsn_name, meas_type):
        # type: (str, str) -> str
        """Returns the data directory for the given design and measurement type."""
        meas_name = self.get_measurement_name(dsn_name, meas_type)
        return os.path.join(self._root_dir, dsn_name, meas_name)

    def make_tdb(self):
        # type: () -> TemplateDB
        """Create and return a new TemplateDB object.

        Returns
        -------
        tdb : TemplateDB
            the TemplateDB object.
        """
        if self.prj is None:
            raise ValueError('BagProject instance is not given.')

        target_lib = self.specs['impl_lib']
        grid_specs = self.specs['routing_grid']
        layers = grid_specs['layers']
        spaces = grid_specs['spaces']
        widths = grid_specs['widths']
        bot_dir = grid_specs['bot_dir']
        width_override = grid_specs.get('width_override', None)

        routing_grid = RoutingGrid(self.prj.tech_info, layers, spaces, widths, bot_dir, width_override=width_override)
        tdb = TemplateDB('', routing_grid, target_lib, use_cybagoa=True)
        return tdb

    def get_layout_params(self, val_list):
        # type: (Tuple[Any, ...]) -> Dict[str, Any]
        """Returns the layout dictionary from the given sweep parameter values."""
        lay_params = self.specs['layout_params'].copy()
        for var, val in zip(self.swp_var_list, val_list):
            lay_params[var] = val

        return lay_params

    def get_schematic_params(self, val_list):
        # type: (Tuple[Any, ...]) -> Dict[str, Any]
        """Returns the schematic parameters dictionary from the given sweep parameter values."""
        lay_params = self.specs['schematic_params'].copy()
        for var, val in zip(self.swp_var_list, val_list):
            lay_params[var] = val

        return lay_params

    def create_dut_schematics(self, sch_params_list, cell_name_list, gen_wrappers=True):
        # type: (Sequence[Dict[str, Any]], Sequence[str], bool) -> None
        """Batch-create DUT schematics (and optionally their wrappers)."""
        dut_lib = self.specs['dut_lib']
        dut_cell = self.specs['dut_cell']
        impl_lib = self.specs['impl_lib']
        wrapper_list = self.specs['dut_wrappers']

        inst_list, name_list = [], []
        for sch_params, cur_name in zip(sch_params_list, cell_name_list):
            dsn = self.prj.create_design_module(dut_lib, dut_cell)
            dsn.design(**sch_params)
            inst_list.append(dsn)
            name_list.append(cur_name)
            if gen_wrappers:
                # also generate every configured wrapper around this DUT
                for wrapper_config in wrapper_list:
                    wrapper_name = wrapper_config['name']
                    wrapper_lib = wrapper_config['lib']
                    wrapper_cell = wrapper_config['cell']
                    wrapper_params = wrapper_config['params'].copy()
                    wrapper_params['dut_lib'] = impl_lib
                    wrapper_params['dut_cell'] = cur_name
                    dsn = self.prj.create_design_module(wrapper_lib, wrapper_cell)
                    dsn.design(**wrapper_params)
                    inst_list.append(dsn)
                    name_list.append(self.get_wrapper_name(cur_name, wrapper_name))

        self.prj.batch_schematic(impl_lib, inst_list, name_list=name_list)

    def create_dut_layouts(self, lay_params_list, cell_name_list, temp_db):
        # type: (Sequence[Dict[str, Any]], Sequence[str], TemplateDB) -> Sequence[Dict[str, Any]]
        """Create multiple layouts"""
        if self.prj is None:
            raise ValueError('BagProject instance is not given.')

        cls_package = self.specs['layout_package']
        cls_name = self.specs['layout_class']

        # dynamically import the layout generator class named in the specs
        lay_module = importlib.import_module(cls_package)
        temp_cls = getattr(lay_module, cls_name)

        temp_list, sch_params_list = [], []
        for lay_params in lay_params_list:
            template = temp_db.new_template(params=lay_params, temp_cls=temp_cls, debug=False)
            temp_list.append(template)
            sch_params_list.append(template.sch_params)
        temp_db.batch_layout(self.prj, temp_list, cell_name_list)
        return sch_params_list

    def get_design_name(self, combo_list):
        # type: (Sequence[Any]) -> str
        """Generate cell names based on sweep parameter values."""
        name_base = self.specs['dsn_basename']
        suffix = ''
        # encode each sweep value into the cell name; floats use SI notation
        for var, val in zip(self.swp_var_list, combo_list):
            if isinstance(val, str):
                suffix += '_%s_%s' % (var, val)
            elif isinstance(val, int):
                suffix += '_%s_%d' % (var, val)
            elif isinstance(val, float):
                suffix += '_%s_%s' % (var, float_to_si_string(val))
            else:
                raise ValueError('Unsupported parameter type: %s' % (type(val)))

        return name_base + suffix
================================================
FILE: bag/simulation/core_v2.py
================================================
from __future__ import annotations
from typing import (
TYPE_CHECKING, Optional, Dict, Any, Type, cast, List
)
import abc
from pathlib import Path
import numpy as np
from ..io.sim_data import load_sim_results, save_sim_results, load_sim_file
from ..concurrent.core import batch_async_task
from ..core import _import_class_from_str
from ..util.immutable import to_immutable
if TYPE_CHECKING:
from ..core import BagProject
from ..core import Testbench
from bag.util.immutable import ImmutableType
class TestbenchManager(abc.ABC):
    """A class that creates and sets up a testbench for simulation, then saves the result.

    This class is used by MeasurementManager to run simulations.

    Parameters
    ----------
    work_dir : Path
        working directory path.
    """

    def __init__(self, work_dir: Path) -> None:
        self._work_dir = work_dir.resolve()
        self._work_dir.mkdir(parents=True, exist_ok=True)
        # testbench specification dictionary; set by setup()/load_results()
        self._specs = None

    @property
    def work_dir(self) -> Path:
        """The working directory path."""
        return self._work_dir

    @property
    def specs(self):
        """The testbench specification dictionary (None before setup())."""
        return self._specs

    @property
    def sim_vars(self):
        """Simulation variables from the specs; empty dict if not set."""
        return self.specs.get('sim_vars', {})

    # noinspection PyMethodMayBeStatic
    def pre_setup(self, tb_params: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
        """Override to perform any operations prior to calling the setup() function.

        Parameters
        ----------
        tb_params :
            the test bench schematic parameters.  None means the previous test bench
            will be reused.  This dictionary should not be modified.

        Returns
        -------
        new_params :
            the schematic parameters to use.  Could be a modified copy of the original.
        """
        return tb_params

    def setup(self, bprj, impl_lib, impl_cell, sim_view_list, env_list,
              tb_dict, wrapper_dict=None, gen_tb=True, gen_wrapper=True,
              run_sim=True) -> Optional[Testbench]:
        """Generate (if requested) and configure the testbench.

        Returns the configured Testbench instance, or None when gen_tb is
        False and run_sim is False (nothing to do).
        """
        tb_dict = self.pre_setup(tb_dict)
        self._specs = tb_dict
        if wrapper_dict is None:
            # NOTE(review): pop() removes 'wrapper' from tb_dict, mutating the
            # caller's dictionary — a second setup() call with the same dict
            # will not find the wrapper entry.  Confirm callers pass a copy.
            wrapper_dict = tb_dict.pop('wrapper', None)
        has_wrapper = wrapper_dict is not None
        wrapper_lib = wrapper_cell = wrapped_cell = wrapper_params = None
        if has_wrapper:
            wrapper_lib = wrapper_dict['wrapper_lib']
            wrapper_cell = wrapper_dict['wrapper_cell']
            wrapper_params = wrapper_dict.get('params', {})
            # default wrapper suffix is the wrapper cell name
            wrapper_suffix = wrapper_dict.get('wrapper_suffix', '')
            if not wrapper_suffix:
                wrapper_suffix = f'{wrapper_cell}'
            wrapped_cell = f'{impl_cell}_{wrapper_suffix}'
        tb_lib = tb_dict['tb_lib']
        tb_cell = tb_dict['tb_cell']
        tb_params = tb_dict.get('tb_params', {})
        # default testbench suffix is the testbench cell name
        tb_suffix = tb_dict.get('tb_suffix', '')
        if not tb_suffix:
            tb_suffix = f'{tb_cell}'
        tb_name = f'{impl_cell}_{tb_suffix}'
        if has_wrapper and gen_wrapper:
            print(f'Generating wrapper {impl_lib}_{wrapped_cell}')
            master = bprj.create_design_module(lib_name=wrapper_lib, cell_name=wrapper_cell)
            # substitute the DUT (impl_lib, impl_cell) into the wrapper params
            bprj.replace_dut_in_wrapper(wrapper_params, impl_lib, impl_cell)
            master.design(**wrapper_params)
            master.implement_design(impl_lib, wrapped_cell)
            print('wrapper generated.')
        if gen_tb:
            # NOTE(review): tb_name already starts with impl_cell, so this
            # message prints the cell name twice.
            print(f'Generating testbench {impl_cell}_{tb_name}')
            tb_master = bprj.create_design_module(tb_lib, tb_cell)
            # the testbench instantiates the wrapped cell when a wrapper exists
            dut_cell = wrapped_cell if has_wrapper else impl_cell
            tb_master.design(dut_lib=impl_lib, dut_cell=dut_cell, **tb_params)
            tb_master.implement_design(impl_lib, tb_name)
            print('testbench generated.')
            tb = bprj.configure_testbench(impl_lib, tb_name)
        else:
            if run_sim:
                print(f'loading testbench {impl_lib}_{tb_name}')
                tb = bprj.load_testbench(impl_lib, tb_name)
            else:
                # neither generating nor simulating: nothing to configure
                return None
        print(f'Configuring testbench {tb_name}')
        sim_swp_params = tb_dict.get('sim_swp_params', {})
        sim_vars = tb_dict.get('sim_vars', {})
        sim_outputs = tb_dict.get('sim_outputs', {})
        tb.set_simulation_environments(env_list)
        for cell_name, view_name in sim_view_list:
            tb.set_simulation_view(impl_lib, cell_name, view_name)
        for key, val in sim_vars.items():
            tb.set_parameter(key, val)
        for key, val in sim_swp_params.items():
            tb.set_sweep_parameter(key, **val)
        for key, val in sim_outputs.items():
            tb.add_output(key, val)
        # commit all configuration changes
        tb.update_testbench()
        print(f'Testbench configured.')
        return tb

    async def setup_and_simulate(self, bprj, impl_lib, impl_cell, sim_view_list, env_list, tb_dict,
                                 wrapper_dict, gen_tb, gen_wrapper, run_sim):
        """A coroutine that sets up the testbench and (optionally) simulates it.

        Results are saved to <work_dir>/<impl_cell>/<tb_cell>_data.hdf5.
        Returns None when run_sim is False.
        """
        tb: Testbench = self.setup(bprj, impl_lib=impl_lib, impl_cell=impl_cell,
                                   sim_view_list=sim_view_list, env_list=env_list,
                                   tb_dict=tb_dict, wrapper_dict=wrapper_dict, gen_tb=gen_tb,
                                   gen_wrapper=gen_wrapper, run_sim=run_sim)
        if run_sim:
            print(f'Simulating {tb.cell}')
            save_dir = await tb.async_run_simulation()
            print(f'Finished simulating {tb.cell}')
            results = load_sim_results(save_dir)
            results_dir = str(self.work_dir / impl_cell / f'{tb.cell}_data.hdf5')
            save_sim_results(results, results_dir)
            return results

    def simulate(self, bprj, impl_lib, impl_cell, sim_view_list, env_list, tb_dict,
                 wrapper_dict=None, gen_tb=True, gen_wrapper=True, run_sim=True):
        """Blocking wrapper around setup_and_simulate(); re-raises any exception."""
        coro = self.setup_and_simulate(bprj, impl_lib=impl_lib, impl_cell=impl_cell,
                                       sim_view_list=sim_view_list, env_list=env_list,
                                       tb_dict=tb_dict, wrapper_dict=wrapper_dict, gen_tb=gen_tb,
                                       gen_wrapper=gen_wrapper, run_sim=run_sim)
        results = batch_async_task([coro])[0]
        if isinstance(results, Exception):
            raise results
        return results

    def load_results(self, impl_cell, tb_dict):
        """Load previously-saved simulation results for the given cell/testbench.

        Raises ValueError if the results file does not exist.
        """
        self._specs = tb_dict
        tb_cell = tb_dict['tb_cell']
        # reconstruct the testbench name the same way setup() does
        tb_suffix = tb_dict.get('tb_suffix', '')
        if not tb_suffix:
            tb_suffix = f'{tb_cell}'
        tb_name = f'{impl_cell}_{tb_suffix}'
        tb_fname = self.work_dir / impl_cell / f'{tb_name}_data.hdf5'
        if tb_fname.exists():
            return load_sim_file(str(tb_fname))
        raise ValueError(f'simulation results does not exist in {str(tb_fname)}')
class MeasurementManager(abc.ABC):
    """A class that orchestrates TestbenchManagers to measure circuit performance.

    Subclasses define the measurement flow in code by implementing run_flow(),
    using run_tb() to generate/simulate individual testbenches.

    Parameters
    ----------
    work_dir : Path
        working directory for simulation data.
    mm_specs : Dict[str, Any]
        the measurement specification dictionary.  Must contain a 'testbenches'
        entry mapping testbench names to their configuration dictionaries.
    """

    def __init__(self, work_dir: Path, mm_specs: Dict[str, Any]) -> None:
        self._work_dir = work_dir
        self._specs = mm_specs
        # testbench name -> TestbenchManager instance
        self.tb_managers: Dict[str, TestbenchManager] = {}
        # testbench name -> testbench configuration dictionary
        self.tb_params: Dict[str, Dict[str, Any]] = {}
        # immutable wrapper spec -> wrapper spec; caches wrappers that have
        # already been generated so they are not regenerated.
        self._wrapper_lookup: Dict[ImmutableType, Dict[str, Any]] = {}
        # fill up tb_managers and tb_params
        self._prepare_tb_specs()
        # generation/simulation control flags; overwritten by measure()
        self.gen_wrapper: bool = True
        self.gen_tb: bool = True
        self.run_sims: bool = True

    @property
    def specs(self):
        """The measurement specification dictionary."""
        return self._specs

    @property
    def work_dir(self):
        """The working directory path."""
        return self._work_dir

    def _prepare_tb_specs(self) -> None:
        """Create testbench manager objects and fill up the mappings."""
        testbenches = self.specs['testbenches']
        for tb_name, tb_dict in testbenches.items():
            # dynamically import the TestbenchManager subclass named in specs
            tbm_cls = _import_class_from_str(tb_dict['tbm_cls'])
            tbm_cls = cast(Type[TestbenchManager], tbm_cls)
            self.tb_params[tb_name] = tb_dict
            self.tb_managers[tb_name] = tbm_cls(self._work_dir)

    def _prepare_tbm_dict(self, impl_cell, tbm_dict, extract):
        """Fill in default 'sim_view_list' and 'sim_envs' entries in tbm_dict.

        After this call tbm_dict is guaranteed to have both entries, inherited
        from self.specs when not already present.
        """
        if 'sim_view_list' not in tbm_dict:
            try:
                view_name = self.specs['view_name']
                tbm_dict['sim_view_list'] = [(impl_cell, view_name)]
            except KeyError:
                # Copy the spec-level default so appending the fallback view
                # below does not mutate the shared specs dictionary.
                # (Bug fix: previously the specs list itself was appended to,
                # polluting the defaults for every subsequent cell.)
                default_sim_view_list = list(self.specs.get('sim_view_list', []))
                if not default_sim_view_list:
                    view_name = 'netlist' if extract else 'schematic'
                    default_sim_view_list.append((impl_cell, view_name))
                tbm_dict['sim_view_list'] = default_sim_view_list
        if 'sim_envs' not in tbm_dict:
            try:
                default_env_list = self.specs['sim_envs']
                tbm_dict['sim_envs'] = default_env_list
            except KeyError:
                raise ValueError('Did you forget to specify simulation environment?')

    def _wrapper_exists(self, wrapper: ImmutableType) -> bool:
        """Return True if this wrapper has already been generated."""
        return wrapper in self._wrapper_lookup

    def run_tb(self, bprj, impl_lib, impl_cell, tb_name, tbm_dict=None, extract=True,
               load_results=False):
        """Generate (if needed) and simulate one testbench; return its results.

        Parameters
        ----------
        bprj : BagProject
            the BagProject instance.
        impl_lib : str
            DUT implementation library.
        impl_cell : str
            DUT implementation cell.
        tb_name : str
            name of the testbench, as listed in specs['testbenches'].
        tbm_dict : Optional[Dict[str, Any]]
            testbench configuration; if None the default from specs is used.
        extract : bool
            True to default to the post-layout 'netlist' view.
        load_results : bool
            True to load previously-saved results instead of simulating.
        """
        if tbm_dict is None:
            tbm_dict = self.tb_params[tb_name]
        tb_obj: TestbenchManager = self.tb_managers[tb_name]
        if load_results:
            return tb_obj.load_results(impl_cell, tbm_dict)
        wrapper = tbm_dict['wrapper']
        wrapper_key = to_immutable(wrapper)
        # generate the wrapper only if generation is enabled and it has not
        # been generated before.
        gen_wrapper = self.gen_wrapper and not self._wrapper_exists(wrapper_key)
        # inherit default sim_envs and sim_view_list from self.specs
        self._prepare_tbm_dict(impl_cell, tbm_dict, extract)
        sim_view_list = tbm_dict['sim_view_list']
        sim_envs = tbm_dict['sim_envs']
        # Pass wrapper_dict explicitly so TestbenchManager.setup() does not
        # pop('wrapper') out of tbm_dict, which would make a second run_tb()
        # call with the same (default) dict fail with KeyError.
        results = tb_obj.simulate(bprj, impl_lib, impl_cell, sim_view_list=sim_view_list,
                                  env_list=sim_envs, tb_dict=tbm_dict,
                                  wrapper_dict=wrapper, gen_tb=self.gen_tb,
                                  gen_wrapper=gen_wrapper, run_sim=self.run_sims)
        # Record the wrapper unconditionally: at this point it exists, either
        # because it was just generated or because it existed already.
        # (Bug fix: previously it was recorded only when NOT generated, so the
        # cache never took effect and wrappers were regenerated every call.)
        self._wrapper_lookup[wrapper_key] = wrapper
        return results

    @abc.abstractmethod
    def run_flow(self, bprj: BagProject, impl_lib: str, impl_cell: str,
                 load_results: bool = False, extract: bool = True) -> Any:
        """Define the measurement FSM in code.

        Use self.run_tb to orchestrate test benches and modify their parameters
        if necessary.  Don't call this method directly; call measure() instead.

        Parameters
        ----------
        bprj : BagProject
            BagProject object
        impl_lib :
            DUT implementation library
        impl_cell :
            DUT implementation cell
        load_results :
            True to load results; this is used when debugging post-processing functions
        extract :
            True to use post-layout extracted view for simulations

        Returns
        -------
        Any post-processed result; returning nothing is also an option.
        """
        raise NotImplementedError

    def measure(self, bprj: BagProject, impl_lib: str, impl_cell: str, load_results: bool = False,
                gen_wrapper: bool = True, gen_tb: bool = True, run_sims: bool = True,
                extract: bool = True) -> Any:
        """Entry point: set the generation/simulation flags, then run the flow."""
        self.gen_wrapper = gen_wrapper
        self.gen_tb = gen_tb
        self.run_sims = run_sims
        return self.run_flow(bprj, impl_lib, impl_cell, load_results, extract)
================================================
FILE: bag/tech/LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2018, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: bag/tech/__init__.py
================================================
# -*- coding: utf-8 -*-
"""This package contains various technology related utilities, such as transistor characterization.
"""
================================================
FILE: bag/tech/core.py
================================================
# -*- coding: utf-8 -*-
"""This module contains commonly used technology related classes and functions.
"""
import os
import abc
import itertools
from typing import List, Union, Tuple, Dict, Any, Optional, Set
import numpy as np
import h5py
import openmdao.api as omdao
from bag.core import BagProject
from ..math.interpolate import interpolate_grid
from bag.math.dfun import VectorDiffFunction, DiffFunction
from ..mdao.core import GroupBuilder
from ..io import fix_string, to_bytes
from ..simulation.core import SimulationManager
def _equal(a, b, rtol, atol):
    """Return True if a == b, where a and b are strings, floats or numpy arrays."""
    # python 2/3 compatibility: convert raw bytes to string
    a = fix_string(a)
    b = fix_string(b)
    # strings compare exactly; numeric data compares with tolerances
    return a == b if isinstance(a, str) else np.allclose(a, b, rtol=rtol, atol=atol)
def _equal_list(a, b, rtol, atol):
    """Return True if a == b element-wise; a and b are lists of strings/floats/arrays."""
    # unequal lengths can never match; zip() below would silently truncate
    return len(a) == len(b) and all(
        _equal(x, y, rtol, atol) for x, y in zip(a, b))
def _index_in_list(item_list, item, rtol, atol):
    """Return the index of item in item_list (tolerance-aware), or -1 if absent."""
    return next((pos for pos, cand in enumerate(item_list)
                 if _equal(cand, item, rtol, atol)), -1)
def _in_list(item_list, item, rtol, atol):
    """Return True if item occurs in item_list, with tolerance checking for floats."""
    return _index_in_list(item_list, item, rtol, atol) != -1
class CircuitCharacterization(SimulationManager, metaclass=abc.ABCMeta):
    """A class that handles characterization of a circuit.

    This class sweeps schematic parameters and runs a testbench with a single
    analysis.  It then saves the simulation data in a format CharDB
    understands.

    For now, this class will overwrite existing data, so please backup if you
    need to.

    Parameters
    ----------
    prj : BagProject
        the BagProject instance.
    spec_file : str
        the SimulationManager specification file.
    tb_type : str
        the testbench type name.  The parameter dictionary corresponding to
        this testbench should have the following entries (in addition to
        those required by SimulationManager):

        outputs :
            list of testbench output names to save.
        constants :
            constant values used to identify this simulation run.
        sweep_params:
            a dictionary from testbench parameters to (start, stop, num_points)
            sweep tuple.

    compression : str
        HDF5 compression method.
    """
    def __init__(self, prj, spec_file, tb_type, compression='gzip'):
        super(CircuitCharacterization, self).__init__(prj, spec_file)
        self._compression = compression
        self._outputs = self.specs[tb_type]['outputs']
        self._constants = self.specs[tb_type]['constants']
        self._sweep_params = self.specs[tb_type]['sweep_params']

    def record_results(self, data, tb_type, val_list):
        # type: (Dict[str, Any], str, Tuple[Any, ...]) -> None
        """Record simulation results to file.

        Override implementation in SimulationManager in order to save data
        in a format that CharDB understands.
        """
        env_list = self.specs['sim_envs']
        tb_specs = self.specs[tb_type]
        results_dir = tb_specs['results_dir']
        os.makedirs(results_dir, exist_ok=True)
        fname = os.path.join(results_dir, 'data.hdf5')
        with h5py.File(fname, 'w') as f:
            # identify this run by its constants and sweep setup
            for key, val in self._constants.items():
                f.attrs[key] = val
            for key, val in self._sweep_params.items():
                f.attrs[key] = val
            for env in env_list:
                env_result, sweep_list = self._get_env_result(data, env)
                # groups are numbered sequentially: 0, 1, 2, ...
                grp = f.create_group('%d' % len(f))
                for key, val in zip(self.swp_var_list, val_list):
                    grp.attrs[key] = val
                # h5py workaround: explicitly store strings as encoded unicode data
                grp.attrs['env'] = to_bytes(env)
                grp.attrs['sweep_params'] = [to_bytes(swp) for swp in sweep_list]
                for name, val in env_result.items():
                    grp.create_dataset(name, data=val, compression=self._compression)

    def get_sim_results(self, tb_type, val_list):
        # type: (str, Tuple[Any, ...]) -> Dict[str, Any]
        # TODO: implement this.
        raise NotImplementedError('not implemented yet.')

    def _get_env_result(self, sim_results, env):
        """Extract results from a given simulation environment from the given data.

        all output sweep parameter order and data shape must be the same.

        Parameters
        ----------
        sim_results : dict[string, any]
            the simulation results dictionary
        env : str
            the target simulation environment

        Returns
        -------
        results : dict[str, any]
            the results from a given simulation environment.
        sweep_list : list[str]
            a list of sweep parameter order.
        """
        if 'corner' not in sim_results:
            # no corner sweep anyways
            results = {output: sim_results[output] for output in self._outputs}
            sweep_list = sim_results['sweep_params'][self._outputs[0]]
            return results, sweep_list

        corner_list = sim_results['corner'].tolist()
        results = {}
        # we know all sweep order and shape is the same.
        test_name = self._outputs[0]
        sweep_list = list(sim_results['sweep_params'][test_name])
        shape = sim_results[test_name].shape
        # make numpy array slice index list
        index_list = [slice(0, l) for l in shape]
        if 'corner' in sweep_list:
            # select the requested corner and drop that axis from the sweep
            idx = sweep_list.index('corner')
            index_list[idx] = corner_list.index(env)
            del sweep_list[idx]

        # store outputs in results
        for output in self._outputs:
            # bug fix: numpy multi-dimensional indexing requires a tuple;
            # indexing with a list of slices/ints was deprecated and later
            # removed from numpy.
            results[output] = sim_results[output][tuple(index_list)]
        return results, sweep_list
class CharDB(abc.ABC):
"""The abstract base class of a database of characterization data.
This class provides useful query/optimization methods and ways to store/retrieve
data.
Parameters
----------
root_dir : str
path to the root characterization data directory. Supports environment variables.
constants : Dict[str, Any]
constants dictionary.
discrete_params : List[str]
a list of parameters that should take on discrete values.
init_params : Dict[str, Any]
a dictionary of initial parameter values. All parameters should be specified,
and None should be used if the parameter value is not set.
env_list : List[str]
list of simulation environments to consider.
update : bool
By default, CharDB saves and load post-processed data directly. If update is True,
CharDB will update the post-process data from raw simulation data. Defaults to
False.
rtol : float
relative tolerance used to compare constants/sweep parameters/sweep attributes.
atol : float
relative tolerance used to compare constants/sweep parameters/sweep attributes.
compression : str
HDF5 compression method. Used only during post-processing.
method : str
interpolation method.
opt_package : str
default Python optimization package. Supports 'scipy' or 'pyoptsparse'. Defaults
to 'scipy'.
opt_method : str
default optimization method. Valid values depends on the optimization package.
Defaults to 'SLSQP'.
opt_settings : Optional[Dict[str, Any]]
optimizer specific settings.
"""
    def __init__(self,  # type: CharDB
                 root_dir,  # type: str
                 constants,  # type: Dict[str, Any]
                 discrete_params,  # type: List[str]
                 init_params,  # type: Dict[str, Any]
                 env_list,  # type: List[str]
                 update=False,  # type: bool
                 rtol=1e-5,  # type: float
                 atol=1e-18,  # type: float
                 compression='gzip',  # type: str
                 method='spline',  # type: str
                 opt_package='scipy',  # type: str
                 opt_method='SLSQP',  # type: str
                 opt_settings=None,  # type: Optional[Dict[str, Any]]
                 **kwargs
                 ):
        # type: (...) -> None
        """Load (or build and cache) the characterization database.

        See the class docstring for parameter descriptions.  Either loads
        post-processed data from the cache file, or (when the cache is
        missing or ``update`` is True) rebuilds it from the raw simulation
        file and writes a new cache.
        """
        root_dir = os.path.abspath(os.path.expandvars(root_dir))
        if not os.path.isdir(root_dir):
            # error checking
            raise ValueError('Directory %s not found.' % root_dir)
        # 'env' is handled separately from the other discrete parameters
        if 'env' in discrete_params:
            discrete_params.remove('env')
        if opt_settings is None:
            opt_settings = {}
        else:
            pass
        if opt_method == 'IPOPT' and not opt_settings:
            # set default IPOPT settings
            opt_settings['option_file_name'] = ''
        self._discrete_params = discrete_params
        self._params = init_params.copy()
        self._env_list = env_list
        self._config = dict(opt_package=opt_package,
                            opt_method=opt_method,
                            opt_settings=opt_settings,
                            rtol=rtol,
                            atol=atol,
                            method=method,
                            )
        cache_fname = self.get_cache_file(root_dir, constants)
        if not os.path.isfile(cache_fname) or update:
            # no cache (or forced update): post-process the raw simulation data
            sim_fname = self.get_sim_file(root_dir, constants)
            results = self._load_sim_data(sim_fname, constants, discrete_params)
            sim_data, total_params, total_values, self._constants = results
            self._data = self.post_process_data(sim_data, total_params, total_values, self._constants)
            # save to cache
            with h5py.File(cache_fname, 'w') as f:
                for key, val in self._constants.items():
                    f.attrs[key] = val
                sp_grp = f.create_group('sweep_params')
                # h5py workaround: explicitly store strings as encoded unicode data
                sp_grp.attrs['sweep_order'] = [to_bytes(swp) for swp in total_params]
                for par, val_list in zip(total_params, total_values):
                    if val_list.dtype.kind == 'U':
                        # unicode array, convert to raw bytes array
                        val_list = val_list.astype('S')
                    sp_grp.create_dataset(par, data=val_list, compression=compression)
                data_grp = f.create_group('data')
                for name, data_arr in self._data.items():
                    data_grp.create_dataset(name, data=data_arr, compression=compression)
        else:
            # load from cache
            with h5py.File(cache_fname, 'r') as f:
                self._constants = dict(iter(f.attrs.items()))
                sp_grp = f['sweep_params']
                total_params = [fix_string(swp) for swp in sp_grp.attrs['sweep_order']]
                total_values = [self._convert_hdf5_array(sp_grp[par][()]) for par in total_params]
                data_grp = f['data']
                self._data = {name: data_grp[name][()] for name in data_grp}
        # change axes location so discrete parameters are at the start of sweep_params
        env_disc_params = ['env'] + discrete_params
        for idx, dpar in enumerate(env_disc_params):
            if total_params[idx] != dpar:
                # swap: move dpar into position idx, swapping the data axes to match
                didx = total_params.index(dpar)
                ptmp = total_params[idx]
                vtmp = total_values[idx]
                total_params[idx] = total_params[didx]
                total_values[idx] = total_values[didx]
                total_params[didx] = ptmp
                total_values[didx] = vtmp
                for key, val in self._data.items():
                    self._data[key] = np.swapaxes(val, idx, didx)
        # layout is now [env, *discrete_params, *continuous params]
        sidx = len(self._discrete_params) + 1
        self._cont_params = total_params[sidx:]
        self._cont_values = total_values[sidx:]
        self._discrete_values = total_values[1:sidx]
        self._env_values = total_values[0]
        # get lazy function table: one object-array cell per (env, discrete...) index,
        # filled on demand by _get_function_helper().
        shape = [total_values[idx].size for idx in range(len(env_disc_params))]
        fun_name_iter = itertools.chain(iter(self._data.keys()), self.derived_parameters())
        # noinspection PyTypeChecker
        self._fun = {name: np.full(shape, None, dtype=object) for name in fun_name_iter}
@staticmethod
def _convert_hdf5_array(arr):
# type: (np.ndarray) -> np.ndarray
"""Check if raw bytes array, if so convert to unicode array."""
if arr.dtype.kind == 'S':
return arr.astype('U')
return arr
def _load_sim_data(self, # type: CharDB
fname, # type: str
constants, # type: Dict[str, Any]
discrete_params # type: List[str]
):
# type: (...) -> Tuple[Dict[str, np.ndarray], List[str], List[np.ndarray], Dict[str, Any]]
"""Returns the simulation data.
Parameters
----------
fname : str
the simulation filename.
constants : Dict[str, Any]
the constants dictionary.
discrete_params : List[str]
a list of parameters that should take on discrete values.
Returns
-------
data_dict : Dict[str, np.ndarray]
a dictionary from output name to data as numpy array.
master_attrs : List[str]
list of attribute name for each dimension of numpy array.
master_values : List[np.ndarray]
list of attribute values for each dimension.
file_constants : Dict[str, Any]
the constants dictionary in file.
"""
if not os.path.exists(fname):
raise ValueError('Simulation file %s not found.' % fname)
rtol, atol = self.get_config('rtol'), self.get_config('atol') # type: float
master_attrs = None
master_values = None
master_dict = None
file_constants = None
with h5py.File(fname, 'r') as f:
# check constants is consistent
for key, val in constants.items():
if not _equal(val, f.attrs[key], rtol, atol):
raise ValueError('sim file attr %s = %s != %s' % (key, f.attrs[key], val))
# simple error checking.
if len(f) == 0:
raise ValueError('simulation file has no data.')
# check that attributes sweep forms regular grid.
attr_table = {}
for gname in f:
grp = f[gname]
for key, val in grp.attrs.items():
# convert raw bytes to unicode
# python 2/3 compatibility: convert raw bytes to string
val = fix_string(val)
if key != 'sweep_params':
if key not in attr_table:
attr_table[key] = []
val_list = attr_table[key]
if not _in_list(val_list, val, rtol, atol):
val_list.append(val)
expected_len = 1
for val in attr_table.values():
expected_len *= len(val)
if expected_len != len(f):
raise ValueError('Attributes of f does not form complete sweep. '
'Expect length = %d, but actually = %d.' % (expected_len, len(f)))
# check all discrete parameters in attribute table.
for disc_par in discrete_params:
if disc_par not in attr_table:
raise ValueError('Discrete attribute %s not found' % disc_par)
# get attribute order
attr_order = sorted(attr_table.keys())
# check all non-discrete attribute value list lies on regular grid
attr_values = [np.array(sorted(attr_table[attr])) for attr in attr_order]
for attr, aval_list in zip(attr_order, attr_values):
if attr not in discrete_params and attr != 'env':
test_vec = np.linspace(aval_list[0], aval_list[-1], len(aval_list), endpoint=True)
if not np.allclose(test_vec, aval_list, rtol=rtol, atol=atol):
raise ValueError('Attribute %s values do not lie on regular grid' % attr)
# consolidate all data into one giant numpy array.
# first compute numpy array shape
test_grp = f['0']
sweep_params = [fix_string(tmpvar) for tmpvar in test_grp.attrs['sweep_params']]
# get constants dictionary
file_constants = {}
for key, val in f.attrs.items():
if key not in sweep_params:
file_constants[key] = val
master_attrs = attr_order + sweep_params
swp_values = [np.linspace(f.attrs[var][0], f.attrs[var][1], f.attrs[var][2],
endpoint=True) for var in sweep_params] # type: List[np.array]
master_values = attr_values + swp_values
master_shape = [len(val_list) for val_list in master_values]
master_index = [slice(0, n) for n in master_shape]
master_dict = {}
for gname in f:
grp = f[gname]
# get index of the current group in the giant array.
# Note: using linear search to compute index now, but attr_val_list should be small.
for aidx, (attr, aval_list) in enumerate(zip(attr_order, attr_values)):
master_index[aidx] = _index_in_list(aval_list, grp.attrs[attr], rtol, atol)
for output in grp:
dset = grp[output]
if output not in master_dict:
master_dict[output] = np.empty(master_shape, dtype=dset.dtype)
master_dict[output][master_index] = dset
return master_dict, master_attrs, master_values, file_constants
def __getitem__(self, param):
# type: (str) -> Any
"""Returns the given parameter value.
Parameters
----------
param : str
parameter name.
Returns
-------
val : Any
parameter value.
"""
return self._params[param]
def __setitem__(self, key, value):
# type: (str, Any) -> None
"""Sets the given parameter value.
Parameters
----------
key : str
parameter name.
value : Any
parameter value. None to unset.
"""
rtol, atol = self.get_config('rtol'), self.get_config('atol')
if key in self._discrete_params:
if value is not None:
idx = self._discrete_params.index(key)
if not _in_list(self._discrete_values[idx], value, rtol, atol):
raise ValueError('Cannot set discrete variable %s value to %s' % (key, value))
elif key in self._cont_params:
if value is not None:
idx = self._cont_params.index(key)
val_list = self._cont_values[idx]
if value < val_list[0] or value > val_list[-1]:
raise ValueError('Variable %s value %s out of bounds.' % (key, value))
else:
raise ValueError('Unknown variable %s.' % key)
self._params[key] = value
def get_config(self, name):
# type: (str) -> Any
"""Returns the configuration value.
Parameters
----------
name : str
configuration name.
Returns
-------
val : Any
configuration value.
"""
return self._config[name]
def set_config(self, name, value):
# type: (str, Any) -> None
"""Sets the configuration value.
Parameters
----------
name : str
configuration name.
value : Any
configuration value.
"""
if name not in self._config:
raise ValueError('Unknown configuration %s' % name)
self._config[name] = value
@property
def env_list(self):
# type: () -> List[str]
"""The list of simulation environments to consider."""
return self._env_list
@env_list.setter
def env_list(self, new_env_list):
# type: (List[str]) -> None
"""Sets the list of simulation environments to consider."""
self._env_list = new_env_list
@classmethod
def get_sim_file(cls, root_dir, constants):
# type: (str, Dict[str, Any]) -> str
"""Returns the simulation data file name.
Parameters
----------
root_dir : str
absolute path to the root characterization data directory.
constants : Dict[str, Any]
constants dictionary.
Returns
-------
fname : str
the simulation data file name.
"""
raise NotImplementedError('Not implemented')
@classmethod
def get_cache_file(cls, root_dir, constants):
# type: (str, Dict[str, Any]) -> str
"""Returns the post-processed characterization data file name.
Parameters
----------
root_dir : str
absolute path to the root characterization data directory.
constants : Dict[str, Any]
constants dictionary.
Returns
-------
fname : str
the post-processed characterization data file name.
"""
raise NotImplementedError('Not implemented')
@classmethod
def post_process_data(cls, sim_data, sweep_params, sweep_values, constants):
# type: (Dict[str, np.ndarray], List[str], List[np.ndarray], Dict[str, Any]) -> Dict[str, np.ndarray]
"""Postprocess simulation data.
Parameters
----------
sim_data : Dict[str, np.ndarray]
the simulation data as a dictionary from output name to numpy array.
sweep_params : List[str]
list of parameter name for each dimension of numpy array.
sweep_values : List[np.ndarray]
list of parameter values for each dimension.
constants : Dict[str, Any]
the constants dictionary.
Returns
-------
data : Dict[str, np.ndarray]
a dictionary of post-processed data.
"""
raise NotImplementedError('Not implemented')
@classmethod
def derived_parameters(cls):
# type: () -> List[str]
"""Returns a list of derived parameters."""
return []
@classmethod
def compute_derived_parameters(cls, fdict):
# type: (Dict[str, DiffFunction]) -> Dict[str, DiffFunction]
"""Compute derived parameter functions.
Parameters
----------
fdict : Dict[str, DiffFunction]
a dictionary from core parameter name to the corresponding function.
Returns
-------
deriv_dict : Dict[str, DiffFunction]
a dictionary from derived parameter name to the corresponding function.
"""
return {}
def _get_function_index(self, **kwargs):
# type: (Any) -> List[int]
"""Returns the function index corresponding to given discrete parameter values.
simulation environment index will be set to 0
Parameters
----------
**kwargs :
discrete parameter values.
Returns
-------
fidx_list : List[int]
the function index.
"""
rtol, atol = self.get_config('rtol'), self.get_config('atol')
fidx_list = [0]
for par, val_list in zip(self._discrete_params, self._discrete_values):
val = kwargs.get(par, self[par])
if val is None:
raise ValueError('Parameter %s value not specified' % par)
val_idx = _index_in_list(val_list, val, rtol, atol)
if val_idx < 0:
raise ValueError('Discrete parameter %s have illegal value %s' % (par, val))
fidx_list.append(val_idx)
return fidx_list
def _get_function_helper(self, name, fidx_list):
# type: (str, Union[List[int], Tuple[int]]) -> DiffFunction
"""Helper method for get_function()
Parameters
----------
name : str
name of the function.
fidx_list : Union[List[int], Tuple[int]]
function index.
Returns
-------
fun : DiffFunction
the interpolator function.
"""
# get function table index
fidx_list = tuple(fidx_list)
ftable = self._fun[name]
if ftable[fidx_list] is None:
if name in self._data:
# core parameter
char_data = self._data[name]
# get scale list and data index
scale_list = []
didx = list(fidx_list) # type: List[Union[int, slice]]
for vec in self._cont_values:
scale_list.append((vec[0], vec[1] - vec[0]))
didx.append(slice(0, vec.size))
# make interpolator.
cur_data = char_data[didx]
method = self.get_config('method')
ftable[fidx_list] = interpolate_grid(scale_list, cur_data, method=method, extrapolate=True)
else:
# derived parameter
core_fdict = {fn: self._get_function_helper(fn, fidx_list) for fn in self._data}
deriv_fdict = self.compute_derived_parameters(core_fdict)
for fn, deriv_fun in deriv_fdict.items():
self._fun[fn][fidx_list] = deriv_fun
return ftable[fidx_list]
def get_function(self, name, env='', **kwargs):
# type: (str, str, **Any) -> Union[VectorDiffFunction, DiffFunction]
"""Returns a function for the given output.
Parameters
----------
name : str
name of the function.
env : str
if not empty, we will return function for just the given simulation environment.
**kwargs : Any
dictionary of discrete parameter values.
Returns
-------
output : Union[VectorDiffFunction, DiffFunction]
the output vector function.
"""
fidx_list = self._get_function_index(**kwargs)
if not env:
fun_list = []
for env in self.env_list:
occur_list = np.where(self._env_values == env)[0]
if occur_list.size == 0:
raise ValueError('environment %s not found.')
env_idx = occur_list[0]
fidx_list[0] = env_idx
fun_list.append(self._get_function_helper(name, fidx_list))
return VectorDiffFunction(fun_list)
else:
occur_list = np.where(self._env_values == env)[0]
if occur_list.size == 0:
raise ValueError('environment %s not found.')
env_idx = occur_list[0]
fidx_list[0] = env_idx
return self._get_function_helper(name, fidx_list)
def get_fun_sweep_params(self):
# type: () -> Tuple[List[str], List[Tuple[float, float]]]
"""Returns interpolation function sweep parameter names and values.
Returns
-------
sweep_params : List[str]
list of parameter names.
sweep_range : List[Tuple[float, float]]
list of parameter range
"""
return self._cont_params, [(vec[0], vec[-1]) for vec in self._cont_values]
def _get_fun_arg(self, **kwargs):
# type: (Any) -> np.ndarray
"""Make numpy array of interpolation function arguments."""
val_list = []
for par in self._cont_params:
val = kwargs.get(par, self[par])
if val is None:
raise ValueError('Parameter %s value not specified.' % par)
val_list.append(val)
return np.array(val_list)
def query(self, **kwargs):
# type: (Any) -> Dict[str, np.ndarray]
"""Query the database for the values associated with the given parameters.
All parameters must be specified.
Parameters
----------
**kwargs :
parameter values.
Returns
-------
results : Dict[str, np.ndarray]
the characterization results.
"""
results = {}
arg = self._get_fun_arg(**kwargs)
for name in self._data:
fun = self.get_function(name, **kwargs)
results[name] = fun(arg)
for var in itertools.chain(self._discrete_params, self._cont_params):
results[var] = kwargs.get(var, self[var])
results.update(self.compute_derived_parameters(results))
return results
def minimize(self, # type: CharDB
objective, # type: str
define=None, # type: List[Tuple[str, int]]
cons=None, # type: Dict[str, Dict[str, float]]
vector_params=None, # type: Set[str]
debug=False, # type: bool
**kwargs
):
# type: (...) -> Dict[str, Union[np.ndarray, float]]
"""Find operating point that minimizes the given objective.
Parameters
----------
objective : str
the objective to minimize. Must be a scalar.
define : List[Tuple[str, int]]
list of expressions to define new variables. Each
element of the list is a tuple of string and integer. The string
contains a python assignment that computes the variable from
existing ones, and the integer indicates the variable shape.
Note that define can also be used to enforce relationships between
existing variables. Using transistor as an example, defining
'vgs = vds' will force the vgs of vds of the transistor to be
equal.
cons : Dict[str, Dict[str, float]]
a dictionary from variable name to constraints of that variable.
see OpenMDAO documentations for details on constraints.
vector_params : Set[str]
set of input variables that are vector instead of scalar. An input
variable is a vector if it can change across simulation environments.
debug : bool
True to enable debugging messages. Defaults to False.
**kwargs :
known parameter values.
Returns
-------
results : Dict[str, Union[np.ndarray, float]]
the results dictionary.
"""
cons = cons or {}
fidx_list = self._get_function_index(**kwargs)
builder = GroupBuilder()
params_ranges = dict(zip(self._cont_params,
((vec[0], vec[-1]) for vec in self._cont_values)))
# add functions
fun_name_iter = itertools.chain(iter(self._data.keys()), self.derived_parameters())
for name in fun_name_iter:
fun_list = []
for idx, env in enumerate(self.env_list):
fidx_list[0] = idx
fun_list.append(self._get_function_helper(name, fidx_list))
builder.add_fun(name, fun_list, self._cont_params, params_ranges,
vector_params=vector_params)
# add expressions
for expr, ndim in define:
builder.add_expr(expr, ndim)
# update input bounds from constraints
input_set = builder.get_inputs()
var_list = builder.get_variables()
for name in input_set:
if name in cons:
setup = cons[name]
if 'equals' in setup:
eq_val = setup['equals']
builder.set_input_limit(name, equals=eq_val)
else:
vmin = vmax = None
if 'lower' in setup:
vmin = setup['lower']
if 'upper' in setup:
vmax = setup['upper']
builder.set_input_limit(name, lower=vmin, upper=vmax)
# build the group and make the problem
grp, input_bounds = builder.build()
top = omdao.Problem()
top.root = grp
opt_package = self.get_config('opt_package') # type: str
opt_settings = self.get_config('opt_settings')
if opt_package == 'scipy':
driver = top.driver = omdao.ScipyOptimizer()
print_opt_name = 'disp'
elif opt_package == 'pyoptsparse':
driver = top.driver = omdao.pyOptSparseDriver()
print_opt_name = 'print_results'
else:
raise ValueError('Unknown optimization package: %s' % opt_package)
driver.options['optimizer'] = self.get_config('opt_method')
driver.options[print_opt_name] = debug
driver.opt_settings.update(opt_settings)
# add constraints
constants = {}
for name, setup in cons.items():
if name not in input_bounds:
# add constraint
driver.add_constraint(name, **setup)
# add inputs
for name in input_set:
eq_val, lower, upper, ndim = input_bounds[name]
val = kwargs.get(name, self[name]) # type: float
if val is None:
val = eq_val
comp_name = 'comp__%s' % name
if val is not None:
val = np.atleast_1d(np.ones(ndim) * val)
constants[name] = val
top.root.add(comp_name, omdao.IndepVarComp(name, val=val), promotes=[name])
else:
avg = (lower + upper) / 2.0
span = upper - lower
val = np.atleast_1d(np.ones(ndim) * avg)
top.root.add(comp_name, omdao.IndepVarComp(name, val=val), promotes=[name])
driver.add_desvar(name, lower=lower, upper=upper, adder=-avg, scaler=1.0 / span)
# driver.add_desvar(name, lower=lower, upper=upper)
# add objective and setup
driver.add_objective(objective)
top.setup(check=debug)
# somehow html file is not viewable.
if debug:
omdao.view_model(top, outfile='CharDB_debug.html')
# set constants
for name, val in constants.items():
top[name] = val
top.run()
results = {var: kwargs.get(var, self[var]) for var in self._discrete_params}
for var in var_list:
results[var] = top[var]
return results
================================================
FILE: bag/tech/mos.py
================================================
# -*- coding: utf-8 -*-
"""This module contains transistor characterization and optimization related classes.
"""
import os
# import pyoptsparse
import numpy as np
from .core import CharDB
class MosCharDB(CharDB):
"""The mosfet characterization database.
This class holds transistor characterization data and provides useful query methods.
Parameters
----------
root_dir : str
path to the root characterization data directory.
mos_type : str
the transistor type. Either 'pch' or 'nch'.
discrete_params : list[str]
a list of parameters that should take on discrete values instead of being interpolated.
Usually intent, length, or transistor width (for finfets).
env_list : list[str]
list of simulation environments to consider.
update : bool
True to update post-processed data from raw simulation data.
intent : str or None
the threshold flavor name.
l : float or None
the channel length, in meters.
w : int or float or None
the transistor width, in meters or number of fins.
vgs : float or None
the Vgs voltage.
vds : float or None
the Vds voltage.
vbs : float or None
the Vbs voltage.
**kwargs :
additional characterization database parameters. See documentation for CharDB.
"""
_raw_data_names = ['ids', 'y11', 'y12', 'y13', 'y21', 'y22', 'y23', 'y31', 'y32', 'y33']
def __init__(self, root_dir, mos_type, discrete_params, env_list,
intent=None, l=None, w=None, vgs=None, vds=None, vbs=None,
**kwargs):
constants = dict(mos_type=mos_type)
init_params = dict(intent=intent, l=l, w=w, vgs=vgs, vds=vds, vbs=vbs)
CharDB.__init__(self, root_dir, constants, discrete_params, init_params, env_list, **kwargs)
@classmethod
def get_sim_file(cls, root_dir, constants):
"""Returns the simulation data file name.
Parameters
----------
root_dir : str
absolute path to the root characterization data directory.
constants : dict[string, any]
constants dictionary.
Returns
-------
fname : str
the simulation data file name.
"""
return os.path.join(root_dir, '%s.hdf5' % constants['mos_type'])
@classmethod
def get_cache_file(cls, root_dir, constants):
"""Returns the post-processed characterization data file name.
Parameters
----------
root_dir : str
absolute path to the root characterization data directory.
constants : dict[string, any]
constants dictionary.
Returns
-------
fname : str
the post-processed characterization data file name.
"""
return os.path.join(root_dir, '%s__%s.hdf5' % (constants['mos_type'], cls.__name__))
@classmethod
def post_process_data(cls, sim_data, sweep_params, sweep_values, constants):
    """Postprocess simulation data into per-finger small-signal parameters.

    Parameters
    ----------
    sim_data : dict[string, np.array]
        the simulation data as a dictionary from output name to numpy array.
    sweep_params : list[str]
        list of parameter name for each dimension of numpy array.
    sweep_values : list[numpy.array]
        list of parameter values for each dimension.
    constants : dict[string, any]
        the constants dictionary; must contain 'char_freq' and 'fg'.

    Returns
    -------
    data : dict[str, np.array]
        a dictionary of post-processed data, normalized per finger.
    """
    # angular characterization frequency and number of fingers.
    omega = 2 * np.pi * constants['char_freq']
    num_fg = constants['fg']

    y11, y12, y13 = sim_data['y11'], sim_data['y12'], sim_data['y13']
    y21, y22, y23 = sim_data['y21'], sim_data['y22'], sim_data['y23']
    y31, y32, y33 = sim_data['y31'], sim_data['y32'], sim_data['y33']

    # transconductances from the real parts of the Y parameters.
    gm = (y21.real - y31.real) / 2.0
    gds = (y22.real - y32.real) / 2.0
    gb = (y33.real - y23.real) / 2.0 - gm - gds

    # capacitances from the imaginary parts, averaging reciprocal entries.
    cgd = -0.5 / omega * (y12.imag + y21.imag)
    cgs = -0.5 / omega * (y13.imag + y31.imag)
    cds = -0.5 / omega * (y23.imag + y32.imag)
    cgb = y11.imag / omega - cgd - cgs
    cdb = y22.imag / omega - cds - cgd
    csb = y33.imag / omega - cgs - cds

    # normalize every quantity to a single finger.
    raw = dict(ids=sim_data['ids'], gm=gm, gds=gds, gb=gb, cgd=cgd,
               cgs=cgs, cds=cds, cgb=cgb, cdb=cdb, csb=csb)
    return {name: arr / num_fg for name, arr in raw.items()}
@classmethod
def derived_parameters(cls):
    """Returns the list of parameter names derived from the core small-signal data."""
    return ['cgg', 'cdd', 'css', 'cbb', 'vstar', 'gain', 'ft']
@classmethod
def compute_derived_parameters(cls, fdict):
    """Compute derived parameter functions.

    Parameters
    ----------
    fdict : dict[string, bag.math.dfun.DiffFunction]
        a dictionary from core parameter name to the corresponding function.

    Returns
    -------
    deriv_dict : dict[str, bag.math.dfun.DiffFunction]
        a dictionary from derived parameter name to the corresponding function.
    """
    cgd, cgs, cgb = fdict['cgd'], fdict['cgs'], fdict['cgb']
    cds, cdb, csb = fdict['cds'], fdict['cdb'], fdict['csb']
    gm = fdict['gm']
    # total gate capacitance; reused for the transit frequency below.
    cgg = cgd + cgs + cgb
    result = {
        'cgg': cgg,
        'cdd': cgd + cds + cdb,
        'css': cgs + cds + csb,
        'cbb': cgb + cdb + csb,
        'vstar': 2.0 * (fdict['ids'] / gm),
        'gain': gm / fdict['gds'],
        'ft': gm / (2.0 * np.pi * cgg),
    }
    return result
class MosCharGDDB(CharDB):
    """The mosfet characterization database for two-port (gate/drain) measurements.

    This class holds transistor characterization data and provides useful query methods.

    Parameters
    ----------
    root_dir : str
        path to the root characterization data directory.
    mos_type : str
        the transistor type.  Either 'pch' or 'nch'.
    discrete_params : list[str]
        a list of parameters that should take on discrete values instead of being
        interpolated.  Usually intent, length, or transistor width (for finfets).
    env_list : list[str]
        list of simulation environments to consider.
    update : bool
        True to update post-processed data from raw simulation data.
    intent : str or None
        the threshold flavor name.
    l : float or None
        the channel length, in meters.
    w : int or float or None
        the transistor width, in meters or number of fins.
    vgs : float or None
        the Vgs voltage.
    vds : float or None
        the Vds voltage.
    **kwargs :
        additional characterization database parameters.  See documentation for CharDB.
    """
    # Raw simulation outputs: drain current plus the 2x2 Y-parameter matrix.
    _raw_data_names = ['ids', 'y11', 'y12', 'y21', 'y22']

    def __init__(self, root_dir, mos_type, discrete_params, env_list,
                 intent=None, l=None, w=None, vgs=None, vds=None,
                 **kwargs):
        fixed_constants = dict(mos_type=mos_type)
        initial_values = dict(intent=intent, l=l, w=w, vgs=vgs, vds=vds)
        CharDB.__init__(self, root_dir, fixed_constants, discrete_params,
                        initial_values, env_list, **kwargs)

    @classmethod
    def get_sim_file(cls, root_dir, constants):
        """Returns the simulation data file name for the given transistor type.

        Parameters
        ----------
        root_dir : str
            absolute path to the root characterization data directory.
        constants : dict[string, any]
            constants dictionary; must contain the 'mos_type' entry.

        Returns
        -------
        fname : str
            the simulation data file name.
        """
        fname = '%s.hdf5' % constants['mos_type']
        return os.path.join(root_dir, fname)

    @classmethod
    def get_cache_file(cls, root_dir, constants):
        """Returns the post-processed characterization data file name.

        Parameters
        ----------
        root_dir : str
            absolute path to the root characterization data directory.
        constants : dict[string, any]
            constants dictionary; must contain the 'mos_type' entry.

        Returns
        -------
        fname : str
            the post-processed characterization data file name.
        """
        base = '%s__%s.hdf5' % (constants['mos_type'], cls.__name__)
        return os.path.join(root_dir, base)

    @classmethod
    def post_process_data(cls, sim_data, sweep_params, sweep_values, constants):
        """Postprocess simulation data into per-finger small-signal parameters.

        Parameters
        ----------
        sim_data : dict[string, np.array]
            the simulation data as a dictionary from output name to numpy array.
        sweep_params : list[str]
            list of parameter name for each dimension of numpy array.
        sweep_values : list[numpy.array]
            list of parameter values for each dimension.
        constants : dict[string, any]
            the constants dictionary; must contain 'char_freq' and 'fg'.

        Returns
        -------
        data : dict[str, np.array]
            a dictionary of post-processed data, normalized per finger.
        """
        omega = 2 * np.pi * constants['char_freq']
        num_fg = constants['fg']

        y11, y12 = sim_data['y11'], sim_data['y12']
        y21, y22 = sim_data['y21'], sim_data['y22']

        # conductances from the real parts of the Y parameters.
        gm = y21.real
        gds = y22.real
        # capacitances from the imaginary parts.
        cgd = -0.5 / omega * (y12.imag + y21.imag)
        cgs = y11.imag / omega - cgd
        cds = y22.imag / omega - cgd

        # normalize every quantity to a single finger.
        raw = dict(ids=sim_data['ids'], gm=gm, gds=gds, cgd=cgd, cgs=cgs, cds=cds)
        return {name: arr / num_fg for name, arr in raw.items()}

    @classmethod
    def derived_parameters(cls):
        """Returns the list of parameter names derived from the core small-signal data."""
        return ['cgg', 'cdd', 'vstar', 'gain', 'ft']

    @classmethod
    def compute_derived_parameters(cls, fdict):
        """Compute derived parameter functions.

        Parameters
        ----------
        fdict : dict[string, bag.math.dfun.DiffFunction]
            a dictionary from core parameter name to the corresponding function.

        Returns
        -------
        deriv_dict : dict[str, bag.math.dfun.DiffFunction]
            a dictionary from derived parameter name to the corresponding function.
        """
        gm = fdict['gm']
        # total gate capacitance; reused for the transit frequency below.
        cgg = fdict['cgd'] + fdict['cgs']
        return dict(
            cgg=cgg,
            cdd=fdict['cgd'] + fdict['cds'],
            vstar=2.0 * (fdict['ids'] / gm),
            gain=gm / fdict['gds'],
            ft=gm / (2.0 * np.pi * cgg),
        )
================================================
FILE: bag/util/LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2018, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: bag/util/__init__.py
================================================
# -*- coding: utf-8 -*-
"""This package defines various utility classes.
"""
================================================
FILE: bag/util/cache.py
================================================
# -*- coding: utf-8 -*-
"""This module defines classes used to cache existing design masters
"""
from typing import Sequence, Dict, Set, Any, Optional, TypeVar, Type, Callable, Iterable
import sys
import os
import time
import numbers
import importlib
import abc
from collections import OrderedDict
from ..io import readlines_iter, write_file, fix_string
from .search import BinaryIterator
def _get_unique_name(basename, *args):
    # type: (str, *Iterable[str]) -> str
    """Returns a name derived from basename that is absent from all given containers.

    If basename itself is unused it is returned directly; otherwise an integer
    suffix is appended, found via binary search in logarithmic time.

    Parameters
    ----------
    basename : str
        the base name.
    *args :
        a list of containers of used names.

    Returns
    -------
    new_name : str
        the unique name.
    """
    # fast path: the base name itself is not taken anywhere.
    if all(basename not in used_names for used_names in args):
        return basename

    # binary-search for the smallest unused integer suffix.
    bin_iter = BinaryIterator(1, None)
    while bin_iter.has_next():
        candidate = '%s_%d' % (basename, bin_iter.get_next())
        if any(candidate in used_names for used_names in args):
            # candidate taken; try a larger suffix.
            bin_iter.up()
        else:
            # candidate free; remember it and try a smaller suffix.
            bin_iter.save()
            bin_iter.down()

    suffix = bin_iter.get_last_save()
    assert suffix is not None, "No save marker defined"
    return '%s_%d' % (basename, suffix)
class ClassImporter(object):
    """A class that dynamically imports Python class from a definition file.

    This class is used to import design modules to enable code reuse and design
    collaboration.

    Parameters
    ----------
    lib_defs : str
        path to the design library definition file.
    """
    def __init__(self, lib_defs):
        """Create a new design database instance.
        """
        lib_defs = os.path.abspath(lib_defs)
        if not os.path.exists(lib_defs):
            raise Exception("design library definition file %s not found" % lib_defs)

        self.lib_defs = lib_defs
        self.libraries = {}
        for raw_line in readlines_iter(lib_defs):
            entry = raw_line.strip()
            # skip comments and empty lines.
            if not entry or entry.startswith('#'):
                continue
            lib_name, lib_path = entry.split()
            lib_path = os.path.abspath(os.path.expandvars(lib_path))
            check_path = os.path.join(lib_path, lib_name)
            if not os.path.exists(check_path):
                raise Exception('Library %s not found.' % check_path)
            # make sure every library is on python path, so we can import it.
            if lib_path not in sys.path:
                sys.path.append(lib_path)
            self.libraries[lib_name] = lib_path

    def append_library(self, lib_name, lib_path):
        """Adds a new library to the library definition file.

        Parameters
        ----------
        lib_name : str
            name of the library.
        lib_path : str
            path to this library.
        """
        # no-op if the library is already registered.
        if lib_name in self.libraries:
            return
        lib_path = os.path.abspath(lib_path)
        self.libraries[lib_name] = lib_path
        write_file(self.lib_defs, '%s %s\n' % (lib_name, lib_path), append=True)

    def get_library_path(self, lib_name):
        """Returns the location of the given library.

        Parameters
        ----------
        lib_name : str
            the library name.

        Returns
        -------
        lib_path : str or None
            the location of the library, or None if library not defined.
        """
        return self.libraries.get(lib_name, None)

    def get_class(self, lib_name, cell_name):
        """Returns the Python class with the given library and cell name.

        Parameters
        ----------
        lib_name : str
            design module library name.
        cell_name : str
            design module cell name

        Returns
        -------
        cls : class
            the corresponding Python class.
        """
        if lib_name not in self.libraries:
            raise Exception("Library %s not listed in definition "
                            "file %s" % (lib_name, self.lib_defs))
        # module is <lib>.<cell>; class name follows the <lib>__<cell> convention.
        module_name = '%s.%s' % (lib_name, cell_name)
        module_cls = '%s__%s' % (lib_name, cell_name)
        lib_package = importlib.import_module(lib_name)
        cell_package = importlib.import_module(module_name, package=lib_package)
        return getattr(cell_package, module_cls)
class DesignMaster(abc.ABC):
    """A design master instance.

    This class represents a design master in the design database.

    Parameters
    ----------
    master_db : MasterDB
        the master database.
    lib_name : str
        the generated instance library name.
    params : Dict[str, Any]
        the parameters dictionary.
    used_names : Set[str]
        a set of already used cell names.
    **kwargs :
        optional parameters.
    """
    def __init__(self, master_db, lib_name, params, used_names, **kwargs):
        # type: (MasterDB, str, Dict[str, Any], Set[str], **Any) -> None
        self._master_db = master_db
        self._lib_name = lib_name
        self._used_names = used_names
        # set parameters
        params_info = self.get_params_info()
        default_params = self.get_default_param_values()
        self._cell_name = ""  # type: str
        self.params = {}  # type: Dict[str, Any]
        if params_info is None:
            # compatibility with old schematics generators: no parameter
            # metadata available, so copy parameters as-is and leave _key
            # unset until after finalize() (see MasterDB.new_master).
            self.params.update(params)
            self._prelim_key = self.to_immutable_id((self._get_qualified_name(), params))
            self._key = None
        else:
            self.populate_params(params, params_info, default_params, **kwargs)
            # get unique cell name
            self._prelim_key = self.compute_unique_key()
            self.update_master_info()
        # NOTE(review): children appears to be populated by subclasses during
        # finalize(); it is consumed by MasterDB._instantiate_master_helper — confirm.
        self.children = None
        self._finalized = False

    def update_master_info(self):
        # Recompute the unique cell name and the cache key from current params.
        self._cell_name = _get_unique_name(self.get_master_basename(), self._used_names)
        self._key = self.compute_unique_key()

    def populate_params(self, table, params_info, default_params, **kwargs):
        # type: (Dict[str, Any], Dict[str, str], Dict[str, Any], **Any) -> None
        """Fill params dictionary with values from table and default_params"""
        for key, desc in params_info.items():
            if key not in table:
                if key not in default_params:
                    raise ValueError('Parameter %s not specified. Description:\n%s' % (key, desc))
                else:
                    self.params[key] = default_params[key]
            else:
                self.params[key] = table[key]
        # add hidden parameters
        hidden_params = kwargs.get('hidden_params', {})
        for name, value in hidden_params.items():
            self.params[name] = table.get(name, value)

    @classmethod
    def to_immutable_id(cls, val):
        # type: (Any) -> Any
        """Convert the given object to an immutable type for use as keys in dictionary.
        """
        # python 2/3 compatibility: convert raw bytes to string
        val = fix_string(val)
        if val is None or isinstance(val, numbers.Number) or isinstance(val, str):
            return val
        elif isinstance(val, list) or isinstance(val, tuple):
            return tuple((cls.to_immutable_id(item) for item in val))
        elif isinstance(val, dict):
            # sort keys so equal dicts map to equal tuples.
            return tuple(((k, cls.to_immutable_id(val[k])) for k in sorted(val.keys())))
        elif isinstance(val, set):
            return tuple((k for k in sorted(val)))
        elif hasattr(val, 'get_immutable_key') and callable(val.get_immutable_key):
            return val.get_immutable_key()
        else:
            raise Exception('Unrecognized value %s with type %s' % (str(val), type(val)))

    @classmethod
    @abc.abstractmethod
    def get_params_info(cls):
        # type: () -> Optional[Dict[str, str]]
        """Returns a dictionary from parameter names to descriptions.

        Returns
        -------
        param_info : Optional[Dict[str, str]]
            dictionary from parameter names to descriptions.
        """
        return None

    @classmethod
    def get_default_param_values(cls):
        # type: () -> Dict[str, Any]
        """Returns a dictionary containing default parameter values.

        Override this method to define default parameter values.  As good practice,
        you should avoid defining default values for technology-dependent parameters
        (such as channel length, transistor width, etc.), but only define default
        values for technology-independent parameters (such as number of tracks).

        Returns
        -------
        default_params : Dict[str, Any]
            dictionary of default parameter values.
        """
        return {}

    @abc.abstractmethod
    def get_master_basename(self):
        # type: () -> str
        """Returns the base name to use for this instance.

        Returns
        -------
        basename : str
            the base name for this instance.
        """
        return ''

    @abc.abstractmethod
    def get_content(self, lib_name, rename_fun):
        # type: (str, Callable[[str], str]) -> Any
        """Returns the content of this master instance.

        Parameters
        ----------
        lib_name : str
            the library to create the design masters in.
        rename_fun : Callable[[str], str]
            a function that renames design masters.

        Returns
        -------
        content : Any
            the master content data structure.
        """
        return None

    @property
    def master_db(self):
        # type: () -> MasterDB
        """Returns the database used to create design masters."""
        return self._master_db

    @property
    def lib_name(self):
        # type: () -> str
        """The master library name"""
        return self._lib_name

    @property
    def cell_name(self):
        # type: () -> str
        """The master cell name"""
        return self._cell_name

    @property
    def key(self):
        # type: () -> Optional[Any]
        """A unique key representing this master."""
        return self._key

    @property
    def finalized(self):
        # type: () -> bool
        """Returns True if this DesignMaster is finalized."""
        return self._finalized

    @property
    def prelim_key(self):
        # type: () -> Any
        """Returns a preliminary unique key.  For compatibility with old schematic generators."""
        return self._prelim_key

    def _get_qualified_name(self):
        # type: () -> str
        """Returns the qualified name of this class."""
        my_module = self.__class__.__module__
        if my_module is None or my_module == str.__class__.__module__:
            return self.__class__.__name__
        else:
            return my_module + '.' + self.__class__.__name__

    def finalize(self):
        # type: () -> None
        """Finalize this master instance.
        """
        self._finalized = True

    def compute_unique_key(self):
        # type: () -> Any
        """Returns a unique hashable object (usually tuple or string) that represents this instance.

        Returns
        -------
        unique_id : Any
            a hashable unique ID representing the given parameters.
        """
        return self.to_immutable_id((self._get_qualified_name(), self.params))
# Type variable for generic factory methods that create and return DesignMaster subclasses.
MasterType = TypeVar('MasterType', bound=DesignMaster)
class MasterDB(abc.ABC):
    """A database of existing design masters.

    This class keeps track of existing design masters and maintains the design
    dependency hierarchy.

    Parameters
    ----------
    lib_name : str
        the cadence library to put all generated templates in.
    lib_defs : str
        generator library definition file path.  If empty, then assume user supplies
        Python class directly.
    name_prefix : str
        generated master name prefix.
    name_suffix : str
        generated master name suffix.
    """
    def __init__(self, lib_name, lib_defs='', name_prefix='', name_suffix=''):
        # type: (str, str, str, str) -> None
        self._lib_name = lib_name
        self._name_prefix = name_prefix
        self._name_suffix = name_suffix
        self._used_cell_names = set()  # type: Set[str]
        # importer is optional; without a definition file the user must pass
        # generator classes directly to new_master().
        self._importer = ClassImporter(lib_defs) if os.path.isfile(lib_defs) else None
        # maps preliminary keys to final keys (old-style generators only).
        self._key_lookup = {}  # type: Dict[Any, Any]
        # maps final keys to finalized master instances.
        self._master_lookup = {}  # type: Dict[Any, DesignMaster]
        self._rename_dict = {}  # type: Dict[str, str]

    def clear(self):
        """Clear all existing schematic masters."""
        self._key_lookup.clear()
        self._master_lookup.clear()
        self._rename_dict.clear()

    @abc.abstractmethod
    def create_master_instance(self, gen_cls, lib_name, params, used_cell_names, **kwargs):
        # type: (Type[MasterType], str, Dict[str, Any], Set[str], **Any) -> MasterType
        """Create a new non-finalized master instance.

        This instance is used to determine if we created this instance before.

        Parameters
        ----------
        gen_cls : Type[MasterType]
            the generator Python class.
        lib_name : str
            generated instance library name.
        params : Dict[str, Any]
            instance parameters dictionary.
        used_cell_names : Set[str]
            a set of all used cell names.
        **kwargs : Any
            optional arguments for the generator.

        Returns
        -------
        master : MasterType
            the non-finalized generated instance.
        """
        raise NotImplementedError('not implemented')

    @abc.abstractmethod
    def create_masters_in_db(self, lib_name, content_list, debug=False):
        # type: (str, Sequence[Any], bool) -> None
        """Create the masters in the design database.

        Parameters
        ----------
        lib_name : str
            library to create the designs in.
        content_list : Sequence[Any]
            a list of the master contents.  Must be created in this order.
        debug : bool
            True to print debug messages
        """
        pass

    @property
    def lib_name(self):
        # type: () -> str
        """Returns the master library name."""
        return self._lib_name

    @property
    def cell_prefix(self):
        # type: () -> str
        """Returns the cell name prefix."""
        return self._name_prefix

    @cell_prefix.setter
    def cell_prefix(self, new_val):
        # type: (str) -> None
        """Change the cell name prefix."""
        self._name_prefix = new_val

    @property
    def cell_suffix(self):
        # type: () -> str
        """Returns the cell name suffix."""
        return self._name_suffix

    @cell_suffix.setter
    def cell_suffix(self, new_val):
        # type: (str) -> None
        """Change the cell name suffix."""
        self._name_suffix = new_val

    @property
    def used_cell_names(self):
        # type: () -> Set[str]
        """The set of all cell names used by registered masters."""
        return self._used_cell_names

    def format_cell_name(self, cell_name):
        # type: (str) -> str
        """Returns the formatted cell name.

        Applies the rename dictionary first, then adds the prefix/suffix.

        Parameters
        ----------
        cell_name : str
            the original cell name.

        Returns
        -------
        final_name : str
            the new cell name.
        """
        cell_name = self._rename_dict.get(cell_name, cell_name)
        return '%s%s%s' % (self._name_prefix, cell_name, self._name_suffix)

    def append_library(self, lib_name, lib_path):
        # type: (str, str) -> None
        """Adds a new library to the library definition file.

        Parameters
        ----------
        lib_name : str
            name of the library.
        lib_path : str
            path to this library.
        """
        if self._importer is None:
            raise ValueError('Cannot add generator library; library definition file not specified.')
        self._importer.append_library(lib_name, lib_path)

    def get_library_path(self, lib_name):
        # type: (str) -> Optional[str]
        """Returns the location of the given library.

        Parameters
        ----------
        lib_name : str
            the library name.

        Returns
        -------
        lib_path : Optional[str]
            the location of the library, or None if library not defined.
        """
        if self._importer is None:
            raise ValueError('Cannot get generator library path; '
                             'library definition file not specified.')
        return self._importer.get_library_path(lib_name)

    def get_generator_class(self, lib_name, cell_name):
        # type: (str, str) -> Any
        """Returns the corresponding generator Python class.

        Parameters
        ----------
        lib_name : str
            template library name.
        cell_name : str
            generator cell name

        Returns
        -------
        temp_cls : Any
            the corresponding Python class.
        """
        if self._importer is None:
            raise ValueError('Cannot get generator class; library definition file not specified.')
        return self._importer.get_class(lib_name, cell_name)

    def new_master(self,  # type: MasterDB
                   lib_name='',  # type: str
                   cell_name='',  # type: str
                   params=None,  # type: Optional[Dict[str, Any]]
                   gen_cls=None,  # type: Optional[Type[MasterType]]
                   debug=False,  # type: bool
                   **kwargs):
        # type: (...) -> MasterType
        """Create a generator instance.

        Parameters
        ----------
        lib_name : str
            generator library name.
        cell_name : str
            generator name
        params : Optional[Dict[str, Any]]
            the parameter dictionary.
        gen_cls : Optional[Type[MasterType]]
            the generator class to instantiate.  Overrides lib_name and cell_name.
        debug : bool
            True to print debug messages.
        **kwargs :
            optional arguments for generator.

        Returns
        -------
        master : MasterType
            the generator instance.
        """
        if params is None:
            params = {}
        if gen_cls is None:
            gen_cls = self.get_generator_class(lib_name, cell_name)
        master = self.create_master_instance(gen_cls, self._lib_name, params,
                                             self._used_cell_names, **kwargs)
        key = master.key
        if key is None:
            # old-style generator: the final key is only known after finalize(),
            # so cache via the preliminary key instead.
            prelim_key = master.prelim_key
            if prelim_key in self._key_lookup:
                key = self._key_lookup[prelim_key]
                master = self._master_lookup[key]
                if debug:
                    print('master cached')
            else:
                if debug:
                    print('finalizing master')
                # in case master.finalize() generates a child with the same cell
                # name, register this master's cell name first.
                self._used_cell_names.add(master.cell_name)
                start = time.time()
                master.finalize()
                end = time.time()
                key = master.key
                self._key_lookup[prelim_key] = key
                if key in self._master_lookup:
                    # two different preliminary keys produced the same final key;
                    # reuse the previously registered master.
                    master = self._master_lookup[key]
                    self._used_cell_names.add(master.cell_name)
                else:
                    self.register_master(key, master)
                if debug:
                    print('finalizing master took %.4g seconds' % (end - start))
        else:
            if key in self._master_lookup:
                master = self._master_lookup[key]
                if debug:
                    print('master cached')
            else:
                if debug:
                    print('finalizing master')
                # in case master.finalize() generates a child with the same cell
                # name, register this master's cell name first.
                self._used_cell_names.add(master.cell_name)
                start = time.time()
                master.finalize()
                end = time.time()
                self.register_master(key, master)
                if debug:
                    print('finalizing master took %.4g seconds' % (end - start))
        return master

    def register_master(self, key, master):
        # Record a finalized master under its key and reserve its cell name.
        self._master_lookup[key] = master
        self._used_cell_names.add(master.cell_name)

    def instantiate_masters(self,
                            master_list,  # type: Sequence[DesignMaster]
                            name_list=None,  # type: Optional[Sequence[Optional[str]]]
                            lib_name='',  # type: str
                            debug=False,  # type: bool
                            rename_dict=None,  # type: Optional[Dict[str, str]]
                            ):
        # type: (...) -> None
        """create all given masters in the database.

        Parameters
        ----------
        master_list : Sequence[DesignMaster]
            list of masters to instantiate.
        name_list : Optional[Sequence[Optional[str]]]
            list of master cell names.  If not given, default names will be used.
        lib_name : str
            Library to create the masters in.  If empty or None, use default library.
        debug : bool
            True to print debugging messages
        rename_dict : Optional[Dict[str, str]]
            optional master cell renaming dictionary.
        """
        if name_list is None:
            name_list = [None] * len(master_list)  # type: Sequence[Optional[str]]
        else:
            if len(name_list) != len(master_list):
                raise ValueError("Master list and name list length mismatch.")
        # configure renaming dictionary.  Verify that renaming dictionary is one-to-one.
        rename = self._rename_dict
        rename.clear()
        reverse_rename = {}  # type: Dict[str, str]
        if rename_dict:
            for key, val in rename_dict.items():
                if key != val:
                    if val in reverse_rename:
                        raise ValueError('Both %s and %s are renamed '
                                         'to %s' % (key, reverse_rename[val], val))
                    rename[key] = val
                    reverse_rename[val] = key
        for master, name in zip(master_list, name_list):
            if name is not None and name != master.cell_name:
                cur_name = master.cell_name
                if name in reverse_rename:
                    raise ValueError('Both %s and %s are renamed '
                                     'to %s' % (cur_name, reverse_rename[name], name))
                rename[cur_name] = name
                reverse_rename[name] = cur_name
                if name in self._used_cell_names:
                    # name is an already used name, so we need to rename it to something else
                    name2 = _get_unique_name(name, self._used_cell_names, reverse_rename)
                    rename[name] = name2
                    reverse_rename[name2] = name
        if debug:
            print('Retrieving master contents')
        # use ordered dict so that children are created before parents.
        info_dict = OrderedDict()  # type: Dict[str, DesignMaster]
        start = time.time()
        for master, top_name in zip(master_list, name_list):
            self._instantiate_master_helper(info_dict, master)
        end = time.time()
        if not lib_name:
            lib_name = self.lib_name
        if not lib_name:
            raise ValueError('master library name is not specified.')
        content_list = [master.get_content(lib_name, self.format_cell_name)
                        for master in info_dict.values()]
        if debug:
            print('master content retrieval took %.4g seconds' % (end - start))
        self.create_masters_in_db(lib_name, content_list, debug=debug)

    def _instantiate_master_helper(self, info_dict, master):
        # type: (Dict[str, DesignMaster], DesignMaster) -> None
        """Helper method for batch_layout().

        Recursively adds children before the master itself, so info_dict
        preserves bottom-up creation order.

        Parameters
        ----------
        info_dict : Dict[str, DesignMaster]
            dictionary from existing master cell name to master objects.
        master : DesignMaster
            the master object to create.
        """
        # get template master for all children
        for master_key in master.children:
            child_temp = self._master_lookup[master_key]
            if child_temp.cell_name not in info_dict:
                self._instantiate_master_helper(info_dict, child_temp)
        # get template master for this cell.
        info_dict[master.cell_name] = self._master_lookup[master.key]
================================================
FILE: bag/util/immutable.py
================================================
"""This module defines various immutable and hashable data types.
"""
from __future__ import annotations
from typing import TypeVar, Any, Generic, Dict, Iterable, Tuple, Union, Optional, overload
import sys
import bisect
from collections import Hashable, Mapping, Sequence
T = TypeVar('T')
U = TypeVar('U')
ImmutableType = Union[None, Hashable, Tuple[Hashable, ...]]
def combine_hash(a: int, b: int) -> int:
    """Combine the two given hash values into one.

    Parameters
    ----------
    a : int
        the first hash value.
    b : int
        the second hash value.

    Returns
    -------
    hash : int
        the combined hash value.
    """
    # algorithm taken from boost::hash_combine, masked to the platform word size.
    mixed = b + 0x9e3779b9 + (a << 6) + (a >> 2)
    return (a ^ mixed) & sys.maxsize
class ImmutableList(Hashable, Sequence, Generic[T]):
    """An immutable homogeneous list.

    The element hash is computed once at construction time, so the stored
    contents must never change afterwards.  BUG FIX: the constructor now makes
    a defensive copy of the input sequence; previously it aliased the caller's
    list, so mutating that list afterwards silently desynchronized the contents
    from the cached hash (breaking the hash/equality contract).
    """
    def __init__(self, values: Optional[Sequence[T]] = None) -> None:
        if values is None:
            self._content = []
            self._hash = 0
        elif isinstance(values, ImmutableList):
            # sharing storage with another ImmutableList is safe: neither mutates.
            self._content = values._content
            self._hash = values._hash
        else:
            # defensive copy so later mutation of the caller's sequence cannot
            # invalidate the hash computed below.
            self._content = list(values)
            self._hash = 0
            for v in self._content:
                self._hash = combine_hash(self._hash, hash(v))

    @classmethod
    def sequence_equal(cls, a: Sequence[T], b: Sequence[T]) -> bool:
        """Return True if the two sequences have equal length and elements."""
        if len(a) != len(b):
            return False
        for av, bv in zip(a, b):
            if av != bv:
                return False
        return True

    def __repr__(self) -> str:
        return repr(self._content)

    def __eq__(self, other: Any) -> bool:
        # compare hashes first as a cheap filter before element-wise comparison.
        return (isinstance(other, ImmutableList) and self._hash == other._hash and
                self.sequence_equal(self._content, other._content))

    def __hash__(self) -> int:
        return self._hash

    def __bool__(self) -> bool:
        return len(self) > 0

    def __len__(self) -> int:
        return len(self._content)

    def __iter__(self) -> Iterable[T]:
        return iter(self._content)

    @overload
    def __getitem__(self, idx: int) -> T: ...
    @overload
    def __getitem__(self, idx: slice) -> ImmutableList[T]: ...

    def __getitem__(self, idx) -> T:
        # integer indexing returns an element; slicing returns a new ImmutableList.
        if isinstance(idx, int):
            return self._content[idx]
        return ImmutableList(self._content[idx])

    def __contains__(self, val: Any) -> bool:
        return val in self._content
class ImmutableSortedDict(Hashable, Mapping, Generic[T, U]):
    """An immutable dictionary with sorted keys.

    Keys are stored sorted in an ImmutableList, with values (converted to
    immutable types) in a parallel list; lookups use binary search.
    """
    def __init__(self,
                 table: Optional[Mapping[T, Any]] = None) -> None:
        if table is None:
            self._keys = ImmutableList([])
            self._vals = ImmutableList([])
        elif isinstance(table, ImmutableSortedDict):
            # share storage with the other immutable dict; copy its hash too.
            self._keys = table._keys
            self._vals = table._vals
            self._hash = table._hash
            return
        else:
            self._keys = ImmutableList(sorted(table.keys()))
            self._vals = ImmutableList([to_immutable(table[k]) for k in self._keys])
        self._hash = combine_hash(hash(self._keys), hash(self._vals))

    def _locate(self, item: T) -> int:
        # binary-search for item in the sorted key list; -1 if absent.
        pos = bisect.bisect_left(self._keys, item)
        if pos < len(self._keys) and self._keys[pos] == item:
            return pos
        return -1

    def __repr__(self) -> str:
        return repr(list(zip(self._keys, self._vals)))

    def __eq__(self, other: Any) -> bool:
        # hash comparison is a cheap filter before the full key/value check.
        return (isinstance(other, ImmutableSortedDict) and
                self._hash == other._hash and
                self._keys == other._keys and
                self._vals == other._vals)

    def __hash__(self) -> int:
        return self._hash

    def __bool__(self) -> bool:
        return len(self) > 0

    def __len__(self) -> int:
        return len(self._keys)

    def __iter__(self) -> Iterable[T]:
        return iter(self._keys)

    def __contains__(self, item: Any) -> bool:
        return self._locate(item) >= 0

    def __getitem__(self, item: T) -> U:
        pos = self._locate(item)
        if pos < 0:
            raise KeyError('Key not found: {}'.format(item))
        return self._vals[pos]

    def get(self, item: T, default: Optional[U] = None) -> Optional[U]:
        pos = self._locate(item)
        return default if pos < 0 else self._vals[pos]

    def keys(self) -> Iterable[T]:
        return iter(self._keys)

    def values(self) -> Iterable[U]:
        return iter(self._vals)

    def items(self) -> Iterable[Tuple[T, U]]:
        return zip(self._keys, self._vals)

    def copy(self, append: Optional[Dict[T, Any]] = None) -> ImmutableSortedDict[T, U]:
        """Return a copy of this dict, optionally updated with extra entries."""
        if append is None:
            return self.__class__(self)
        tmp = self.to_dict()
        tmp.update(append)
        return self.__class__(tmp)

    def to_dict(self) -> Dict[T, U]:
        """Return a mutable plain-dict copy of this mapping."""
        return dict(zip(self._keys, self._vals))
# Type alias: immutable parameter dictionaries keyed by parameter name.
Param = ImmutableSortedDict[str, Any]
def to_immutable(obj: Any) -> ImmutableType:
    """Convert the given Python object into an immutable type."""
    if obj is None:
        return obj
    if isinstance(obj, Hashable):
        # a tuple of un-hashable elements is Hashable but hash() still raises,
        # so actually try hashing before returning the object unchanged.
        try:
            hash(obj)
        except TypeError:
            pass
        else:
            return obj
    if isinstance(obj, tuple):
        return tuple(to_immutable(v) for v in obj)
    if isinstance(obj, list):
        return ImmutableList([to_immutable(v) for v in obj])
    if isinstance(obj, set):
        return ImmutableList([to_immutable(v) for v in sorted(obj)])
    if isinstance(obj, dict):
        return ImmutableSortedDict(obj)
    raise ValueError('Cannot convert the following object to immutable type: {}'.format(obj))
================================================
FILE: bag/util/interval.py
================================================
# -*- coding: utf-8 -*-
"""This module provides data structure that keeps track of intervals.
"""
from typing import List, Optional, Tuple, Any, Iterable, Generator
import bisect
class IntervalSet(object):
    """A data structure that keeps track of disjoint 1D integer intervals.

    Intervals are half-open ([start, end)) and stored in sorted order as three
    parallel lists (starts, ends, values), so lookups use binary search.
    Each interval has a value associated with it.  If not specified, the value
    defaults to None.

    Parameters
    ----------
    intv_list : Optional[Iterable[Tuple[int, int]]]
        the sorted initial interval list.
    val_list : Optional[Iterable[Any]]
        the initial values list.
    """

    def __init__(self, intv_list=None, val_list=None):
        # type: (Optional[Iterable[Tuple[int, int]]], Optional[Iterable[Any]]) -> None
        self._start_list = []  # type: List[int]
        self._end_list = []  # type: List[int]
        if intv_list is None:
            self._val_list = []  # type: List[Any]
        else:
            for v0, v1 in intv_list:
                self._start_list.append(v0)
                self._end_list.append(v1)
            if val_list is None:
                # default every interval's value to None
                self._val_list = [None] * len(self._start_list)
            else:
                self._val_list = list(val_list)

    def __contains__(self, key):
        # type: (Tuple[int, int]) -> bool
        """Returns True if this IntervalSet contains the given interval.

        The interval must match an existing interval exactly; a subinterval
        does not count.

        Parameters
        ----------
        key : Tuple[int, int]
            the interval to test.

        Returns
        -------
        contains : bool
            True if this IntervalSet contains the given interval.
        """
        idx = self._get_first_overlap_idx(key)
        return idx >= 0 and key[0] == self._start_list[idx] and key[1] == self._end_list[idx]

    def __getitem__(self, intv):
        # type: (Tuple[int, int]) -> Any
        """Returns the value associated with the given interval.

        Raises KeyError if the given interval is not in this IntervalSet.

        Parameters
        ----------
        intv : Tuple[int, int]
            the interval to query.

        Returns
        -------
        val : Any
            the value associated with the given interval.
        """
        idx = self._get_first_overlap_idx(intv)
        if idx < 0 or intv[0] != self._start_list[idx] or intv[1] != self._end_list[idx]:
            raise KeyError('Invalid interval: %s' % repr(intv))
        return self._val_list[idx]

    def __setitem__(self, intv, value):
        # type: (Tuple[int, int], Any) -> None
        """Update the value associated with the given interval.

        If the interval does not overlap any existing interval, it is added
        with the given value.  Raises KeyError if the interval overlaps an
        existing interval without matching it exactly.

        Parameters
        ----------
        intv : Tuple[int, int]
            the interval to update.
        value : Any
            the new value.
        """
        idx = self._get_first_overlap_idx(intv)
        if idx < 0:
            self.add(intv, value)
        elif intv[0] != self._start_list[idx] or intv[1] != self._end_list[idx]:
            raise KeyError('Invalid interval: %s' % repr(intv))
        else:
            self._val_list[idx] = value

    def __iter__(self):
        # type: () -> Iterable[Tuple[int, int]]
        """Iterates over intervals in this IntervalSet in increasing order.

        Yields
        ------
        intv : Tuple[int, int]
            the next interval.
        """
        return zip(self._start_list, self._end_list)

    def __len__(self):
        # type: () -> int
        """Returns the number of intervals in this IntervalSet.

        Returns
        -------
        length : int
            number of intervals in this set.
        """
        return len(self._start_list)

    def get_start(self):
        # type: () -> int
        """Returns the start of the first interval.

        Returns
        -------
        start : int
            the start of the first interval.
        """
        return self._start_list[0]

    def get_end(self):
        # type: () -> int
        """Returns the end of the last interval.

        Returns
        -------
        end : int
            the end of the last interval.
        """
        return self._end_list[-1]

    def get_interval(self, idx):
        # type: (int) -> Tuple[int, int]
        """Return the interval at the given index; negative indices count from the end."""
        if idx < 0:
            idx += len(self._start_list)
            if idx < 0:
                raise IndexError('Invalid index: %d' % idx)
        if idx >= len(self._start_list):
            raise IndexError('Invalid index: %d' % idx)
        return self._start_list[idx], self._end_list[idx]

    def copy(self):
        # type: () -> IntervalSet
        """Create a copy of this interval set.

        Returns
        -------
        intv_set : IntervalSet
            a copy of this IntervalSet.
        """
        # the constructor makes its own copy of val_list, so the copy is
        # independent of this instance.
        return IntervalSet(intv_list=list(zip(self._start_list, self._end_list)),
                           val_list=self._val_list)

    def _get_first_overlap_idx(self, intv, abut=False):
        # type: (Tuple[int, int], bool) -> int
        """Returns the index of the first interval that overlaps with the given interval.

        Parameters
        ----------
        intv : Tuple[int, int]
            the given interval.
        abut : bool
            True to return abutted interval too.

        Returns
        -------
        idx : int
            the index of the overlapping interval.  If no overlapping intervals are
            found, -(idx + 1) is returned, where idx is the index to insert the interval.
        """
        start, end = intv
        if not self._start_list:
            # empty set: insertion index is 0, encoded as -(0 + 1)
            return -1
        # find the smallest start index greater than start
        idx = bisect.bisect_right(self._start_list, start)
        if idx == 0:
            # all interval's starting point is greater than start
            test = self._start_list[0]
            return 0 if test < end or (abut and test == end) else -1
        # interval where start index is less than or equal to start
        test_idx = idx - 1
        test = self._end_list[test_idx]
        if start < test or (abut and start == test):
            # start is covered by the interval; overlaps.
            return test_idx
        elif idx < len(self._start_list) and \
                (self._start_list[idx] < end or (abut and self._start_list[idx] == end)):
            # _start_list[idx] covered by interval.
            return idx
        else:
            # no overlap interval found
            return -(idx + 1)

    def _get_last_overlap_idx(self, intv, abut=False):
        # type: (Tuple[int, int], bool) -> int
        """Returns the index of the last interval that overlaps with the given interval.

        Parameters
        ----------
        intv : Tuple[int, int]
            the given interval.
        abut : bool
            True to return abutted interval too.

        Returns
        -------
        idx : int
            the index of the overlapping interval.  If no overlapping intervals are
            found, -(idx + 1) is returned, where idx is the index to insert the interval.
        """
        start, end = intv
        if not self._start_list:
            return -1
        # find the smallest start index greater than end
        idx = bisect.bisect_right(self._start_list, end)
        if idx == 0:
            # all interval's starting point is greater than end
            return -1
        # interval where start index is less than or equal to end
        test_idx = idx - 1
        test = self._end_list[test_idx]
        if test > start or (abut and test == start):
            return test_idx
        return -(idx + 1)

    def has_overlap(self, intv):
        # type: (Tuple[int, int]) -> bool
        """Returns True if the given interval overlaps at least one interval in this set.

        Parameters
        ----------
        intv : Tuple[int, int]
            the given interval.

        Returns
        -------
        has_overlap : bool
            True if there is at least one interval in this set that overlaps with the given one.
        """
        return self._get_first_overlap_idx(intv) >= 0

    def has_single_cover(self, intv):
        # type: (Tuple[int, int]) -> bool
        """Returns True if the given interval is completely covered by a single interval."""
        idx = self._get_first_overlap_idx(intv)
        if idx < 0:
            return False
        return self._start_list[idx] <= intv[0] and self._end_list[idx] >= intv[1]

    def remove(self, intv):
        # type: (Tuple[int, int]) -> bool
        """Removes the given interval from this IntervalSet.

        The interval must match an existing interval exactly to be removed.

        Parameters
        ----------
        intv : Tuple[int, int]
            the interval to remove.

        Returns
        -------
        success : bool
            True if the given interval is found and removed.  False otherwise.
        """
        idx = self._get_first_overlap_idx(intv)
        if idx < 0:
            return False
        if intv[0] == self._start_list[idx] and intv[1] == self._end_list[idx]:
            del self._start_list[idx]
            del self._end_list[idx]
            del self._val_list[idx]
            return True
        return False

    def get_intersection(self, other):
        # type: (IntervalSet) -> IntervalSet
        """Returns the intersection of two IntervalSets.

        The new IntervalSet will have all values set to None.

        Parameters
        ----------
        other : IntervalSet
            the other IntervalSet.

        Returns
        -------
        intersection : IntervalSet
            a new IntervalSet containing all intervals present in both sets.
        """
        # standard two-pointer sweep over the two sorted interval lists
        idx1 = idx2 = 0
        len1 = len(self._start_list)
        len2 = len(other._start_list)
        intvs = []
        while idx1 < len1 and idx2 < len2:
            intv1 = self._start_list[idx1], self._end_list[idx1]
            intv2 = other._start_list[idx2], other._end_list[idx2]
            test = max(intv1[0], intv2[0]), min(intv1[1], intv2[1])
            if test[1] > test[0]:
                intvs.append(test)
            # advance whichever interval ends first
            if intv1[1] < intv2[1]:
                idx1 += 1
            elif intv2[1] < intv1[1]:
                idx2 += 1
            else:
                idx1 += 1
                idx2 += 1
        return IntervalSet(intv_list=intvs)

    def get_complement(self, total_intv):
        # type: (Tuple[int, int]) -> IntervalSet
        """Returns a new IntervalSet that's the complement of this one.

        The new IntervalSet will have all values set to None.

        Parameters
        ----------
        total_intv : Tuple[int, int]
            the universal interval.  All intervals in this IntervalSet must be
            a subinterval of the universal interval.

        Returns
        -------
        complement : IntervalSet
            the complement of this IntervalSet.
        """
        return IntervalSet(intv_list=self.complement_iter(total_intv))

    def complement_iter(self, total_intv):
        # type: (Tuple[int, int]) -> Generator[Tuple[int, int], None, None]
        """Iterate over all intervals that are the complement of this one within total_intv."""
        if not self._start_list:
            yield total_intv
        elif self._start_list[0] < total_intv[0] or total_intv[1] < self._end_list[-1]:
            raise ValueError('The given interval [{0}, {1}) is '
                             'not a valid universal interval'.format(*total_intv))
        else:
            # yield the gaps between consecutive intervals
            marker = total_intv[0]
            for start, end in zip(self._start_list, self._end_list):
                if marker < start:
                    yield marker, start
                marker = end
            if marker < total_intv[1]:
                yield marker, total_intv[1]

    def remove_all_overlaps(self, intv):
        # type: (Tuple[int, int]) -> None
        """Remove all intervals in this set that overlap with the given interval.

        Parameters
        ----------
        intv : Tuple[int, int]
            the given interval
        """
        sidx = self._get_first_overlap_idx(intv)
        if sidx >= 0:
            eidx = self._get_last_overlap_idx(intv) + 1
            del self._start_list[sidx:eidx]
            del self._end_list[sidx:eidx]
            del self._val_list[sidx:eidx]

    def add(self, intv, val=None, merge=False, abut=False):
        # type: (Tuple[int, int], Any, bool, bool) -> bool
        """Adds the given interval to this IntervalSet.

        Can only add interval that does not overlap with any existing ones, unless merge is True.

        Parameters
        ----------
        intv : Tuple[int, int]
            the interval to add.
        val : Any
            the value associated with the given interval.
        merge : bool
            If true, the given interval will be merged with any existing intervals
            that overlaps with it.  The merged interval will have the given value.
        abut : bool
            True to count merge abutting intervals.

        Returns
        -------
        success : bool
            True if the given interval is added.
        """
        # abutting only matters when merging
        abut = abut and merge
        bidx = self._get_first_overlap_idx(intv, abut=abut)
        if bidx >= 0:
            if not merge:
                return False
            # replace all overlapped intervals with one merged interval
            eidx = self._get_last_overlap_idx(intv, abut=abut)
            new_start = min(self._start_list[bidx], intv[0])
            new_end = max(self._end_list[eidx], intv[1])
            del self._start_list[bidx:eidx + 1]
            del self._end_list[bidx:eidx + 1]
            del self._val_list[bidx:eidx + 1]
            self._start_list.insert(bidx, new_start)
            self._end_list.insert(bidx, new_end)
            self._val_list.insert(bidx, val)
            return True
        else:
            # insert interval (bidx encodes the insertion index as -(idx + 1))
            idx = -bidx - 1
            self._start_list.insert(idx, intv[0])
            self._end_list.insert(idx, intv[1])
            self._val_list.insert(idx, val)
            return True

    def subtract(self, intv):
        # type: (Tuple[int, int]) -> List[Tuple[int, int]]
        """Subtract the given interval from this IntervalSet.

        Overlapped intervals are removed; any leftover pieces on either side
        of intv are re-inserted with their original values.

        Parameters
        ----------
        intv : Tuple[int, int]
            the interval to subtract.

        Returns
        -------
        remaining_intvs : List[Tuple[int, int]]
            intervals created from subtraction.
        """
        bidx = self._get_first_overlap_idx(intv)
        insert_intv = []
        if bidx >= 0:
            eidx = self._get_last_overlap_idx(intv)
            insert_val = []
            # leftover piece of the first overlapped interval, left of intv
            if self._start_list[bidx] < intv[0]:
                insert_intv.append((self._start_list[bidx], intv[0]))
                insert_val.append(self._val_list[bidx])
            # leftover piece of the last overlapped interval, right of intv
            if intv[1] < self._end_list[eidx]:
                insert_intv.append((intv[1], self._end_list[eidx]))
                insert_val.append(self._val_list[eidx])
            del self._start_list[bidx:eidx + 1]
            del self._end_list[bidx:eidx + 1]
            del self._val_list[bidx:eidx + 1]
            insert_idx = bidx
            for (new_start, new_end), val in zip(insert_intv, insert_val):
                self._start_list.insert(insert_idx, new_start)
                self._end_list.insert(insert_idx, new_end)
                self._val_list.insert(insert_idx, val)
                insert_idx += 1
        return insert_intv

    def items(self):
        # type: () -> Iterable[Tuple[Tuple[int, int], Any]]
        """Iterates over intervals and values in this IntervalSet

        The intervals are returned in increasing order.

        Yields
        ------
        intv : Tuple[int, int]
            the interval.
        val : Any
            the value associated with the interval.
        """
        return zip(self.__iter__(), self._val_list)

    def intervals(self):
        # type: () -> Iterable[Tuple[int, int]]
        """Iterates over intervals in this IntervalSet

        The intervals are returned in increasing order.

        Yields
        ------
        intv : Tuple[int, int]
            the interval.
        """
        return self.__iter__()

    def values(self):
        # type: () -> Iterable[Any]
        """Iterates over values in this IntervalSet

        The values correspond to intervals in increasing order.

        Yields
        ------
        val : Any
            the value.
        """
        return self._val_list.__iter__()

    def overlap_items(self, intv):
        # type: (Tuple[int, int]) -> Generator[Tuple[Tuple[int, int], Any], None, None]
        """Iterates over intervals and values overlapping the given interval.

        Parameters
        ----------
        intv : Tuple[int, int]
            the interval.

        Yields
        -------
        ovl_intv : Tuple[int, int]
            the overlapping interval.
        val : Any
            value associated with ovl_intv.
        """
        sidx = self._get_first_overlap_idx(intv)
        if sidx >= 0:
            eidx = self._get_last_overlap_idx(intv) + 1
            for idx in range(sidx, eidx):
                yield (self._start_list[idx], self._end_list[idx]), self._val_list[idx]

    def overlap_intervals(self, intv):
        # type: (Tuple[int, int]) -> Generator[Tuple[int, int], None, None]
        """Iterates over intervals overlapping the given interval.

        Parameters
        ----------
        intv : Tuple[int, int]
            the interval.

        Yields
        -------
        ovl_intv : Tuple[int, int]
            the overlapping interval.
        """
        sidx = self._get_first_overlap_idx(intv)
        if sidx >= 0:
            eidx = self._get_last_overlap_idx(intv) + 1
            for idx in range(sidx, eidx):
                yield self._start_list[idx], self._end_list[idx]

    def overlap_values(self, intv):
        # type: (Tuple[int, int]) -> Generator[Any, None, None]
        """Iterates over values of intervals overlapping the given interval.

        Parameters
        ----------
        intv : Tuple[int, int]
            the interval.

        Yields
        -------
        val : Any
            value of an overlapping interval.
        """
        sidx = self._get_first_overlap_idx(intv)
        if sidx >= 0:
            eidx = self._get_last_overlap_idx(intv) + 1
            for idx in range(sidx, eidx):
                yield self._val_list[idx]

    def get_first_overlap_item(self, intv):
        # type: (Tuple[int, int]) -> Optional[Tuple[Tuple[int, int], Any]]
        """Returns the first item with interval that overlaps the given one, or None."""
        idx = self._get_first_overlap_idx(intv)
        if idx < 0:
            return None
        return (self._start_list[idx], self._end_list[idx]), self._val_list[idx]

    def transform(self, scale=1, shift=0):
        # type: (int, int) -> IntervalSet
        """Return a new IntervalSet under the given transformation.

        Each coordinate x is mapped to scale * x + shift.

        Parameters
        ----------
        scale : int
            multiply all interval coordinates by this scale.  Either 1 or -1.
        shift : int
            add this amount to all intervals.

        Returns
        -------
        intv_set : IntervalSet
            the transformed IntervalSet.
        """
        if scale < 0:
            # negation reverses interval order and swaps start/end roles
            new_start = [-v + shift for v in reversed(self._end_list)]
            new_end = [-v + shift for v in reversed(self._start_list)]
            new_val = list(reversed(self._val_list))
        else:
            new_start = [v + shift for v in self._start_list]
            new_end = [v + shift for v in self._end_list]
            new_val = list(self._val_list)
        # build the result without invoking __init__, since the lists are
        # already in the internal representation.
        result = self.__class__.__new__(self.__class__)
        result._start_list = new_start
        result._end_list = new_end
        result._val_list = new_val
        return result
================================================
FILE: bag/util/parse.py
================================================
# -*- coding: utf-8 -*-
"""This module defines parsing utility methods.
"""
import ast
class ExprVarScanner(ast.NodeVisitor):
    """
    This node visitor collects all variable names found in the
    AST, and excludes names of functions.  Variables having
    dotted names are not supported.

    Collected names are accumulated in the ``varnames`` set attribute.
    """
    def __init__(self):
        # set of all variable names seen so far
        self.varnames = set()

    # noinspection PyPep8Naming
    def visit_Name(self, node):
        # every bare identifier is a variable
        self.varnames.add(node.id)

    # noinspection PyPep8Naming
    def visit_Call(self, node):
        # skip the called function's name, but scan everything passed to it
        if not isinstance(node.func, ast.Name):
            self.visit(node.func)
        for arg in node.args:
            self.visit(arg)
        # bug fix: variables appearing in keyword arguments (e.g. the "y"
        # in f(x=y)) were previously never visited and thus never collected.
        for kwarg in node.keywords:
            self.visit(kwarg.value)

    # noinspection PyPep8Naming
    def visit_Attribute(self, node):
        # ignore attributes (dotted names are not supported)
        pass
def get_variables(expr):
    """Parses the given Python expression and return a list of all variables.

    Parameters
    ----------
    expr : str
        An expression string that we want to parse for variable names.

    Returns
    -------
    var_list : list[str]
        Names of variables from the given expression.
    """
    visitor = ExprVarScanner()
    visitor.visit(ast.parse(expr, mode='exec'))
    return list(visitor.varnames)
================================================
FILE: bag/util/search.py
================================================
# -*- coding: utf-8 -*-
"""This module provides search related utilities.
"""
from typing import Optional, Callable, Any
from collections import namedtuple
# Result of the minimize_cost_* searches below: x is the found solution (or
# None when none exists), xmax/vmax locate the function maximum when no
# solution exists, and nfev is the total number of function evaluations.
MinCostResult = namedtuple('MinCostResult', ['x', 'xmax', 'vmax', 'nfev'])
class BinaryIterator(object):
    """A class that performs binary search over integers.

    This class supports both bounded or unbounded binary search, and
    you can also specify a step size.

    Internally the search works in "index space": index N corresponds to the
    value low + N * step, and ``_low``/``_high``/``_current`` are all indices.

    Parameters
    ----------
    low : int
        the lower bound (inclusive).
    high : Optional[int]
        the upper bound (exclusive).  None for unbounded binary search.
    step : int
        the step size.  All return values will be low + N * step
    """

    def __init__(self, low, high, step=1):
        # type: (int, Optional[int], int) -> None
        if not isinstance(low, int) or not isinstance(step, int):
            raise ValueError('low and step must be integers.')
        self._offset = low
        self._step = step
        self._high = None  # type: Optional[int]
        self._low = 0  # type: int
        self._current = 0  # type: int
        self._save_marker = None  # type: Optional[int]
        if high is not None:
            if not isinstance(high, int):
                raise ValueError('high must be None or integer.')
            # number of steps to cover [low, high); round up so the last
            # reachable value is >= high - step.
            nmax = (high - low) // step
            if low + step * nmax < high:
                nmax += 1
            self._high = nmax
            self._current = (self._low + self._high) // 2
        else:
            self._high = None
            self._current = self._low
        self._save_marker = None
        self._save_info = None

    def set_current(self, val):
        # type: (int) -> None
        """Set the value of the current marker."""
        if (val - self._offset) % self._step != 0:
            raise ValueError('value %d is not multiple of step size.' % val)
        self._current = (val - self._offset) // self._step

    def has_next(self):
        # type: () -> bool
        """returns True if this iterator is not finished yet."""
        return self._high is None or self._low < self._high

    def get_next(self):
        # type: () -> int
        """Returns the next value to look at."""
        return self._current * self._step + self._offset

    def up(self):
        # type: () -> None
        """Increment this iterator."""
        self._low = self._current + 1
        if self._high is not None:
            self._current = (self._low + self._high) // 2
        else:
            # unbounded: grow exponentially until an upper bound is found
            if self._current > 0:
                self._current *= 2
            else:
                self._current = 1

    def down(self):
        # type: () -> None
        """Decrement this iterator."""
        # current becomes the (exclusive) upper bound
        self._high = self._current
        self._current = (self._low + self._high) // 2

    def save(self):
        # type: () -> None
        """Save the current index."""
        self._save_marker = self._current

    def save_info(self, info):
        # type: (Any) -> None
        """Save current information."""
        self.save()
        self._save_info = info

    def get_last_save(self):
        # type: () -> Optional[int]
        """Returns the last saved index."""
        if self._save_marker is None:
            return None
        return self._save_marker * self._step + self._offset

    def get_last_save_info(self):
        # type: () -> Any
        """Return last save information."""
        return self._save_info
class FloatBinaryIterator(object):
    """A class that performs binary search over floating point numbers.

    This class supports both bounded or unbounded binary search, and terminates
    when we can guarantee the given error tolerance.

    Internally the search works relative to ``low``: ``_low``/``_high``/
    ``_current`` are offsets from it, and returned values add ``low`` back.

    Parameters
    ----------
    low : float
        the lower bound.
    high : Optional[float]
        the upper bound.  None for unbounded binary search.
    tol : float
        we will guarantee that the final solution will be within this
        tolerance.
    search_step : float
        for unbounded binary search, this is the initial step size when
        searching for upper bound.
    """

    def __init__(self, low, high, tol=1.0, search_step=1.0):
        # type: (float, Optional[float], float, float) -> None
        self._offset = low
        self._tol = tol
        self._high = None  # type: Optional[float]
        self._low = 0.0  # type: float
        self._search_step = search_step
        self._save_marker = None  # type: Optional[float]
        if high is not None:
            self._high = high - low
            self._current = self._high / 2
        else:
            self._high = None
            self._current = 0
        self._save_marker = None
        self._save_info = None

    def has_next(self):
        # type: () -> bool
        """returns True if this iterator is not finished yet."""
        # stop once the bracket is within twice the tolerance
        return self._high is None or self._low + 2 * self._tol < self._high

    def get_next(self):
        # type: () -> float
        """Returns the next value to look at."""
        return self._current + self._offset

    def up(self):
        # type: () -> None
        """Increment this iterator."""
        self._low = self._current
        if self._high is not None:
            self._current = (self._low + self._high) / 2
        else:
            # unbounded: grow exponentially until an upper bound is found
            if self._current != 0:
                self._current *= 2
            else:
                self._current = self._search_step

    def down(self):
        # type: () -> None
        """Decrement this iterator."""
        self._high = self._current
        self._current = (self._low + self._high) / 2

    def save(self):
        # type: () -> None
        """Save the current index"""
        self._save_marker = self._current

    def save_info(self, info):
        # type: (Any) -> None
        """Save current information."""
        self.save()
        self._save_info = info

    def get_last_save(self):
        # type: () -> Optional[float]
        """Returns the last saved index."""
        if self._save_marker is None:
            return None
        return self._save_marker + self._offset

    def get_last_save_info(self):
        # type: () -> Any
        """Return last save information."""
        return self._save_info
def minimize_cost_binary(f, vmin, start=0, stop=None, step=1, save=None, nfev=0):
    # type: (Callable[[int], float], float, int, Optional[int], int, Optional[int], int) -> MinCostResult
    """Find the smallest integer x with f(x) >= vmin via binary search.

    Only valid when f is monotonically increasing, or increases then
    decreases with stop given and f(stop) >= vmin.

    Parameters
    ----------
    f : Callable[[int], float]
        function to evaluate; must monotonically increase then decrease.
    vmin : float
        the minimum output value.
    start : int
        the input lower bound.
    stop : Optional[int]
        the input upper bound.  Use None for unbounded binary search.
    step : int
        the input step; f is only evaluated at points start + step * N.
    save : Optional[int]
        fallback answer returned when no solution is found.
    nfev : int
        number of function calls already made.

    Returns
    -------
    result : MinCostResult
        named tuple where x is the answer (None if no solution) and nfev is
        the total number of function evaluations.
    """
    searcher = BinaryIterator(start, stop, step=step)
    while searcher.has_next():
        x_test = searcher.get_next()
        nfev += 1
        if f(x_test) >= vmin:
            # feasible; remember it and look for a smaller x
            save = x_test
            searcher.down()
        else:
            searcher.up()
    return MinCostResult(x=save, xmax=None, vmax=None, nfev=nfev)
def minimize_cost_golden(f, vmin, offset=0, step=1, maxiter=1000):
    # type: (Callable[[int], float], float, int, int, Optional[int]) -> MinCostResult
    """Minimize cost given minimum output constraint using golden section/binary search.

    Given discrete function f that monotonically increases then monotonically decreases,
    find the minimum integer x such that f(x) >= vmin.

    This method uses Fibonacci search to find the upper bound of x.  If the upper bound
    is found, a binary search is performed in the interval to find the solution.  If
    vmin is close to the maximum of f, a golden section search is performed to attempt
    to find x.

    Parameters
    ----------
    f : Callable[[int], float]
        a function that takes a single integer and output a scalar value.  Must monotonically
        increase then monotonically decrease.
    vmin : float
        the minimum output value.
    offset : int
        the input lower bound.  We will search for x in the range [offset, infinity).
    step : int
        the input step.  function will only be evaluated at the points offset + step * N
    maxiter : Optional[int]
        maximum number of iterations to perform.  If None, will run indefinitely.

    Returns
    -------
    result : MinCostResult
        the MinCostResult named tuple, with attributes:

        x : Optional[int]
            the minimum integer such that f(x) >= vmin.  If no such x exists, this will be None.
        xmax : Optional[int]
            the value at which f achieves its maximum.  This is set only if x is None
        vmax : Optional[float]
            the maximum value of f.  This is set only if x is None.
        nfev : int
            total number of function calls made.
    """
    # fib2/fib1/fib0 are the last three probe points (in step units);
    # probes advance along the Fibonacci sequence after the first few steps.
    fib2 = fib1 = fib0 = 0
    cur_idx = 0
    nfev = 0
    xmax = vmax = v_prev = None
    while maxiter is None or nfev < maxiter:
        v_cur = f(step * fib0 + offset)
        nfev += 1
        if v_cur >= vmin:
            # found upper bound, use binary search to find answer
            stop = step * fib0 + offset
            return minimize_cost_binary(f, vmin, start=step * (fib1 + 1) + offset,
                                        stop=stop, save=stop, step=step, nfev=nfev)
        else:
            if vmax is not None and v_cur <= vmax:
                # f stopped increasing: the maximum lies in [fib2, fib0]
                if cur_idx <= 3:
                    # special case: 0 <= xmax < 3, and we already checked all possibilities, so
                    # we know vmax < vmin.  There is no solution and just return.
                    return MinCostResult(x=None, xmax=step * xmax + offset, vmax=vmax, nfev=nfev)
                else:
                    # we found the bracket that encloses maximum, perform golden section search
                    a, x, b = fib2, fib1, fib0
                    fx = v_prev
                    while x > a + 1 or b > x + 1:
                        # probe the reflection of x in [a, b]
                        u = a + b - x
                        fu = f(step * u + offset)
                        nfev += 1
                        if fu >= fx:
                            # u is the new best point; shrink toward it
                            if u > x:
                                a, x = x, u
                                fx = fu
                            else:
                                x, b = u, x
                                fx = fu
                            if fx >= vmin:
                                # found upper bound, use binary search to find answer
                                stop = step * x + offset
                                return minimize_cost_binary(f, vmin, start=step * (a + 1) + offset,
                                                            stop=stop, save=stop, step=step, nfev=nfev)
                        else:
                            # u is worse; it becomes the new bracket edge
                            if u > x:
                                b = u
                            else:
                                a = u
                    # golden section search terminated, we found the maximum and it is less than vmin
                    return MinCostResult(x=None, xmax=step * x + offset, vmax=fx, nfev=nfev)
            else:
                # still not close to maximum, continue searching
                vmax = v_prev = v_cur
                xmax = fib0
                cur_idx += 1
                if cur_idx <= 3:
                    # warm-up probes at 1, 2, 3 before Fibonacci growth
                    fib2, fib1, fib0 = fib1, fib0, cur_idx
                else:
                    fib2, fib1, fib0 = fib1, fib0, fib1 + fib0
    raise ValueError('Maximum number of iteration achieved')
def minimize_cost_binary_float(f, vmin, start, stop, tol=1e-8, save=None, nfev=0):
    # type: (Callable[[float], float], float, float, float, float, float, int) -> MinCostResult
    """Find the smallest x in [start, stop] with f(x) >= vmin via binary search.

    Only valid when f is monotonically increasing, or increases then
    decreases with f(stop) >= vmin.

    Parameters
    ----------
    f : Callable[[float], float]
        function to evaluate; must monotonically increase then decrease.
    vmin : float
        the minimum output value.
    start : float
        the input lower bound.
    stop : float
        the input upper bound.
    tol : float
        output tolerance.
    save : Optional[float]
        fallback answer returned when no solution is found.
    nfev : int
        number of function calls already made.

    Returns
    -------
    result : MinCostResult
        named tuple where x is the answer (None if no solution) and nfev is
        the total number of function evaluations.
    """
    searcher = FloatBinaryIterator(start, stop, tol=tol)
    while searcher.has_next():
        x_test = searcher.get_next()
        val = f(x_test)
        nfev += 1
        if val >= vmin:
            # feasible; remember it and look for a smaller x
            save = x_test
            searcher.down()
        else:
            searcher.up()
    return MinCostResult(x=save, xmax=None, vmax=None, nfev=nfev)
def minimize_cost_golden_float(f, vmin, start, stop, tol=1e-8, maxiter=1000):
    # type: (Callable[[float], float], float, float, float, float, int) -> MinCostResult
    """Minimize cost given minimum output constraint using golden section/binary search.

    Given function f that monotonically increases then monotonically decreases,
    find the minimum x in [start, stop] such that f(x) >= vmin.  If the bracket
    of an f(x) >= vmin point is found, a binary search is performed in that
    interval; otherwise a golden section search homes in on the maximum of f.

    Parameters
    ----------
    f : Callable[[float], float]
        a function that takes a single float and outputs a scalar value.  Must monotonically
        increase then monotonically decrease.
    vmin : float
        the minimum output value.
    start : float
        the input lower bound.
    stop : float
        the input upper bound.
    tol : float
        the solution tolerance.
    maxiter : int
        maximum number of iterations to perform.

    Returns
    -------
    result : MinCostResult
        the MinCostResult named tuple, with attributes:

        x : Optional[float]
            the minimum x such that f(x) >= vmin.  If no such x exists, this will be None.
        xmax : Optional[float]
            the value at which f achieves its maximum.  This is set only if x is None
        vmax : Optional[float]
            the maximum value of f.  This is set only if x is None.
        nfev : int
            total number of function calls made.
    """
    fa = f(start)
    if fa >= vmin:
        # solution found at start
        return MinCostResult(x=start, xmax=None, vmax=None, nfev=1)
    fb = f(stop)  # type: Optional[float]
    if fb is None:
        raise TypeError("f(stop) returned None instead of float")
    if fb >= vmin:
        # found upper bound, use binary search to find answer
        return minimize_cost_binary_float(f, vmin, start, stop, tol=tol, save=stop, nfev=2)
    # solution is somewhere in middle; set up golden-section probes c < d
    gr = (5**0.5 + 1) / 2
    delta = (stop - start) / gr
    c = stop - delta
    d = start + delta
    fc = f(c)  # type: Optional[float]
    if fc is None:
        raise TypeError("f(c) returned None instead of float")
    if fc >= vmin:
        # found upper bound, use binary search to find answer
        return minimize_cost_binary_float(f, vmin, start, c, tol=tol, save=stop, nfev=3)
    fd = f(d)  # type: Optional[float]
    if fd is None:
        raise TypeError("f(d) returned None instead of float")
    if fd >= vmin:
        # found upper bound, use binary search to find answer.
        # bug fix: the upper bound here is d (where f(d) >= vmin), not c --
        # f(c) < vmin was already established above, so searching [start, c]
        # could never find the solution and returned the loose fallback.
        return minimize_cost_binary_float(f, vmin, start, d, tol=tol, save=stop, nfev=4)
    if fc > fd:
        # maximum is left of d; shrink bracket to [start, d]
        a, b, d = start, d, c
        c = b - (b - a) / gr
        fb, fc, fd = fd, None, fc
    else:
        # maximum is right of c; shrink bracket to [c, stop]
        a, b, c = c, stop, d
        d = a + (b - a) / gr
        fa, fc, fd = fc, fd, None
    nfev = 4
    while abs(b - a) > tol and nfev < maxiter:
        # exactly one of fc/fd was invalidated by the last bracket update
        if fc is None:
            fc = f(c)
        else:
            fd = f(d)
        assert fc is not None, 'Either fc or fd was None and the above should have set it'
        assert fd is not None, 'Either fc or fd was None and the above should have set it'
        nfev += 1
        if fc > fd:
            if fc >= vmin:
                return minimize_cost_binary_float(f, vmin, a, c, tol=tol, save=stop, nfev=nfev)
            b, d = d, c
            c = b - (b - a) / gr
            fb, fc, fd = fd, None, fc
        else:
            if fd >= vmin:
                return minimize_cost_binary_float(f, vmin, a, d, tol=tol, save=stop, nfev=nfev)
            a, c = c, d
            d = a + (b - a) / gr
            fa, fc, fd = fc, fd, None
    # bracket converged on the maximum; report whether it clears vmin
    test = (a + b) / 2
    vmax = f(test)
    nfev += 1
    if vmax >= vmin:
        return MinCostResult(x=test, xmax=test, vmax=vmax, nfev=nfev)
    else:
        return MinCostResult(x=None, xmax=test, vmax=vmax, nfev=nfev)
================================================
FILE: bag/verification/LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2018, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: bag/verification/__init__.py
================================================
# -*- coding: utf-8 -*-
"""This package contains LVS/RCX related verification methods.
"""
from typing import Any
import importlib
from .base import Checker
__all__ = ['make_checker', 'Checker']
def make_checker(checker_cls, tmp_dir, **kwargs):
    # type: (str, str, **Any) -> Checker
    """Create and return a Checker instance from its fully qualified class name.

    Parameters
    ----------
    checker_cls : str
        the Checker class absolute path name, e.g.
        ``'bag.verification.calibre.Calibre'``.
    tmp_dir : str
        directory to save temporary files in.
    **kwargs : Any
        keyword arguments needed to create a Checker object.

    Returns
    -------
    checker : Checker
        an instance of the requested Checker subclass, constructed as
        ``cls(tmp_dir, **kwargs)``.
    """
    # split "pkg.module.ClassName" into module path and class name
    module_str, class_str = checker_cls.rsplit('.', 1)
    module = importlib.import_module(module_str)
    return getattr(module, class_str)(tmp_dir, **kwargs)
================================================
FILE: bag/verification/base.py
================================================
# -*- coding: utf-8 -*-
"""This module defines Checker, an abstract base class that handles LVS/RCX."""
from typing import TYPE_CHECKING, List, Dict, Any, Tuple, Sequence, Optional
import abc
from ..io.template import new_template_env
from ..concurrent.core import SubProcessManager
if TYPE_CHECKING:
from ..concurrent.core import FlowInfo, ProcInfo
class Checker(abc.ABC):
    """Abstract base class for LVS/RCX verification backends.

    Subclasses provide coroutines that run LVS/RCX and that export
    layout/schematic netlists.

    Parameters
    ----------
    tmp_dir : str
        temporary directory to save files in.
    """

    def __init__(self, tmp_dir: str) -> None:
        self.tmp_dir = tmp_dir
        # template environment serving files bundled under bag/verification/templates
        self._tmp_env = new_template_env('bag.verification', 'templates')

    @abc.abstractmethod
    def get_rcx_netlists(self, lib_name: str, cell_name: str) -> List[str]:
        """Return the file names of the netlists produced by extraction.

        Parameters
        ----------
        lib_name : str
            library name.
        cell_name : str
            cell_name

        Returns
        -------
        netlists : List[str]
            generated extraction netlist file names; index 0 is the main netlist.
        """
        return []

    @abc.abstractmethod
    async def async_run_lvs(self, lib_name: str, cell_name: str,
                            sch_view: str = 'schematic', lay_view: str = 'layout',
                            params: Optional[Dict[str, Any]] = None,
                            **kwargs: Any) -> Tuple[bool, str]:
        """Coroutine that runs LVS on the given cell.

        Parameters
        ----------
        lib_name : str
            library name.
        cell_name : str
            cell name.
        sch_view : str
            schematic view name.  Optional.
        lay_view : str
            layout view name.  Optional.
        params : Optional[Dict[str, Any]]
            optional LVS parameter values.
        kwargs : Any
            optional keyword arguments.

            gds_layout_path : str
                Path to the gds of the layout.  If passed, do not export
                layout; copy the gds instead.

        Returns
        -------
        success : bool
            True if LVS succeeds.
        log_fname : str
            LVS log file name.
        """
        return False, ''

    @abc.abstractmethod
    async def async_run_rcx(self, lib_name: str, cell_name: str,
                            sch_view: str = 'schematic', lay_view: str = 'layout',
                            params: Optional[Dict[str, Any]] = None,
                            **kwargs: Any) -> Tuple[Optional[str], str]:
        """Coroutine that runs RC extraction on the given cell.

        Parameters
        ----------
        lib_name : str
            library name.
        cell_name : str
            cell name.
        sch_view : str
            schematic view name.  Optional.
        lay_view : str
            layout view name.  Optional.
        params : Optional[Dict[str, Any]]
            optional RCX parameter values.
        kwargs : Any
            optional keyword arguments.

            gds_layout_path : str
                Path to the gds of the layout.  If passed, do not export
                layout; copy the gds instead.

        Returns
        -------
        netlist : Optional[str]
            the RCX netlist file name.  None if RCX failed; empty if no
            extracted netlist is generated.
        log_fname : str
            RCX log file name.
        """
        return '', ''

    @abc.abstractmethod
    async def async_export_layout(self, lib_name: str, cell_name: str, out_file: str,
                                  view_name: str = 'layout',
                                  params: Optional[Dict[str, Any]] = None) -> str:
        """Coroutine that exports the layout of the given cell.

        Parameters
        ----------
        lib_name : str
            library name.
        cell_name : str
            cell name.
        view_name : str
            layout view name.
        out_file : str
            output file name.
        params : Optional[Dict[str, Any]]
            optional export parameter values.

        Returns
        -------
        log_fname : str
            log file name.
        """
        return ''

    @abc.abstractmethod
    async def async_export_schematic(self, lib_name: str, cell_name: str, out_file: str,
                                     view_name: str = 'schematic',
                                     params: Optional[Dict[str, Any]] = None) -> str:
        """Coroutine that exports the schematic of the given cell.

        Parameters
        ----------
        lib_name : str
            library name.
        cell_name : str
            cell name.
        view_name : str
            schematic view name.
        out_file : str
            output file name.
        params : Optional[Dict[str, Any]]
            optional export parameter values.

        Returns
        -------
        log_fname : str
            log file name.
        """
        return ''

    def render_file_template(self, temp_name: str, params: Dict[str, Any]) -> str:
        """Render the named template file with the given parameters."""
        return self._tmp_env.get_template(temp_name).render(**params)

    def render_string_template(self, content: str, params: Dict[str, Any]) -> str:
        """Render the given template string with the given parameters."""
        return self._tmp_env.from_string(content).render(**params)
class SubProcessChecker(Checker, abc.ABC):
    """An implementation of :class:`Checker` using :class:`SubProcessManager`.

    Subclasses only describe *what* to run (the ``setup_*`` methods); this
    class takes care of actually launching and monitoring the subprocesses.

    Parameters
    ----------
    tmp_dir : str
        temporary file directory.
    max_workers : int
        maximum number of parallel processes.
    cancel_timeout : float
        timeout for cancelling a subprocess.
    """

    def __init__(self, tmp_dir, max_workers, cancel_timeout):
        # type: (str, int, float) -> None
        Checker.__init__(self, tmp_dir)
        self._manager = SubProcessManager(max_workers=max_workers, cancel_timeout=cancel_timeout)

    @abc.abstractmethod
    def setup_lvs_flow(self, lib_name, cell_name, sch_view='schematic',
                       lay_view='layout', params=None, **kwargs):
        # type: (str, str, str, str, Optional[Dict[str, Any]], Any) -> Sequence[FlowInfo]
        """This method performs any setup necessary to configure a LVS subprocess flow.

        Parameters
        ----------
        lib_name : str
            library name.
        cell_name : str
            cell name.
        sch_view : str
            schematic view name.
        lay_view : str
            layout view name.
        params : Optional[Dict[str, Any]]
            optional LVS parameter values.
        kwargs : Any
            optional keyword arguments.

            gds_layout_path : str
                Path to the gds of the layout.  If passed, do not export
                layout; copy the gds instead.

        Returns
        -------
        flow_info : Sequence[FlowInfo]
            the LVS flow information list.  Each element is a tuple of:

            args : Union[str, Sequence[str]]
                command to run, as string or list of string arguments.
            log : str
                log file name.
            env : Optional[Dict[str, str]]
                environment variable dictionary.  None to inherit from parent.
            cwd : Optional[str]
                working directory path.  None to inherit from parent.
            vfun : Sequence[Callable[[Optional[int], str], Any]]
                a function to validate if it is ok to execute the next
                process.  The output of the last function is returned.  The
                first argument is the return code, the second argument is
                the log file name.
        """
        return []

    @abc.abstractmethod
    def setup_rcx_flow(self, lib_name, cell_name, sch_view='schematic',
                       lay_view='layout', params=None, **kwargs):
        # type: (str, str, str, str, Optional[Dict[str, Any]], Any) -> Sequence[FlowInfo]
        """This method performs any setup necessary to configure a RCX subprocess flow.

        Parameters
        ----------
        lib_name : str
            library name.
        cell_name : str
            cell name.
        sch_view : str
            schematic view name.
        lay_view : str
            layout view name.
        params : Optional[Dict[str, Any]]
            optional RCX parameter values.
        kwargs : Any
            optional keyword arguments.

            gds_layout_path : str
                Path to the gds of the layout.  If passed, do not export
                layout; copy the gds instead.

        Returns
        -------
        flow_info : Sequence[FlowInfo]
            the RCX flow information list.  Each element is a tuple of:

            args : Union[str, Sequence[str]]
                command to run, as string or list of string arguments.
            log : str
                log file name.
            env : Optional[Dict[str, str]]
                environment variable dictionary.  None to inherit from parent.
            cwd : Optional[str]
                working directory path.  None to inherit from parent.
            vfun : Sequence[Callable[[Optional[int], str], Any]]
                a function to validate if it is ok to execute the next
                process.  The output of the last function is returned.  The
                first argument is the return code, the second argument is
                the log file name.
        """
        return []

    @abc.abstractmethod
    def setup_export_layout(self, lib_name, cell_name, out_file, view_name='layout', params=None):
        # type: (str, str, str, str, Optional[Dict[str, Any]]) -> ProcInfo
        """This method performs any setup necessary to export layout.

        Parameters
        ----------
        lib_name : str
            library name.
        cell_name : str
            cell name.
        out_file : str
            output file name.
        view_name : str
            layout view name.
        params : Optional[Dict[str, Any]]
            optional export parameter values.

        Returns
        -------
        args : Union[str, Sequence[str]]
            command to run, as string or list of string arguments.
        log : str
            log file name.
        env : Optional[Dict[str, str]]
            environment variable dictionary.  None to inherit from parent.
        cwd : Optional[str]
            working directory path.  None to inherit from parent.
        """
        return '', '', None, None

    @abc.abstractmethod
    def setup_export_schematic(self, lib_name, cell_name, out_file,
                               view_name='schematic', params=None):
        # type: (str, str, str, str, Optional[Dict[str, Any]]) -> ProcInfo
        """This method performs any setup necessary to export schematic.

        Parameters
        ----------
        lib_name : str
            library name.
        cell_name : str
            cell name.
        out_file : str
            output file name.
        view_name : str
            schematic view name.
        params : Optional[Dict[str, Any]]
            optional export parameter values.

        Returns
        -------
        args : Union[str, Sequence[str]]
            command to run, as string or list of string arguments.
        log : str
            log file name.
        env : Optional[Dict[str, str]]
            environment variable dictionary.  None to inherit from parent.
        cwd : Optional[str]
            working directory path.  None to inherit from parent.
        """
        return '', '', None, None

    async def async_run_lvs(self, lib_name: str, cell_name: str,
                            sch_view: str = 'schematic',
                            lay_view: str = 'layout',
                            params: Optional[Dict[str, Any]] = None,
                            **kwargs: Any,
                            ) -> Tuple[bool, str]:
        flow_info = self.setup_lvs_flow(lib_name, cell_name, sch_view, lay_view, params, **kwargs)
        return await self._manager.async_new_subprocess_flow(flow_info)

    async def async_run_rcx(self, lib_name: str, cell_name: str,
                            sch_view: str = 'schematic',
                            lay_view: str = 'layout',
                            params: Optional[Dict[str, Any]] = None,
                            **kwargs: Any,
                            ) -> Tuple[str, str]:
        flow_info = self.setup_rcx_flow(lib_name, cell_name, sch_view, lay_view, params, **kwargs)
        return await self._manager.async_new_subprocess_flow(flow_info)

    async def async_export_layout(self, lib_name: str, cell_name: str,
                                  out_file: str, view_name: str = 'layout',
                                  params: Optional[Dict[str, Any]] = None) -> str:
        proc_info = self.setup_export_layout(lib_name, cell_name, out_file, view_name, params)
        await self._manager.async_new_subprocess(*proc_info)
        # proc_info[1] is the log file name returned by setup_export_layout
        return proc_info[1]

    # NOTE: view_name default fixed from 'layout' to 'schematic' to match the
    # Checker.async_export_schematic contract and setup_export_schematic.
    async def async_export_schematic(self, lib_name: str, cell_name: str,
                                     out_file: str, view_name: str = 'schematic',
                                     params: Optional[Dict[str, Any]] = None) -> str:
        proc_info = self.setup_export_schematic(lib_name, cell_name, out_file, view_name, params)
        await self._manager.async_new_subprocess(*proc_info)
        # proc_info[1] is the log file name returned by setup_export_schematic
        return proc_info[1]
================================================
FILE: bag/verification/calibre.py
================================================
# -*- coding: utf-8 -*-
"""This module implements LVS/RCX using Calibre and stream out from Virtuoso.
"""
from typing import TYPE_CHECKING, Optional, List, Tuple, Dict, Any, Sequence
import os
import subprocess
import shutil
from .virtuoso import VirtuosoChecker
from ..io import read_file, open_temp, readlines_iter
if TYPE_CHECKING:
from .base import FlowInfo
# noinspection PyUnusedLocal
def _all_pass(retcode, log_file):
return True
# noinspection PyUnusedLocal
def lvs_passed(retcode, log_file):
    # type: (int, str) -> Tuple[bool, str]
    """Check if LVS passed

    Parameters
    ----------
    retcode : int
        return code of the LVS process.
    log_file : str
        log file name.

    Returns
    -------
    success : bool
        True if LVS passed.
    log_file : str
        the log file name.
    """
    if not os.path.isfile(log_file):
        return False, ''
    # Scan the log directly instead of spawning `grep`: no subprocess per
    # check, and the marker is matched as a literal ('.' is a metacharacter
    # to grep).  lower() mirrors grep's -i case-insensitive flag.
    test_str = 'LVS completed. CORRECT.'.lower()
    with open(log_file, 'r', errors='replace') as logf:
        for line in logf:
            if test_str in line.lower():
                return True, log_file
    return False, log_file
# noinspection PyUnusedLocal
def query_passed(retcode, log_file):
    # type: (int, str) -> Tuple[bool, str]
    """Check if query passed

    Parameters
    ----------
    retcode : int
        return code of the query process.
    log_file : str
        log file name.

    Returns
    -------
    success : bool
        True if query passed.
    log_file : str
        the log file name.
    """
    if not os.path.isfile(log_file):
        return False, ''
    # Scan the log directly instead of spawning `grep`: no subprocess per
    # check, and the marker is matched as a literal ('.' is a metacharacter
    # to grep).  lower() mirrors grep's -i case-insensitive flag.
    test_str = 'OK: Terminating.'.lower()
    with open(log_file, 'r', errors='replace') as logf:
        for line in logf:
            if test_str in line.lower():
                return True, log_file
    return False, log_file
class Calibre(VirtuosoChecker):
    """A subclass of VirtuosoChecker that uses Calibre for verification.

    Parameters
    ----------
    tmp_dir : string
        temporary directory to save files in.
    lvs_run_dir : str
        the LVS run directory.
    lvs_runset : str
        the LVS runset filename.
    rcx_run_dir : str
        the RCX run directory.
    rcx_runset : str
        the RCX runset filename.
    source_added_file : str
        the Calibre source.added file location. Environment variable is supported.
        Default value is '$DK/Calibre/lvs/source.added'.
    rcx_mode : str
        the RC extraction mode.  One of 'pex', 'xact', 'starrc' or 'qrc'
        (the code also accepts 'qrc' although the historical doc only listed
        three modes).  Defaults to 'pex'.
    xact_rules : str
        the XACT rules file name.
    """
    def __init__(self, tmp_dir, lvs_run_dir, lvs_runset, rcx_run_dir, rcx_runset,
                 source_added_file='$DK/Calibre/lvs/source.added', rcx_mode='pex',
                 xact_rules='', **kwargs):
        max_workers = kwargs.get('max_workers', None)
        # cancel timeout is accepted in milliseconds ...
        cancel_timeout = kwargs.get('cancel_timeout_ms', None)
        rcx_params = kwargs.get('rcx_params', {})
        lvs_params = kwargs.get('lvs_params', {})
        rcx_link_files = kwargs.get('rcx_link_files', None)
        if cancel_timeout is not None:
            # ... but SubProcessManager wants seconds
            cancel_timeout /= 1e3
        VirtuosoChecker.__init__(self, tmp_dir, max_workers, cancel_timeout, source_added_file)
        self.default_rcx_params = rcx_params
        self.default_lvs_params = lvs_params
        # For starrc/qrc extraction, LVS is run inside the RCX run directory so
        # that the LVS by-products (the .sp netlist and the svdb database) are
        # where setup_rcx_flow expects to find them.
        self.lvs_run_dir = os.path.abspath(rcx_run_dir if (rcx_mode == 'starrc' or rcx_mode == 'qrc') else lvs_run_dir)
        self.lvs_runset = lvs_runset
        self.rcx_run_dir = os.path.abspath(rcx_run_dir)
        self.rcx_runset = rcx_runset
        self.rcx_link_files = rcx_link_files
        self.xact_rules = xact_rules
        self.rcx_mode = rcx_mode

    def get_rcx_netlists(self, lib_name, cell_name):
        # type: (str, str) -> List[str]
        """Returns a list of generated extraction netlist file names.

        Parameters
        ----------
        lib_name : str
            library name.
        cell_name : str
            cell_name

        Returns
        -------
        netlists : List[str]
            a list of generated extraction netlist file names. The first index is the main netlist.
        """
        # StarRC/QRC produce a single .spf; PEX/XACT produce a .pex.netlist.
        if self.rcx_mode == 'starrc' or self.rcx_mode == 'qrc':
            return ['%s.spf' % cell_name]
        else:
            return ['%s.pex.netlist' % cell_name,
                    # '%s.pex.netlist.pex' % cell_name,
                    # '%s.pex.netlist.%s.pxi' % (cell_name, cell_name),
                    ]

    def setup_lvs_flow(self, lib_name, cell_name, sch_view='schematic', lay_view='layout',
                       params=None, **kwargs):
        # type: (str, str, str, str, Optional[Dict[str, Any]], Any) -> Sequence[FlowInfo]
        # Builds the LVS flow: (optional) layout export or gds copy, schematic
        # export, then a batch `calibre -lvs` run validated by lvs_passed.
        run_dir = os.path.join(self.lvs_run_dir, lib_name, cell_name)
        os.makedirs(run_dir, exist_ok=True)
        lay_file, sch_file = self._get_lay_sch_files(run_dir)
        # add schematic/layout export to flow
        flow_list = []
        # Check if gds layout is provided
        gds_layout_path = kwargs.pop('gds_layout_path', None)
        # If not provided the gds layout, need to export layout
        if not gds_layout_path:
            cmd, log, env, cwd = self.setup_export_layout(lib_name, cell_name, lay_file, lay_view, None)
            flow_list.append((cmd, log, env, cwd, _all_pass))
        # If provided gds layout, do not export layout, just copy gds
        else:
            if not os.path.exists(gds_layout_path):
                raise ValueError(f'gds_layout_path does not exist: {gds_layout_path}')
            # temp file is only used to obtain a unique log file name
            with open_temp(prefix='copy', dir=run_dir, delete=True) as f:
                copy_log_file = f.name
            copy_cmd = ['cp', gds_layout_path, os.path.abspath(lay_file)]
            flow_list.append((copy_cmd, copy_log_file, None, None, _all_pass))
        cmd, log, env, cwd = self.setup_export_schematic(lib_name, cell_name, sch_file, sch_view, None)
        flow_list.append((cmd, log, env, cwd, _all_pass))
        lvs_params_actual = self.default_lvs_params.copy()
        if params is not None:
            lvs_params_actual.update(params)
        with open_temp(prefix='lvsLog', dir=run_dir, delete=False) as logf:
            log_file = logf.name
        # generate new runset
        runset_content = self.modify_lvs_runset(run_dir, lib_name, cell_name, lay_view, lay_file,
                                                sch_file, lvs_params_actual)
        # save runset
        with open_temp(dir=run_dir, delete=False) as runset_file:
            runset_fname = runset_file.name
            runset_file.write(runset_content)
        cmd = ['calibre', '-gui', '-lvs', '-runset', runset_fname, '-batch']
        flow_list.append((cmd, log_file, None, run_dir, lvs_passed))
        return flow_list

    def setup_rcx_flow(self, lib_name, cell_name, sch_view='schematic', lay_view='layout',
                       params=None, **kwargs):
        # type: (str, str, str, str, Optional[Dict[str, Any]], Any) -> Sequence[FlowInfo]
        # Builds the RCX flow.  Depending on self.rcx_mode, the final command is
        # StarXtract, qrc, calibre -pex, or a calibre -lvs + -xact pair; the
        # rcx_passed closure at the bottom validates the last command.
        # update default RCX parameters.
        rcx_params_actual = self.default_rcx_params.copy()
        if params is not None:
            rcx_params_actual.update(params)
        run_dir = os.path.join(self.rcx_run_dir, lib_name, cell_name)
        os.makedirs(run_dir, exist_ok=True)
        # make symlinks
        query_input = None
        if self.rcx_link_files:
            for source_file in self.rcx_link_files:
                base_name = os.path.basename(source_file)
                targ_file = os.path.join(run_dir, base_name)
                # remember a linked file whose name contains 'query' as the
                # default Calibre query input for starrc/qrc modes
                if 'query' in base_name:
                    query_input = targ_file
                if not os.path.exists(targ_file):
                    os.symlink(source_file, targ_file)
        lay_file, sch_file = self._get_lay_sch_files(run_dir)
        with open_temp(prefix='rcxLog', dir=run_dir, delete=False) as logf:
            log_file = logf.name
        flow_list = []
        # Check if gds layout is provided
        gds_layout_path = kwargs.pop('gds_layout_path', None)
        # If not provided the gds layout, need to export layout
        if not gds_layout_path:
            cmd, log, env, cwd = self.setup_export_layout(lib_name, cell_name, lay_file, lay_view, None)
            flow_list.append((cmd, log, env, cwd, _all_pass))
        # If provided gds layout, do not export layout, just copy gds
        else:
            if not os.path.exists(gds_layout_path):
                raise ValueError(f'gds_layout_path does not exist: {gds_layout_path}')
            with open_temp(prefix='copy', dir=run_dir, delete=True) as f:
                copy_log_file = f.name
            copy_cmd = ['cp', gds_layout_path, os.path.abspath(lay_file)]
            flow_list.append((copy_cmd, copy_log_file, None, None, _all_pass))
        cmd, log, env, cwd = self.setup_export_schematic(lib_name, cell_name, sch_file, sch_view,
                                                         None)
        flow_list.append((cmd, log, env, cwd, _all_pass))
        if self.rcx_mode == 'starrc' or self.rcx_mode == 'qrc':
            # check if LVS was run prior to run_rcx
            sp_file = os.path.join(run_dir, cell_name + '.sp')
            if not os.path.isfile(sp_file):
                raise Exception('Did you forget to do run_lvs first?')
            # now query the LVS file using query.input
            with open_temp(prefix='queryLog', dir=run_dir, delete=False) as queryf:
                query_file = queryf.name
            if query_input is None:
                query_input = os.path.join(run_dir, 'query.input')
            cmd = ['calibre', '-query_input', query_input,
                   '-query', os.path.join(run_dir, 'svdb'), cell_name]
            # validator keeps only the boolean; the log name is dropped
            flow_list.append((cmd, query_file, None, run_dir,
                              lambda rc, lf: query_passed(rc, lf)[0]))
            if self.rcx_mode == 'starrc':
                # generate new cmd for StarXtract
                cmd_content, result = self.modify_starrc_cmd(run_dir, lib_name, cell_name,
                                                             rcx_params_actual, query_input, sch_file)
                # save cmd for StarXtract
                with open_temp(dir=run_dir, delete=False) as cmd_file:
                    cmd_fname = cmd_file.name
                    cmd_file.write(cmd_content)
                cmd = ['StarXtract', cmd_fname]
            else:
                # generate new cmd for QRC
                cmd_content, result = self.modify_qrc_cmd(run_dir, cell_name, rcx_params_actual, sch_file)
                # save cmd for QRC
                with open_temp(dir=run_dir, delete=False) as cmd_file:
                    cmd_fname = cmd_file.name
                    cmd_file.write(cmd_content)
                cmd = ['qrc', '-64', '-cmd', cmd_fname]
        elif self.rcx_mode == 'pex':
            # generate new runset
            runset_content, result = self.modify_pex_runset(run_dir, lib_name, cell_name, lay_view,
                                                            lay_file, sch_file, rcx_params_actual)
            # save runset
            with open_temp(dir=run_dir, delete=False) as runset_file:
                runset_fname = runset_file.name
                runset_file.write(runset_content)
            # remove old svdb directory
            svdb_dir = os.path.join(run_dir, 'svdb')
            if os.path.exists(svdb_dir) and os.path.isdir(svdb_dir):
                shutil.rmtree(svdb_dir)
            cmd = ['calibre', '-gui', '-pex', '-runset', runset_fname, '-batch']
        else:
            # xact mode: a hierarchical LVS run must precede the -xact extraction
            # generate new runset
            runset_content, result = self.modify_xact_rules(run_dir, cell_name, lay_file, sch_file,
                                                            rcx_params_actual)
            # save runset
            with open_temp(dir=run_dir, delete=False) as runset_file:
                runset_fname = runset_file.name
                runset_file.write(runset_content)
            with open_temp(prefix='lvsLog', dir=run_dir, delete=False) as lvsf:
                lvs_file = lvsf.name
            num_cores = rcx_params_actual.get('num_cores', 2)
            cmd = ['calibre', '-lvs', '-hier', '-turbo', '%d' % num_cores, '-nowait', runset_fname]
            flow_list.append(
                (cmd, lvs_file, None, run_dir, lambda rc, lf: lvs_passed(rc, lf)[0]))
            extract_mode = rcx_params_actual.get('extract_mode', 'rcc')
            cmd = ['calibre', '-xact', '-3d', '-%s' % extract_mode, '-turbo', '%d' % num_cores,
                   runset_fname]

        # Validator for the final extraction step.  `result` and `log_file`
        # are captured from the enclosing scope; every mode branch above
        # assigns `result` (the expected output netlist path).
        # noinspection PyUnusedLocal
        def rcx_passed(retcode, log_fname):
            if not os.path.isfile(result):
                return None, log_fname
            # qrc/pex additionally require a success marker in the log;
            # starrc/xact pass on output-file existence alone
            if self.rcx_mode in ['qrc', 'pex']:
                if self.rcx_mode == 'qrc':
                    test_str = ' terminated normally *****'
                else:
                    test_str = ' Errors = 0'
                LogCheck = subprocess.Popen(['grep', '-i', test_str, log_fname], stdout=subprocess.PIPE,
                                            stderr=subprocess.STDOUT)
                stdout, stderr = LogCheck.communicate()
                if stdout.decode() == '':
                    return None, log_fname
            return result, log_fname

        flow_list.append((cmd, log_file, None, run_dir, rcx_passed))
        return flow_list

    @classmethod
    def _get_lay_sch_files(cls, run_dir):
        # Conventional file names for the exported layout gds and schematic
        # netlist inside a run directory.
        lay_file = os.path.join(run_dir, 'layout.gds')
        sch_file = os.path.join(run_dir, 'schematic.net')
        return lay_file, sch_file

    def modify_lvs_runset(self, run_dir, lib_name, cell_name, lay_view, gds_file, netlist,
                          lvs_params):
        # type: (str, str, str, str, str, str, Dict[str, Any]) -> str
        """Modify the given LVS runset file.

        Parameters
        ----------
        run_dir : str
            the run directory.
        lib_name : str
            the library name.
        cell_name : str
            the cell name.
        lay_view : str
            the layout view.
        gds_file : str
            the layout gds file name.
        netlist : str
            the schematic netlist file.
        lvs_params : Dict[str, Any]
            override LVS parameters.

        Returns
        -------
        content : str
            the new runset content.
        """
        # convert runset content to dictionary; runset lines look like
        # "*key: value"
        lvs_options = {}
        for line in readlines_iter(self.lvs_runset):
            key, val = line.split(':', 1)
            key = key.strip('*')
            lvs_options[key] = val.strip()
        # override parameters
        lvs_options['lvsRunDir'] = run_dir
        lvs_options['lvsLayoutPaths'] = gds_file
        lvs_options['lvsLayoutPrimary'] = cell_name
        lvs_options['lvsLayoutLibrary'] = lib_name
        lvs_options['lvsLayoutView'] = lay_view
        lvs_options['lvsSourcePath'] = netlist
        lvs_options['lvsSourcePrimary'] = cell_name
        lvs_options['lvsSourceLibrary'] = lib_name
        lvs_options['lvsSpiceFile'] = os.path.join(run_dir, '%s.sp' % cell_name)
        lvs_options['lvsERCDatabase'] = '%s.erc.results' % cell_name
        lvs_options['lvsERCSummaryFile'] = '%s.erc.summary' % cell_name
        lvs_options['lvsReportFile'] = '%s.lvs.report' % cell_name
        lvs_options['lvsMaskDBFile'] = '%s.maskdb' % cell_name
        lvs_options['cmnFDILayoutLibrary'] = lib_name
        lvs_options['cmnFDILayoutView'] = lay_view
        lvs_options['cmnFDIDEFLayoutPath'] = '%s.def' % cell_name
        # user overrides take precedence over everything above
        lvs_options.update(lvs_params)
        return ''.join(('*%s: %s\n' % (key, val) for key, val in lvs_options.items()))

    def modify_pex_runset(self, run_dir, lib_name, cell_name, lay_view, gds_file, netlist,
                          rcx_params):
        # type: (str, str ,str, str, str, str, Dict[str, Any]) -> Tuple[str, str]
        """Modify the given RCX runset file.

        Parameters
        ----------
        run_dir : str
            the run directory.
        lib_name : str
            the library name.
        cell_name : str
            the cell name.
        lay_view : str
            the layout view.
        gds_file : str
            the layout gds file name.
        netlist : str
            the schematic netlist file.
        rcx_params : Dict[str, Any]
            override RCX parameters.

        Returns
        -------
        content : str
            the new runset content.
        output_name : str
            the extracted netlist file.
        """
        # convert runset content to dictionary
        rcx_options = {}
        for line in readlines_iter(self.rcx_runset):
            key, val = line.split(':', 1)
            key = key.strip('*')
            rcx_options[key] = val.strip()
        output_name = '%s.pex.netlist' % cell_name
        # override parameters
        rcx_options['pexRunDir'] = run_dir
        rcx_options['pexLayoutPaths'] = gds_file
        rcx_options['pexLayoutPrimary'] = cell_name
        rcx_options['pexLayoutLibrary'] = lib_name
        rcx_options['pexLayoutView'] = lay_view
        rcx_options['pexSourcePath'] = netlist
        rcx_options['pexSourcePrimary'] = cell_name
        rcx_options['pexSourceLibrary'] = lib_name
        rcx_options['pexReportFile'] = '%s.lvs.report' % cell_name
        rcx_options['pexPexNetlistFile'] = output_name
        rcx_options['pexPexReportFile'] = '%s.pex.report' % cell_name
        rcx_options['pexMaskDBFile'] = '%s.maskdb' % cell_name
        rcx_options['cmnFDILayoutLibrary'] = lib_name
        rcx_options['cmnFDILayoutView'] = lay_view
        rcx_options['cmnFDIDEFLayoutPath'] = '%s.def' % cell_name
        # pop these so rcx_params.update below cannot re-introduce the raw keys
        rcx_options['pexPexNetlistType'] = rcx_params.pop('netlist_type', 'RCC')
        rcx_options['pexPexGroundNameValue'] = rcx_params.pop('ground_name_value', 'VSS')
        rcx_options.update(rcx_params)
        content = ''.join(('*%s: %s\n' % (key, val) for key, val in rcx_options.items()))
        return content, os.path.join(run_dir, output_name)

    def modify_xact_rules(self, run_dir, cell_name, gds_file, netlist, xact_params):
        # type: (str, str, str, str, Dict[str, Any]) -> Tuple[str, str]
        """Modify the given XACT runset file.

        Parameters
        ----------
        run_dir : str
            the run directory.
        cell_name : str
            the cell name.
        gds_file : str
            the layout gds file name.
        netlist : str
            the schematic netlist file.
        xact_params : Dict[str, Any]
            additional XACT parameters.

        Returns
        -------
        content : str
            the new runset content.
        output_name : str
            the extracted netlist file.
        """
        substrate_name = xact_params.get('substrate_name', 'VSS')
        power_names = xact_params.get('power_names', 'VDD')
        ground_names = xact_params.get('ground_names', 'VSS')
        output_name = '%s.pex.netlist' % cell_name
        # the xact rules file is a Jinja template rendered with these values
        content = self.render_string_template(read_file(self.xact_rules),
                                              dict(
                                                  cell_name=cell_name,
                                                  gds_file=gds_file,
                                                  netlist=netlist,
                                                  substrate_name=substrate_name,
                                                  power_names=power_names,
                                                  ground_names=ground_names,
                                                  output_name=output_name,
                                              ))
        return content, os.path.join(run_dir, output_name)

    def modify_starrc_cmd(self, run_dir, lib_name, cell_name, starrc_params, query_input, sch_file):
        # type: (str, str, str, Dict[str, Any], str, str) -> Tuple[str, str]
        """Modify the cmd file.

        Parameters
        ----------
        run_dir : str
            the run directory.
        lib_name : str
            the library name.
        cell_name : str
            the cell name.
        starrc_params : Dict[str, Any]
            override StarRC parameters.
        query_input : str
            the path to query.input file
        sch_file : str
            the schematic netlist

        Returns
        -------
        starrc_cmd : str
            the new StarXtract cmd file.
        output_name : str
            the extracted netlist file.
        """
        output_name = '%s.spf' % cell_name
        # locate cds.lib: prefer $CDSLIBPATH, fall back to the cwd
        if 'CDSLIBPATH' in os.environ:
            cds_lib_path = os.path.abspath(os.path.join(os.environ['CDSLIBPATH'], 'cds.lib'))
        else:
            cds_lib_path = os.path.abspath('./cds.lib')
        # self.rcx_runset is used as a Jinja template for the cmd file
        content = self.render_string_template(read_file(self.rcx_runset),
                                              dict(
                                                  cell_name=cell_name,
                                                  query_input=query_input,
                                                  extract_type=starrc_params['extract'].get('type', 'RCc'),
                                                  netlist_format=starrc_params.get('netlist_format',
                                                                                   'SPF'),
                                                  sch_file=sch_file,
                                                  cds_lib=cds_lib_path,
                                                  lib_name=lib_name,
                                                  run_dir=run_dir,
                                                  skew=starrc_params.get('skew', 'tt'),
                                              ))
        return content, os.path.join(run_dir, output_name)

    def modify_qrc_cmd(self, run_dir, cell_name, qrc_params, sch_file):
        # type: (str, str, Dict[str, Any], str) -> Tuple[str, str]
        """Modify the cmd file.

        Parameters
        ----------
        run_dir : str
            the run directory.
        cell_name : str
            the cell name.
        qrc_params : Dict[str, Any]
            override QRC parameters.
        sch_file : str
            the schematic netlist

        Returns
        -------
        qrc_cmd : str
            the new QRC cmd file.
        output_name : str
            the extracted netlist file.
        """
        output_name = '%s.spf' % cell_name
        # locate cds.lib: prefer $CDSLIBPATH, fall back to the cwd
        if 'CDSLIBPATH' in os.environ:
            cds_lib_path = os.path.abspath(os.path.join(os.environ['CDSLIBPATH'], 'cds.lib'))
        else:
            cds_lib_path = os.path.abspath('./cds.lib')
        # self.rcx_runset is used as a Jinja template for the cmd file
        content = self.render_string_template(read_file(self.rcx_runset),
                                              dict(
                                                  cell_name=cell_name,
                                                  netlist_format=qrc_params.get('netlist_format',
                                                                                'spf'),
                                                  extract_type=qrc_params['extract'].get('type', 'rc_coupled'),
                                                  sch_file=sch_file,
                                                  cds_lib=cds_lib_path,
                                                  skew=qrc_params.get('skew', 'tt'),
                                                  temp=qrc_params.get('temp', '25'),
                                              ))
        return content, os.path.join(run_dir, output_name)
================================================
FILE: bag/verification/icv.py
================================================
# -*- coding: utf-8 -*-
"""This module implements LVS/RCX using ICV and stream out from Virtuoso.
"""
from typing import TYPE_CHECKING, Optional, List, Tuple, Dict, Any, Sequence
import os
import subprocess
from .virtuoso import VirtuosoChecker
from ..io import read_file, open_temp
if TYPE_CHECKING:
from .base import FlowInfo
# noinspection PyUnusedLocal
def _all_pass(retcode, log_file):
return True
# noinspection PyUnusedLocal
def lvs_passed(retcode, log_file):
    # type: (int, str) -> Tuple[bool, str]
    """Check if LVS passed

    Parameters
    ----------
    retcode : int
        return code of the LVS process.
    log_file : str
        log file name.

    Returns
    -------
    success : bool
        True if LVS passed.
    log_file : str
        the log file name.
    """
    # Check the log's existence *before* touching it: the original appended
    # first (open(..., 'a') creates the file), which made this guard dead code.
    if not os.path.isfile(log_file):
        return False, ''
    dirname = os.path.dirname(log_file)
    # run directory is named after the cell, so the error report is
    # <run_dir>/<cell>.LVS_ERRORS
    cell_name = os.path.basename(dirname)
    lvs_error_file = os.path.join(dirname, cell_name + '.LVS_ERRORS')
    # append error report at the end of log file; the report may be absent if
    # ICV aborted before comparison, so guard instead of crashing
    if os.path.isfile(lvs_error_file):
        with open(log_file, 'a') as logf:
            with open(lvs_error_file, 'r') as errf:
                for line in errf:
                    logf.write(line)
    # Scan the log directly instead of spawning `grep`: no subprocess per
    # check, literal match, case-insensitive like grep -i.
    test_str = 'Final comparison result:PASS'.lower()
    with open(log_file, 'r', errors='replace') as logf:
        for line in logf:
            if test_str in line.lower():
                return True, log_file
    return False, log_file
class ICV(VirtuosoChecker):
    """A subclass of VirtuosoChecker that uses ICV for verification.

    Runs Synopsys IC Validator (ICV) for LVS, and ICV followed by StarRC
    (StarXtract) for parasitic extraction when rcx_mode is 'starrc'.

    Parameters
    ----------
    tmp_dir : string
        temporary directory to save files in.
    lvs_run_dir : str
        the LVS run directory.
    lvs_runset : str
        the LVS runset filename.
    rcx_run_dir : str
        the RCX run directory.
    rcx_runset : str
        the RCX runset filename.
    source_added_file : str
        the source.added file location. Environment variable is supported.
        Default value is '$DK/Calibre/lvs/source.added'.
    rcx_mode : str
        the RC extraction mode. Defaults to 'pex'.
    **kwargs :
        optional keyword arguments: max_workers, cancel_timeout_ms
        (milliseconds), rcx_params, lvs_params, rcx_link_files,
        lvs_link_files.
    """
    def __init__(self, tmp_dir, lvs_run_dir, lvs_runset, rcx_run_dir, rcx_runset,
                 source_added_file='$DK/Calibre/lvs/source.added', rcx_mode='pex',
                 **kwargs):
        max_workers = kwargs.get('max_workers', None)
        cancel_timeout = kwargs.get('cancel_timeout_ms', None)
        rcx_params = kwargs.get('rcx_params', {})
        lvs_params = kwargs.get('lvs_params', {})
        rcx_link_files = kwargs.get('rcx_link_files', None)
        lvs_link_files = kwargs.get('lvs_link_files', None)
        if cancel_timeout is not None:
            # convert cancel timeout from milliseconds to seconds.
            cancel_timeout /= 1e3
        VirtuosoChecker.__init__(self, tmp_dir, max_workers, cancel_timeout, source_added_file)
        self.default_rcx_params = rcx_params
        self.default_lvs_params = lvs_params
        self.lvs_run_dir = os.path.abspath(lvs_run_dir)
        self.lvs_runset = lvs_runset
        self.lvs_link_files = lvs_link_files
        self.rcx_run_dir = os.path.abspath(rcx_run_dir)
        self.rcx_runset = rcx_runset
        self.rcx_link_files = rcx_link_files
        self.rcx_mode = rcx_mode
        # output format of the extracted netlist; updated as a side effect
        # of modify_starrc_cmd().
        self.netlist_format = 'netlist'
    def get_rcx_netlists(self, lib_name, cell_name):
        # type: (str, str) -> List[str]
        """Returns a list of generated extraction netlist file names.

        Parameters
        ----------
        lib_name : str
            library name.
        cell_name : str
            cell_name

        Returns
        -------
        netlists : List[str]
            a list of generated extraction netlist file names. The first index is the main netlist.
        """
        # StarRC writes a <cell>.spf netlist file; other modes/formats
        # generate cellviews directly and so return no file names.
        if self.rcx_mode == 'starrc' and self.netlist_format == 'netlist':
            return ['%s.spf' % cell_name]
        else:
            return []
    def setup_lvs_flow(self, lib_name, cell_name, sch_view='schematic', lay_view='layout',
                       params=None, **kwargs):
        # type: (str, str, str, str, Optional[Dict[str, Any]], Any) -> Sequence[FlowInfo]
        """Build the subprocess flow that runs ICV LVS.

        Exports (or copies) the layout GDS and exports the schematic
        netlist, then runs 'icv' on the pair.  Each flow entry is
        (cmd, log, env, cwd, done_callback).
        """
        run_dir = os.path.join(self.lvs_run_dir, lib_name, cell_name)
        os.makedirs(run_dir, exist_ok=True)
        lay_file, sch_file = self._get_lay_sch_files(run_dir)
        # add schematic/layout export to flow
        flow_list = []
        # Check if gds layout is provided
        gds_layout_path = kwargs.pop('gds_layout_path', None)
        # If not provided the gds layout, need to export layout
        if not gds_layout_path:
            cmd, log, env, cwd = self.setup_export_layout(lib_name, cell_name, lay_file, lay_view, None)
            flow_list.append((cmd, log, env, cwd, _all_pass))
        # If provided gds layout, do not export layout, just copy gds
        else:
            if not os.path.exists(gds_layout_path):
                raise ValueError(f'gds_layout_path does not exist: {gds_layout_path}')
            # open_temp is only used here to reserve a unique log file name.
            with open_temp(prefix='copy', dir=run_dir, delete=True) as f:
                copy_log_file = f.name
            copy_cmd = ['cp', gds_layout_path, os.path.abspath(lay_file)]
            flow_list.append((copy_cmd, copy_log_file, None, None, _all_pass))
        cmd, log, env, cwd = self.setup_export_schematic(lib_name, cell_name, sch_file, sch_view, None)
        flow_list.append((cmd, log, env, cwd, _all_pass))
        lvs_params_actual = self.default_lvs_params.copy()
        if params is not None:
            lvs_params_actual.update(params)
        with open_temp(prefix='lvsLog', dir=run_dir, delete=False) as logf:
            log_file = logf.name
        # cmd_options
        cmd_options = lvs_params_actual['cmd_options']
        cmd = ['icv'] + cmd_options + ['-i', lay_file, '-s', sch_file, '-sf', 'SPICE', '-f', 'GDSII', '-c', cell_name,
                                       '-vue', '-I']
        # append ICV rule/link files to the command line.
        for f in self.lvs_link_files:
            cmd.append(f)
        flow_list.append((cmd, log_file, None, run_dir, lvs_passed))
        return flow_list
    def setup_rcx_flow(self, lib_name, cell_name, sch_view='schematic', lay_view='layout',
                       params=None, **kwargs):
        # type: (str, str, str, str, Optional[Dict[str, Any]], Any) -> Sequence[FlowInfo]
        """Build the subprocess flow that runs extraction.

        For rcx_mode == 'starrc' this runs extraction LVS with ICV and
        then StarXtract.  NOTE(review): for any other rcx_mode the 'cmd'
        variable is never assigned, so the final flow_list.append() below
        raises NameError -- confirm which modes are intended to be
        supported.
        """
        # update default RCX parameters.
        rcx_params_actual = self.default_rcx_params.copy()
        if params is not None:
            rcx_params_actual.update(params)
        run_dir = os.path.join(self.rcx_run_dir, lib_name, cell_name)
        os.makedirs(run_dir, exist_ok=True)
        lay_file, sch_file = self._get_lay_sch_files(run_dir)
        with open_temp(prefix='rcxLog', dir=run_dir, delete=False) as logf:
            log_file = logf.name
        flow_list = []
        # Check if gds layout is provided
        gds_layout_path = kwargs.pop('gds_layout_path', None)
        # If not provided the gds layout, need to export layout
        if not gds_layout_path:
            cmd, log, env, cwd = self.setup_export_layout(lib_name, cell_name, lay_file, lay_view, None)
            flow_list.append((cmd, log, env, cwd, _all_pass))
        # If provided gds layout, do not export layout, just copy gds
        else:
            if not os.path.exists(gds_layout_path):
                raise ValueError(f'gds_layout_path does not exist: {gds_layout_path}')
            # open_temp is only used here to reserve a unique log file name.
            with open_temp(prefix='copy', dir=run_dir, delete=True) as f:
                copy_log_file = f.name
            copy_cmd = ['cp', gds_layout_path, os.path.abspath(lay_file)]
            flow_list.append((copy_cmd, copy_log_file, None, None, _all_pass))
        cmd, log, env, cwd = self.setup_export_schematic(lib_name, cell_name, sch_file, sch_view, None)
        flow_list.append((cmd, log, env, cwd, _all_pass))
        if self.rcx_mode == 'starrc':
            # first: run Extraction LVS
            cmd_options = rcx_params_actual['cmd_options']
            cmd = ['icv'] + cmd_options + ['-i', lay_file, '-s', sch_file, '-sf', 'SPICE', '-f', 'GDSII',
                                           '-c', cell_name, '-I']
            for f in self.lvs_link_files:
                cmd.append(f)
            # hack the environment variables to make sure $PWD is the same as current working directory
            env_copy = os.environ.copy()
            env_copy['PWD'] = run_dir
            flow_list.append((cmd, log_file, env_copy, run_dir, lvs_passed))
            # second: setup StarXtract
            # make symlinks
            if self.rcx_link_files:
                for source_file in self.rcx_link_files:
                    targ_file = os.path.join(run_dir, os.path.basename(source_file))
                    if not os.path.exists(targ_file):
                        os.symlink(source_file, targ_file)
            # generate new cmd for StarXtract
            cmd_content, result = self.modify_starrc_cmd(run_dir, lib_name, cell_name,
                                                         rcx_params_actual, sch_file)
            # save cmd for StarXtract
            with open_temp(dir=run_dir, delete=False) as cmd_file:
                cmd_fname = cmd_file.name
                cmd_file.write(cmd_content)
            cmd = ['StarXtract', '-clean', cmd_fname]
        else:
            # NOTE(review): unsupported mode falls through with 'cmd'
            # unassigned; the append below will then raise NameError.
            pass
        # noinspection PyUnusedLocal
        def rcx_passed(retcode, log_fname):
            """Flow callback: check the StarRC RESULTS file for a clean run.

            Appends the RESULTS file to the log, then greps the log for
            the clean-run marker.  Returns (netlist_or_None, log_fname).
            """
            dirname = os.path.dirname(log_fname)
            cell_name = os.path.basename(dirname)
            results_file = os.path.join(dirname, cell_name + '.RESULTS')
            # append error file at the end of log file
            with open(log_fname, 'a') as logf:
                with open(results_file, 'r') as errf:
                    for line in errf:
                        logf.write(line)
            if not os.path.isfile(log_fname):
                return None, ''
            test_str = 'DRC and Extraction Results: CLEAN'
            LogCheck = subprocess.Popen(['grep', '-i', test_str, log_fname], stdout=subprocess.PIPE,
                                        stderr=subprocess.STDOUT)
            stdout, stderr = LogCheck.communicate()
            if stdout.decode() != '':
                if self.netlist_format == 'netlist':
                    return results_file, log_fname
                else:
                    return [], log_fname
            else:
                return None, log_fname
        flow_list.append((cmd, log_file, None, run_dir, rcx_passed))
        return flow_list
    @classmethod
    def _get_lay_sch_files(cls, run_dir):
        # type: (str) -> Tuple[str, str]
        """Return the (layout GDS, schematic netlist) file paths in run_dir."""
        lay_file = os.path.join(run_dir, 'layout.gds')
        sch_file = os.path.join(run_dir, 'schematic.net')
        return lay_file, sch_file
    def modify_starrc_cmd(self, run_dir, lib_name, cell_name, starrc_params, sch_file):
        # type: (str, str, str, Dict[str, Any], str) -> Tuple[str, str]
        """Modify the cmd file.

        Renders the StarXtract command-file template with the given
        parameters.  Also updates self.netlist_format as a side effect.

        Parameters
        ----------
        run_dir : str
            the run directory.
        lib_name : str
            the library name.
        cell_name : str
            the cell name.
        starrc_params : Dict[str, Any]
            override StarRC parameters.
        sch_file : str
            the schematic netlist
        Returns
        -------
        starrc_cmd : str
            the new StarXtract cmd file.
        output_name : str
            the extracted netlist file.
        """
        output_name = '%s.spf' % cell_name
        if 'CDSLIBPATH' in os.environ:
            cds_lib_path = os.path.abspath(os.path.join(os.environ['CDSLIBPATH'], 'cds.lib'))
        else:
            cds_lib_path = os.path.abspath('./cds.lib')
        content = self.render_string_template(read_file(self.rcx_runset),
                                              dict(
                                                  cell_name=cell_name,
                                                  extract_type=starrc_params['extract'].get('type'),
                                                  netlist_format=starrc_params.get('netlist_format',
                                                                                   'SPF'),
                                                  sch_file=sch_file,
                                                  cds_lib=cds_lib_path,
                                                  lib_name=lib_name,
                                                  run_dir=run_dir,
                                              ))
        # NOTE(review): the template default above is 'SPF' while the
        # stored default here is 'netlist' -- confirm the intended default.
        self.netlist_format = starrc_params.get('netlist_format', 'netlist')
        return content, os.path.join(run_dir, output_name)
================================================
FILE: bag/verification/pvs.py
================================================
# -*- coding: utf-8 -*-
"""This module implements LVS/RCX using PVS/QRC and stream out from Virtuoso.
"""
from typing import TYPE_CHECKING, Optional, List, Dict, Any, Sequence, Tuple
import os
import subprocess
import time
from ..io import read_yaml, open_temp, readlines_iter, fix_string
from .virtuoso import VirtuosoChecker
if TYPE_CHECKING:
from .base import FlowInfo
# noinspection PyUnusedLocal
def _all_pass(retcode, log_file):
    """Flow callback that accepts any outcome.

    Reports success unconditionally, regardless of the subprocess
    return code or the log file contents.
    """
    success = True
    return success
# noinspection PyUnusedLocal
def lvs_passed(retcode, log_file):
    # type: (int, str) -> Tuple[bool, str]
    """Check if LVS passed.

    Parameters
    ----------
    retcode : int
        return code of the LVS process.
    log_file : str
        log file name.

    Returns
    -------
    success : bool
        True if LVS passed.
    log_file : str
        the log file name, or the empty string if the log is missing.
    """
    if not os.path.isfile(log_file):
        return False, ''
    # PVS writes this line in the LVS report when the netlists match.
    # Scan the log in Python instead of spawning "grep -i": no external
    # process dependency, same case-insensitive substring semantics.
    test_str = '# run result : match'
    with open(log_file, 'r') as f:
        for line in f:
            if test_str in line.lower():
                return True, log_file
    return False, log_file
# noinspection PyUnusedLocal
def rcx_passed(retcode, log_file):
    # type: (int, str) -> Tuple[Optional[str], str]
    """Check if RCX passed.

    Parameters
    ----------
    retcode : int
        return code of the RCX process.
    log_file : str
        log file name.

    Returns
    -------
    netlist : str
        netlist file name.  QRC generates schematic cellviews directly,
        so this is '' on success and None on failure.
    log_file : str
        the log file name ('' when the run did not succeed).
    """
    if not os.path.isfile(log_file):
        return None, ''
    # QRC prints "... terminated normally *****" on a clean exit.
    # Scan the log in Python instead of spawning "grep -i": no external
    # process dependency, same case-insensitive substring semantics.
    test_str = ' terminated normally *****'
    with open(log_file, 'r') as f:
        for line in f:
            if test_str in line.lower():
                return '', log_file
    return None, ''
class PVS(VirtuosoChecker):
    """A subclass of VirtuosoChecker that uses PVS/QRC for verification.

    Parameters
    ----------
    tmp_dir : string
        temporary directory to save files in.
    lvs_run_dir : string
        the LVS run directory.
    lvs_runset : string
        the LVS runset filename.
    lvs_rule_file : string
        the LVS rule filename.
    rcx_runset : string
        the RCX runset filename.
    source_added_file : string
        the source.added file location. Environment variable is supported.
        Default value is '$DK/Calibre/lvs/source.added'.
    """
    def __init__(self, tmp_dir, lvs_run_dir, lvs_runset, lvs_rule_file, rcx_runset,
                 source_added_file='$DK/Calibre/lvs/source.added', **kwargs):
        max_workers = kwargs.get('max_workers', None)
        cancel_timeout = kwargs.get('cancel_timeout_ms', None)
        if cancel_timeout is not None:
            # convert cancel timeout from milliseconds to seconds.
            cancel_timeout /= 1e3
        VirtuosoChecker.__init__(self, tmp_dir, max_workers, cancel_timeout, source_added_file)
        self.default_rcx_params = kwargs.get('rcx_params', {})
        self.default_lvs_params = kwargs.get('lvs_params', {})
        self.lvs_run_dir = os.path.abspath(lvs_run_dir)
        self.lvs_runset = lvs_runset
        self.lvs_rule_file = lvs_rule_file
        self.rcx_runset = rcx_runset
    def get_rcx_netlists(self, lib_name, cell_name):
        # type: (str, str) -> List[str]
        """Return the generated extraction netlist names (always empty)."""
        # PVS generate schematic cellviews directly.
        return []
    def setup_lvs_flow(self, lib_name, cell_name, sch_view='schematic', lay_view='layout',
                       params=None, **kwargs):
        # type: (str, str, str, str, Optional[Dict[str, Any]], Any) -> Sequence[FlowInfo]
        """Build the subprocess flow that runs PVS LVS.

        Exports (or copies) the layout GDS and exports the schematic
        netlist, then runs 'pvs' with a runset generated from
        self.lvs_runset.  Each flow entry is (cmd, log, env, cwd,
        done_callback).
        """
        run_dir = os.path.join(self.lvs_run_dir, lib_name, cell_name)
        os.makedirs(run_dir, exist_ok=True)
        lay_file = os.path.join(run_dir, 'layout.gds')
        sch_file = os.path.join(run_dir, 'schematic.net')
        # add schematic/layout export to flow
        flow_list = []
        # Check if gds layout is provided
        gds_layout_path = kwargs.pop('gds_layout_path', None)
        # If not provided the gds layout, need to export layout
        if not gds_layout_path:
            cmd, log, env, cwd = self.setup_export_layout(lib_name, cell_name, lay_file, lay_view, None)
            flow_list.append((cmd, log, env, cwd, _all_pass))
        # If provided gds layout, do not export layout, just copy gds
        else:
            if not os.path.exists(gds_layout_path):
                raise ValueError(f'gds_layout_path does not exist: {gds_layout_path}')
            # open_temp is only used here to reserve a unique log file name.
            with open_temp(prefix='copy', dir=run_dir, delete=True) as f:
                copy_log_file = f.name
            copy_cmd = ['cp', gds_layout_path, os.path.abspath(lay_file)]
            flow_list.append((copy_cmd, copy_log_file, None, None, _all_pass))
        cmd, log, env, cwd = self.setup_export_schematic(lib_name, cell_name, sch_file, sch_view,
                                                         None)
        flow_list.append((cmd, log, env, cwd, _all_pass))
        lvs_params_actual = self.default_lvs_params.copy()
        if params is not None:
            lvs_params_actual.update(params)
        with open_temp(prefix='lvsLog', dir=run_dir, delete=False) as logf:
            log_file = logf.name
        # generate new runset
        runset_content = self.modify_lvs_runset(run_dir, cell_name, lvs_params_actual)
        # save runset
        with open_temp(dir=run_dir, delete=False) as runset_file:
            runset_fname = runset_file.name
            runset_file.write(runset_content)
        num_cores = 4
        cmd = ['pvs', '-perc', '-lvs', '-qrc_data', '-control', runset_fname, '-dp', str(num_cores),
               '-gds', lay_file, '-layout_top_cell', cell_name,
               '-source_cdl', sch_file, '-source_top_cell', cell_name,
               self.lvs_rule_file,
               ]
        flow_list.append((cmd, log_file, None, run_dir, lvs_passed))
        return flow_list
    def setup_rcx_flow(self, lib_name, cell_name, sch_view='schematic', lay_view='layout',
                       params=None, **kwargs):
        # type: (str, str, str, str, Optional[Dict[str, Any]], Any) -> Sequence[FlowInfo]
        """Build the subprocess flow that runs QRC extraction.

        The run directory is under lvs_run_dir because QRC reads the
        svdb database generated by the preceding PVS LVS run (see
        modify_rcx_runset).
        """
        # update default RCX parameters.
        rcx_params_actual = self.default_rcx_params.copy()
        if params is not None:
            rcx_params_actual.update(params)
        run_dir = os.path.join(self.lvs_run_dir, lib_name, cell_name)
        os.makedirs(run_dir, exist_ok=True)
        with open_temp(prefix='rcxLog', dir=run_dir, delete=False) as logf:
            log_file = logf.name
        # generate new runset
        runset_content = self.modify_rcx_runset(run_dir, lib_name, cell_name, lay_view,
                                                rcx_params_actual)
        # save runset
        with open_temp(dir=run_dir, delete=False) as runset_file:
            runset_fname = runset_file.name
            runset_file.write(runset_content)
        cmd = ['qrc', '-cmd', runset_fname]
        # NOTE: qrc needs to be run in the current working directory (virtuoso directory),
        # because it needs to access cds.lib
        return [(cmd, log_file, None, os.environ['BAG_WORK_DIR'], rcx_passed)]
    def modify_lvs_runset(self, run_dir, cell_name, lvs_params):
        # type: (str, str, Dict[str, Any]) -> str
        """Modify the given LVS runset file.

        Parameters
        ----------
        run_dir : str
            the run directory.
        cell_name : str
            the cell name.
        lvs_params : Dict[str, Any]
            override LVS parameters.

        Returns
        -------
        content : str
            the new runset content.
        """
        # convert runset content to dictionary
        lvs_options = {}
        for line in readlines_iter(self.lvs_runset):
            key, val = line.split(' ', 1)
            # remove semicolons
            val = val.strip().rstrip(';')
            # a key may appear multiple times; collect every value.
            if key in lvs_options:
                lvs_options[key].append(val)
            else:
                lvs_options[key] = [val]
        # get results_db file name
        results_db = os.path.join(run_dir, '%s.erc_errors.ascii' % cell_name)
        # override parameters
        lvs_options['lvs_report_file'] = ['"%s.rep"' % cell_name]
        lvs_options['report_summary'] = ['-erc "%s.sum" -replace' % cell_name]
        lvs_options['results_db'] = ['-erc "%s" -ascii' % results_db]
        lvs_options['mask_svdb_dir'] = ['"%s"' % os.path.join(run_dir, 'svdb')]
        lvs_options.update(lvs_params)
        # serialize back to "key value;" lines.
        content_list = []
        for key, val_list in lvs_options.items():
            for v in val_list:
                content_list.append('%s %s;\n' % (key, v))
        return ''.join(content_list)
    def modify_rcx_runset(self, run_dir, lib_name, cell_name, lay_view, rcx_params):
        # type: (str, str, str, str, Dict[str, Any]) -> str
        """Modify the given QRC options.

        Parameters
        ----------
        run_dir : str
            the run directory.
        lib_name : str
            the library name.
        cell_name : str
            the cell name.
        lay_view : str
            the layout view.
        rcx_params : Dict[str, Any]
            override RCX parameters.

        Returns
        -------
        content : str
            the new runset content.
        """
        data_dir = os.path.join(run_dir, 'svdb')
        # wait 10 seconds to see if not finding directory is just a network drive problem
        query_timeout = 10.0
        tstart = time.time()
        elapsed = 0.0
        while not os.path.isdir(data_dir) and elapsed < query_timeout:
            time.sleep(0.1)
            elapsed = time.time() - tstart
        if not os.path.isdir(data_dir):
            raise ValueError('cannot find directory %s. Did you run PVS first?' % data_dir)
        # load default rcx options
        rcx_options = read_yaml(self.rcx_runset)
        # setup inputs/outputs
        rcx_options['input_db']['design_cell_name'] = '{} {} {}'.format(cell_name, lay_view,
                                                                       lib_name)
        rcx_options['input_db']['run_name'] = cell_name
        rcx_options['input_db']['directory_name'] = data_dir
        rcx_options['output_db']['cdl_out_map_directory'] = run_dir
        rcx_options['output_setup']['directory_name'] = data_dir
        rcx_options['output_setup']['temporary_directory_name'] = cell_name
        # override parameters
        for key, val in rcx_options.items():
            if key in rcx_params:
                val.update(rcx_params[key])
        # convert dictionary to QRC command file format.
        content_list = []
        for key, options in rcx_options.items():
            content_list.append('%s \\' % key)
            for k, v in options.items():
                v = fix_string(v)
                if isinstance(v, str):
                    # add quotes around string
                    v = '"{}"'.format(v)
                content_list.append('    -%s %s \\' % (k, v))
            # remove line continuation backslash from last option
            content_list[-1] = content_list[-1][:-2]
        return '\n'.join(content_list)
================================================
FILE: bag/verification/templates/LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2018, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: bag/verification/templates/layout_export_config.txt
================================================
case "preserve"
cellListFile ""
cellMap ""
cellNamePrefix ""
cellNameSuffix ""
convertDot "node"
convertPin "geometry"
#doNotPreservePcellPins
#flattenPcells
#flattenVias
fontMap ""
#ignoreLines
#ignorePcellEvalFail
labelCase "preserve"
labelDepth "1"
labelMap ""
layerMap ""
library "{{lib_name}}"
logFile "strmOut.log"
maxVertices "200"
#mergePathSegsToPath
#noConvertHalfWidthPath
noInfo ""
#noObjectProp
#noOutputTextDisplays
#noOutputUnplacedInst
noWarn ""
objectMap ""
outputDir "{{run_dir}}"
#pathToPolygon
pinAttNum "0"
propMap ""
#propValueOnly
#rectToBox
refLibList ""
#replaceBusBitChar
#reportPrecisionLoss
#respectGDSIINameLimit
runDir "{{run_dir}}"
#snapToGrid
strmFile "{{output_name}}"
strmVersion "5"
summaryFile ""
techLib ""
topCell "{{cell_name}}"
userSkillFile ""
viaMap ""
view "{{view_name}}"
warnToErr ""
================================================
FILE: bag/verification/templates/si_env.txt
================================================
simStopList = '("auCdl")
simViewList = '("auCdl" "schematic")
globalGndSig = ""
globalPowerSig = ""
shrinkFACTOR = 0
checkScale = "meter"
diodeCheck = "none"
capacitorCheck = "none"
resistorCheck = "none"
resistorModel = ""
shortRES = 2000
simNetlistHier = 't
pinMAP = 'nil
displayPININFO = 't
checkLDD = 'nil
connects = ""
setEQUIV = ""
simRunDir = "{{run_dir}}"
hnlNetlistFileName = "{{output_name}}"
simSimulator = "auCdl"
simViewName = "{{view_name}}"
simCellName = "{{cell_name}}"
simLibName = "{{lib_name}}"
incFILE = "{{source_added_file}}"
cdlSimViewList = '("auCdl" "schematic")
cdlSimStopList = '("auCdl")
auCdlDefNetlistProc = "ansCdlHnlPrintInst"
================================================
FILE: bag/verification/virtuoso.py
================================================
# -*- coding: utf-8 -*-
"""This module handles exporting schematic/layout from Virtuoso.
"""
from typing import TYPE_CHECKING, Optional, Dict, Any
import os
from abc import ABC
from ..io import write_file, open_temp
from .base import SubProcessChecker
if TYPE_CHECKING:
from .base import ProcInfo
class VirtuosoChecker(SubProcessChecker, ABC):
    """the base Checker class for Virtuoso.

    This class implement layout/schematic export procedures.

    Parameters
    ----------
    tmp_dir : str
        temporary file directory.
    max_workers : int
        maximum number of parallel processes.
    cancel_timeout : float
        timeout for cancelling a subprocess.
    source_added_file : str
        file to include for schematic export.
    """
    def __init__(self, tmp_dir, max_workers, cancel_timeout, source_added_file):
        # type: (str, int, float, str) -> None
        SubProcessChecker.__init__(self, tmp_dir, max_workers, cancel_timeout)
        # source.added include file used by the schematic (CDL) netlister.
        self._source_added_file = source_added_file
    def setup_export_layout(self, lib_name, cell_name, out_file, view_name='layout', params=None):
        # type: (str, str, str, str, Optional[Dict[str, Any]]) -> ProcInfo
        """Set up the 'strmout' GDS stream-out command.

        Returns (cmd, log_file, env, cwd) for the subprocess runner.
        """
        out_file = os.path.abspath(out_file)
        run_dir = os.path.dirname(out_file)
        out_name = os.path.basename(out_file)
        log_file = os.path.join(run_dir, 'layout_export.log')
        os.makedirs(run_dir, exist_ok=True)
        # fill in stream out configuration file.
        content = self.render_file_template('layout_export_config.txt',
                                            dict(
                                                lib_name=lib_name,
                                                cell_name=cell_name,
                                                view_name=view_name,
                                                output_name=out_name,
                                                run_dir=run_dir,
                                            ))
        with open_temp(prefix='stream_template', dir=run_dir, delete=False) as config_file:
            config_fname = config_file.name
            config_file.write(content)
        # run strmOut
        cmd = ['strmout', '-templateFile', config_fname]
        return cmd, log_file, None, os.environ['BAG_WORK_DIR']
    def setup_export_schematic(self, lib_name, cell_name, out_file, view_name='schematic',
                               params=None):
        # type: (str, str, str, str, Optional[Dict[str, Any]]) -> ProcInfo
        """Set up the 'si' schematic netlist export command.

        Returns (cmd, log_file, env, cwd) for the subprocess runner.
        """
        out_file = os.path.abspath(out_file)
        run_dir = os.path.dirname(out_file)
        out_name = os.path.basename(out_file)
        log_file = os.path.join(run_dir, 'schematic_export.log')
        # bugfix: create the run directory like setup_export_layout does;
        # previously writing si.env failed if the directory did not exist.
        os.makedirs(run_dir, exist_ok=True)
        # fill in stream out configuration file.
        content = self.render_file_template('si_env.txt',
                                            dict(
                                                lib_name=lib_name,
                                                cell_name=cell_name,
                                                view_name=view_name,
                                                output_name=out_name,
                                                source_added_file=self._source_added_file,
                                                run_dir=run_dir,
                                            ))
        # create configuration file.
        config_fname = os.path.join(run_dir, 'si.env')
        write_file(config_fname, content)
        # run command
        cmd = ['si', run_dir, '-batch', '-command', 'netlist']
        return cmd, log_file, None, os.environ['BAG_WORK_DIR']
================================================
FILE: bag/virtuoso.py
================================================
# -*- coding: utf-8 -*-
"""This module provides functions needed to get Virtuoso to work with BAG.
"""
import os
import sys
import atexit
import signal
import argparse
import bag.interface
import bag.io
def run_skill_server(args):
    """Run the BAG/Virtuoso server.

    Parameters
    ----------
    args : argparse.Namespace
        parsed command-line arguments; uses the attributes min_port,
        max_port, port_file, and log_file.
    """
    error_msg = ''
    server = None
    port_file = None
    port_number = None
    try:
        # process command line arguments
        min_port = args.min_port
        max_port = args.max_port
        # remove directory from port file name; the port file always goes
        # in $BAG_WORK_DIR so BAG clients can find it.
        port_file = os.path.basename(args.port_file)
        log_file = args.log_file
        # create log file directory, and remove old log.
        if log_file is not None:
            log_file = os.path.abspath(log_file)
            log_dir = os.path.dirname(log_file)
            if not os.path.exists(log_dir):
                os.makedirs(log_dir)
            elif os.path.exists(log_file):
                os.remove(log_file)
        # determine port file name
        if 'BAG_WORK_DIR' not in os.environ:
            raise Exception('Environment variable BAG_WORK_DIR not defined')
        work_dir = os.environ['BAG_WORK_DIR']
        if not os.path.isdir(work_dir):
            raise Exception('$BAG_WORK_DIR = %s is not a directory' % work_dir)
        port_file = os.path.join(work_dir, port_file)
        # determine temp directory
        tmp_dir = None
        if 'BAG_TEMP_DIR' in os.environ:
            tmp_dir = os.environ['BAG_TEMP_DIR']
            if not os.path.isdir(tmp_dir):
                if os.path.exists(tmp_dir):
                    raise Exception('$BAG_TEMP_DIR = %s is not a directory' % tmp_dir)
                else:
                    os.makedirs(tmp_dir)
        # attempt to open port and start server
        router = bag.interface.ZMQRouter(min_port=min_port, max_port=max_port, log_file=log_file)
        server = bag.interface.SkillServer(router, sys.stdout, sys.stdin, tmpdir=tmp_dir)
        port_number = router.get_port()
    except Exception as ex:
        error_msg = 'bag server process error:\n%s\n' % str(ex)
    # bugfix: only publish the port and run the server when initialization
    # succeeded.  Previously server.run() was invoked unconditionally, so a
    # failed init called .run() on None and the resulting AttributeError
    # clobbered the original error message.
    if not error_msg:
        bag.io.write_file(port_file, '%r\n' % port_number)
        # TODO: somehow this is a bug??!! figure it out.
        # make sure port_file is removed at exit
        # def exit_handler():
        #     if os.path.exists(port_file):
        #         os.remove(port_file)
        # atexit.register(exit_handler)
        # signal.signal(signal.SIGTERM, exit_handler)
        try:
            sys.stdout.write('BAG skill server has started. Yay!\n')
            sys.stdout.flush()
            server.run()
        except Exception as ex:
            error_msg = 'bag server process error:\n%s\n' % str(ex)
    if error_msg:
        sys.stderr.write(error_msg)
        sys.stderr.flush()
def parse_command_line_arguments():
    """Parse command line arguments, then run the corresponding function."""
    parser = argparse.ArgumentParser(
        description='A Python program that performs tasks for virtuoso.')
    sub_parsers = parser.add_subparsers(
        title='Commands',
        description=('Valid commands. Supply -h/--help flag after the command name '
                     'to learn more about the command.'),
        help='command name.')
    # the single supported sub-command: run the BAG skill server.
    server_desc = 'Run BAG skill server.'
    server_parser = sub_parsers.add_parser('run_skill_server',
                                           description=server_desc, help=server_desc)
    server_parser.add_argument('min_port', type=int, help='minimum socket port number.')
    server_parser.add_argument('max_port', type=int, help='maximum socket port number.')
    server_parser.add_argument('port_file', type=str, help='file to write the port number to.')
    server_parser.add_argument('log_file', type=str, nargs='?', default=None,
                               help='log file name.')
    server_parser.set_defaults(func=run_skill_server)
    # dispatch to the handler registered via set_defaults.
    parsed = parser.parse_args()
    parsed.func(parsed)
if __name__ == '__main__':
    parse_command_line_arguments()
================================================
FILE: docs/.gitignore
================================================
build
================================================
FILE: docs/LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2018, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: docs/Makefile
================================================
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = build
# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don\'t have Sphinx installed, grab it from http://sphinx-doc.org/)
endif
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
.PHONY: help
help:
	@echo "Please use \`make <target>' where <target> is one of"
@echo " html to make standalone HTML files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " singlehtml to make a single large HTML file"
@echo " pickle to make pickle files"
@echo " json to make JSON files"
@echo " htmlhelp to make HTML files and a HTML help project"
@echo " qthelp to make HTML files and a qthelp project"
@echo " applehelp to make an Apple Help Book"
@echo " devhelp to make HTML files and a Devhelp project"
@echo " epub to make an epub"
@echo " epub3 to make an epub3"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " latexpdf to make LaTeX files and run them through pdflatex"
@echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
@echo " text to make text files"
@echo " man to make manual pages"
@echo " texinfo to make Texinfo files"
@echo " info to make Texinfo files and run them through makeinfo"
@echo " gettext to make PO message catalogs"
@echo " changes to make an overview of all changed/added/deprecated items"
@echo " xml to make Docutils-native XML files"
@echo " pseudoxml to make pseudoxml-XML files for display purposes"
@echo " linkcheck to check all external links for integrity"
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
@echo " coverage to run coverage check of the documentation (if enabled)"
@echo " dummy to check syntax errors of document sources"
.PHONY: clean
clean:
rm -rf $(BUILDDIR)/*
.PHONY: html
html:
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
.PHONY: dirhtml
dirhtml:
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
.PHONY: singlehtml
singlehtml:
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
@echo
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
.PHONY: pickle
pickle:
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
.PHONY: json
json:
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
@echo
@echo "Build finished; now you can process the JSON files."
# --- Sphinx builder targets -------------------------------------------------
# One target per Sphinx output format.  Each invokes $(SPHINXBUILD) with the
# matching builder name (-b <builder>) and writes output to
# $(BUILDDIR)/<builder>.  Generated by sphinx-quickstart.

.PHONY: htmlhelp
htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

.PHONY: qthelp
qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/BAG.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/BAG.qhc"

.PHONY: applehelp
applehelp:
	$(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
	@echo
	@echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
	@echo "N.B. You won't be able to view it unless you put it in" \
	      "~/Library/Documentation/Help or install it in your application" \
	      "bundle."

.PHONY: devhelp
devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/BAG"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/BAG"
	@echo "# devhelp"

.PHONY: epub
epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

.PHONY: epub3
epub3:
	$(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3
	@echo
	@echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3."

# latex only generates the .tex sources; latexpdf additionally runs them
# through pdflatex via a recursive make in the output directory.
.PHONY: latex
latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	      "(use \`make latexpdf' here to do that automatically)."

.PHONY: latexpdf
latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

.PHONY: latexpdfja
latexpdfja:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through platex and dvipdfmx..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

.PHONY: text
text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

.PHONY: man
man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

.PHONY: texinfo
texinfo:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	      "(use \`make info' here to do that automatically)."

.PHONY: info
info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	# NOTE(review): uses bare `make` instead of $(MAKE); the latexpdf
	# targets above use $(MAKE) — confirm whether this should match.
	make -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."

# gettext uses I18NSPHINXOPTS (defined earlier in this Makefile) rather than
# ALLSPHINXOPTS, since message catalogs are language-neutral.
.PHONY: gettext
gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."

.PHONY: changes
changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

.PHONY: linkcheck
linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."

.PHONY: doctest
doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."

.PHONY: coverage
coverage:
	$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
	@echo "Testing of coverage in the sources finished, look at the " \
	      "results in $(BUILDDIR)/coverage/python.txt."

.PHONY: xml
xml:
	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
	@echo
	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."

.PHONY: pseudoxml
pseudoxml:
	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
	@echo
	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."

# dummy runs a full read/parse pass without writing output; useful as a
# fast syntax check of the documentation sources.
.PHONY: dummy
dummy:
	$(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy
	@echo
	@echo "Build finished. Dummy builder generates no files."
================================================
FILE: docs/README
================================================
To build/update documentation:
1. make sure BAG Python's bin folder is in your path.
2. run:
./refresh_api.sh
to generate the API documentation.
3. run:
make html
to build the documentation webpage.
================================================
FILE: docs/refresh_api.sh
================================================
#!/usr/bin/env tcsh
# Regenerate the API .rst stubs for the ``bag`` package into source/api.
# --force overwrites any previously generated stub files.
sphinx-apidoc --force --output-dir source/api ../bag
================================================
FILE: docs/source/LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2018, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: docs/source/api/LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2018, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: docs/source/api/bag.data.rst
================================================
bag.data package
================
Submodules
----------
bag.data.core module
--------------------
.. automodule:: bag.data.core
:members:
:undoc-members:
:show-inheritance:
bag.data.dc module
------------------
.. automodule:: bag.data.dc
:members:
:undoc-members:
:show-inheritance:
bag.data.digital module
-----------------------
.. automodule:: bag.data.digital
:members:
:undoc-members:
:show-inheritance:
bag.data.lti module
-------------------
.. automodule:: bag.data.lti
:members:
:undoc-members:
:show-inheritance:
bag.data.ltv module
-------------------
.. automodule:: bag.data.ltv
:members:
:undoc-members:
:show-inheritance:
bag.data.mos module
-------------------
.. automodule:: bag.data.mos
:members:
:undoc-members:
:show-inheritance:
bag.data.plot module
--------------------
.. automodule:: bag.data.plot
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: bag.data
:members:
:undoc-members:
:show-inheritance:
================================================
FILE: docs/source/api/bag.design.rst
================================================
bag.design package
==================
Submodules
----------
bag.design.database module
--------------------------
.. automodule:: bag.design.database
:members:
:undoc-members:
:show-inheritance:
bag.design.module module
------------------------
.. automodule:: bag.design.module
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: bag.design
:members:
:undoc-members:
:show-inheritance:
================================================
FILE: docs/source/api/bag.interface.rst
================================================
bag.interface package
=====================
Submodules
----------
bag.interface.database module
-----------------------------
.. automodule:: bag.interface.database
:members:
:undoc-members:
:show-inheritance:
bag.interface.ocean module
--------------------------
.. automodule:: bag.interface.ocean
:members:
:undoc-members:
:show-inheritance:
bag.interface.server module
---------------------------
.. automodule:: bag.interface.server
:members:
:undoc-members:
:show-inheritance:
bag.interface.simulator module
------------------------------
.. automodule:: bag.interface.simulator
:members:
:undoc-members:
:show-inheritance:
bag.interface.skill module
--------------------------
.. automodule:: bag.interface.skill
:members:
:undoc-members:
:show-inheritance:
bag.interface.zmqwrapper module
-------------------------------
.. automodule:: bag.interface.zmqwrapper
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: bag.interface
:members:
:undoc-members:
:show-inheritance:
================================================
FILE: docs/source/api/bag.io.rst
================================================
bag.io package
==============
Submodules
----------
bag.io.common module
--------------------
.. automodule:: bag.io.common
:members:
:undoc-members:
:show-inheritance:
bag.io.file module
------------------
.. automodule:: bag.io.file
:members:
:undoc-members:
:show-inheritance:
bag.io.gui module
-----------------
.. automodule:: bag.io.gui
:members:
:undoc-members:
:show-inheritance:
bag.io.process module
---------------------
.. automodule:: bag.io.process
:members:
:undoc-members:
:show-inheritance:
bag.io.sim_data module
----------------------
.. automodule:: bag.io.sim_data
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: bag.io
:members:
:undoc-members:
:show-inheritance:
================================================
FILE: docs/source/api/bag.layout.routing.rst
================================================
bag.layout.routing package
==========================
Submodules
----------
bag.layout.routing.base module
------------------------------
.. automodule:: bag.layout.routing.base
:members:
:undoc-members:
:show-inheritance:
bag.layout.routing.fill module
------------------------------
.. automodule:: bag.layout.routing.fill
:members:
:undoc-members:
:show-inheritance:
bag.layout.routing.grid module
------------------------------
.. automodule:: bag.layout.routing.grid
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: bag.layout.routing
:members:
:undoc-members:
:show-inheritance:
================================================
FILE: docs/source/api/bag.layout.rst
================================================
bag.layout package
==================
Subpackages
-----------
.. toctree::
bag.layout.routing
Submodules
----------
bag.layout.connection module
----------------------------
.. automodule:: bag.layout.connection
:members:
:undoc-members:
:show-inheritance:
bag.layout.core module
----------------------
.. automodule:: bag.layout.core
:members:
:undoc-members:
:show-inheritance:
bag.layout.digital module
-------------------------
.. automodule:: bag.layout.digital
:members:
:undoc-members:
:show-inheritance:
bag.layout.objects module
-------------------------
.. automodule:: bag.layout.objects
:members:
:undoc-members:
:show-inheritance:
bag.layout.template module
--------------------------
.. automodule:: bag.layout.template
:members:
:undoc-members:
:show-inheritance:
bag.layout.util module
----------------------
.. automodule:: bag.layout.util
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: bag.layout
:members:
:undoc-members:
:show-inheritance:
================================================
FILE: docs/source/api/bag.math.rst
================================================
bag.math package
================
Submodules
----------
bag.math.dfun module
--------------------
.. automodule:: bag.math.dfun
:members:
:undoc-members:
:show-inheritance:
bag.math.interpolate module
---------------------------
.. automodule:: bag.math.interpolate
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: bag.math
:members:
:undoc-members:
:show-inheritance:
================================================
FILE: docs/source/api/bag.mdao.rst
================================================
bag.mdao package
================
Submodules
----------
bag.mdao.components module
--------------------------
.. automodule:: bag.mdao.components
:members:
:undoc-members:
:show-inheritance:
bag.mdao.core module
--------------------
.. automodule:: bag.mdao.core
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: bag.mdao
:members:
:undoc-members:
:show-inheritance:
================================================
FILE: docs/source/api/bag.rst
================================================
bag package
===========
Subpackages
-----------
.. toctree::
bag.data
bag.design
bag.interface
bag.io
bag.layout
bag.math
bag.mdao
bag.tech
bag.util
bag.verification
Submodules
----------
bag.core module
---------------
.. automodule:: bag.core
:members:
:undoc-members:
:show-inheritance:
bag.virtuoso module
-------------------
.. automodule:: bag.virtuoso
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: bag
:members:
:undoc-members:
:show-inheritance:
================================================
FILE: docs/source/api/bag.tech.rst
================================================
bag.tech package
================
Submodules
----------
bag.tech.core module
--------------------
.. automodule:: bag.tech.core
:members:
:undoc-members:
:show-inheritance:
bag.tech.mos module
-------------------
.. automodule:: bag.tech.mos
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: bag.tech
:members:
:undoc-members:
:show-inheritance:
================================================
FILE: docs/source/api/bag.util.rst
================================================
bag.util package
================
Submodules
----------
bag.util.interval module
------------------------
.. automodule:: bag.util.interval
:members:
:undoc-members:
:show-inheritance:
bag.util.libimport module
-------------------------
.. automodule:: bag.util.libimport
:members:
:undoc-members:
:show-inheritance:
bag.util.parse module
---------------------
.. automodule:: bag.util.parse
:members:
:undoc-members:
:show-inheritance:
bag.util.search module
----------------------
.. automodule:: bag.util.search
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: bag.util
:members:
:undoc-members:
:show-inheritance:
================================================
FILE: docs/source/api/bag.verification.rst
================================================
bag.verification package
========================
Submodules
----------
bag.verification.base module
----------------------------
.. automodule:: bag.verification.base
:members:
:undoc-members:
:show-inheritance:
bag.verification.calibre module
-------------------------------
.. automodule:: bag.verification.calibre
:members:
:undoc-members:
:show-inheritance:
bag.verification.pvs module
---------------------------
.. automodule:: bag.verification.pvs
:members:
:undoc-members:
:show-inheritance:
bag.verification.virtuoso_export module
---------------------------------------
.. automodule:: bag.verification.virtuoso_export
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: bag.verification
:members:
:undoc-members:
:show-inheritance:
================================================
FILE: docs/source/api/modules.rst
================================================
bag
===
.. toctree::
:maxdepth: 4
bag
================================================
FILE: docs/source/conf.py
================================================
# -*- coding: utf-8 -*-
#
# BAG documentation build configuration file, created by
# sphinx-quickstart on Fri May 27 15:45:44 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys
import os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# This puts the repository root on sys.path so autodoc can import ``bag``.
sys.path.insert(0, os.path.abspath('../..'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autosummary',
    'sphinx.ext.autodoc',
    'sphinx.ext.imgmath',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
    # napoleon has better support for class instance attribute than numpydoc.
    'sphinx.ext.napoleon',
    # 'numpydoc',
]

# make numpydoc work with autosummary
# (kept even though numpydoc is commented out above, so re-enabling it works)
numpydoc_show_class_members = False

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'BAG'
copyright = u'2016, Eric Chang'
author = u'Eric Chang'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'2.0'
# The full version, including alpha/beta/rc tags.
release = u'2.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): newer Sphinx releases warn on ``language = None`` and expect
# an explicit code such as 'en' — confirm against the pinned Sphinx version.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all
# documents.
# 'any' lets bare `name` markup cross-reference any matching Python object.
default_role = 'any'

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# NOTE(review): sphinx_rtd_theme is a third-party package — confirm it is
# installed in the documentation build environment.
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents.
# " v documentation" by default.
#html_title = u'BAG v2.0'

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#html_search_language = 'en'

# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}

# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'

# Output file base name for HTML help builder.
htmlhelp_basename = 'BAGdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',

    # Latex figure (float) alignment
    #'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'BAG.tex', u'BAG Documentation',
     u'Eric Chang', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'bag', u'BAG Documentation',
     [author], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'BAG', u'BAG Documentation',
     author, 'BAG', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    'python': ('https://docs.python.org/3.5', None),
    'python3': ('https://docs.python.org/3.5', None),
    'python2': ('https://docs.python.org/2.7', None),
    'numpy': ('http://docs.scipy.org/doc/numpy/', None),
}
================================================
FILE: docs/source/developer/LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2018, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: docs/source/developer/developer.rst
================================================
Developer Guide
===============
Nothing here yet...
================================================
FILE: docs/source/index.rst
================================================
.. BAG documentation master file, created by
sphinx-quickstart on Fri May 27 15:45:44 2016.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to BAG's documentation!
===============================
Contents:
.. toctree::
:maxdepth: 2
tutorial/tutorial
overview/overview
setup/setup
developer/developer
api/modules
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
================================================
FILE: docs/source/overview/LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2018, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: docs/source/overview/design.rst
================================================
Design Module
=============
A design module is a Python class that generates new schematics. It computes all parameters needed to generate a
schematic from user defined specifications. For example, a design module for an inverter needs to compute the width,
length, and threshold flavor of the NMOS and PMOS to generate a new inverter schematic. The designer of this module can
let the user specify these parameters directly, or alternatively compute them from higher level specifications, such as
fanout, input capacitance, and leakage specs.
To create a default design module for a schematic generator, create a :class:`~bag.BagProject` instance and call
:meth:`~bag.BagProject.import_design_library` to import all schematic generators in a library from your CAD
program into Python. The designer should then implement the three methods, :meth:`~bag.design.Module.design`,
:meth:`~bag.design.Module.get_layout_params`, and :meth:`~bag.design.Module.get_layout_pin_mapping` (The latter two are
optional if you do not use BAG to generate layout). Once you finish the design module definition, you can create new
design module instances by calling :meth:`~bag.BagProject.create_design_module`.
The following sections describe how each of these methods should be implemented.
design()
--------
This method computes all parameters needed to generate a schematic from user defined specifications. The input
arguments should also be specified in this method.
A design module can have multiple design methods, as long as they have different names. For example, you can implement
the ``design()`` method to compute parameters from high level specifications, and define a new method named
``design_override()`` that allows the user to assign parameter values directly for debugging purposes.
To enable hierarchical design, design module has a dictionary, :attr:`~bag.design.Module.instances`, that
maps children instance names to corresponding design modules, so you can simply call their
:meth:`~bag.design.Module.design` methods to set their parameters. See :doc:`/tutorial/tutorial` for a simple example.
If you need to modify the schematic structure (such as adding more inverter buffers), you should call the corresponding
methods before calling :meth:`~bag.design.Module.design` methods of child instances, as those design modules could be
changed. The rest of this section explains how you modify the schematic.
Pin Renaming
^^^^^^^^^^^^
Most of the time, you should not rename the pin of schematic. The only time you should rename the pin is when you have
a variable bus pin where the number of bits in the bus can change with the design. In this case, call
:meth:`~bag.design.Module.rename_pin` to change the number of bits in the bus. To connect/remove instances from
the added/deleted bus pins, see :ref:`instance_connection_modification`.
Delete Instances
^^^^^^^^^^^^^^^^
Delete a child instance by calling :meth:`~bag.design.Module.delete_instance`. After
this call, the corresponding value in :attr:`~bag.design.Module.instances` dictionary will become ``None``.
.. note::
You don't have to delete 0-width or 0-finger transistors; BAG already handles that for you.
Replace Instance Master
^^^^^^^^^^^^^^^^^^^^^^^
If you have two different designs of a child instance, and you want to swap between the two designs, you can call
:meth:`~bag.design.Module.replace_instance_master` to change the instance master of a child.
.. note::
You can replace instance masters only if the two instance masters have exactly the same symbol, including pin names.
.. _instance_connection_modification:
Instance Connection Modification
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Call :meth:`~bag.design.Module.reconnect_instance_terminal` to change a child instance's connection.
Arraying Child Instances
^^^^^^^^^^^^^^^^^^^^^^^^
Call :meth:`~bag.design.Module.array_instance` to array a child instance. After this call,
:attr:`~bag.design.Module.instances` will map the child instance name to a list of design modules, one for each instance
in the array. You can then iterate through this list and design each of the instances. They do not need to have the
same parameter values.
Restoring to Default
^^^^^^^^^^^^^^^^^^^^
If you are using the design module in a design iteration loop, or you're using BAG interactively through the Python
console, and you want to restore a deleted/replaced/arrayed child instance to the default state, you can call
:meth:`~bag.design.Module.restore_instance`.
get_layout_params()
-------------------
This method should return a dictionary from layout parameter names to their values. This dictionary is used to create
a layout cell that will pass LVS against the generated schematic.
get_layout_pin_mapping()
------------------------
This method should return a dictionary from layout pin names to schematic pin names. This method exists because a
layout cell may not have the same pin names as the schematic. If a layout pin should be left un-exported, its
corresponding value in the dictionary must be ``None``.
This dictionary only needs to list the layout pins that need to be renamed. If no renaming is necessary, an empty
dictionary can be returned.
================================================
FILE: docs/source/overview/overview.rst
================================================
Overview
========
.. figure:: ./figures/bag_flow.png
:align: center
:figclass: align-center
BAG design flow diagram
BAG is a Python-based circuit design platform that aims to automate analog circuit design, but at the same time give the
user full visibility and control over every step in the design flow.
The analog circuit design flow is generally as follows:
#. Create a schematic generator of the circuit.
#. Create a testbench generator to measure specifications and verify functionality.
#. Create a layout generator if post-extraction verification is needed.
#. Generate a schematic with given specifications.
#. Generate a testbench that instantiates the generated schematic.
#. Simulate the testbenches and post-process data to verify that the circuit meets specifications.
#. Create the layout of your schematic and verify it's LVS/DRC clean.
#. Repeat step 3 on post-extraction schematic.
BAG 2.0 is designed so that any or all steps of the design flow can be performed in a Python script or console, thus
enabling rapid design iteration and architecture exploration.
To achieve its goal, BAG is divided into 4 components: schematic generators, layout generators, design modules, and
testbench generators. These components are independent from one another, so the designer can pick and choose which steps
in the design flow to automate. For example, the designer can simply use BAG to generate new schematics, and use his
own CAD program for simulation and verification. Alternatively, The designer can provide an existing schematic to BAG
and simply use it to automate the verification process.
BAG interacts with an external CAD program or simulator to complete all the design and simulation tasks. BAG comes with
Virtuoso and Ocean simulator support, but can be extended to other CAD programs or simulators. The rest of this
document assumes you are using Virtuoso and running simulations in Ocean.
Next we will describe each component of BAG in detail.
.. toctree::
:maxdepth: 2
schematic
design
testbench
================================================
FILE: docs/source/overview/schematic.rst
================================================
Schematic Generator
===================
A schematic generator is a schematic in your CAD program that tells BAG all the information needed to create a design.
BAG creates design modules from schematic generators, and BAG will copy and modify schematic generators to implement
new designs.
.. figure:: ./figures/gm_schematic.png
:align: center
:figclass: align-center
An example schematic generator of a differential gm cell.
A schematic generator needs to follow some rules to work with BAG:
#. Instances in a schematic generator must be other schematic generators, or a cell in the ``BAG_prim`` library.
#. BAG can array any instance in a schematic generator. That is, in the design implementation phase, BAG can
copy/paste this instance any number of times, and modify the connections or parameters of any copy. This is useful
in creating array structures, such as an inverter chain with variable number of stages, or a DAC with variable
number of bits.
However, if you need to array an instance, its ports must be connected to wire stubs, with net labels on each of the
wire stubs. Also, there must be absolutely nothing to the right of the instance, since BAG will array the instance
by copying-and-pasting to the right. An example of an inverter buffer chain schematic generator is shown below.
.. figure:: ./figures/inv_chain_schematic.png
:align: center
:figclass: align-center
An example schematic generator of an inverter buffer chain. Ports connected by wire stubs, nothing on the right.
#. BAG can replace the instance master of any instance. The primary use of this is to allow the designer to change
transistor threshold values, but this could be used for other schematic generators if implemented. Whenever you
switch the instance master of an instance, the symbol of the new instance must exactly match the old instance,
including the port names.
#. Although not required, it is good practice to fill in default parameter values for all instances from the
``BAG_prim`` library. This makes it so that you can simulate a schematic generator in a normal testbench, and make
debugging easier.
================================================
FILE: docs/source/overview/testbench.rst
================================================
Testbench Generator
===================
A testbench generator is just a normal testbench with schematic and adexl view. BAG will simply copy the schematic and
adexl view, and replace the device under test with the new generated schematic. There are only 3 restrictions to the
testbench:
#. All devices-under-test (DUTs) in the testbench must have an instance name starting with ``XDUT``. This is to inform BAG
which child instances should be replaced.
#. The testbench must be configured to simulate with ADE-XL. This is to make parametric/corner sweeps and monte carlo
easier.
#. You should not define any process corners in the ADE-XL state, as BAG will load them for you. This makes it
possible to use the same testbench generator across different technologies.
To verify a new design, call :meth:`~bag.BagProject.create_testbench` and specify the testbench generator library/cell,
DUT library/cell, and the library to create the new testbench in. BAG will create a :class:`~bag.core.Testbench` object
to represent this testbench. You can then call its methods to set the parameters, process corners, or enable parametric
sweeps. When you're done, call :meth:`~bag.core.Testbench.update_testbench` to commit the changes to Virtuoso. If you
do not wish to run simulation in BAG, you can then open this testbench in Virtuoso and simulate it there.
If you want to start simulation from BAG and load simulation data, you need to call
:meth:`~bag.core.Testbench.add_output` method to specify which outputs to record and send back to Python. Output
expression is a Virtuoso calculator expression. Then, call :meth:`~bag.core.Testbench.run_simulation` to start a
simulation run. During the simulation, you can press ``Ctrl-C`` anytime to abort simulation. When the simulation
finishes, the result directory will be saved to the attribute :attr:`~bag.core.Testbench.save_dir`, and you can call
:func:`bag.data.load_sim_results` to load the result in Python. See :doc:`/tutorial/tutorial` for an example.
Since BAG uses the ADE-XL interface to run simulation, all simulation runs will be recorded in ADE-XL's history tab, so
you can plot them in Virtuoso later for debugging purposes. By default, all simulation runs from BAG have the ``BagSim``
history tag, but you can also specify your own tag name when you call :meth:`~bag.core.Testbench.run_simulation`. Read
ADE-XL documentation if you want to know more about ADE-XL's history feature.
================================================
FILE: docs/source/setup/LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2018, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: docs/source/setup/bag_config/LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2018, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: docs/source/setup/bag_config/bag_config.rst
================================================
BAG Configuration File
======================
BAG configuration file is written in YAML format. This document describes each setting.
BAG configuration file may use environment variable to specify values of any entries.
.. toctree::
:maxdepth: 4
socket/socket
database/database
simulation/simulation
misc
================================================
FILE: docs/source/setup/bag_config/database/database.rst
================================================
database
========
This entry defines all settings related to Virtuoso.
database.class
--------------
The Python class that handles database interaction. This entry is mainly to support non-Virtuoso CAD programs. If you
use Virtuoso, the value must be ``bag.interface.skill.SkillInterface``.
database.schematic
------------------
This entry contains all settings needed to read/generate schematics.
.. _sch_tech_lib:
database.schematic.tech_lib
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Technology library. When BAG creates new libraries, they will be attached to this technology library. Usually this is
the PDK library provided by the foundry.
.. _sch_sympin:
database.schematic.sympin
^^^^^^^^^^^^^^^^^^^^^^^^^
Instance master of symbol pins. This is a list of library/cell/view names. Most of the time this should be
``["basic", "sympin", "symbolNN"]``.
.. _sch_ipin:
database.schematic.ipin
^^^^^^^^^^^^^^^^^^^^^^^
Instance master of input pins in schematic. This is a list of library/cell/view names. Most of the time this should be
``["basic", "ipin", "symbol"]``.
.. _sch_opin:
database.schematic.opin
^^^^^^^^^^^^^^^^^^^^^^^
Instance master of output pins in schematic. This is a list of library/cell/view names. Most of the time this should be
``["basic", "opin", "symbol"]``.
.. _sch_iopin:
database.schematic.iopin
^^^^^^^^^^^^^^^^^^^^^^^^
Instance master of inout pins in schematic. This is a list of library/cell/view names. Most of the time this should be
``["basic", "iopin", "symbolr"]``.
.. _sch_simulators:
database.schematic.simulators
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A list of simulators where the ``termOrder`` CDF field should be defined.
When Virtuoso converts schematics to netlists, it uses the ``termOrder`` CDF field to decide how to order the pin names
in the netlist. This entry makes BAG update the ``termOrder`` field correctly whenever pins are changed.
Most of the time, this should be ``["auLvs", "auCdl", "spectre", "hspiceD"]``.
.. _sch_exclude:
database.schematic.exclude_libraries
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A list of libraries to exclude when importing schematic generators to BAG. Most of the time, this should be
``["analogLib", "basic", {PDK}]``, where ``{PDK}`` is the PDK library.
database.testbench
------------------
This entry contains all settings needed to create new testbenches.
.. _tb_config_libs:
database.testbench.config_libs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A string of config view global libraries, separated by spaces. Used to generate config view.
database.testbench.config_views
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A string of config view global cellviews, separated by spaces. Used to generate config view. Most of the time this
should be ``"spectre calibre schematic veriloga"``.
database.testbench.config_stops
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A string of config view global stop cellviews, separated by spaces. Used to generate config view. Most of the time this
should be ``"spectre veriloga"``.
.. _sim_env_file:
database.testbench.env_file
^^^^^^^^^^^^^^^^^^^^^^^^^^^
The simulation environment file name. A simulation environment is a combination of process corner and temperature.
For example, if you simulate your circuit at TT corner with a temperature of 50 degrees Celsius, you may say the
simulation environment is TT_50. A simulation environment file contains all simulation environments you want to define
when BAG creates a new testbench. This file can be generated by exporting corner setup from an ADE-XL view.
database.testbench.def_files
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A list of ADE/spectre definition files to include. Sometimes, a process technology uses definition files
in addition to model files. If so, you can specify definition files to include here as a list of strings.
Use an empty list (``[]``) if no definition file is needed.
database.testbench.default_env
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The default simulation environment name. See :ref:`sim_env_file`.
database.checker
----------------
This entry contains all settings needed to run LVS/RCX from BAG.
database.checker.checker_cls
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The Python class that handles LVS/RCX. If you use Calibre with Virtuoso for LVS/RCX, the value must be
``bag.verification.calibre.Calibre``.
.. _lvs_rundir:
database.checker.lvs_run_dir
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
LVS run directory.
.. _rcx_rundir:
database.checker.rcx_run_dir
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
RCX run directory.
.. _lvs_runset:
database.checker.lvs_runset
^^^^^^^^^^^^^^^^^^^^^^^^^^^
LVS runset.
.. _rcx_runset:
database.checker.rcx_runset
^^^^^^^^^^^^^^^^^^^^^^^^^^^
RCX runset.
database.checker.source_added_file
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Location of the source.added file for Calibre LVS. If this entry is not defined, BAG
defaults to ``$DK/Calibre/lvs/source.added``.
database.checker.rcx_mode
^^^^^^^^^^^^^^^^^^^^^^^^^
Whether to use Calibre PEX or Calibre XACT3D flow to perform parasitic extraction. The
value should be either ``pex`` or ``xact``. If this entry is not defined, BAG defaults to
``pex``.
database.checker.xact_rules
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Location of the Calibre XACT3D rules file. This entry must be defined if using Calibre XACT3D flow.
database.calibreview
--------------------
This entry contains all settings needed to generate calibre view after RCX.
.. _calibre_cellmap:
database.calibreview.cell_map
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The calibre view cellmap file.
database.calibreview.view_name
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
view name for calibre view. Usually ``calibre``.
================================================
FILE: docs/source/setup/bag_config/misc.rst
================================================
class
=====
The subclass of :class:`bag.core.BagProject` to use.
.. _bag_lib_defs:
lib_defs
========
Location of the BAG design module libraries definition file.
The BAG libraries definition file is similar to the ``cds.lib`` file for Virtuoso, where it defines every design module
library and its location. This file makes it easy to share design module libraries made by different designers.
Each line in the file contains two entries, separated by spaces. The first entry is the name of the design module
library, and the second entry is the location of the design module library. Environment variables may be used in this
file.
.. _bag_new_lib_path:
new_lib_path
============
Directory to put new generated design module libraries.
When you import a new schematic generator library, BAG will create a corresponding Python design module library and
define this library in the library definition file (see :ref:`bag_lib_defs`). This field tells BAG where new design
module libraries should be created.
================================================
FILE: docs/source/setup/bag_config/simulation/simulation.rst
================================================
simulation
==========
This entry defines all settings related to Ocean.
simulation.class
----------------
The Python class that handles simulator interaction. This entry is mainly to support non-Ocean simulators. If you
use Ocean, the value must be ``bag.interface.ocean.OceanInterface``.
simulation.prompt
-----------------
The ocean prompt string.
.. _sim_init_file:
simulation.init_file
--------------------
This file will be loaded when Ocean first started up. This allows you to configure the Ocean simulator. If you do not want to load an initialization file, set this field to an empty string (``""``).
simulation.view
---------------
Testbench view name. Usually ``adexl``.
simulation.state
----------------
ADE-XL setup state name. When you run simulations from BAG, the simulation configuration will be saved to this setup
state.
simulation.update_timeout_ms
----------------------------
If simulation takes a long time, BAG will print out a message at this time interval (in milliseconds) so you can know
if BAG is still running.
simulation.kwargs
-----------------
pexpect keyword arguments dictionary used to start the simulation. When the BAG server receives a simulation request, it
will run Ocean in a subprocess using Python pexpect module. This entry allows you to control how pexpect starts the
Ocean subprocess. Refer to pexpect documentation for more information.
job_options
-----------
A dictionary of job options for ADE-XL. This entry controls whether ADE-XL runs simulations remotely or locally, and how many jobs it launches for a simulation run. Refer to ADE-XL documentation for available options.
================================================
FILE: docs/source/setup/bag_config/socket/socket.rst
================================================
socket
======
This entry defines socket settings for BAG to communicate with Virtuoso.
socket.host
-----------
The host of the BAG server socket, i.e. the machine running the Virtuoso program. Usually ``localhost``.
socket.port_file
----------------
File containing the socket port number for the BAG server. When Virtuoso starts the BAG server process, it finds an open
port and binds the server to this port. It then creates a file with this name in the ``$BAG_WORK_DIR`` directory, and
writes the port number to this file.
socket.sim_port_file
--------------------
File containing the socket port number for the simulation server. When the simulation server starts, it finds an open
port and binds the server to this port. It then creates a file with this name in the ``$BAG_WORK_DIR`` directory, and
writes the port number to this file.
socket.log_file
---------------
Socket communication debugging log file. All messages sent or received by BAG will be recorded in this log.
socket.pipeline
---------------
number of messages allowed in the ZMQ pipeline. Usually you don't have to change this.
================================================
FILE: docs/source/setup/config_summary.rst
================================================
Configuration Files Summary
===========================
Although BAG has many configuration settings, most of them do not need to be changed. This file summarizes which
settings you should modify under various use cases.
Starting New Project
--------------------
For every new project, it is a good practice to keep a set of global configuration files to make sure everyone working
on the project is simulating the same corners, running LVS and extraction with the same settings, and so on. In this
case, you should change the following fields to point to the global configuration files:
* :ref:`sim_env_file`
* :ref:`lvs_runset`
* :ref:`rcx_runset`
* :ref:`calibre_cellmap`
Customizing Virtuoso Setups
---------------------------
If you changed your Virtuoso setup (configuration files, working directory, etc.), double check the following fields to
see if they need to be modified:
* :ref:`lvs_rundir`
* :ref:`rcx_rundir`
* :ref:`sim_init_file`
Python Design Module Customization
----------------------------------
The following fields control how BAG 2.0 finds design modules, and also where it puts new imported modules:
* :ref:`bag_lib_defs`
* :ref:`bag_new_lib_path`
.. _change_pdk:
Changing Process Technology
---------------------------
If you want to change the process technology, double check the following fields:
* :ref:`sch_tech_lib`
* :ref:`sch_exclude`
* :ref:`tb_config_libs`
* :ref:`tech_config_path`
The following fields probably won't change, but if something doesn't work it's worth double-checking:
* :ref:`sch_sympin`
* :ref:`sch_ipin`
* :ref:`sch_opin`
* :ref:`sch_iopin`
* :ref:`sch_simulators`
================================================
FILE: docs/source/setup/install_python.rst
================================================
Installing Python for BAG
==========================
This section describes how to install Python for running BAG.
Installation Requirements
-------------------------
BAG is compatible with Python 3.5+ (Python 2.7+ is theoretically supported but untested), so you will need to have
Python 3.5+ installed. For Linux/Unix systems, it is recommended to install a separate Python distribution from
the system Python.
BAG requires multiple Python packages, some of which requires compiling C++/C/Fortran extensions. Therefore, it is
strongly recommended to download `Anaconda Python `_, which provides a Python
distribution with most of the packages preinstalled. Otherwise, please refer to documentation for each required
package for how to install/build from source.
Required Packages
-----------------
In addition to the default packages that come with Anaconda (numpy, scipy, etc.), you'll need the following additional
packages:
- `subprocess32 `_ (Python 2 only)
This package is a backport of Python 3.2's subprocess module to Python 2. It is installable from ``pip``.
- `sqlitedict `_
This is a dependency of OpenMDAO. It is installable from ``pip``.
- `OpenMDAO `_
This is a flexible optimization framework in Python developed by NASA. It is installable from ``pip``.
- `mpich2 `_ (optional)
This is the Message Passing Interface (MPI) library. OpenMDAO and Pyoptsparse can optionally use this library
for parallel computing. You can install this package with:
.. code-block:: bash
> conda install mpich2
- `mpi4py `_ (optional)
This is the Python wrapper of ``mpich2``. You can install this package with:
.. code-block:: bash
> conda install mpi4py
- `ipopt `__ (optional)
`Ipopt `__ is a free software package for large-scale nonlinear optimization.
This can be used to replace the default optimization solver that comes with scipy. You can install this package with:
.. code-block:: bash
> conda install --channel pkerichang ipopt
- `pyoptsparse `_ (optional)
``pyoptsparse`` is a python package that contains a collection of optmization solvers, including a Python wrapper
around ``Ipopt``. You can install this package with:
.. code-block:: bash
> conda install --channel pkerichang pyoptsparse
================================================
FILE: docs/source/setup/new_pdk.rst
================================================
Setting up New PDK
==================
This section describes how to get BAG 2.0 to work with a new PDK.
#. Create a new technology configuration file for this PDK. See :doc:`tech_config/tech_config` for a description of
the technology configuration file format.
#. Create a new BAG configuration file for this PDK. You can simply copy an existing configuration, then change the
fields listed in :ref:`change_pdk`.
#. Create a new ``BAG_prim`` library for this PDK. The easiest way to do this is to copy an existing ``BAG_prim``
library, then change the underlying instances to be instances from the new PDK. You should use the **pPar** command
in Virtuoso to pass CDF parameters from ``BAG_prim`` instances to PDK instances.
#. Change your cds.lib to refer to the new ``BAG_prim`` library.
#. To avoid everyone having their own Python design modules for BAG primitives, you should generate a global design module
library for BAG primitives, then ask every user to include this global library in their ``bag_libs.def`` file. To
do so, setup a BAG workspace and execute the following commands:
.. code-block:: python
import bag
prj = bag.BagProject()
prj.import_design_library('BAG_prim')
Now copy the generated design library to a global location.
================================================
FILE: docs/source/setup/pyoptsparse.rst
================================================
Building Pyoptsparse
====================
To be written.
================================================
FILE: docs/source/setup/setup.rst
================================================
BAG Setup Procedure
===================
This document describes how to install Python for BAG and the various configuration settings. Since a lot of the
configuration depends on the external CAD program and simulator, this document assumes you are using Virtuoso and
Ocean (with ADEXL) for schematic design and simulation, respectively.
.. toctree::
:maxdepth: 2
install_python
pyoptsparse
config_summary
bag_config/bag_config
tech_config/tech_config
new_pdk
================================================
FILE: docs/source/setup/tech_config/LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2018, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: docs/source/setup/tech_config/layout/layout.rst
================================================
layout
======
This entry defines all layout specific settings.
layout.em_temp
--------------
The temperature used to calculate electro-migration specs. The temperature should
be specified in degrees Celsius.
================================================
FILE: docs/source/setup/tech_config/misc.rst
================================================
.. _tech_config_path:
class
=====
The subclass of :class:`bag.layout.core.TechInfo` for this process technology.
If this entry is not defined, a default dummy :class:`~bag.layout.core.TechInfo`
instance will be created for schematic-only design flow.
================================================
FILE: docs/source/setup/tech_config/mos/mos.rst
================================================
mos
===
This entry defines all MOS transistor settings.
mos.width_resolution
--------------------
The minimum transistor width resolution, in meters (or in number of fins for FinFET technologies).
mos.length_resolution
---------------------
The transistor length minimum resolution, in meters.
mos.mos_char_root
-----------------
The default transistor characterization data directory.
================================================
FILE: docs/source/setup/tech_config/tech_config.rst
================================================
Technology Configuration File
=============================
Technology configuration file is written in YAML format. This document describes each setting.
The technology configuration file may use environment variables to specify the value of any entry.
.. toctree::
:maxdepth: 4
misc
mos/mos
layout/layout
================================================
FILE: docs/source/tutorial/LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2018, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: docs/source/tutorial/figures/LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2018, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: docs/source/tutorial/tutorial.rst
================================================
Tutorial
========
This section contains several simple tutorials for you to get an idea of the BAG workflow.
In these tutorials, we will be using :program:`git` extensively. git allows you to copy a working setup,
and it also allows you to check out and use other people's designs while they can work on adding future
improvements. To learn git, you can read the documentation here_, or alternatively you can just
google git commands to learn more about it while working through the tutorial.
.. _here: https://git-scm.com/doc
.. toctree::
:maxdepth: 2
schematic
collaboration
================================================
FILE: run_scripts/LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2018, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: run_scripts/clean_cds_lib.py
================================================
"""
This script removes the '#Removed by ddDeleteObj' items in cds.lib.
It is also capable of taking a list of patterns as input and delete the generated libraries that
match that pattern in glob style.
"""
from argparse import Namespace
import argparse
import re
import os
import shutil
from pathlib import Path
from bag.io.file import read_yaml_env, readlines_iter, write_file
def arg_parse() -> Namespace:
    """Parse command-line options for the cds.lib cleanup script."""
    p = argparse.ArgumentParser()
    p.add_argument('-rm', '--rm_patterns', nargs='+', dest='patterns', default=[],
                   help='A list of patterns to be removed from cadence library, the pattern '
                        'should be the name of the library in glob style')
    return p.parse_args()
def run_main(args: Namespace) -> None:
    """Clean up cds.lib and delete matching generated libraries.

    Drops '#Removed by ddDeleteObj' remnant lines from cds.lib, drops
    DEFINE lines whose library name matches any of the given glob
    patterns, then removes the matching generated library directories.

    Raises
    ------
    Exception
        If the BAG_CONFIG_PATH environment variable is not defined.
    """
    # local import: fnmatch is only needed by this fix
    from fnmatch import fnmatch

    bag_workspace_dir = Path(os.environ['BAG_WORK_DIR'])
    if 'BAG_CONFIG_PATH' not in os.environ:
        raise Exception('BAG_CONFIG_PATH not defined.')
    bag_config = read_yaml_env(os.environ['BAG_CONFIG_PATH'])
    gen_libs_path = Path(bag_config['database']['default_lib_path'])

    cds_lib_path = bag_workspace_dir / 'cds.lib'
    if not cds_lib_path.exists():
        print(f'path {str(cds_lib_path)} does not exist')
        return

    # Rewrite cds.lib, keeping every line except ddDeleteObj remnants and
    # DEFINE statements for libraries matching the removal patterns.
    # The help text advertises glob-style patterns, so match with fnmatch;
    # the old code fed the globs to re.match, which mis-handles '*'.
    new_cds_lib_content = []
    for line in readlines_iter(str(cds_lib_path)):
        if line.startswith('#Removed by ddDeleteObj'):
            continue
        tokens = line.split()
        if (len(tokens) >= 2 and tokens[0] == 'DEFINE'
                and any(fnmatch(tokens[1], p + '*') for p in args.patterns)):
            # prefix-style match ('{p}*') to mirror the directory glob below
            continue
        new_cds_lib_content.append(line)
    write_file(str(cds_lib_path), ''.join(new_cds_lib_content))

    # Remove the generated library directories that match the patterns.
    if not gen_libs_path.exists():
        # nothing to delete; guard against globbing a non-existent directory
        print(f'path {str(gen_libs_path)} does not exist')
        return
    for p in args.patterns:
        # 'lib_dir' instead of 'dir' to avoid shadowing the builtin
        for lib_dir in gen_libs_path.glob(f'{p}*'):
            if lib_dir.is_dir():
                shutil.rmtree(lib_dir)
            else:
                print(f'path {str(lib_dir)} is not a directory')
if __name__ == '__main__':
    # Script entry point: parse CLI arguments and run the cleanup.
    run_main(arg_parse())
================================================
FILE: run_scripts/compile_verilog.il
================================================
; Compile the netlist view of every (lib cell view) triple listed in fname.
; Each line of the input file is "lib_name cell_name view_name".
procedure( compile_netlist_views(fname "t")
    let( (p line info_list lib cell view obj cv)
        unless( p = infile(fname)
            error("Cannot open file %s" fname)
        )
        while( gets(line p)
            info_list = parseString(line)
            lib = car(info_list)
            cell = cadr(info_list)
            view = caddr(info_list)
            ; open/create the verilog.sv file object in append mode;
            ; presumably this ensures the file exists before compilation —
            ; obj itself is not used afterwards (side effect only)
            obj = ddGetObj(lib cell view "verilog.sv" nil "a")
            ; open the netlist cellview, rebuild connectivity, save, close
            cv = dbOpenCellViewByType(lib cell view "netlist" "ac")
            dbSetConnCurrent(cv)
            dbSave(cv)
            dbClose(cv)
        )
        close(p)
    )
)
; process the cell list file (produced by generate_verilog.py)
compile_netlist_views("verilog_cell_list.txt")
================================================
FILE: run_scripts/gen_cell.py
================================================
import argparse
from argparse import Namespace
from pathlib import Path
from bag.io.file import Pickle, Yaml
from bag.core import BagProject
# Map a --format option value to the bag IO class that implements it.
# NOTE(review): the --format help text also mentions 'json', which has no
# entry here — confirm whether json support was intended.
io_cls_dict = {
    'pickle': Pickle,
    'yaml': Yaml,
}
def parse_args() -> Namespace:
    """Build and evaluate the command-line parser for cell generation."""
    p = argparse.ArgumentParser()
    p.add_argument('specs_fname', help='specs yaml file')
    p.add_argument('--no-sch', dest='gen_sch', action='store_false',
                   default=True, help='skip schematic generation')
    p.add_argument('--no-lay', dest='gen_lay', action='store_false',
                   default=True, help='skip layout generation')
    p.add_argument('-v', '--lvs', action='store_true', default=False,
                   help='run lvs')
    p.add_argument('-x', '--rcx', action='store_true', default=False,
                   help='run rcx')
    p.add_argument('--use-cache', dest='use_cache', action='store_true',
                   default=False,
                   help='uses the cache in cache_dir')
    p.add_argument('--save-cache', dest='save_cache', action='store_true',
                   default=False,
                   help='updates design database stored in cache_dir')
    p.add_argument('--pre', dest='prefix', default='',
                   help='prefix used to generate all the cells')
    p.add_argument('--suf', dest='suffix', default='',
                   help='suffix used to generate all the cells')
    p.add_argument('--format', default='yaml',
                   help='format of spec file (yaml, json, pickle)')
    p.add_argument('-dump', '--dump', default='',
                   help='If given will dump output of script into that '
                        'file according to the format specified')
    return p.parse_args()
def run_main(prj: BagProject, args: Namespace):
    """Load the spec file and generate the requested cell through *prj*."""
    io_cls = io_cls_dict[args.format]
    specs = io_cls.load(str(Path(args.specs_fname)))
    results = prj.generate_cell(
        specs=specs,
        gen_lay=args.gen_lay,
        gen_sch=args.gen_sch,
        run_lvs=args.lvs,
        run_rcx=args.rcx,
        use_cybagoa=True,
        use_cache=args.use_cache,
        save_cache=args.save_cache,
        prefix=args.prefix,
        suffix=args.suffix,
    )
    # Dump results only when there is something to save and a path was given.
    if results is not None and args.dump:
        io_cls.save(results, Path(args.dump))
if __name__ == '__main__':
    cli_args = parse_args()
    # Reuse a BagProject already present in the namespace (e.g. when
    # re-running interactively); otherwise construct a fresh one.
    bprj = locals().get('bprj', BagProject())
    run_main(bprj, cli_args)
================================================
FILE: run_scripts/generate_verilog.py
================================================
import os
import yaml
from jinja2 import Environment, FileSystemLoader
def run_main():
    """Render SystemVerilog model templates for each cell.

    Reads the cell-to-template map from verilog_cell_map.yaml, writes a
    rendered verilog.sv into each cell's view directory, and emits the
    (lib cell view) list file consumed by the skill compile script.
    """
    verilog_dir = 'verilog_models'
    cell_map_fname = 'verilog_cell_map.yaml'
    skill_read_fname = 'verilog_cell_list.txt'
    lib_name = 'AAAMODEL_QDR_HYBRID3'
    lib_loc = 'gen_libs'
    view_name = 'systemVerilog'
    model_fname = 'verilog.sv'

    with open(cell_map_fname, 'r') as f:
        # safe_load: the cell map is plain data; yaml.load without an
        # explicit Loader is unsafe and a TypeError on PyYAML >= 6.
        cell_map = yaml.safe_load(f)

    jinja_env = Environment(loader=FileSystemLoader(verilog_dir))
    with open(skill_read_fname, 'w') as g:
        for cell_name, fname in cell_map.items():
            root_dir = os.path.join(lib_loc, lib_name, cell_name, view_name)
            os.makedirs(root_dir, exist_ok=True)
            content = jinja_env.get_template(fname).render(cell_name=cell_name)
            with open(os.path.join(root_dir, model_fname), 'w') as f:
                f.write(content)
            # one "lib cell view" line per generated model
            g.write('%s %s %s\n' % (lib_name, cell_name, view_name))
# Script entry point.
if __name__ == '__main__':
    run_main()
================================================
FILE: run_scripts/meas_cell.py
================================================
import argparse
from argparse import Namespace
from pathlib import Path
import pdb
from bag.io.file import Pickle, Yaml
from bag.core import BagProject
# Map a --format option value to the bag IO class that implements it.
# NOTE(review): the --format help text also mentions 'json', which has no
# entry here — confirm whether json support was intended.
io_cls_dict = {
    'pickle': Pickle,
    'yaml': Yaml,
}
def parse_args() -> Namespace:
    """Build and evaluate the command-line parser for cell measurement."""
    p = argparse.ArgumentParser()
    p.add_argument('specs_fname', help='specs yaml file')
    p.add_argument('--no-cell', dest='gen_cell', action='store_false',
                   default=True, help='skip cell generation')
    p.add_argument('--no-wrapper', dest='gen_wrapper', action='store_false',
                   default=True, help='skip wrapper generation')
    p.add_argument('--no-tb', dest='gen_tb', action='store_false',
                   default=True, help='skip tb generation')
    p.add_argument('--load', dest='load_results', action='store_true',
                   default=False, help='skip simulation, just load the results')
    p.add_argument('-x', '--extract', dest='extract', action='store_true',
                   default=False, help='do extracted simulation')
    p.add_argument('--no-sim', dest='run_sim', action='store_false',
                   default=True, help='run simulation, --load has a priority over this')
    p.add_argument('--format', default='yaml',
                   help='format of spec file (yaml, json, pickle)')
    p.add_argument('-dump', '--dump', default='', help='output will be dumped to this path, '
                   'according to the format specified')
    p.add_argument('--pause', default=False, action='store_true',
                   help='True to pause using pdb.set_trace() after simulation is done')
    return p.parse_args()
def run_main(prj: BagProject, args: Namespace):
    """Load the spec file and run cell measurement through *prj*."""
    io_cls = io_cls_dict[args.format]
    specs = io_cls.load(str(Path(args.specs_fname)))
    results = prj.measure_cell(
        specs=specs,
        gen_cell=args.gen_cell,
        gen_wrapper=args.gen_wrapper,
        gen_tb=args.gen_tb,
        load_results=args.load_results,
        extract=args.extract,
        run_sims=args.run_sim,
    )
    if args.pause:
        # drop into the debugger so results can be inspected interactively
        pdb.set_trace()
    if results is not None and args.dump:
        io_cls.save(results, Path(args.dump))
if __name__ == '__main__':
    cli_args = parse_args()
    # Reuse a BagProject already present in the namespace (e.g. when
    # re-running interactively); otherwise construct a fresh one.
    bprj = locals().get('bprj', BagProject())
    run_main(bprj, cli_args)
================================================
FILE: run_scripts/run_bag.sh
================================================
#!/usr/bin/env bash
# Launch the BAG python interpreter with the workspace PYTHONPATH set up.
source .bashrc_pypath
# Quote "${BAG_PYTHON}" and "$@" so the interpreter path and forwarded
# arguments survive word splitting (the old unquoted $@ broke arguments
# containing spaces or glob characters).
exec "${BAG_PYTHON}" "$@"
================================================
FILE: run_scripts/setup_submodules.py
================================================
#!/usr/bin/env bash
# crazy black magic from:
# https://unix.stackexchange.com/questions/20880/how-can-i-use-environment-variables-in-my-shebang
# this block of code is valid in both bash and python.
# this means if this script is run under bash, it'll
# call this script again using BAG_PYTHON. If
# this script is run under Python, this block of code
# effectively does nothing.
# NOTE: do not reformat the lines below — the exact quoting is what makes
# this parse as a no-op string expression in Python and as a real
# if-block in bash.
if "true" : '''\'
then
if [[ $BAG_PYTHON ]]; then
exec ${BAG_PYTHON} "$0" "$@"
else
echo "BAG_PYTHON environment variable is not set"
fi
exit 127
fi
'''
import os
import subprocess
import yaml
def write_to_file(fname, lines):
    """Write *lines* to *fname* (newline-terminated) and git-add the file."""
    with open(fname, 'w') as f:
        for line in lines:
            f.write(line + '\n')
    add_git_file(fname)
def setup_python_path(module_list):
    """Generate bag_startup.py, which extends sys.path with the BAG
    framework, the tech config directory, and every design module."""
    template = "sys.path.append(os.path.join(os.environ['BAG_WORK_DIR'], '%s'))"
    lines = [
        '# -*- coding: utf-8 -*-',
        'import os',
        'import sys',
        '',
        "sys.path.append(os.environ['BAG_FRAMEWORK'])",
        "sys.path.append(os.environ['BAG_TECH_CONFIG_DIR'])",
        template % 'BAG2_TEMPLATES_EC',
    ]
    lines.extend(template % mod_name for mod_name, _ in module_list)
    write_to_file('bag_startup.py', lines)
def get_sch_libraries(mod_name, mod_info):
    """List schematic generator libraries inside a design module.

    Returns the names of all subdirectories of ``<mod_name>/<lib_path>``
    (``lib_path`` defaults to ``'BagModules'``), or an empty list when
    that directory does not exist.
    """
    root_dir = os.path.realpath(
        os.path.join(mod_name, mod_info.get('lib_path', 'BagModules')))
    if not os.path.isdir(root_dir):
        return []
    return [entry for entry in os.listdir(root_dir)
            if os.path.isdir(os.path.join(root_dir, entry))]
def setup_libs_def(module_list):
    """Generate bag_libs.def, mapping each schematic generator library to
    its python package location inside the workspace."""
    lines = ['BAG_prim $BAG_TECH_CONFIG_DIR/DesignModules']
    for mod_name, mod_info in module_list:
        bag_modules = mod_info.get('lib_path', 'BagModules')
        lines.extend('%s $BAG_WORK_DIR/%s/%s' % (lib_name, mod_name, bag_modules)
                     for lib_name in get_sch_libraries(mod_name, mod_info))
    write_to_file('bag_libs.def', lines)
def setup_cds_lib(module_list):
    """Generate cds.lib.bag with a DEFINE entry for every schematic
    generator library found in the design modules."""
    lines = ['DEFINE BAG_prim $BAG_TECH_CONFIG_DIR/BAG_prim']
    for mod_name, mod_info in module_list:
        lines.extend('DEFINE %s $BAG_WORK_DIR/%s/%s' % (lib_name, mod_name, lib_name)
                     for lib_name in get_sch_libraries(mod_name, mod_info))
    write_to_file('cds.lib.bag', lines)
def run_command(cmd):
    """Run *cmd* as a subprocess, handling Ctrl-C gracefully.

    On KeyboardInterrupt the child is asked to terminate (SIGTERM) and,
    failing that, killed (SIGKILL).  Raises ValueError if the command
    fails, dies from a signal, or cannot be stopped after Ctrl-C.
    """
    # seconds to wait at each terminate/kill escalation step
    timeout = 5
    proc = subprocess.Popen(cmd)
    try:
        proc.communicate()
    except KeyboardInterrupt:
        print('Ctrl-C detected, terminating')
        if proc.returncode is None:
            # escalate: polite terminate first, then kill if it won't die
            proc.terminate()
            print('terminating process...')
            try:
                proc.wait(timeout=timeout)
                print('process terminated')
            except subprocess.TimeoutExpired:
                proc.kill()
                print('process did not terminate, try killing...')
                try:
                    proc.wait(timeout=timeout)
                    print('process killed')
                except subprocess.TimeoutExpired:
                    print('cannot kill process...')
    # returncode is still None only if the child survived both attempts
    if proc.returncode is None:
        raise ValueError('Ctrl-C detected, but cannot kill process')
    elif proc.returncode < 0:
        # negative returncode: the child was terminated by a signal
        raise ValueError('process terminated with return code = %d' % proc.returncode)
    elif proc.returncode > 0:
        raise ValueError('command %s failed' % ' '.join(cmd))
def add_git_submodule(module_name, url):
    """Register *url* as a git submodule, unless *module_name* exists."""
    if not os.path.exists(module_name):
        run_command(['git', 'submodule', 'add', url])
def add_git_file(fname):
    # Force-add (-f) so generated files are tracked even if gitignored.
    run_command(['git', 'add', '-f', fname])
def link_submodule(repo_path, module_name):
    """Symlink *module_name* from the master repo checkout and git-add it.

    Skips silently when *module_name* already exists locally; raises
    ValueError when the submodule cannot be found under *repo_path*.
    """
    if os.path.exists(module_name):
        # already present locally; nothing to do
        return
    source = os.path.join(repo_path, module_name)
    if not os.path.isdir(source):
        raise ValueError('Cannot find submodule %s in %s' % (module_name, repo_path))
    os.symlink(source, module_name)
    add_git_file(module_name)
def setup_git_submodules(module_list):
    """Add BAG2_TEMPLATES_EC plus every design module as git submodules."""
    add_git_submodule('BAG2_TEMPLATES_EC', 'git@github.com:ucb-art/BAG2_TEMPLATES_EC')
    for name, info in module_list:
        add_git_submodule(name, info['url'])
def setup_submodule_links(module_list, repo_path):
    """Symlink BAG2_TEMPLATES_EC and every design module from *repo_path*."""
    for name in ['BAG2_TEMPLATES_EC'] + [mod for mod, _ in module_list]:
        link_submodule(repo_path, name)
def run_main():
    """Set up git submodules/symlinks and regenerate workspace config files.

    Reads bag_submodules.yaml and then either registers the design modules
    as git submodules (when run inside the master repo) or symlinks them
    from the master repo checkout, before regenerating bag_startup.py,
    bag_libs.def and cds.lib.bag.

    Raises
    ------
    ValueError
        If the BAG_framework directory cannot be found.
    """
    with open('bag_submodules.yaml', 'r') as f:
        # safe_load: plain-data config; yaml.load without an explicit Loader
        # is unsafe and a TypeError on PyYAML >= 6.
        modules_info = yaml.safe_load(f)
    # sort for a deterministic module ordering in the generated files
    module_list = [(key, modules_info[key]) for key in sorted(modules_info.keys())]

    # error checking
    bag_dir = 'BAG_framework'
    if not os.path.isdir(bag_dir):
        raise ValueError('Cannot find directory %s' % bag_dir)

    # get real absolute path of parent directory of BAG_framework
    repo_path = os.path.dirname(os.path.realpath(bag_dir))
    cur_path = os.path.realpath('.')
    if cur_path == repo_path:
        # BAG_framework is an actual directory in this repo;
        # add dependencies as git submodules
        setup_git_submodules(module_list)
    else:
        # workspace checkout elsewhere: symlink modules from the master repo
        setup_submodule_links(module_list, repo_path)

    setup_python_path(module_list)
    setup_libs_def(module_list)
    setup_cds_lib(module_list)
# Script entry point.
if __name__ == '__main__':
    run_main()
================================================
FILE: run_scripts/sim_cell.py
================================================
import argparse
from argparse import Namespace
from pathlib import Path
import pdb
from bag.io.file import Pickle, Yaml
from bag.core import BagProject
# Map a --format option value to the bag IO class that implements it.
# NOTE(review): the --format help text also mentions 'json', which has no
# entry here — confirm whether json support was intended.
io_cls_dict = {
    'pickle': Pickle,
    'yaml': Yaml,
}
def parse_args() -> Namespace:
    """Build and evaluate the command-line parser for cell simulation."""
    p = argparse.ArgumentParser()
    p.add_argument('specs_fname', help='specs yaml file')
    p.add_argument('--no-cell', dest='gen_cell', action='store_false',
                   default=True, help='skip cell generation')
    p.add_argument('--no-wrapper', dest='gen_wrapper', action='store_false',
                   default=True, help='skip wrapper generation')
    p.add_argument('--no-tb', dest='gen_tb', action='store_false',
                   default=True, help='skip tb generation')
    p.add_argument('--load', dest='load_results', action='store_true',
                   default=False, help='skip simulation, just load the results')
    p.add_argument('-x', '--extract', dest='extract', action='store_true',
                   default=False, help='do extracted simulation')
    p.add_argument('--no-sim', dest='run_sim', action='store_false',
                   default=True, help='run simulation, --load has a priority over this')
    p.add_argument('--format', default='yaml',
                   help='format of spec file (yaml, json, pickle)')
    p.add_argument('-dump', '--dump', default='', help='output will be dumped to this path, '
                   'according to the format specified')
    p.add_argument('--pause', default=False, action='store_true',
                   help='True to pause using pdb.set_trace() after simulation is done')
    return p.parse_args()
def run_main(prj: BagProject, args: Namespace):
    """Load the spec file and run cell simulation through *prj*."""
    io_cls = io_cls_dict[args.format]
    specs = io_cls.load(str(Path(args.specs_fname)))
    results = prj.simulate_cell(
        specs=specs,
        gen_cell=args.gen_cell,
        gen_wrapper=args.gen_wrapper,
        gen_tb=args.gen_tb,
        load_results=args.load_results,
        extract=args.extract,
        run_sim=args.run_sim,
    )
    if args.pause:
        # drop into the debugger so results can be inspected interactively
        pdb.set_trace()
    if results is not None and args.dump:
        io_cls.save(results, Path(args.dump))
if __name__ == '__main__':
    cli_args = parse_args()
    # Reuse a BagProject already present in the namespace (e.g. when
    # re-running interactively); otherwise construct a fresh one.
    bprj = locals().get('bprj', BagProject())
    run_main(bprj, cli_args)
================================================
FILE: run_scripts/start_bag.il
================================================
/* Note:
Due to licensing reasons, this skill script is missing the function
CCSinvokeCdfCallbacks() from Cadence solution 11018344, which executes
CDF parameters callback from skill.
If you do not need to instantiate a pcell instance, this method
is not needed.
Eric Chang, Mar 2, 2017.
*/
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Virtuoso Database operations functions ;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; reads a skill data structure from file
; Opens fname, delegates the recursive parsing to
; parse_data_from_file_helper, closes the port, and returns the result.
procedure( parse_data_from_file( fname "t" )
    let( (p ans)
        unless( p = infile( fname )
            error("Cannot open file %s" fname)
        )
        ans = parse_data_from_file_helper(p)
        close( p )
        ans
    )
)
; recursive helper for parse_data_from_file
; Reads one value from port p.  The serialization format is line-based:
;   "#list"      start of a list, terminated by an "#end" line
;   "#prop_list" start of a key/value property list, terminated by "#end"
;   "#float v" / "#int v" / "#bool v"   scalar values
;   any other line is returned verbatim as a string (including "#end",
;   which the container branches use as their terminator sentinel)
procedure( parse_data_from_file_helper( p )
    let( (line item ans finish key)
        gets( line p )
        ; remove newline
        line = substring(line 1 strlen(line) - 1)
        ; printf("read line: %s\n" line)
        cond(
            (line == "#list"
                ; parse a list; tconc is seeded with a dummy 0, so cdar(ans)
                ; below returns the accumulated items without the seed
                ans = tconc(nil 0)
                while( nequal(item = parse_data_from_file_helper(p) "#end")
                    tconc(ans item)
                )
                ; printf("returning list ")
                ; print(cdar(ans))
                ; printf("\n")
                cdar(ans)
            )
            (line == "#prop_list"
                ; parse a disembodied property list; entries are read as
                ; alternating key / value until the "#end" sentinel
                ans = ncons(nil)
                finish = nil
                while( !finish
                    key = parse_data_from_file_helper(p)
                    if( key == "#end" then
                        finish = 't
                    else
                        item = parse_data_from_file_helper(p)
                        putprop(ans item key)
                    )
                )
                ans
            )
            ; parse a float
            (strncmp( line "#float" 6 ) == 0
                cdfParseFloatString(cadr(parseString(line)))
            )
            ; parse an int
            (strncmp( line "#int" 4 ) == 0
                atoi(cadr(parseString(line)))
            )
            ; parse a boolean (encoded as 1/0)
            (strncmp( line "#bool" 5 ) == 0
                if( atoi(cadr(parseString(line))) == 1 then
                    't
                else
                    nil
                )
            )
            ; parse a string token or #end
            ('t
                ; printf("returning str %s\n" line)
                line
            )
        )
    )
)
; return a list of cells in the given library.
; Returns an empty list when the library cannot be opened.
procedure( get_cells_in_library( lib_name "t" )
    let( ( lib_obj ans )
        if( lib_obj = ddGetObj(lib_name nil nil nil nil "r") then
            ans = ddGetObjChildren(lib_obj)~>name
            ddReleaseObj(lib_obj)
        else
            ; library does not exist, return empty list
            ans = '()
        )
        ans
    )
)
; return a list of cells in the given library.
; Writes one cell name per line to fname.
procedure( get_cells_in_library_file( lib_name fname "tt" )
    let( ( p )
        p = outfile( fname "w" )
        foreach( cell get_cells_in_library(lib_name)
            fprintf(p "%s\n" cell)
        )
        close(p)
    )
)
; Returns the directory corresponding to the given library.
procedure( get_lib_directory(lib_name "t")
    let( ( lib_obj ans )
        if( lib_obj = ddGetObj(lib_name nil nil nil nil "r") then
            ans = lib_obj~>readPath
            ddReleaseObj(lib_obj)
        else
            ; library does not exist, return empty string
            ans = ""
        )
        ans
    )
)
; Parse the netlist of the given cellview.
; Works on schematic and veriloga.
; Writes a YAML-like description (lib/cell name, pin list, and per-instance
; master + terminal information) of the cellview to file_name.
procedure( parse_cad_sch(lib_name cell_name file_name "ttt")
    ; NOTE(review): tb_list, tb_match, inst_lib_name and inst_cell_name are
    ; declared but never used in this procedure.
    let( (cv cell_type p indent direction term_names tb_list tb_match
          inst_lib_name inst_cell_name inst_cnt)
        indent = ""
        cell_type = "schematic"
        ; fall back to the veriloga view when no schematic view exists
        unless( cv = dbOpenCellViewByType( lib_name cell_name "schematic" nil "r" )
            cell_type = "veriloga"
            unless( cv = dbOpenCellViewByType( lib_name cell_name "veriloga" nil "r" )
                error( "Cannot find schematic or veriloga view of cell %s__%s" lib_name cell_name )
            )
        )
        p = outfile( file_name "w" )
        ; print cellview information
        printf( "*INFO* Writing cell %s__%s (%s) netlist to %s\n" lib_name cell_name cell_type file_name )
        fprintf( p "%slib_name: %s\n" indent lib_name )
        fprintf( p "%scell_name: %s\n" indent cell_name )
        ; print pins; veriloga terminals are reversed before printing
        fprintf( p "%spins: [ " indent )
        if( cell_type == "veriloga" then
            term_names = reverse(cv~>terminals~>name)
        else
            term_names = cv~>terminals~>name
        )
        ; add quotes around pin names to escape array pins
        term_names = mapcar( lambda( (x) sprintf(nil "\"%s\"" x) ) term_names )
        fprintf( p "%s ]\n" buildString(term_names ", "))
        ; print instances
        if( not(cv~>instances) then
            fprintf( p "%sinstances: {}\n" indent )
        else
            inst_cnt = 0
            fprintf( p "%sinstances:\n" indent )
            foreach( inst cv~>instances
                inst_cnt++
                ; print entry for instance
                indent = " "
                fprintf( p "%s%s:\n" indent inst~>name )
                ; print instance master information.
                indent = " "
                fprintf( p "%slib_name: %s\n" indent inst~>libName )
                fprintf( p "%scell_name: %s\n" indent inst~>cellName )
                ; print instance terminal information
                if( !(inst~>instTerms) then
                    fprintf( p "%sinstpins: {}\n" indent )
                else
                    fprintf( p "%sinstpins:\n" indent )
                    foreach( inst_term inst~>instTerms
                        ; direction may be unset; default to empty string
                        unless( direction = inst_term~>direction
                            direction = ""
                        )
                        indent = " "
                        fprintf( p "%s%s:\n" indent inst_term~>name )
                        indent = " "
                        fprintf( p "%sdirection: %s\n" indent direction )
                        fprintf( p "%snet_name: \"%s\"\n" indent inst_term~>net~>name )
                        fprintf( p "%snum_bits: %d\n" indent inst_term~>numBits )
                    )
                )
            )
            ; emit an empty mapping if the instance list turned out empty
            when(inst_cnt == 0
                fprintf( p " {}\n" )
            )
        )
        ; close resources
        close(p)
        dbClose(cv)
    )
)
; Delete a cellview if it exists. Currently used to delete old calibre file.
; Returns 't (from ddDeleteObj or the else branch) in either case.
procedure( delete_cellview(lib_name cell_name view_name "ttt")
    let( (obj)
        obj = ddGetObj(lib_name cell_name view_name)
        if( obj then
            ddDeleteObj(obj)
        else
            ; nothing to delete; treat as success
            't
        )
    )
)
; Parse the structure of the given cellview.
; Works on layout.
; Writes a YAML-like description of the layout's rectangles, labels
; (including textDisplay shapes) and instances to file_name.
procedure( parse_cad_layout(lib_name cell_name file_name "ttt")
    let( (cv cell_type p indent rect_cnt label_cnt inst_cnt)
        indent = ""
        cell_type = "layout"
        unless( cv = dbOpenCellViewByType( lib_name cell_name cell_type nil "r" )
            error( "Cannot find layout view of cell %s__%s" lib_name cell_name )
        )
        p = outfile( file_name "w" )
        ; print cellview information
        printf( "*INFO* Writing cell %s__%s (%s) netlist to %s\n" lib_name cell_name cell_type file_name )
        fprintf( p "%slib_name: %s\n" indent lib_name )
        fprintf( p "%scell_name: %s\n" indent cell_name )
        ; print rects, keyed by a running counter
        if( not(cv~>shapes) then
            fprintf( p "%srects: {}\n" indent )
        else
            rect_cnt = 0
            fprintf( p "%srects:\n" indent )
            foreach( shape cv~>shapes
                if( (shape~>objType == "rect") then
                    rect_cnt++
                    ; print entry for rect
                    indent = " "
                    fprintf( p "%s%d:\n" indent rect_cnt )
                    ; print rect master information.
                    indent = " "
                    fprintf( p "%slayer: %s %s\n" indent nthelem(1 shape~>lpp) nthelem(2 shape~>lpp))
                    fprintf( p "%sbBox: [[%f, %f], [%f, %f]]\n" indent
                        nthelem(1 nthelem(1 shape~>bBox)) nthelem(2 nthelem(1 shape~>bBox))
                        nthelem(1 nthelem(2 shape~>bBox)) nthelem(2 nthelem(2 shape~>bBox))
                    );fprintf
                )
            );if
            ; emit an empty mapping when no rect shapes were found
            if((rect_cnt == 0) then
                fprintf( p " {}\n" )
            );if
        )
        ; print labels
        indent = ""
        if( not(cv~>shapes) then
            fprintf( p "%slabels: {}\n" indent )
        else
            label_cnt = 0
            fprintf( p "%slabels:\n" indent )
            foreach( shape cv~>shapes
                if( (shape~>objType == "label") then
                    label_cnt++
                    ; print entry for label
                    indent = " "
                    fprintf( p "%s%d:\n" indent label_cnt )
                    ; print label master information.
                    indent = " "
                    fprintf( p "%slabel: %s\n" indent shape~>theLabel )
                    fprintf( p "%slayer: %s %s\n" indent nthelem(1 shape~>lpp) nthelem(2 shape~>lpp))
                    fprintf( p "%sxy: [%f, %f]\n" indent nthelem(1 shape~>xy) nthelem(2 shape~>xy))
                )
                if( (shape~>objType == "textDisplay") then ;some labels are instantiated as text displays
                    label_cnt++
                    ; print entry for label
                    indent = " "
                    fprintf( p "%s%d:\n" indent label_cnt )
                    ; print label master information; the text comes from the
                    ; owner object of the textDisplay
                    indent = " "
                    fprintf( p "%slabel: %s\n" indent shape~>owner~>name )
                    fprintf( p "%slayer: %s %s\n" indent nthelem(1 shape~>lpp) nthelem(2 shape~>lpp))
                    fprintf( p "%sxy: [%f, %f]\n" indent nthelem(1 shape~>xy) nthelem(2 shape~>xy))
                )
            );if
            if((label_cnt == 0) then
                fprintf( p " {}\n" )
            );if
        )
        ; print instances; mosaics additionally report array geometry
        indent = ""
        if( not(cv~>instances) then
            fprintf( p "%sinstances: {}\n" indent )
        else
            inst_cnt = 0
            fprintf( p "%sinstances:\n" indent )
            foreach( inst cv~>instances
                inst_cnt++
                ; print entry for instance
                indent = " "
                fprintf( p "%s%s:\n" indent inst~>name )
                ; print instance master information.
                indent = " "
                fprintf( p "%slib_name: %s\n" indent inst~>libName )
                fprintf( p "%scell_name: %s\n" indent inst~>cellName )
                fprintf( p "%sxy: [%f, %f]\n" indent nthelem(1 inst~>xy) nthelem(2 inst~>xy))
                if( (inst~>objType == "mosaic") then
                    fprintf( p "%scols: %d\n" indent inst~>columns)
                    fprintf( p "%srows: %d\n" indent inst~>rows)
                    fprintf( p "%ssp_cols: %f\n" indent inst~>uX)
                    fprintf( p "%ssp_rows: %f\n" indent inst~>uY)
                    fprintf( p "%srotation: %s\n" indent car(inst~>tileArray))
                else
                    fprintf( p "%srotation: %s\n" indent inst~>orient)
                );if
            )
            when(inst_cnt == 0
                fprintf( p " {}\n" )
            )
        )
        ; close resources
        close(p)
        dbClose(cv)
    )
)
; get a list of cells contained in the specified library
procedure( get_cell_list(lib_name file_name "tt")
    ; Write the names of all cells in lib_name to file_name as a single
    ; bracketed, comma-separated list keyed by the library name.
    let( (lib_obj cname out_port)
        lib_obj = ddGetObj(lib_name)
        out_port = outfile( file_name "w" )
        fprintf( out_port "%s: [" lib_name)
        ; one entry per cell in the library
        foreach( cname lib_obj~>cells~>name
            fprintf( out_port "%s, " cname)
        )
        fprintf( out_port "] \n" )
        ; close resources
        close(out_port)
    )
)
; if library with lib_name does not exists, create a new
; library with that name. Otherwise, if erase is true,
; remove all cells in that library. Returns the library
; database object.
procedure( create_or_erase_library(lib_name tech_lib lib_path erase "tttg")
    ; Ensure a library named lib_name exists. If it already exists and
    ; erase is non-nil, delete every cell inside it. Otherwise create the
    ; library under lib_path and bind tech_lib to it. Returns 't.
    let( (lib_obj)
        lib_obj = ddGetObj(lib_name nil nil nil nil "r")
        if( lib_obj then
            ; library already exists; optionally wipe its cells
            when( erase
                foreach( cell lib_obj~>cells
                    unless( ddDeleteObj(cell)
                        error("cannot delete cell %s in library %s\n" cell~>name lib_name)
                    )
                )
            )
            ddReleaseObj(lib_obj)
        else
            ; library does not exist; create it under lib_path/lib_name
            ; (unless lib_path is empty or ".")
            when( and(lib_path (lib_path != "."))
                lib_path = strcat(lib_path "/" lib_name)
            )
            lib_obj = ddCreateLib(lib_name lib_path)
            ; attach technology file
            techBindTechFile(lib_obj tech_lib)
            ; close library
            ddReleaseObj(lib_obj)
        )
        't
    )
)
; copy all template cells to the given library.
; template list is a list of three-element lists with the format
; '("master_lib_name" "master_cell_name" "target_cell_name")
; any existing cellviews will be overwritten.
procedure( copy_templates_to_library(lib_name template_list "tl")
    ; Copy template cells into lib_name, overwriting existing cellviews.
    ; template_list entries are (master_lib master_cell target_cell) lists.
    ; NOTE(review): ccpCopy cannot map one source cell to several different
    ; targets in a single call, so each pass of the while loop copies only
    ; unique sources and defers duplicates to the next pass.
    let( (current remaining src_gdm targ_gdm table master_lib master_cell target_cell key cnt
          empty_spec targ_lib_obj test_cv)
        current = template_list
        remaining = '()
        empty_spec = gdmCreateSpecList()
        targ_lib_obj = ddGetObj(lib_name nil nil nil nil "r")
        ; ccpCopy cannot copy the same cell to multiple different cells.
        ; because of this, we need to copy a set of unique cells at a time,
        ; hence the while loop.
        while( current
            ; Create GDMSpecList used to copy all cells
            src_gdm = gdmCreateSpecList()
            targ_gdm = gdmCreateSpecList()
            ; table to keep track of seen cells.
            table = makeTable("mytable" 0)
            ; Populate GDMSpecList
            foreach( template_info current
                master_lib = car(template_info)
                master_cell = cadr(template_info)
                target_cell = caddr(template_info)
                ; check if we copied this cell on this iteration yet
                key = list(master_lib master_cell)
                if( table[key] == 1 then
                    ; already copied this source on this pass;
                    ; wait for the next iteration
                    remaining = cons(template_info remaining)
                else
                    ; purge target cellview if exist, so the copy
                    ; overwrites cleanly instead of merging with open views
                    when( targ_lib_obj
                        test_cv = dbFindOpenCellView(targ_lib_obj target_cell "schematic")
                        when( test_cv
                            dbPurge(test_cv)
                        )
                        test_cv = dbFindOpenCellView(targ_lib_obj target_cell "symbol")
                        when( test_cv
                            dbPurge(test_cv)
                        )
                        ; hard remove adexl state if it exists
                        test_cv = ddGetObj(lib_name target_cell "adexl")
                        when( test_cv
                            ddDeleteObj(test_cv)
                        )
                    )
                    gdmAddSpecToSpecList(gdmCreateSpec(master_lib master_cell nil nil "CDBA") src_gdm)
                    gdmAddSpecToSpecList(gdmCreateSpec(lib_name target_cell nil nil "CDBA") targ_gdm)
                    table[key] = 1
                )
            )
            ; Perform copy
            ccpCopy(src_gdm targ_gdm 't 'CCP_EXPAND_COMANAGED nil nil "" "" 'CCP_UPDATE_FROM_LIBLIST empty_spec)
            ; set current and remaining for the next pass
            current = remaining
            remaining = '()
            ; debug printing
            ; printstruct(table)
        )
    )
    't
)
; returns a unique terminal name in the given cellview.
; name_base is the suffix of the returned terminal name.
procedure( get_unique_term_name( cvid name_base "gt")
    ; Return a terminal name of the form temp<N>_<name_base> that does not
    ; yet exist in cvid. name_base is the suffix of the returned name.
    let( (idx candidate)
        idx = 0
        candidate = nil
        ; probe temp1_..., temp2_..., ... until a free name is found
        while( null(candidate) || dbFindTermByName(cvid candidate)
            idx = idx + 1
            sprintf( candidate "temp%d_%s" idx name_base )
        )
        candidate
    )
)
; helper method to open pin master
procedure( open_pin_master(cvid pin_cv_info)
    ; Open the pin-master cellview described by pin_cv_info, a
    ; (lib cell view) triple, read-only. On failure closes cvid (the
    ; caller's cellview) before raising an error.
    let( (lib_str cell_str view_str master_cv)
        lib_str = nthelem(1 pin_cv_info)
        cell_str = nthelem(2 pin_cv_info)
        view_str = nthelem(3 pin_cv_info)
        master_cv = dbOpenCellViewByType( lib_str cell_str view_str nil "r" )
        unless( master_cv
            ; release the caller's cellview before aborting
            dbClose(cvid)
            error( "Cannot find pin master cellview: %s__%s (%s)" lib_str cell_str view_str)
        )
        master_cv
    )
)
; update pins of a schematic
; cvid is the opened cellview id of the schematic. It must be in append mode.
; pin_map is a list of two-element lists of old pin names and new pin names, respectively.
; ipin, opin, and iopin are lists of three strings for input/output/inout pins, respectively.
; first element is the pin master library, second element is the pin master cell, and third element
; is the pin master cellview.
procedure( update_schematic_pin(cvid pin_map new_pins ipin opin iopin "glllll")
    ; Rename/remove existing schematic pins and add new ones.
    ; cvid: schematic cellview, must be opened in append mode.
    ; pin_map: list of (old_name new_name) pairs; an empty new_name deletes the pin.
    ; new_pins: list of (name direction) pairs to create.
    ; ipin/opin/iopin: (lib cell view) triples naming input/output/inout pin masters.
    ; Fix: new_term is now declared in the let so it no longer leaks into the
    ; global namespace; unused pin_orient local removed.
    let( (snap_dist cur_term_name new_term_name term pin pin_location pin_direction
          temp_new_term_name new_term pin_master ipin_master opin_master iopin_master
          pin_xy_info npin_xl npin_yl npin_xr npin_yr npin_name npin_type)
        snap_dist = schGetEnv("schSnapSpacing")
        ; open pin masters
        ipin_master = open_pin_master(cvid ipin)
        opin_master = open_pin_master(cvid opin)
        iopin_master = open_pin_master(cvid iopin)
        pin_master = nil
        ; get new pin locations before any pin addition/subtraction.
        pin_xy_info = get_new_pin_locations(cvid snap_dist)
        ; rename or remove pins
        foreach( p pin_map
            cur_term_name = car(p)
            new_term_name = cadr(p)
            when(cur_term_name != new_term_name
                unless( term = dbFindTermByName(cvid cur_term_name)
                    ; release all open cellviews before aborting
                    dbClose(cvid)
                    dbClose(ipin_master)
                    dbClose(opin_master)
                    dbClose(iopin_master)
                    error( "Terminal %s not found." cur_term_name )
                )
                when( term~>pinCount != 1
                    dbClose(cvid)
                    dbClose(ipin_master)
                    dbClose(opin_master)
                    dbClose(iopin_master)
                    error( "Terminal %s does not have exactly one pin." cur_term_name)
                )
                pin = car(term~>pins)
                if( strlen(new_term_name) != 0 then
                    ; rename pin: remember location/direction, then re-create
                    pin_location = pin~>fig~>xy
                    pin_direction = term~>direction
                    ; pick the pin master matching the terminal direction
                    cond( ( pin_direction == "input" pin_master = ipin_master)
                          ( pin_direction == "output" pin_master = opin_master)
                          ( 't pin_master = iopin_master)
                    )
                    ; delete old pin figure
                    unless( dbDeleteObject(pin~>fig)
                        dbClose(cvid)
                        dbClose(ipin_master)
                        dbClose(opin_master)
                        dbClose(iopin_master)
                        error( "Cannot delete pin for terminal %s" cur_term_name )
                    )
                    ; create a temporary terminal with a unique name so we can
                    ; change the number of bits without getting an error
                    temp_new_term_name = get_unique_term_name(cvid new_term_name)
                    schCreatePin(cvid pin_master temp_new_term_name pin_direction nil pin_location "R0" )
                    ; now rename the new terminal
                    new_term = dbFindTermByName(cvid temp_new_term_name )
                    new_term~>name = new_term_name
                else
                    ; empty new name: remove the pin
                    dbDeleteObject(pin~>fig)
                )
            )
        )
        ; add new pins
        when( new_pins
            ; new pins stack downward below the left-most (inputs/inouts)
            ; or right-most (outputs) existing pin
            npin_xl = xCoord(car(pin_xy_info))
            npin_yl = yCoord(car(pin_xy_info)) - 2 * snap_dist
            npin_xr = xCoord(cadr(pin_xy_info))
            npin_yr = yCoord(cadr(pin_xy_info)) - 2 * snap_dist
            foreach( npin_info new_pins
                npin_name = car(npin_info)
                npin_type = cadr(npin_info)
                ; verify that this pin does not exist yet
                when(dbFindTermByName(cvid npin_name)
                    dbClose(cvid)
                    dbClose(ipin_master)
                    dbClose(opin_master)
                    dbClose(iopin_master)
                    error( "Terminal %s already exists" npin_name)
                )
                ; get pin location based on pin type
                cond( ( npin_type == "input" pin_master = ipin_master pin_location = npin_xl:npin_yl npin_yl = npin_yl - 2 * snap_dist)
                      ( npin_type == "output" pin_master = opin_master pin_location = npin_xr:npin_yr npin_yr = npin_yr - 2 * snap_dist)
                      ( 't pin_master = iopin_master pin_location = npin_xl:npin_yl npin_yl = npin_yl - 2 * snap_dist)
                )
                ; create pin
                schCreatePin(cvid pin_master npin_name npin_type nil pin_location "R0")
            )
        )
        ; release pin masters (cvid is closed by the caller)
        dbClose(ipin_master)
        dbClose(opin_master)
        dbClose(iopin_master)
    )
)
; find X and Y coordinates to insert new symbol pins
procedure( get_new_pin_locations(cvid snap_dist)
    ; Return a two-element list of points (in database units, snapped to
    ; snap_dist): the location of the lowest left-most pin and the lowest
    ; right-most pin of cvid. Callers place new pins below these points.
    ; Falls back to fixed defaults when cvid has no terminals (testbench).
    let( (pin bbox pin_x pin_y xl xr yl yr)
        ; find the left-most/right-most pin X coordinates, and find the lowest
        ; Y coordinate of the left-most/right-most pins
        xl = nil
        xr = nil
        yl = nil
        yr = nil
        foreach( term cvid->terminals
            when( term~>pinCount != 1
                dbClose(cvid)
                error( "Terminal %s does not have exactly one pin" term~>name)
            )
            pin = car(term~>pins)
            bbox = pin~>fig~>bBox
            ; pin center in snap-grid units (integers), so comparisons are exact
            pin_x = round2((xCoord(car(bbox)) + xCoord(cadr(bbox))) / 2.0 / snap_dist)
            pin_y = round2((yCoord(car(bbox)) + yCoord(cadr(bbox))) / 2.0 / snap_dist)
            if( xl == nil then
                ; first pin seen initializes both extremes
                xl = pin_x
                xr = pin_x
                yl = pin_y
                yr = pin_y
            else
                ; track the lowest Y among pins sharing the extreme X
                cond( (pin_x < xl xl = pin_x yl = pin_y)
                      (pin_x == xl yl = min(yl pin_y)))
                cond( (pin_x > xr xr = pin_x yr = pin_y)
                      (pin_x == xr yr = min(yr pin_y)))
            )
        )
        when(xl == nil
            ; default values if schematic has no terminals
            ; this usually means you have a testbench schematic
            xl = 0
            yl = 0
            xr = 10
            yr = 0
        )
        ; convert back from grid units to database units
        list((xl * snap_dist):(yl * snap_dist) (xr * snap_dist):(yr * snap_dist))
    )
)
; update pins of a symbol
; pin_map is a list of two-element lists, first element is old pin name, second element is new pin name.
; sympin is a 3-element list of strings. first element is the pin master library,
; second element is the pin master cell, and third element is the pin master cellview.
; simulators is a list of simulator names for which termOrder should be updated.
; Usually simulators = '("auLvs" "auCdl" "spectre" "hspiceD")
procedure( update_symbol_pin(lib_name cell_name pin_map new_pins sympin simulators "ttllll")
    ; Rename/remove/add pins on the symbol view of lib_name/cell_name, then
    ; update the pin order and the per-simulator termOrder CDF field.
    ; pin_map: list of (old_name new_name) pairs; empty new_name deletes.
    ; new_pins: list of (name direction) pairs to create.
    ; sympin: (lib cell view) triple naming the symbol pin master.
    ; simulators: simulator names whose termOrder CDF is rewritten,
    ; e.g. '("auLvs" "auCdl" "spectre" "hspiceD").
    let( (snap_dist cvid pin_master cur_term_name new_term_name term pin bbox pin_x pin_y pin_location pin_direction
          label_location label_rel_location temp_new_term_name new_term new_port_order cell_obj bc
          mpin_lib mpin_cell mpin_view pin_xy_info npin_xl npin_yl npin_xr npin_yr npin_name npin_type
          modified_pins)
        snap_dist = schGetEnv("schSnapSpacing")
        modified_pins = nil
        mpin_lib = car(sympin)
        mpin_cell = cadr(sympin)
        mpin_view = caddr(sympin)
        unless( pin_master = dbOpenCellViewByType(mpin_lib mpin_cell mpin_view nil "r")
            error("Cannot open symbol pin cellview %s__%s (%s)." mpin_lib mpin_cell mpin_view)
        )
        unless( cvid = dbOpenCellViewByType(lib_name cell_name "symbol" nil "a")
            dbClose(pin_master)
            error("Cannot open cellview %s__%s (symbol)." lib_name cell_name)
        )
        ; get new pin locations before any pin addition/subtraction.
        pin_xy_info = get_new_pin_locations(cvid snap_dist)
        ; modify existing pins; new_port_order is a tconc list collecting
        ; the final terminal ordering (leading "" is stripped via cdar later)
        new_port_order = tconc(nil "")
        foreach( p pin_map
            cur_term_name = car(p)
            new_term_name = cadr(p)
            new_port_order = tconc(new_port_order new_term_name)
            when( cur_term_name != new_term_name
                modified_pins = 't
                ; printf("%s %s\n" cur_term_name new_term_name)
                unless( term = dbFindTermByName(cvid cur_term_name)
                    ; release resources and drop the edit lock before aborting
                    dbClose(pin_master)
                    dbReopen(cvid, "r")
                    dbClose(cvid)
                    error( "Terminal %s not found." cur_term_name )
                )
                when( term~>pinCount != 1
                    dbClose(pin_master)
                    dbReopen(cvid, "r")
                    dbClose(cvid)
                    error( "Terminal %s does not have exactly one pin." cur_term_name)
                )
                pin = car(term~>pins)
                if( strlen(new_term_name) != 0 then
                    ; rename pin: compute its snapped center location
                    bbox = pin~>fig~>bBox
                    pin_x = (xCoord(car(bbox)) + xCoord(cadr(bbox))) / 2.0
                    pin_y = (yCoord(car(bbox)) + yCoord(cadr(bbox))) / 2.0
                    pin_location = round2(pin_x / snap_dist) * snap_dist:round2(pin_y / snap_dist) * snap_dist
                    pin_direction = term~>direction
                    ; change label: re-create the pin's text label with the new
                    ; name, preserving the old label's placement attributes.
                    ; prog/return exits after the first matching label.
                    prog( (label_orientation label_font label_font_size label_type label_text)
                        foreach( label pin~>fig~>children
                            when( label~>objType == "label"
                                label_location = label~>xy
                                label_orientation = label~>orient
                                label_rel_location = label~>justify
                                label_font = label~>font
                                label_font_size = label~>height
                                label_type = label~>labelType
                                label_text = label~>theLabel
                                when( label_text == cur_term_name
                                    schCreateSymbolLabel(cvid label_location "pin label" new_term_name label_rel_location
                                        label_orientation label_font label_font_size label_type)
                                    return('t)
                                )
                            )
                        )
                        return(nil)
                    )
                    dbDeleteObject(pin~>fig)
                    dbDeleteObject(pin)
                    ;create a temporary terminal with a unique name so we can change the number of bits without getting an error
                    temp_new_term_name = get_unique_term_name(cvid new_term_name)
                    schCreateSymbolPin(cvid pin_master temp_new_term_name pin_direction pin_location "R0" )
                    new_term = dbFindTermByName(cvid temp_new_term_name )
                    dbDeleteObject(term)
                    new_term~>name = new_term_name
                else
                    ; remove pin, its figure, and the terminal itself
                    dbDeleteObject(pin~>fig)
                    dbDeleteObject(pin)
                    dbDeleteObject(term)
                )
            )
        )
        ; add new pins
        when( new_pins
            modified_pins = 't
            ; get location for new pins: stack below the left-most
            ; (inputs/inouts) or right-most (outputs) existing pin
            npin_xl = xCoord(car(pin_xy_info))
            npin_yl = yCoord(car(pin_xy_info)) - 2 * snap_dist
            npin_xr = xCoord(cadr(pin_xy_info))
            npin_yr = yCoord(cadr(pin_xy_info)) - 2 * snap_dist
            foreach( npin_info new_pins
                npin_name = car(npin_info)
                npin_type = cadr(npin_info)
                ; verify that this pin does not exist yet
                when(dbFindTermByName(cvid npin_name)
                    dbClose(pin_master)
                    dbReopen(cvid, "r")
                    dbClose(cvid)
                    error( "Terminal %s already exists" npin_name)
                )
                ; update pin order
                new_port_order = tconc(new_port_order npin_name)
                ; get pin location based on pin type
                if( equal(npin_type "output") then
                    label_location = npin_xr:npin_yr
                    label_rel_location = "lowerLeft"
                    npin_yr = npin_yr - 2 * snap_dist
                else
                    label_location = npin_xl:npin_yl
                    label_rel_location = "lowerRight"
                    npin_yl = npin_yl - 2 * snap_dist
                )
                ; create label and pin
                schCreateSymbolLabel(cvid label_location "pin label" npin_name label_rel_location
                    "R0" "stick" snap_dist "normalLabel")
                schCreateSymbolPin(cvid pin_master npin_name npin_type label_location "R0")
            )
        )
        dbClose(pin_master)
        when( modified_pins
            ; update pin order (cdar unwraps the tconc structure and drops
            ; the leading "" sentinel)
            new_port_order = cdar(new_port_order)
            schEditPinOrder(cvid new_port_order 't)
            dbSave(cvid)
            ; update termOrder for each simulators
            cell_obj = ddGetObj(lib_name cell_name nil nil nil "r")
            unless( bc = cdfGetBaseCellCDF(cell_obj)
                ddReleaseObj(cell_obj)
                dbReopen(cvid, "r")
                dbClose(cvid)
                error("Cannot find CDF parameters for %s__%s. Delete generated cell and try again" lib_name cell_name)
            )
            foreach( simu simulators
                get(bc->simInfo simu)->termOrder = new_port_order
            )
            unless( cdfSaveCDF(bc)
                ddReleaseObj(cell_obj)
                dbReopen(cvid, "r")
                dbClose(cvid)
                error("Cannot save termOrder CDF for %s__%s." lib_name cell_name)
            )
            ddReleaseObj(cell_obj)
        )
        ; opening schematic will open all symbols inside that schematic.
        ; as the result, dbClose may not close this symbol view. To get rid
        ; of edit lock, we use dbReopen so even if dbClose fails the edit lock
        ; will be gone.
        dbReopen(cvid, "r")
        dbClose(cvid)
    )
)
; record an association list from pin name to pin location in units of snap distances.
; the pin name is sorted alphabetically so we can use the equal function to test
; for equality.
procedure( get_instance_pin_info(inst "g")
    ; Return an association list of (term_name x:y) entries, one per
    ; terminal of inst's master, with coordinates quantized to schematic
    ; snap units. The list is sorted alphabetically by terminal name so two
    ; pin sets can be compared with equal().
    ; Fix: bbox is now declared in the let so it no longer leaks into the
    ; global namespace.
    let( (snap_dist term_name pin_fig bbox xval yval inst_term_xy ans)
        ans = nil
        snap_dist = schGetEnv("schSnapSpacing")
        foreach( term inst->master->terminals
            term_name = term~>name
            ; get terminal coordinate in symbol
            pin_fig = car(term~>pins)~>fig
            bbox = pin_fig~>bBox
            xval = (xCoord(car(bbox)) + xCoord(cadr(bbox))) / 2.0
            yval = (yCoord(car(bbox)) + yCoord(cadr(bbox))) / 2.0
            ; quantize to schematic snap spacing to avoid floating point rounding error.
            inst_term_xy = round2(xval / snap_dist):round2(yval / snap_dist)
            ans = cons(list(term_name inst_term_xy) ans)
        )
        sortcar(ans nil)
    )
)
; get all the wire objects connected to terminals of the given instance.
; we assume each terminal has exactly one pin with 1 wire connected, with a
; single label on the wire. The wire doesn't connect to anything else.
; returns an association list from terminal name to a list of net name and wire figure object.
procedure( get_instance_terminal_wires(sch inst "gg")
    ; For each instance terminal of inst, find the single two-point wire
    ; attached to its pin (if any). Assumes each terminal has exactly one
    ; pin with at most one wire, which connects to nothing else.
    ; Returns a two-element list:
    ;   1. association list from terminal name to wire figure.
    ;   2. association list from terminal name to connected net name.
    ; sch is closed before any error is raised.
    ; Fixes: bbox and points are now declared in the let (previously
    ; implicit globals); the bad-wire error path now closes sch like the
    ; other error paths in this procedure.
    let( (snap_dist term_name pin_fig bbox points xval yval inst_term_xy net_name ans net_map)
        ans = nil
        net_map = nil
        snap_dist = schGetEnv("schSnapSpacing")
        foreach( inst_term inst~>instTerms
            term_name = inst_term~>name
            when( inst_term~>term~>pinCount != 1
                dbClose(sch)
                error("Terminal %s must have exactly one pin." term_name)
            )
            unless( pin_fig = car(inst_term~>term~>pins)~>fig
                dbClose(sch)
                error("Cannot find pin figure for terminal %s" term_name)
            )
            ; get instance terminal coordinate in schematic
            bbox = dbTransformBBox(pin_fig~>bBox inst~>transform)
            xval = xCoord(car(bbox)) + (xCoord(cadr(bbox)) - xCoord(car(bbox))) / 2.0
            yval = yCoord(car(bbox)) + (yCoord(cadr(bbox)) - yCoord(car(bbox))) / 2.0
            ; quantize to schematic snap spacing to avoid floating point rounding error.
            inst_term_xy = round2(xval / snap_dist) * snap_dist:round2(yval / snap_dist) * snap_dist
            net_name = inst_term~>net~>name
            net_map = cons(list(term_name net_name) net_map)
            ; find the net figure whose endpoint touches this pin
            foreach( fig inst_term~>net~>figs
                points = fig~>points
                when( member(inst_term_xy points)
                    when( length(points) != 2
                        ; close sch before aborting, consistent with the errors above
                        dbClose(sch)
                        error("pin for terminal %s must be connected to a single wire with label" term_name)
                    )
                    ans = cons(list(term_name fig) ans)
                )
            )
        )
        list(ans net_map)
    )
)
; Modify the instance terminal connections of the given instance.
; we assume each terminal to modify has at most 1 wire connected,
; if it exists, the wire connects to nothing else, and it has a label.
; In this way, this function just have to change the label text.
;
; if wire_list is not empty, then that means each terminal has exactly one
; wire connected. This function will update the label on the wires according
; to term_mapping.
;
; if wire_list is empty, then that means no wires are connected to terminals.
; this function will attach labels directly to each terminal. The labels are
; determined first from term_mapping, then from net_map
;
; sch is the schematic database object. Must be opened in append/write mode.
; inst is the instance object to modify.
; term_mapping is a list of key-value pairs, where keys are old net names,
; and values are new net names.
procedure( modify_instance_terminal(sch inst wire_list net_map term_mapping "gglll")
    ; Re-connect the terminals of inst according to term_mapping.
    ; If wire_list is non-empty (each terminal has exactly one labeled,
    ; two-point wire), each mapped wire is deleted and redrawn with a new
    ; label carrying the mapped net name. If wire_list is empty, labeled
    ; stub wires are created directly at each terminal, taking names from
    ; term_mapping first and falling back to net_map.
    ; sch must be opened in append/write mode. Returns 't.
    let( (snap_dist key_val old_name new_name fig points mid_point new_wire inst_term inst_pin
          bbox xval yval term_map_final db_term)
        ; get schematic snap distance spacing.
        snap_dist = schGetEnv("schSnapSpacing")
        if( wire_list then
            foreach( wire_info wire_list
                old_name = car(wire_info)
                when(key_val = assoc(old_name term_mapping)
                    new_name = cadr(key_val)
                    fig = cadr(wire_info)
                    points = fig~>points
                    ; midpoint of the two endpoint coordinates; label anchor
                    mid_point = foreach(mapcar (c1 c2) car(points) cadr(points) (c1 + c2) / 2.0)
                    ; delete old wire, then add wire back with new label.
                    schDelete(fig)
                    new_wire = car(schCreateWire(sch "draw" "full" points snap_dist snap_dist 0))
                    schCreateWireLabel(sch new_wire mid_point new_name "lowerCenter" "R0" "stick" 0.0625 nil)
                )
            )
            't
        else
            ; combine net_map and term_mapping; term_mapping takes precedence
            term_map_final = copy(term_mapping)
            foreach( net_info net_map
                old_name = car(net_info)
                unless( assoc(old_name term_map_final)
                    ; add net mapping only if it's not in term_mapping
                    term_map_final = cons(net_info term_map_final)
                )
            )
            foreach( net_info term_map_final
                old_name = car(net_info)
                new_name = cadr(net_info)
                when(db_term = dbFindTermByName(inst->master old_name)
                    ; only create terminal that's present in the current master
                    inst_term = dbCreateInstTerm(nil inst db_term)
                    inst_pin = car(inst_term~>term~>pins)~>fig
                    ; pin center in schematic coordinates, snapped to grid
                    bbox = dbTransformBBox(inst_pin~>bBox inst~>transform)
                    xval = (xCoord(car(bbox)) + xCoord(cadr(bbox))) / 2.0
                    yval = (yCoord(car(bbox)) + yCoord(cadr(bbox))) / 2.0
                    xval = round2(xval / snap_dist) * snap_dist
                    yval = round2(yval / snap_dist) * snap_dist
                    ; draw a short stub wire ending at the pin and label it
                    new_wire = car(schCreateWire(sch "draw" "full" list(xval-snap_dist:yval-snap_dist xval:yval)
                        snap_dist snap_dist 0))
                    schCreateWireLabel(sch new_wire xval:yval new_name "lowerCenter" "R0" "stick" 0.0625 nil)
                )
            )
            't
        )
    )
)
; Perform check-and-save on the given schematic database object, then close it.
procedure( check_and_save_schematic(sch "g")
    ; Run hierarchical check-and-save on the given schematic database
    ; object, warn about any cellviews with errors, then close it.
    let( (err_list err_cv err_cnt)
        ; configure check-and-save behavior, then run the hierarchy check
        schSetEnv( "checkHierSave" 't)
        schSetEnv( "saveAction" "Save")
        err_list = schCheckHier(sch "schematic symbol" "")
        ; report each cellview that failed the check
        foreach( entry err_list
            err_cv = car(entry)
            err_cnt = cadr(entry)
            warn( "%s__%s (%s) has %d errors." err_cv~>lib~>name err_cv~>cellName err_cv~>viewName err_cnt)
        )
        ; make sure all edit locks are gone by reopening in read mode
        dbReopen(sch, "r")
        dbClose(sch)
    )
)
; modify a schematic cell. Used to convert copied template cells into concrete instantiation.
;
; inst_list is an association list of (inst_name, rinst_list) pairs. Where:
;
; inst_name : name of the instance in the template cell.
; rinst_list : a list of rinsts, which are instances to replace the original instance by.
; If this list is empty, the original instance should be deleted. If the list
; has more than one element, we should array the original instance.
;
; Each rinst is a disembodied property lists, with the properties:
;
; rinst->name : the name of this rinst.
; rinst->lib_name : the instance master library.
; rinst->cell_name : the instance master cell.
; rinst->params : an association list of the CDF params of this rinst. The values are always string.
; rinst->term_mapping : an association list of the modified terminal connections of this rinst.
; if no connections are changed, this list should be empty.
;
; (You can read more about disembodied property lists and association list in the skill
; language user guide).
;
; For each instance, this function does the following:
; 1. Find the instance with the given name.
; 2. If rinst_list is nil, delete this instance.
; 3. If rinst_list has exactly one element:
; i. rename the instance name to rinst's name.
; ii. change the instance master of the instance.
; iii. change the CDF parameters (this should only happen with BAG primitives).
; iv. change the port connections of this instance.
; 4. If rinst_list has more than one element, for each additional element,
; copy the original instance and perform step 3 on that instance.
;
; This procedure allows one to delete or array any instances in the schematic template.
procedure( modify_schematic_content(sch_cv inst_list "gl")
    ; Replace, array, or delete instances in a copied template schematic.
    ; inst_list entries are (inst_name rinst_list) pairs; see the comment
    ; block above this procedure for the rinst disembodied-property-list
    ; format and the per-instance algorithm.
    ; Fixes: removed the stray `last_inst = nil` assignment (the variable was
    ; never read and leaked into the global namespace) and the unused errmsg
    ; local.
    let( (inst_obj inst_name rinst_list rinst_len cur_inst wire_list net_map par_val xl xr transform
          snap_dist pin_info tmp_result)
        snap_dist = schGetEnv("schSnapSpacing")
        foreach( inst inst_list
            inst_name = car(inst)
            unless( inst_obj = dbFindAnyInstByName(sch_cv inst_name)
                dbClose(sch_cv)
                error( "Cannot find instance %s" inst_name )
            )
            rinst_list = cadr(inst)
            rinst_len = length(rinst_list)
            if( rinst_len == 0 then
                ; no instances to replace by: delete the instance and the
                ; wires attached to its terminals
                wire_list = car(get_instance_terminal_wires(sch_cv inst_obj))
                foreach( wire_info wire_list
                    schDelete(cadr(wire_info))
                )
                dbDeleteObject(inst_obj)
            else
                cur_inst = nil
                pin_info = nil
                foreach( rinst rinst_list
                    if( !cur_inst then
                        ; first rinst: reuse the template instance in place
                        cur_inst = inst_obj
                        tmp_result = get_instance_terminal_wires(sch_cv cur_inst)
                        net_map = cadr(tmp_result)
                        wire_list = car(tmp_result)
                        pin_info = get_instance_pin_info(cur_inst)
                        ; compute the horizontal pitch (instance + attached
                        ; wires bounding box width, snapped to grid) used to
                        ; place subsequent array copies
                        xl = xCoord(car(cur_inst~>bBox))
                        xr = xCoord(cadr(cur_inst~>bBox))
                        foreach( wire_info wire_list
                            xl = min(xl xCoord(car(cadr(wire_info)~>bBox)))
                            xr = max(xr xCoord(cadr(cadr(wire_info)~>bBox)))
                        )
                        transform = list(round2((xr - xl + snap_dist) / snap_dist) * snap_dist:0 "R0" 1.0)
                    else
                        ; more than 1 rinst, copy cur_inst, do not copy wires
                        wire_list = nil
                        ; copy instance, shifted right by one pitch
                        cur_inst = dbCopyFig(cur_inst nil transform)
                    )
                    ; change instance name and master
                    when(cur_inst->name != rinst->name
                        cur_inst->name = rinst->name
                    )
                    schReplaceProperty(list(cur_inst) "master" sprintf(nil "%s %s %s" rinst->lib_name
                        rinst->cell_name cur_inst->viewName))
                    ; set CDF parameters; only values named in rinst->params change
                    foreach( cdf_par cdfGetInstCDF(cur_inst)~>parameters
                        par_val = cadr(assoc(cdf_par->name rinst->params))
                        when( par_val != nil
                            cdf_par->value = par_val
                        )
                    )
                    when( wire_list
                        ; if wire_list is not empty, check that the new master's
                        ; pins line up with the old master's. If so, keep wires
                        ; around; otherwise, delete wires.
                        unless( equal(pin_info get_instance_pin_info(cur_inst))
                            foreach( wire_info wire_list
                                schDelete(cadr(wire_info))
                            )
                            wire_list = nil
                        )
                    )
                    ; modify connections, keeping old wires around
                    modify_instance_terminal(sch_cv cur_inst wire_list net_map rinst->term_mapping)
                )
            )
        )
    )
)
; given a copied template cell, modify it to a concrete schematic.
procedure( convert_template_cells(lib_name cell_name pin_map new_pins inst_list sympin ipin opin iopin simulators)
    ; Convert a copied template cell into a concrete schematic/symbol:
    ; update the symbol pins (if a symbol exists), then rewrite schematic
    ; instances/connections and schematic pins, and finally check-and-save.
    ; Fix: the symbol cellview opened here only to probe for existence is
    ; now closed, so it no longer leaks an open read view.
    let( (sym_cv sch)
        ; update symbol view first.
        if( sym_cv = dbOpenCellViewByType(lib_name cell_name "symbol" nil "r") then
            ; close the probe view; update_symbol_pin reopens the symbol itself
            dbClose(sym_cv)
            printf("*INFO* Updating %s__%s symbol pins.\n" lib_name cell_name)
            update_symbol_pin(lib_name cell_name pin_map new_pins sympin simulators)
        else
            warn("Did not find symbol for %s__%s. Skipping. Is it testbench?" lib_name cell_name)
        )
        ; attempt to open schematic in append mode
        unless( sch = dbOpenCellViewByType(lib_name cell_name "schematic" nil "a")
            error("Cannot open %s__%s (schematic) in append mode." lib_name cell_name)
        )
        ; update schematic content
        printf("*INFO* Updating %s__%s instances and connections.\n" lib_name cell_name)
        modify_schematic_content(sch inst_list)
        ; update schematic pins
        printf("*INFO* Updating %s__%s schematic pins.\n" lib_name cell_name)
        update_schematic_pin(sch pin_map new_pins ipin opin iopin)
        ; check_and_save_schematic closes sch
        check_and_save_schematic(sch)
    )
)
; create concrete schematics
procedure( create_concrete_schematic( lib_name tech_lib lib_path temp_file change_file
                                      sympin ipin opin iopin simulators copy "tttttlllllg" )
    ; Top-level entry point: read the template and change lists from file,
    ; optionally create the target library and copy templates into it, then
    ; convert every changed cell into a concrete schematic. Returns 't.
    ; Fix: new_pins is now declared in the let so it no longer leaks into
    ; the global namespace.
    let( (template_list change_list cell_name pin_map new_pins inst_list)
        printf("*INFO* Reading template and change list from file\n")
        template_list = parse_data_from_file( temp_file )
        change_list = parse_data_from_file( change_file )
        when( copy
            printf("*INFO* Creating library: %s\n" lib_name)
            create_or_erase_library( lib_name tech_lib lib_path nil )
            printf("*INFO* Copying templates to library: %s\n" lib_name)
            copy_templates_to_library( lib_name template_list )
        )
        ; apply the recorded changes cell by cell
        foreach( change change_list
            cell_name = change->name
            pin_map = change->pin_map
            new_pins = change->new_pins
            inst_list = change->inst_list
            printf("*INFO* Updating cell %s__%s\n" lib_name cell_name)
            convert_template_cells( lib_name cell_name pin_map new_pins inst_list
                sympin ipin opin iopin simulators )
        )
        't
    )
)
; Create a new layout view then instantiate a single pcell instance.
; This method also copies all the labels in the pcell top level, so LVS/PEX will
; work correctly on the wrapper view.
; params_f is a file containing a list of (variable_name type_string value) lists.
; pin_mapping_f is a file containing a list of (old_pin new_pin) lists.
; Raises an error if either the new cellview or the pcell master cannot be opened.
procedure( create_layout_with_pcell(lib_name cell_name view_name inst_lib inst_cell params_f pin_mapping_f "ttttttt")
    let( (lay_cv inst_master inst inst_shapes label_location label_orientation label_lpp
          label_just label_font label_height label_type label_text params pin_mapping)
        unless( lay_cv = dbOpenCellViewByType(lib_name cell_name view_name "maskLayout" "w")
            error("Cannot open cellview %s__%s (%s)." lib_name cell_name view_name)
        )
        unless( inst_master = dbOpenCellViewByType(inst_lib inst_cell "layout" "maskLayout" "r")
            ; close the already-opened target view before aborting
            dbClose(lay_cv)
            error("Cannot open cellview %s__%s (layout)." inst_lib inst_cell)
        )
        params = parse_data_from_file(params_f)
        pin_mapping = parse_data_from_file(pin_mapping_f)
        ; instantiate the pcell at the origin with unit magnification
        inst = dbCreateParamInst(lay_cv inst_master "XTOP" '(0 0) "R0" 1 params)
        ; copy every top-level label of the master into this view, renaming pins
        ; according to pin_mapping when an entry exists.
        inst_shapes = inst~>master~>shapes
        foreach(shape inst_shapes
            when( shape->objType == "label"
                label_location = shape~>xy
                label_orientation = shape~>orient
                label_lpp = shape~>lpp
                label_just = shape~>justify
                label_font = shape~>font
                label_height = shape~>height
                label_type = shape~>labelType
                label_text = shape~>theLabel
                ; rename label if a mapping entry exists for it
                when( cadr(assoc(label_text pin_mapping))
                    label_text = cadr(assoc(label_text pin_mapping))
                )
                dbCreateLabel(lay_cv label_lpp label_location label_text label_just label_orientation label_font label_height )
            )
        )
        ; release resources and persist the new view
        dbClose(inst_master)
        dbSave(lay_cv)
        dbClose(lay_cv)
    )
)
; Helper for creating a single path segment between p0 and p1.
; Diagonal segments (both x and y change) get their drawn width scaled by sqrt(2)
; so the perpendicular width matches; "round" end styles are emulated via a
; "custom" style with half-width extensions.
procedure( create_path_seg_helper(cv lay p0 p1 width start_s end_s)
    let( (diag_ext info_list bext eext)
        if( and(car(p0) != car(p1) cadr(p0) != cadr(p1)) then
            ; diagonal segment: diagonal extension is half the original width
            diag_ext = width / 2
            width = width * sqrt(2)
        else
            ; orthogonal segment
            diag_ext = width * sqrt(2) / 2
        )
        ; begin/end extensions, only non-zero for emulated round ends
        bext = 0
        eext = 0
        when( start_s == "round"
            bext = width / 2
            start_s = "custom"
        )
        when( end_s == "round"
            eext = width / 2
            end_s = "custom"
        )
        ; (begin_ext end_ext (corner-extension list)) as required by dbCreatePathSeg
        info_list = list(bext eext list(diag_ext diag_ext width/2 diag_ext diag_ext width/2))
        dbCreatePathSeg(cv lay p0 p1 width start_s end_s info_list)
    )
)
; Helper for creating a path from a disembodied property list.
; `path` carries layer, width, points, end_style and join_style.  The path is
; emitted as individual segments: terminal segment ends use end_style, interior
; joints use join_style.
procedure( create_path_helper( cv path )
    let( (lay width points estyle jstyle p0 p1 plen idx start_s end_s)
        lay = path->layer
        width = path->width
        points = path->points
        estyle = path->end_style
        jstyle = path->join_style
        p0 = nil
        plen = length(points)
        idx = 0
        ; walk consecutive point pairs (p0, p1); idx counts the current point
        foreach( cur_point points
            p1 = cur_point
            when( idx > 0
                ; first segment starts at the path's end style, later ones at the join style
                if( idx == 1 then
                    start_s = estyle
                else
                    start_s = jstyle
                )
                ; last segment ends with the path's end style
                if( idx == plen - 1 then
                    end_s = estyle
                else
                    end_s = jstyle
                )
                create_path_seg_helper(cv lay p0 p1 width start_s end_s)
            )
            p0 = p1
            idx = idx + 1
        )
    )
)
; Helper for creating a single layout view.
; Populates `cv` with instances/mosaics, rectangles (with optional array copies),
; paths, polygons, blockages, boundaries, vias (with optional array copies), and
; pins/labels.  All *_list arguments are lists of disembodied property lists.
; tech_file is the technology file used to resolve via definitions.
procedure( create_layout_helper( cv tech_file inst_list rect_list via_list pin_list path_list
                                 blockage_list boundary_list polygon_list "ggllllllll" )
    let( (inst_cv obj via_def via_enc1 via_enc2 enc1 enc2 off1 off2 via_params make_pin_rect
          pin_bb pin_w pin_h pin_xc pin_yc pin_orient label_h param_order orig_shape arr_dx arr_dy)
        ; create instances
        foreach( inst inst_list
            if( inst_cv = dbOpenCellViewByType( inst->lib inst->cell inst->view nil "r" ) then
                if( and( inst->num_rows==1 inst->num_cols==1) then
                    if( inst->params != nil then
                        ; create pcell instance
                        obj = dbCreateParamInst(cv inst_cv inst->name inst->loc inst->orient 1 inst->params)
                        when( obj
                            ; invoke CDF callbacks in the given (or default) parameter order
                            if( inst->param_order != nil then
                                param_order=inst->param_order
                            else
                                param_order= mapcar( lambda( (x) car(x) ) inst->params)
                            )
                            CCSinvokeInstCdfCallbacks(obj ?order param_order)
                        )
                    else
                        obj = dbCreateInst(cv inst_cv inst->name inst->loc inst->orient)
                    )
                else
                    ; multi-row/column placement: use a mosaic
                    if( inst->params != nil then
                        ; create pcell mosaic
                        obj = dbCreateParamSimpleMosaic(cv inst_cv inst->name inst->loc inst->orient
                                                        inst->num_rows inst->num_cols inst->sp_rows inst->sp_cols
                                                        inst->params)
                        when( obj
                            if( inst->param_order != nil then
                                param_order=inst->param_order
                            else
                                param_order= mapcar( lambda( (x) car(x) ) inst->params)
                            )
                            CCSinvokeInstCdfCallbacks(obj ?order param_order)
                        )
                    else
                        obj = dbCreateSimpleMosaic(cv inst_cv inst->name inst->loc inst->orient
                                                   inst->num_rows inst->num_cols inst->sp_rows inst->sp_cols)
                    )
                )
                unless( obj
                    warn("Error creating instance %s of %s__%s (%s). Skipping." inst->name inst->lib inst->cell inst->view)
                )
            else
                warn("Cannot find instance %s__%s (%s). Skipping." inst->lib inst->cell inst->view)
            )
        )
        ; create rectangles
        foreach( rect rect_list
            orig_shape = dbCreateRect(cv rect->layer rect->bbox)
            if( not(orig_shape) then
                warn("Error creating rectangle of layer %A. Skipping." rect->layer)
            else
                ; optional arraying: orig_shape covers (col 1, row 1); the first loop
                ; fills columns 2..nx for all rows, the second fills rows 2..ny of column 1.
                when( rect->arr_nx != nil
                    for(icol 2 rect->arr_nx
                        arr_dx = rect->arr_spx * (icol - 1)
                        for(irow 1 rect->arr_ny
                            arr_dy = rect->arr_spy * (irow - 1)
                            dbCopyFig(orig_shape nil list(arr_dx:arr_dy "R0" 1))
                        )
                    )
                    for(irow 2 rect->arr_ny
                        arr_dy = rect->arr_spy * (irow - 1)
                        dbCopyFig(orig_shape nil list(0:arr_dy "R0" 1))
                    )
                )
            )
        )
        ; create paths
        foreach( path path_list
            create_path_helper(cv path)
        )
        ; create polygons
        foreach( poly polygon_list
            dbCreatePolygon(cv poly->layer poly->points)
        )
        ; create blockages
        foreach( block blockage_list
            if( block->btype == "placement" then
                dbCreateAreaBlockage(cv block->points)
            else
                dbCreateLayerBlockage(cv block->layer block->btype block->points)
            )
        )
        ; create boundaries
        foreach( bound boundary_list
            cond( (bound->btype == "PR"
                      dbCreatePRBoundary(cv bound->points))
                  (bound->btype == "snap"
                      dbCreateSnapBoundary(cv bound->points))
                  (bound->btype == "area"
                      dbCreateAreaBoundary(cv bound->points))
                  ('t
                      warn("Unknown boundary type %s. Skipping." bound->btype))
            )
        )
        ; create vias
        foreach( via via_list
            if( via_def = techFindViaDefByName(tech_file via->id) then
                ; compute via parameter list; enc lists hold per-edge enclosures
                ; (left right top bottom), converted here to Virtuoso's symmetric
                ; enclosure + offset representation.
                via_enc1 = via->enc1
                via_enc2 = via->enc2
                enc1 = list( (car(via_enc1) + cadr(via_enc1)) / 2.0
                             (caddr(via_enc1) + cadr(cddr(via_enc1))) / 2.0 )
                enc2 = list( (car(via_enc2) + cadr(via_enc2)) / 2.0
                             (caddr(via_enc2) + cadr(cddr(via_enc2))) / 2.0 )
                off1 = list( (cadr(via_enc1) - car(via_enc1)) / 2.0
                             (caddr(via_enc1) - cadr(cddr(via_enc1))) / 2.0 )
                off2 = list( (cadr(via_enc2) - car(via_enc2)) / 2.0
                             (caddr(via_enc2) - cadr(cddr(via_enc2))) / 2.0 )
                via_params = list( list("cutRows" via->num_rows)
                                   list("cutColumns" via->num_cols)
                                   list("cutSpacing" list(via->sp_cols via->sp_rows))
                                   list("layer1Enc" enc1)
                                   list("layer2Enc" enc2)
                                   list("layer1Offset" off1)
                                   list("layer2Offset" off2) )
                ; if via width and height given, add to via_params
                when( via->cut_width != nil
                    via_params = cons( list("cutWidth" via->cut_width) via_params)
                )
                when( via->cut_height != nil
                    via_params = cons( list("cutHeight" via->cut_height) via_params)
                )
                ; create actual via
                orig_shape = dbCreateVia(cv via_def via->loc via->orient via_params)
                if( not(orig_shape) then
                    warn("Error creating via %s. Skipping." via->id)
                else
                    ; optional arraying; same column/row fill scheme as rectangles above
                    when( via->arr_nx != nil
                        for(icol 2 via->arr_nx
                            arr_dx = via->arr_spx * (icol - 1)
                            for(irow 1 via->arr_ny
                                arr_dy = via->arr_spy * (irow - 1)
                                dbCopyFig(orig_shape nil list(arr_dx:arr_dy "R0" 1))
                            )
                        )
                        for(irow 2 via->arr_ny
                            arr_dy = via->arr_spy * (irow - 1)
                            dbCopyFig(orig_shape nil list(0:arr_dy "R0" 1))
                        )
                    )
                )
            else
                warn("Via %s not found. Skipping." via->id)
            )
        )
        ; create pins
        foreach( pin pin_list
            pin_bb = pin->bbox
            pin_w = caadr(pin_bb) - caar(pin_bb)
            pin_h = cadr(cadr(pin_bb)) - cadr(car(pin_bb))
            pin_xc = (caar(pin_bb) + caadr(pin_bb)) / 2.0
            pin_yc = (cadr(car(pin_bb)) + cadr(cadr(pin_bb))) / 2.0
            ; orient the label along the long axis; label height is the short axis
            if( pin_w >= pin_h then
                pin_orient = "R0"
                label_h = pin_h
            else
                pin_orient = "R90"
                label_h = pin_w
            )
            ; get make_pin_rect, true if both net_name and pin_name are non-empty,
            ; unless explicitly overridden by pin->make_rect
            make_pin_rect = pin->net_name != "" && pin->pin_name != ""
            when( pin->make_rect != nil
                make_pin_rect = pin->make_rect
            )
            ; printf("make_pin_rect: %A\n" make_pin_rect)
            ; create pin object only if make_pin_rect is True.
            when( make_pin_rect != 0 && make_pin_rect != nil
                ; printf("making pin.\n")
                dbCreatePin( dbMakeNet(cv pin->net_name) dbCreateRect(cv pin->layer pin_bb) pin->pin_name )
            )
            ; printf("%A %A %A %A\n" pin->label pin->layer pin_xc pin_yc)
            ; always create the text label, even if no pin object was made
            dbCreateLabel( cv pin->layer list(pin_xc pin_yc) pin->label "centerCenter" pin_orient "roman" label_h )
        )
    )
)
; Create new layout views with the given geometries.
; layout_f is a file containing a list of per-cell records; each record is a list of
; (cell_name inst_list rect_list via_list pin_list path_list blockage_list
;  boundary_list polygon_list), where each *_list is a list of disembodied
; property lists.  via_tech names the library whose technology file resolves vias.
; Returns t on success; raises an error if the tech file or a cellview cannot be opened.
procedure( create_layout( lib_name view_name via_tech layout_f "ttt" )
    let( (tech_file layout_info cell_name inst_list rect_list via_list pin_list
          path_list blockage_list boundary_list polygon_list cv)
        unless( tech_file = techGetTechFile(ddGetObj(via_tech))
            error("Via technology file %s not found." via_tech)
        )
        layout_info = parse_data_from_file(layout_f)
        foreach( info layout_info
            ; unpack the positional record for this cell
            cell_name = nthelem(1 info)
            inst_list = nthelem(2 info)
            rect_list = nthelem(3 info)
            via_list = nthelem(4 info)
            pin_list = nthelem(5 info)
            path_list = nthelem(6 info)
            blockage_list = nthelem(7 info)
            boundary_list = nthelem(8 info)
            polygon_list = nthelem(9 info)
            ; open in "w" mode: any existing content is overwritten
            unless( cv = dbOpenCellViewByType( lib_name cell_name view_name "maskLayout" "w" )
                error("Cannot create new layout cell %s__%s (%s)." lib_name cell_name view_name)
            )
            printf("Creating %s__%s (%s)\n" lib_name cell_name view_name)
            create_layout_helper(cv tech_file inst_list rect_list via_list pin_list path_list
                                 blockage_list boundary_list polygon_list)
            dbSave(cv)
            dbClose(cv)
        )
        t
    )
)
; Release write locks from all the given cellviews.
; cell_view_list_f is a file containing a list of (cell_name view_name) pairs.
; Each matching open cellview is reopened read-only (dropping its write lock)
; and then closed.  Returns t.
procedure( release_write_locks( lib_name cell_view_list_f "tt" )
    let( (cell_view_list lib_obj cv)
        cell_view_list = parse_data_from_file(cell_view_list_f)
        when( lib_obj = ddGetObj(lib_name nil nil nil nil "r")
            foreach( info cell_view_list
                when( cv = dbFindOpenCellView( lib_obj car(info) cadr(info) )
                    ; fix: removed stray comma before "r" -- SKILL separates
                    ; arguments with spaces; a comma is the backquote unquote
                    ; operator, not an argument separator.
                    dbReopen(cv "r")
                    dbClose(cv)
                )
            )
            ddReleaseObj(lib_obj)
        )
        t
    )
)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Simulation/Testbench related functions ;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Set an entry in an association list, in place.
; If mykey already exists, its value cell is destructively replaced (rplacd);
; if mykey is absent the list is returned UNCHANGED -- this procedure never
; inserts new entries.  Returns the (possibly modified) association list.
procedure( set_assoc_list(mylist mykey myval)
    let( (tmp)
        when( tmp = assoc(mykey mylist)
            ; print("replacing")
            ; destructive update of the existing (key value) pair
            rplacd(tmp list(myval))
        )
    )
    mylist
)
; Copy the schematic of a testbench, and replace the DUT instance.
;
; This procedure copies the schematic of a testbench to a new library and cell, then finds all
; instances with the name prefix "XDUT", then changes their instance master to dut_lib/dut_cell.
; Creates the target library (bound to tech_lib) when it does not exist.
; Raises an error if the copied schematic cannot be opened or no XDUT instance is found.
procedure( copy_testbench(master_lib master_cell targ_lib
                          dut_lib dut_cell tech_lib new_lib_path "ttttttt")
    ; fix: src_gdm/targ_gdm are now declared locally instead of leaking globals.
    let( (tlib_obj sch replace_count inst_prefix new_master src_gdm targ_gdm)
        inst_prefix = "XDUT"
        printf("Copying testbench %s__%s to %s__%s\n" master_lib master_cell targ_lib master_cell)
        ; create target library if it does not exist
        unless( tlib_obj = ddGetObj(targ_lib nil nil nil nil "r")
            when( and(new_lib_path (new_lib_path != "."))
                ; fix: was strcat(... lib_name), but lib_name is undefined in this
                ; procedure; the library directory must be named after targ_lib.
                new_lib_path = strcat(new_lib_path "/" targ_lib)
            )
            tlib_obj = ddCreateLib(targ_lib new_lib_path)
            ; attach technology file
            techBindTechFile(tlib_obj tech_lib)
        )
        ; copy testbench to new library
        src_gdm = gdmCreateSpecList()
        gdmAddSpecToSpecList(gdmCreateSpec(master_lib master_cell nil nil "CDBA") src_gdm)
        targ_gdm = gdmCreateSpecList()
        gdmAddSpecToSpecList(gdmCreateSpec(targ_lib master_cell nil nil "CDBA") targ_gdm)
        ccpCopy(src_gdm targ_gdm 't 'CCP_EXPAND_COMANAGED)
        ; open copied schematic
        unless( sch = dbOpenCellViewByType(tlib_obj master_cell "schematic" nil "a")
            ddReleaseObj(tlib_obj)
            error("Cannot open testbench schematic %s__%s" targ_lib master_cell)
        )
        ; replace masters of all instances whose name starts with inst_prefix
        replace_count = 0
        sprintf(new_master "%s %s symbol" dut_lib dut_cell)
        foreach( inst sch~>instances
            when( strncmp( inst~>name inst_prefix strlen(inst_prefix) ) == 0
                replace_count = replace_count + 1
                schReplaceProperty(list(inst) "master" new_master)
            )
        )
        ; save and close resources
        check_and_save_schematic(sch)
        ddReleaseObj(tlib_obj)
        ; error if nothing is replaced
        when( replace_count == 0
            error("Cannot find any instances in %s__%s with name prefix %s" targ_lib master_cell inst_prefix)
        )
        't
    )
)
; Opens an adexl session.  Returns a list of (session name, setup database handle).
; Raises an error (closing the session on the second failure) when either step fails.
; NOTE(review): the `mode` argument is accepted but never used in the body --
; confirm whether it was meant to be passed to axlSetMainSetupDBLCV.
procedure( open_adexl_session(tb_lib tb_cell tb_view session_name mode "ttttt")
    let( (session sdb)
        unless( session = axlCreateSession(session_name)
            error("Cannot create temporary adexl session: %s" session_name)
        )
        unless( sdb = axlSetMainSetupDBLCV(session tb_lib tb_cell tb_view)
            ; clean up the half-created session before aborting
            axlCloseSession(session)
            error("Cannot load adexl database from %s__%s (%s)" tb_lib tb_cell tb_view)
        )
        list(session sdb)
    )
)
; Enables only the given corners in the simulation setup database; every corner
; not in corner_list is disabled.  env_param_list is a list of
; (corner_name ((par val) ...)) entries whose variables are written onto the
; named corner via axlPutVar.
procedure( enable_adexl_corners( sdb corner_list env_param_list "gll")
    let( (env_name par_val_list corner)
        ; member() is non-nil exactly for corners in corner_list
        foreach(cur_name cadr(axlGetCorners(sdb))
            axlSetEnabled( axlGetCorner(sdb cur_name) member(cur_name corner_list) )
        )
        ; apply per-corner environment parameter overrides
        foreach(env_par_obj env_param_list
            env_name = car(env_par_obj)
            par_val_list = cadr(env_par_obj)
            corner = axlGetCorner(sdb env_name)
            foreach(par_val par_val_list
                axlPutVar(corner car(par_val) cadr(par_val))
            )
        )
    )
)
; Apply testbench design-variable values to the adexl setup database.
; par_val_list is an association list from variable names to variable values as
; strings; each value may be a constant or a parametric sweep expression.
procedure( set_adexl_parameters(sdb par_val_list "gl")
    foreach( name_and_value par_val_list
        axlPutVar(sdb car(name_and_value) cadr(name_and_value))
    )
)
; Create a new config view for a testbench.
;
; lib_name  : testbench library name.
; cell_name : testbench cell name.
; view_name : name of the config view (a testbench can have multiple config views)
; libs      : a string of global libraries, separated by spaces.
; views     : a string of cellviews to use, separated by spaces.
; stops     : a string of cellviews to stop at, separated by spaces.
; Raises an error if the config view cannot be opened for writing.
procedure( create_config_view(lib_name cell_name view_name libs views stops "tttttt")
    let( (conf conf_bag)
        printf("Creating config view %s__%s (%s)\n" lib_name cell_name view_name)
        unless( conf = hdbOpen(lib_name cell_name view_name "w")
            error("Cannot open config view %s__%s (%s)." lib_name cell_name view_name)
        )
        ; top cell is always the schematic view of the same lib/cell
        hdbSetTopCellViewName(conf lib_name cell_name "schematic")
        hdbSetDefaultLibListString(conf libs)
        hdbSetDefaultViewListString(conf views)
        hdbSetDefaultStopListString(conf stops)
        hdbSaveAs(conf lib_name cell_name view_name)
        ; close configuration via a config bag, as required by the hdb API
        conf_bag = hdbCreateConfigBag()
        hdbAddConfigToBag(conf_bag conf)
        hdbCloseConfigsInBag(conf_bag)
    )
)
; Edit the config view of a testbench.  Used to control whether we're simulating
; with schematic or post-extraction views.
;
; lib_name  : testbench library name.
; cell_name : testbench cell name.
; view_name : name of the config view (a testbench can have multiple config views)
; conf_list : a list of (lib, cell, view) configurations, where each entry means
;             that view <view> should be used for cell <cell> in library <lib>.
;             The special view name "netlist" means the cell is bound to an
;             extracted netlist file instead of a cellview.
procedure( edit_config_view(lib_name cell_name view_name conf_list "tttl")
    let( (conf lib cell view conf_bag netlist_list)
        unless( conf = hdbOpen(lib_name cell_name view_name "a")
            error("Cannot open config view %s__%s (%s)." lib_name cell_name view_name)
        )
        netlist_list = '()
        foreach( cell_config conf_list
            lib = car(cell_config)
            cell = cadr(cell_config)
            view = caddr(cell_config)
            if( view == "netlist" then
                ; defer netlist bindings; handled via prop.cfg below
                netlist_list = cons(list(lib cell) netlist_list)
            else
                ; bind this cell to the requested cellview
                hdbSetObjBindRule(conf list(list(lib cell nil nil))
                                  list('hdbcBindingRule list(nil nil view)))
            )
        )
        hdbSaveAs(conf lib_name cell_name view_name)
        ; close configuration via a config bag, as required by the hdb API
        conf_bag = hdbCreateConfigBag()
        hdbAddConfigToBag(conf_bag conf)
        hdbCloseConfigsInBag(conf_bag)
        ; update netlist source files
        edit_config_source_files(lib_name cell_name view_name netlist_list)
    )
)
; HACKERMAN FUNCTION:
; Cadence provides no SKILL API to set config-view source files; instead,
; spice/spectre source files are defined in an undocumented ASCII prop.cfg file.
; This hacky method writes the right prop.cfg for the given config view, binding
; each (lib cell) in netlist_list to "<lib_dir>/<cell>/netlist/netlist".
procedure( edit_config_source_files(lib_name cell_name view_name netlist_list "tttl")
    ; fix: declare lib/cell locally; they were previously undeclared globals.
    let( (p lib_dir cell_lib_dir lib cell)
        lib_dir = get_lib_directory(lib_name)
        p = outfile( sprintf(nil "%s/%s/%s/%s" lib_dir cell_name view_name "prop.cfg") "w" )
        ; common header
        fprintf( p "file-format-id 1.1;\ndefault\n{\n}\n" )
        foreach( lib_cell netlist_list
            lib = car(lib_cell)
            cell = cadr(lib_cell)
            cell_lib_dir = get_lib_directory(lib)
            fprintf( p "cell %s.%s\n{\n" lib cell )
            fprintf( p "    non-inherited string prop sourcefile = \"%s/%s/netlist/netlist\";\n}\n"
                     cell_lib_dir cell )
        )
        close(p)
    )
)
; Write testbench information (corners, enabled corners, parameters, outputs)
; to result_file in a YAML-like format.
; sdb            : adexl setup database handle.
; output_list    : list of output objects with ->name and ->expression fields;
;                  outputs whose name contains a double quote are skipped.
; en_corner_list : list of enabled corner names to record.
procedure( write_testbench_info_to_file(sdb result_file output_list en_corner_list)
    ; fix: declare var_list locally; it was previously an undeclared global.
    let( (p output_count var_list)
        ; write testbench information to result_file
        p = outfile(result_file "w")
        fprintf(p "corners:\n")
        foreach( corn cadr(axlGetCorners(sdb))
            fprintf(p "  - %s\n" corn)
        )
        fprintf(p "enabled_corners:\n")
        foreach( corn en_corner_list
            fprintf(p "  - %s\n" corn)
        )
        fprintf(p "parameters:\n")
        if( var_list = cadr(axlGetVars(sdb)) then
            foreach( var_name var_list
                fprintf(p "  %s: \"%s\"\n" var_name axlGetVarValue(axlGetVar(sdb var_name)))
            )
        else
            ; emit an empty YAML mapping when there are no variables
            fprintf(p "  {}\n")
        )
        fprintf(p "outputs:\n")
        output_count = 0
        foreach( out_obj output_list
            ; names containing quotes would break the quoted YAML key; skip them
            if( rexMatchp( "\"" out_obj->name) then
                warn("Output expression name (%s) have quotes, skipping" out_obj->name)
            else
                fprintf(p "  \"%s\": !!str %A\n" out_obj->name out_obj->expression)
                output_count = output_count + 1
            )
        )
        when( output_count == 0
            fprintf(p "  {}\n")
        )
        close(p)
    )
)
; Instantiates a testbench.
;
; Creates the config view, points the (single) adexl test at it while preserving
; the original ADE state, loads the corner file, enables the default corner, and
; records corners / global variables / output expressions to result_file.
; The testbench must define exactly one adexl test.
procedure( instantiate_testbench(tb_cell targ_lib
                                 config_libs config_views config_stops
                                 default_corner corner_file def_files
                                 tech_lib result_file
                                 "tttttttltt")
    let( (session_name session_sdb session sdb test_names test_name test tool_args corner_list
          ade_symbol ade_session output_list tmp_state_name state_obj success)
        tmp_state_name = "orig_state"
        ; check if temporary ADE session state already exists, if so, delete it
        state_obj = ddGetObj(targ_lib tb_cell tmp_state_name)
        when( state_obj
            success = ddDeleteObj(state_obj)
            unless( success
                error("Cannot delete orig_state cellview.")
            )
        )
        ; create config view
        create_config_view(targ_lib tb_cell "config" config_libs config_views config_stops)
        ; use the global counter to generate a unique adexl session name
        session_name = sprintf(nil "modify_adexl_%d" bag_modify_adexl_counter)
        bag_modify_adexl_counter = bag_modify_adexl_counter + 1
        session_sdb = open_adexl_session(targ_lib tb_cell "adexl" session_name "a")
        session = car(session_sdb)
        sdb = cadr(session_sdb)
        ; check that only one test is defined
        test_names = cadr(axlGetTests(sdb))
        when(length(test_names) != 1
            axlCommitSetupDB(sdb)
            axlCloseSetupDB(sdb)
            axlCloseSession(session)
            error("ADEXL testbench must have exactly 1 test defined.")
        )
        ; save current test setup state
        axlSaveSetupState(session "adexl_default" "All")
        ; change the test to use the config view, and set its definition files;
        ; also get a list of defined output expressions.
        ; step 1: get ADE session
        test_name = car(test_names)
        ade_symbol = axlGetToolSession(session_name test_name)
        ade_session = asiGetSession(ade_symbol)
        ; step 2: save original ADE session
        asiSaveState(ade_session ?name tmp_state_name ?option 'cellview ?lib targ_lib ?cell tb_cell)
        ; step 3: change test library
        test = axlGetTest(sdb test_name)
        tool_args = axlGetTestToolArgs(test)
        set_assoc_list(tool_args "view" "config")
        set_assoc_list(tool_args "lib" targ_lib)
        set_assoc_list(tool_args "cell" tb_cell)
        axlSetTestToolArgs(test tool_args)
        ; step 4: reopen ADE session, then load original ADE state
        ade_symbol = axlGetToolSession(session_name test_name)
        ade_session = asiGetSession(ade_symbol)
        asiLoadState(ade_session ?name tmp_state_name ?option 'cellview)
        asiSetEnvOptionVal(ade_session 'definitionFiles def_files)
        output_list = setof(ele asiGetOutputList(ade_session) ele->name)
        ; step 5: delete temporary ADE session state
        state_obj = ddGetObj(targ_lib tb_cell tmp_state_name)
        ddDeleteObj(state_obj)
        axlMainAppSaveSetup(session_name)
        ; load corner
        unless(axlLoadCorners(sdb corner_file)
            axlCommitSetupDB(sdb)
            axlCloseSetupDB(sdb)
            axlCloseSession(session)
            ; fix: the old message referenced lib_name/cell_name/view_name, which
            ; are undefined in this procedure; report the actual target cellview.
            error("Error loading corner file %s to %s__%s (%s)" corner_file targ_lib tb_cell "adexl")
        )
        ; set default corner
        corner_list = list(default_corner)
        enable_adexl_corners(sdb corner_list nil)
        ; write testbench information to file
        write_testbench_info_to_file(sdb result_file output_list corner_list)
        ; save and close
        axlSaveSetupState(session "adexl_default" "All")
        axlSaveSetupState(session "ocean_default" "All")
        axlMainAppSaveSetup(session_name)
        axlCommitSetupDB(sdb)
        axlCloseSetupDB(sdb)
        axlCloseSession(session)
    )
)
; Returns parameter and corner information of a testbench by writing it to
; result_file (via write_testbench_info_to_file).  The testbench must define
; exactly one adexl test.
procedure( get_testbench_info(tb_lib tb_cell result_file "ttt")
    let( (session_name session_sdb session sdb test_names test_name ade_symbol asi_sess
          output_list corner_list en_list success)
        session_name = "read_adexl"
        session_sdb = open_adexl_session(tb_lib tb_cell "adexl" session_name "r")
        session = car(session_sdb)
        sdb = cadr(session_sdb)
        ; check that only one test is defined
        test_names = cadr(axlGetTests(sdb))
        when(length(test_names) != 1
            axlCommitSetupDB(sdb)
            axlCloseSetupDB(sdb)
            axlCloseSession(session)
            error("ADEXL testbench must have exactly 1 test defined.")
        )
        ; get output list
        ; NOTE(review): here the ADE handle comes from sevEnvironment, whereas
        ; instantiate_testbench uses asiGetSession -- confirm both are valid
        ; inputs to asiGetOutputList.
        test_name = car(test_names)
        ade_symbol = axlGetToolSession(session_name test_name)
        asi_sess = sevEnvironment(ade_symbol)
        output_list = setof(ele asiGetOutputList(asi_sess) ele->name)
        ; get enabled corners
        corner_list = cadr(axlGetCorners(sdb))
        en_list = setof(corner corner_list axlGetEnabled(axlGetCorner(sdb corner)))
        ; write testbench information to file
        write_testbench_info_to_file(sdb result_file output_list en_list)
        ; close
        axlCommitSetupDB(sdb)
        axlCloseSetupDB(sdb)
        axlCloseSession(session)
    )
)
; Configure run options.  Used to set up monte carlo parameters.
; run_params is an association list of run options and their values.  The key
; "mode" selects the run mode; "Single Run, Sweeps and Corners" takes no
; options, "Monte Carlo Sampling" honors "mcnumpoints" and "mcmethod".
; Any other mode closes the session and raises an error.
procedure( set_run_options(session sdb run_params "ggl")
    ; fix: declare opt_val locally; it was previously an undeclared global.
    let( (run_mode opt_list run_opt opt_val)
        when( run_mode = cadr(assoc("mode" run_params))
            ; no options for single run/sweep mode.
            cond( (run_mode == "Single Run, Sweeps and Corners"
                      opt_list = nil)
                  (run_mode == "Monte Carlo Sampling"
                      opt_list = '("mcnumpoints" "mcmethod") )
                  ('t
                      axlCloseSession(session)
                      error("Unsupported run mode: %s" run_mode) )
            )
            ; apply each supported option that was actually provided
            foreach( opt_name opt_list
                when( opt_val = cadr(assoc(opt_name run_params))
                    run_opt = axlPutRunOption(sdb run_mode opt_name)
                    axlSetRunOptionValue(run_opt opt_val)
                )
            )
            axlSetCurrentRunMode(sdb run_mode)
        )
    )
)
; Modify the given testbench.
; tb_lib and tb_cell describe the library and cell of the testbench to simulate.
; conf_file contains the config view settings (may parse to nil to skip).
; opt_file contains the association list of run mode options.
; corner_file contains a list of corners to simulate.
; param_file contains the association list of parameter values.
; env_params_file contains per-corner environment parameter overrides.
procedure( modify_testbench(tb_lib tb_cell conf_file opt_file corner_file param_file env_params_file "ttttttt")
    let( (tmp_list session sdb conf_list run_params corner_list param_values env_param_values session_name)
        ; timestamped session name keeps concurrent invocations distinct
        sprintf(session_name "bag_sim_adexl_%s" getCurrentTime())
        ; read inputs from file.
        conf_list = parse_data_from_file(conf_file)
        run_params = parse_data_from_file(opt_file)
        corner_list = parse_data_from_file(corner_file)
        param_values = parse_data_from_file(param_file)
        env_param_values = parse_data_from_file(env_params_file)
        ; modify config view only when settings were provided
        when( conf_list
            edit_config_view(tb_lib tb_cell "config" conf_list)
        )
        tmp_list = open_adexl_session(tb_lib tb_cell "adexl" session_name "a")
        session = car(tmp_list)
        sdb = cadr(tmp_list)
        ; change corners, parameters, and run options
        enable_adexl_corners( sdb corner_list env_param_values)
        set_adexl_parameters( sdb param_values )
        set_run_options( session sdb run_params )
        ; save and close
        axlSaveSetupState(session "adexl_default" "All")
        axlSaveSetupState(session "ocean_default" "All")
        axlMainAppSaveSetup(session_name)
        axlCommitSetupDB(sdb)
        axlCloseSetupDB(sdb)
        axlCloseSession(session)
    )
)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; BAG server related functions ;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; IPC stdout handler for the BAG server child process.
; Before the server announces startup, lines are treated as status messages;
; afterwards, each line is evaluated as a SKILL expression and the result (or
; the error message from errset) is written back, length-prefixed, to the child.
; Relies on the global flag bag_server_started.
procedure( stdoutHandler(ipcId data)
    let( (result result_str)
        if( bag_server_started > 0 then
            printf("*INFO* Evaluate expression from BAG process: %s\n" data)
            ; errset captures evaluation errors instead of aborting the handler
            if( result = errsetstring(data 't) then
                sprintf(result_str "%A\n" car(result))
            else
                ; extract the error message text from the errset structure
                sprintf(result_str "%s\n" car(nthelem(5 errset.errset)))
            )
            printf("*INFO* Sending result to BAG process: %s" result_str)
            ; length-prefix protocol: byte count line, then the payload
            ipcWriteProcess(ipcId sprintf(nil "%d\n" strlen(result_str)))
            ipcWriteProcess(ipcId result_str)
            't
        else
            ; startup handshake: wait for the magic banner line
            if( data == "BAG skill server has started. Yay!\n" then
                bag_server_started = 1
                printf("*INFO* BAG skill server started.\n")
            else
                printf("*INFO* Waiting for BAG skill server. Message: %s\n" data)
            )
        )
    )
)
; IPC stderr handler for the BAG server child process: any stderr output is
; treated as fatal -- warn and kill the child process.
procedure( stderrHandler(ipcId data)
    warn("BAG server process error: %s\n" data)
    warn("Shutting down BAG server.")
    ipcKillProcess(ipcId)
    't
)
; IPC exit handler for the BAG server child process: just log the exit status.
procedure( exitHandler(ipcId exitId)
    printf("*INFO* BAG server process exited with status: %d\n" exitId)
    't
)
; Launch the BAG server child process, wiring its stdout/stderr/exit callbacks.
; Resets the global bag_server_started handshake flag.  Returns the IPC handle.
procedure( start_bag_server()
    bag_server_started = 0
    printf("*INFO* Starting BAG server process.\n")
    ipcBeginProcess("bash virt_server.sh" "" 'stdoutHandler 'stderrHandler 'exitHandler "")
)
; global: nonzero once the BAG skill server has reported startup (see stdoutHandler)
bag_server_started = 0
; global: counter used to generate unique adexl session names (see instantiate_testbench)
bag_modify_adexl_counter = 0
; launch the BAG server child process when this script is loaded
bag_proc = start_bag_server()
================================================
FILE: run_scripts/start_bag.sh
================================================
#!/usr/bin/env bash
# Launch an interactive IPython shell for BAG using the interpreter in $BAG_PYTHON.
# Clear PYTHONPATH so only BAG-managed paths are visible to the interpreter.
export PYTHONPATH=""
# disable QT session manager warnings
unset SESSION_MANAGER
# fix: quote "$@" so arguments containing spaces/globs are forwarded intact.
# ${BAG_PYTHON} is left unquoted deliberately: it may carry interpreter flags.
exec ${BAG_PYTHON} -m IPython "$@"
================================================
FILE: run_scripts/start_bag_ICADV12d3.il
================================================
/* Note:
Due to licensing reasons, this skill script is missing the function
CCSinvokeCdfCallbacks() from Cadence solution 11018344, which executes
CDF parameters callback from skill.
If you do not need to instantiate a pcell instance, this method
is not needed.
Eric Chang, Mar 2, 2017.
*/
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Virtuoso Database operations functions ;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Read a SKILL data structure from the text file `fname`.
; Opens the file, delegates parsing to parse_data_from_file_helper, closes the
; port, and returns the parsed value.  Raises an error if the file cannot be opened.
procedure( parse_data_from_file( fname "t" )
    let( (in_port result)
        in_port = infile( fname )
        unless( in_port
            error("Cannot open file %s" fname)
        )
        result = parse_data_from_file_helper(in_port)
        close( in_port )
        result
    )
)
; Recursive helper for parse_data_from_file.
; Reads one line and dispatches on its marker:
;   "#list"      -> parse elements recursively until "#end"; returns a list
;   "#prop_list" -> parse key/value pairs until "#end"; returns a disembodied
;                   property list
;   "#float N"   -> float, "#int N" -> integer, "#bool N" -> 't/nil (N==1)
;   anything else is returned as a plain string (including the "#end" sentinel
;   consumed by the enclosing list/prop_list parser).
procedure( parse_data_from_file_helper( p )
    let( (line item ans finish key)
        gets( line p )
        ; remove trailing newline
        line = substring(line 1 strlen(line) - 1)
        ; printf("read line: %s\n" line)
        cond(
            (line == "#list"
                ; parse a list; tconc builds it in O(1) per append
                ans = tconc(nil 0)
                while( nequal(item = parse_data_from_file_helper(p) "#end")
                    tconc(ans item)
                )
                ; printf("returning list ")
                ; print(cdar(ans))
                ; printf("\n")
                ; cdar drops the dummy head element introduced by tconc(nil 0)
                cdar(ans)
            )
            (line == "#prop_list"
                ; parse a disembodied property list
                ans = ncons(nil)
                finish = nil
                while( !finish
                    key = parse_data_from_file_helper(p)
                    if( key == "#end" then
                        finish = 't
                    else
                        ; key line is followed by its value
                        item = parse_data_from_file_helper(p)
                        putprop(ans item key)
                    )
                )
                ans
            )
            ; parse a float
            (strncmp( line "#float" 6 ) == 0
                cdfParseFloatString(cadr(parseString(line)))
            )
            ; parse an int
            (strncmp( line "#int" 4 ) == 0
                atoi(cadr(parseString(line)))
            )
            ; parse a boolean
            (strncmp( line "#bool" 5 ) == 0
                if( atoi(cadr(parseString(line))) == 1 then
                    't
                else
                    nil
                )
            )
            ; parse a string token or #end
            ('t
                ; printf("returning str %s\n" line)
                line
            )
        )
    )
)
; Return the list of cell names in the given library.
; Yields an empty list when the library cannot be opened.
procedure( get_cells_in_library( lib_name "t" )
    let( ( dd_lib cell_names )
        cell_names = '()
        when( dd_lib = ddGetObj(lib_name nil nil nil nil "r")
            cell_names = ddGetObjChildren(dd_lib)~>name
            ddReleaseObj(dd_lib)
        )
        cell_names
    )
)
; Write the cell names of the given library to file `fname`, one per line.
procedure( get_cells_in_library_file( lib_name fname "tt" )
    let( ( out_port )
        out_port = outfile( fname "w" )
        foreach( cell_name get_cells_in_library(lib_name)
            fprintf(out_port "%s\n" cell_name)
        )
        close(out_port)
    )
)
; Return the directory (readPath) corresponding to the given library,
; or the empty string when the library cannot be opened.
procedure( get_lib_directory(lib_name "t")
    let( ( dd_lib dir_path )
        dir_path = ""
        when( dd_lib = ddGetObj(lib_name nil nil nil nil "r")
            dir_path = dd_lib~>readPath
            ddReleaseObj(dd_lib)
        )
        dir_path
    )
)
; Parse the netlist of the given cellview and write it to file_name in a
; YAML-like format (lib/cell names, pin list, and per-instance pin/net info).
; Works on schematic and veriloga views; tries schematic first, then veriloga,
; and raises an error if neither exists.
procedure( parse_cad_sch(lib_name cell_name file_name "ttt")
    let( (cv cell_type p indent direction term_names tb_list tb_match
          inst_lib_name inst_cell_name inst_cnt)
        indent = ""
        cell_type = "schematic"
        unless( cv = dbOpenCellViewByType( lib_name cell_name "schematic" nil "r" )
            cell_type = "veriloga"
            unless( cv = dbOpenCellViewByType( lib_name cell_name "veriloga" nil "r" )
                error( "Cannot find schematic or veriloga view of cell %s__%s" lib_name cell_name )
            )
        )
        p = outfile( file_name "w" )
        ; print cellview information
        printf( "*INFO* Writing cell %s__%s (%s) netlist to %s\n" lib_name cell_name cell_type file_name )
        fprintf( p "%slib_name: %s\n" indent lib_name )
        fprintf( p "%scell_name: %s\n" indent cell_name )
        ; print pins as a YAML flow-style list
        fprintf( p "%spins: [ " indent )
        ; veriloga terminals come back in reverse declaration order
        if( cell_type == "veriloga" then
            term_names = reverse(cv~>terminals~>name)
        else
            term_names = cv~>terminals~>name
        )
        ; add quotes around pin names to escape array pins
        term_names = mapcar( lambda( (x) sprintf(nil "\"%s\"" x) ) term_names )
        fprintf( p "%s ]\n" buildString(term_names ", "))
        ; print instances
        if( not(cv~>instances) then
            fprintf( p "%sinstances: {}\n" indent )
        else
            inst_cnt = 0
            fprintf( p "%sinstances:\n" indent )
            foreach( inst cv~>instances
                inst_cnt++
                ; print entry for instance
                indent = "  "
                fprintf( p "%s%s:\n" indent inst~>name )
                ; print instance master information.
                indent = "    "
                fprintf( p "%slib_name: %s\n" indent inst~>libName )
                fprintf( p "%scell_name: %s\n" indent inst~>cellName )
                ; print instance terminal information
                if( !(inst~>instTerms) then
                    fprintf( p "%sinstpins: {}\n" indent )
                else
                    fprintf( p "%sinstpins:\n" indent )
                    foreach( inst_term inst~>instTerms
                        ; direction may be nil; emit an empty string instead
                        unless( direction = inst_term~>direction
                            direction = ""
                        )
                        indent = "      "
                        fprintf( p "%s%s:\n" indent inst_term~>name )
                        indent = "        "
                        fprintf( p "%sdirection: %s\n" indent direction )
                        fprintf( p "%snet_name: \"%s\"\n" indent inst_term~>net~>name )
                        fprintf( p "%snum_bits: %d\n" indent inst_term~>numBits )
                    )
                )
            )
            ; empty mapping when the instance list iterated zero times
            when(inst_cnt == 0
                fprintf( p "  {}\n" )
            )
        )
        ; close resources
        close(p)
        dbClose(cv)
    )
)
; Delete a cellview if it exists.  Currently used to delete old calibre files.
; Returns the ddDeleteObj result when the view existed, 't otherwise.
procedure( delete_cellview(lib_name cell_name view_name "ttt")
    let( (dd_obj)
        dd_obj = ddGetObj(lib_name cell_name view_name)
        if( dd_obj == nil then
            't
        else
            ddDeleteObj(dd_obj)
        )
    )
)
; Parse the structure of the given cellview and dump it to a file.
; Works on layout views only.
; lib_name  : library containing the cell.
; cell_name : cell whose "layout" view is parsed.
; file_name : output file; receives a YAML-like document listing the
;             rectangles, labels/text displays, and instances (including
;             mosaic arrays) found in the layout.
; Raises an error if the layout view cannot be opened.
procedure( parse_cad_layout(lib_name cell_name file_name "ttt")
let( (cv cell_type p indent rect_cnt label_cnt inst_cnt)
indent = ""
cell_type = "layout"
; open the layout view read-only; abort if it does not exist
unless( cv = dbOpenCellViewByType( lib_name cell_name cell_type nil "r" )
error( "Cannot find layout view of cell %s__%s" lib_name cell_name )
)
p = outfile( file_name "w" )
; print cellview information
printf( "*INFO* Writing cell %s__%s (%s) netlist to %s\n" lib_name cell_name cell_type file_name )
fprintf( p "%slib_name: %s\n" indent lib_name )
fprintf( p "%scell_name: %s\n" indent cell_name )
; print rects
if( not(cv~>shapes) then
fprintf( p "%srects: {}\n" indent )
else
rect_cnt = 0
fprintf( p "%srects:\n" indent )
foreach( shape cv~>shapes
; only rectangle shapes are recorded in this section
if( (shape~>objType == "rect") then
rect_cnt++
; print entry for rect
indent = " "
fprintf( p "%s%d:\n" indent rect_cnt )
; print rect master information.
indent = " "
fprintf( p "%slayer: %s %s\n" indent nthelem(1 shape~>lpp) nthelem(2 shape~>lpp))
fprintf( p "%sbBox: [[%f, %f], [%f, %f]]\n" indent
nthelem(1 nthelem(1 shape~>bBox)) nthelem(2 nthelem(1 shape~>bBox))
nthelem(1 nthelem(2 shape~>bBox)) nthelem(2 nthelem(2 shape~>bBox))
);fprintf
)
);if
; emit an empty mapping when the layout contains no rectangles
if((rect_cnt == 0) then
fprintf( p " {}\n" )
);if
)
; print labels
indent = ""
if( not(cv~>shapes) then
fprintf( p "%slabels: {}\n" indent )
else
label_cnt = 0
fprintf( p "%slabels:\n" indent )
foreach( shape cv~>shapes
if( (shape~>objType == "label") then
label_cnt++
; print entry for label
indent = " "
fprintf( p "%s%d:\n" indent label_cnt )
; print label master information.
indent = " "
fprintf( p "%slabel: %s\n" indent shape~>theLabel )
fprintf( p "%slayer: %s %s\n" indent nthelem(1 shape~>lpp) nthelem(2 shape~>lpp))
fprintf( p "%sxy: [%f, %f]\n" indent nthelem(1 shape~>xy) nthelem(2 shape~>xy))
)
if( (shape~>objType == "textDisplay") then ;some labels are instantiated as text displays
label_cnt++
; print entry for label
indent = " "
fprintf( p "%s%d:\n" indent label_cnt )
; print label master information.
indent = " "
; a text display shows the name of its owner object
fprintf( p "%slabel: %s\n" indent shape~>owner~>name )
fprintf( p "%slayer: %s %s\n" indent nthelem(1 shape~>lpp) nthelem(2 shape~>lpp))
fprintf( p "%sxy: [%f, %f]\n" indent nthelem(1 shape~>xy) nthelem(2 shape~>xy))
)
);if
; emit an empty mapping when no labels were found
if((label_cnt == 0) then
fprintf( p " {}\n" )
);if
)
; print instances
indent = ""
if( not(cv~>instances) then
fprintf( p "%sinstances: {}\n" indent )
else
inst_cnt = 0
fprintf( p "%sinstances:\n" indent )
foreach( inst cv~>instances
inst_cnt++
; print entry for instance
indent = " "
fprintf( p "%s%s:\n" indent inst~>name )
; print instance master information.
indent = " "
fprintf( p "%slib_name: %s\n" indent inst~>libName )
fprintf( p "%scell_name: %s\n" indent inst~>cellName )
fprintf( p "%sxy: [%f, %f]\n" indent nthelem(1 inst~>xy) nthelem(2 inst~>xy))
; mosaics are instance arrays; record dimensions, pitch, and rotation
if( (inst~>objType == "mosaic") then
fprintf( p "%scols: %d\n" indent inst~>columns)
fprintf( p "%srows: %d\n" indent inst~>rows)
fprintf( p "%ssp_cols: %f\n" indent inst~>uX)
fprintf( p "%ssp_rows: %f\n" indent inst~>uY)
fprintf( p "%srotation: %s\n" indent car(inst~>tileArray))
else
fprintf( p "%srotation: %s\n" indent inst~>orient)
);if
)
when(inst_cnt == 0
fprintf( p " {}\n" )
)
)
; close resources
close(p)
dbClose(cv)
)
)
; Write the list of cells contained in the specified library to a file,
; formatted as "lib_name: [cell1, cell2, ]".
procedure( get_cell_list(lib_name file_name "tt")
    let( (lib_obj out_port)
        lib_obj = ddGetObj(lib_name)
        out_port = outfile( file_name "w" )
        fprintf( out_port "%s: [" lib_name)
        ; one entry per cell in the library
        foreach( cname lib_obj~>cells~>name
            fprintf( out_port "%s, " cname)
        )
        fprintf( out_port "] \n" )
        ; close resources
        close(out_port)
    );let
)
; If a library with lib_name does not exist, create a new library with
; that name bound to the given technology library.  Otherwise, if erase
; is true, remove all cells in the existing library.
; NOTE(review): despite the old comment, both branches return 't on
; success, not the library database object.
; lib_name : name of the library to create or erase.
; tech_lib : technology library attached to a newly created library.
; lib_path : directory in which to create the library; "." or nil means
;            the default location.
; erase    : when non-nil, delete every cell of an existing library.
procedure( create_or_erase_library(lib_name tech_lib lib_path erase "tttg")
let( (lib_obj)
if( lib_obj = ddGetObj(lib_name nil nil nil nil "r") then
when( erase
; delete all cells in the library
foreach( cell lib_obj~>cells
unless( ddDeleteObj(cell)
error("cannot delete cell %s in library %s\n" cell~>name lib_name)
)
)
)
ddReleaseObj(lib_obj)
't
else
; create library if not exist
; place the new library under lib_path unless it is the default "."
when( and(lib_path (lib_path != "."))
lib_path = strcat(lib_path "/" lib_name)
)
lib_obj = ddCreateLib(lib_name lib_path)
; attach technology file
techBindTechFile(lib_obj tech_lib)
; close library
ddReleaseObj(lib_obj)
't
)
)
)
; copy all template cells to the given library.
; template_list is a list of three-element lists with the format
; '("master_lib_name" "master_cell_name" "target_cell_name")
; any existing cellviews will be overwritten.
; ccpCopy cannot copy the same master cell to multiple different target
; cells in one call, so each pass of the while loop copies one set of
; unique masters and defers duplicates to the next pass.
; Returns 't.
; Fixes: removed unused local `cnt`; the library handle acquired with
; ddGetObj is now released with ddReleaseObj (it was leaked before).
procedure( copy_templates_to_library(lib_name template_list "tl")
    let( (current remaining src_gdm targ_gdm table master_lib master_cell target_cell key
          empty_spec targ_lib_obj test_cv)
        current = template_list
        remaining = '()
        empty_spec = gdmCreateSpecList()
        targ_lib_obj = ddGetObj(lib_name nil nil nil nil "r")
        while( current
            ; Create GDMSpecList used to copy all cells
            src_gdm = gdmCreateSpecList()
            targ_gdm = gdmCreateSpecList()
            ; table to keep track of master cells already scheduled this pass.
            table = makeTable("mytable" 0)
            ; Populate GDMSpecList
            foreach( template_info current
                master_lib = car(template_info)
                master_cell = cadr(template_info)
                target_cell = caddr(template_info)
                ; check if we copied this cell on this iteration yet
                key = list(master_lib master_cell)
                if( table[key] == 1 then
                    ; wait for the next iteration
                    remaining = cons(template_info remaining)
                else
                    ; purge target cellview if exist, so the copy overwrites cleanly
                    when( targ_lib_obj
                        test_cv = dbFindOpenCellView(targ_lib_obj target_cell "schematic")
                        when( test_cv
                            dbPurge(test_cv)
                        )
                        test_cv = dbFindOpenCellView(targ_lib_obj target_cell "symbol")
                        when( test_cv
                            dbPurge(test_cv)
                        )
                        ; hard remove adexl state if it exists
                        test_cv = ddGetObj(lib_name target_cell "adexl")
                        when( test_cv
                            ddDeleteObj(test_cv)
                        )
                    )
                    gdmAddSpecToSpecList(gdmCreateSpec(master_lib master_cell nil nil "CDBA") src_gdm)
                    gdmAddSpecToSpecList(gdmCreateSpec(lib_name target_cell nil nil "CDBA") targ_gdm)
                    table[key] = 1
                )
            )
            ; Perform copy
            ccpCopy(src_gdm targ_gdm 't 'CCP_EXPAND_COMANAGED nil nil "" "" 'CCP_UPDATE_FROM_LIBLIST empty_spec)
            ; set current and remaining
            current = remaining
            remaining = '()
        )
        ; release the library handle acquired above
        when( targ_lib_obj
            ddReleaseObj(targ_lib_obj)
        )
    )
    't
)
; Return a terminal name that is unused in the given cellview.
; The returned name has the form "temp<N>_<name_base>", where N is the
; smallest positive integer that makes the name unique.
procedure( get_unique_term_name( cvid name_base "gt")
    let( (idx candidate)
        idx = 1
        sprintf( candidate "temp%d_%s" idx name_base )
        ; bump the counter until no terminal with this name exists
        while( dbFindTermByName(cvid candidate)
            idx++
            sprintf( candidate "temp%d_%s" idx name_base )
        )
        candidate
    )
)
; Open a pin master cellview described by (lib cell view) and return it.
; On failure the caller's cellview cvid is closed before erroring out.
procedure( open_pin_master(cvid pin_cv_info)
    let( (plib pcell pview master)
        plib = nthelem(1 pin_cv_info)
        pcell = nthelem(2 pin_cv_info)
        pview = nthelem(3 pin_cv_info)
        master = dbOpenCellViewByType( plib pcell pview nil "r" )
        unless( master
            ; release the caller's cellview so no edit lock is left behind
            dbClose(cvid)
            error( "Cannot find pin master cellview: %s__%s (%s)" plib pcell pview)
        )
        master
    )
)
; update pins of a schematic
; cvid is the opened cellview id of the schematic. It must be in append mode.
; pin_map is a list of two-element lists of old pin names and new pin names, respectively.
; an empty new name means the pin is removed.
; new_pins is a list of (name type) lists of pins to add.
; ipin, opin, and iopin are lists of three strings for input/output/inout pins, respectively.
; first element is the pin master library, second element is the pin master cell, and third element
; is the pin master cellview.
; Fixes: `new_term` is now declared in the let list (it was previously an
; unintended global, since SKILL makes undeclared assignments global), and
; the unused `pin_orient` local was removed.
procedure( update_schematic_pin(cvid pin_map new_pins ipin opin iopin "glllll")
    let( (snap_dist cur_term_name new_term_name term pin pin_location pin_direction
          temp_new_term_name new_term pin_master ipin_master opin_master iopin_master
          pin_xy_info npin_xl npin_yl npin_xr npin_yr npin_name npin_type)
        snap_dist = schGetEnv("schSnapSpacing")
        ; open pin masters
        ipin_master = open_pin_master(cvid ipin)
        opin_master = open_pin_master(cvid opin)
        iopin_master = open_pin_master(cvid iopin)
        pin_master = nil
        ; get new pin locations before any pin addition/subtraction.
        pin_xy_info = get_new_pin_locations(cvid snap_dist)
        ; rename or remove pins
        foreach( p pin_map
            cur_term_name = car(p)
            new_term_name = cadr(p)
            when(cur_term_name != new_term_name
                ; close all open cellviews before raising any error
                unless( term = dbFindTermByName(cvid cur_term_name)
                    dbClose(cvid)
                    dbClose(ipin_master)
                    dbClose(opin_master)
                    dbClose(iopin_master)
                    error( "Terminal %s not found." cur_term_name )
                )
                when( term~>pinCount != 1
                    dbClose(cvid)
                    dbClose(ipin_master)
                    dbClose(opin_master)
                    dbClose(iopin_master)
                    error( "Terminal %s does not have exactly one pin." cur_term_name)
                )
                pin = car(term~>pins)
                if( strlen(new_term_name) != 0 then
                    ; rename pin
                    pin_location = pin~>fig~>xy
                    pin_direction = term~>direction
                    ; pick the pin master matching the terminal direction
                    cond( ( pin_direction == "input" pin_master = ipin_master)
                          ( pin_direction == "output" pin_master = opin_master)
                          ( 't pin_master = iopin_master)
                    )
                    ; delete pin
                    unless( dbDeleteObject(pin~>fig)
                        dbClose(cvid)
                        dbClose(ipin_master)
                        dbClose(opin_master)
                        dbClose(iopin_master)
                        error( "Cannot delete pin for terminal %s" cur_term_name )
                    )
                    ; create a temporary terminal with a unique name so we can change the number of bits without getting an error
                    temp_new_term_name = get_unique_term_name(cvid new_term_name)
                    schCreatePin(cvid pin_master temp_new_term_name pin_direction nil pin_location "R0" )
                    ; now rename the new terminal
                    new_term = dbFindTermByName(cvid temp_new_term_name )
                    new_term~>name = new_term_name
                else
                    ; remove pin
                    dbDeleteObject(pin~>fig)
                )
            )
        )
        ; add new pins
        when( new_pins
            ; get location for new pins
            npin_xl = xCoord(car(pin_xy_info)) - 2 * snap_dist
            npin_yl = yCoord(car(pin_xy_info)) - 2 * snap_dist
            npin_xr = xCoord(cadr(pin_xy_info))
            npin_yr = yCoord(cadr(pin_xy_info)) - 2 * snap_dist
            foreach( npin_info new_pins
                npin_name = car(npin_info)
                npin_type = cadr(npin_info)
                ; verify that this pin does not exist yet
                when(dbFindTermByName(cvid npin_name)
                    dbClose(cvid)
                    dbClose(ipin_master)
                    dbClose(opin_master)
                    dbClose(iopin_master)
                    error( "Terminal %s already exists" npin_name)
                )
                ; get pin location based on pin type: inputs/inouts stack down
                ; the left column, outputs stack down the right column
                cond( ( npin_type == "input" pin_master = ipin_master pin_location = npin_xl:npin_yl npin_yl = npin_yl - 2 * snap_dist)
                      ( npin_type == "output" pin_master = opin_master pin_location = npin_xr:npin_yr npin_yr = npin_yr - 2 * snap_dist)
                      ( 't pin_master = iopin_master pin_location = npin_xl:npin_yl npin_yl = npin_yl - 2 * snap_dist)
                )
                ; create pin
                schCreatePin(cvid pin_master npin_name npin_type nil pin_location "R0")
            )
        )
        ; close pin masters; cvid is left open for the caller
        dbClose(ipin_master)
        dbClose(opin_master)
        dbClose(iopin_master)
    )
)
; find X and Y coordinates to insert new symbol pins.
; cvid      : cellview whose terminals are scanned.
; snap_dist : schematic snap spacing; comparisons are done in integer
;             snap units and multiplied back to real coordinates.
; Returns a two-element list (left_xy right_xy) of insertion points.
; Raises an error (after closing cvid) if any terminal has more than one pin.
procedure( get_new_pin_locations(cvid snap_dist)
let( (pin bbox pin_x pin_y xl xr yl yr)
; find the left-most/right-most pin X coordinates, and find the lowest
; Y coordinate of the left-most/right-most pins
xl = nil
xr = nil
yl = nil
yr = nil
foreach( term cvid->terminals
when( term~>pinCount != 1
dbClose(cvid)
error( "Terminal %s does not have exactly one pin" term~>name)
)
pin = car(term~>pins)
bbox = pin~>fig~>bBox
; use the pin bounding box center, quantized to snap units, so the
; comparisons below are immune to floating point rounding error
pin_x = round2((xCoord(car(bbox)) + xCoord(cadr(bbox))) / 2.0 / snap_dist)
pin_y = round2((yCoord(car(bbox)) + yCoord(cadr(bbox))) / 2.0 / snap_dist)
if( xl == nil then
; first pin seen initializes both extremes
xl = pin_x
xr = pin_x
yl = pin_y
yr = pin_y
else
cond( (pin_x < xl xl = pin_x yl = pin_y)
(pin_x == xl yl = min(yl pin_y)))
cond( (pin_x > xr xr = pin_x yr = pin_y)
(pin_x == xr yr = min(yr pin_y)))
)
)
when(xl == nil
; default values if schematic has no terminals
; this usually means you have a testbench schematic
xl = 0
yl = 0
xr = 10
yr = 0
)
list((xl * snap_dist):(yl * snap_dist) (xr * snap_dist):(yr * snap_dist))
)
)
; update pins of a symbol
; pin_map is a list of two-element lists, first element is old pin name, second element is new pin name.
; an empty new name means the pin is removed.
; new_pins is a list of (name type) lists of pins to add.
; sympin is a 3-element list of strings. first element is the pin master library,
; second element is the pin master cell, and third element is the pin master cellview.
; simulators is a list of simulator names for which termOrder should be updated.
; Usually simulators = '("auLvs" "auCdl" "spectre" "hspiceD")
procedure( update_symbol_pin(lib_name cell_name pin_map new_pins sympin simulators "ttllll")
let( (snap_dist cvid pin_master cur_term_name new_term_name term pin bbox pin_x pin_y pin_location pin_direction
label_location label_rel_location temp_new_term_name new_term new_port_order cell_obj bc
mpin_lib mpin_cell mpin_view pin_xy_info npin_xl npin_yl npin_xr npin_yr npin_name npin_type
modified_pins)
snap_dist = schGetEnv("schSnapSpacing")
modified_pins = nil
mpin_lib = car(sympin)
mpin_cell = cadr(sympin)
mpin_view = caddr(sympin)
; open the pin master read-only and the symbol in append mode
unless( pin_master = dbOpenCellViewByType(mpin_lib mpin_cell mpin_view nil "r")
error("Cannot open symbol pin cellview %s__%s (%s)." mpin_lib mpin_cell mpin_view)
)
unless( cvid = dbOpenCellViewByType(lib_name cell_name "symbol" nil "a")
dbClose(pin_master)
error("Cannot open cellview %s__%s (symbol)." lib_name cell_name)
)
; get new pin locations before any pin addition/subtraction.
pin_xy_info = get_new_pin_locations(cvid snap_dist)
; modify existing pins.  new_port_order is built as a tconc list and
; becomes the new schematic/CDF pin order.
new_port_order = tconc(nil "")
foreach( p pin_map
cur_term_name = car(p)
new_term_name = cadr(p)
new_port_order = tconc(new_port_order new_term_name)
when( cur_term_name != new_term_name
modified_pins = 't
; on any error below, close all cellviews first so no edit lock remains
unless( term = dbFindTermByName(cvid cur_term_name)
dbClose(pin_master)
dbReopen(cvid, "r")
dbClose(cvid)
error( "Terminal %s not found." cur_term_name )
)
when( term~>pinCount != 1
dbClose(pin_master)
dbReopen(cvid, "r")
dbClose(cvid)
error( "Terminal %s does not have exactly one pin." cur_term_name)
)
pin = car(term~>pins)
if( strlen(new_term_name) != 0 then
; rename pin; new pin is placed at the old pin's snapped center
bbox = pin~>fig~>bBox
pin_x = (xCoord(car(bbox)) + xCoord(cadr(bbox))) / 2.0
pin_y = (yCoord(car(bbox)) + yCoord(cadr(bbox))) / 2.0
pin_location = round2(pin_x / snap_dist) * snap_dist:round2(pin_y / snap_dist) * snap_dist
pin_direction = term~>direction
; change label: recreate the label attached to the old pin figure
; with the new terminal name, copying all of its display attributes
prog( (label_orientation label_font label_font_size label_type label_text)
foreach( label pin~>fig~>children
when( label~>objType == "label"
label_location = label~>xy
label_orientation = label~>orient
label_rel_location = label~>justify
label_font = label~>font
label_font_size = label~>height
label_type = label~>labelType
label_text = label~>theLabel
when( label_text == cur_term_name
schCreateSymbolLabel(cvid label_location "pin label" new_term_name label_rel_location
label_orientation label_font label_font_size label_type)
return('t)
)
)
)
return(nil)
)
dbDeleteObject(pin~>fig)
dbDeleteObject(pin)
;create a temporary terminal with a unique name so we can change the number of bits without getting an error
temp_new_term_name = get_unique_term_name(cvid new_term_name)
schCreateSymbolPin(cvid pin_master temp_new_term_name pin_direction pin_location "R0" )
new_term = dbFindTermByName(cvid temp_new_term_name )
dbDeleteObject(term)
new_term~>name = new_term_name
else
; remove pin
dbDeleteObject(pin~>fig)
dbDeleteObject(pin)
dbDeleteObject(term)
)
)
)
; add new pins
when( new_pins
modified_pins = 't
; get location for new pins: inputs/inouts stack down the left edge,
; outputs stack down the right edge
npin_xl = xCoord(car(pin_xy_info))
npin_yl = yCoord(car(pin_xy_info)) - 2 * snap_dist
npin_xr = xCoord(cadr(pin_xy_info))
npin_yr = yCoord(cadr(pin_xy_info)) - 2 * snap_dist
foreach( npin_info new_pins
npin_name = car(npin_info)
npin_type = cadr(npin_info)
; verify that this pin does not exist yet
when(dbFindTermByName(cvid npin_name)
dbClose(pin_master)
dbReopen(cvid, "r")
dbClose(cvid)
error( "Terminal %s already exists" npin_name)
)
; update pin order
new_port_order = tconc(new_port_order npin_name)
; get pin location based on pin type
if( equal(npin_type "output") then
label_location = npin_xr:npin_yr
label_rel_location = "lowerLeft"
npin_yr = npin_yr - 2 * snap_dist
else
label_location = npin_xl:npin_yl
label_rel_location = "lowerRight"
npin_yl = npin_yl - 2 * snap_dist
)
; create label and pin
schCreateSymbolLabel(cvid label_location "pin label" npin_name label_rel_location
"R0" "stick" snap_dist "normalLabel")
schCreateSymbolPin(cvid pin_master npin_name npin_type label_location "R0")
)
)
dbClose(pin_master)
when( modified_pins
; update pin order; cdar strips the tconc head and the "" sentinel
new_port_order = cdar(new_port_order)
schEditPinOrder(cvid new_port_order 't)
dbSave(cvid)
; update termOrder for each simulator
cell_obj = ddGetObj(lib_name cell_name nil nil nil "r")
unless( bc = cdfGetBaseCellCDF(cell_obj)
ddReleaseObj(cell_obj)
dbReopen(cvid, "r")
dbClose(cvid)
error("Cannot find CDF parameters for %s__%s. Delete generated cell and try again" lib_name cell_name)
)
foreach( simu simulators
get(bc->simInfo simu)->termOrder = new_port_order
)
unless( cdfSaveCDF(bc)
ddReleaseObj(cell_obj)
dbReopen(cvid, "r")
dbClose(cvid)
error("Cannot save termOrder CDF for %s__%s." lib_name cell_name)
)
ddReleaseObj(cell_obj)
)
; opening schematic will open all symbols inside that schematic.
; as the result, dbClose may not close this symbol view. To get rid
; of edit lock, we use dbReopen so even if dbClose fails the edit lock
; will be gone.
dbReopen(cvid, "r")
dbClose(cvid)
)
)
; record an association list from pin name to pin location in units of snap distances.
; the pin names are sorted alphabetically so we can use the equal function to test
; for equality.
; inst : instance whose master's terminals are inspected.
; Returns a list of (term_name x:y) pairs sorted by terminal name, where
; x:y is the pin figure center in integer snap-distance units.
; Fix: `bbox` is now declared in the let list; it was previously assigned
; without declaration, which made it an unintended global in SKILL.
procedure( get_instance_pin_info(inst "g")
    let( (snap_dist term_name pin_fig bbox xval yval inst_term_xy ans)
        ans = nil
        snap_dist = schGetEnv("schSnapSpacing")
        foreach( term inst->master->terminals
            term_name = term~>name
            ; get terminal coordinate in symbol
            pin_fig = car(term~>pins)~>fig
            bbox = pin_fig~>bBox
            xval = (xCoord(car(bbox)) + xCoord(cadr(bbox))) / 2.0
            yval = (yCoord(car(bbox)) + yCoord(cadr(bbox))) / 2.0
            ; quantize to schematic snap spacing to avoid floating point rounding error.
            inst_term_xy = round2(xval / snap_dist):round2(yval / snap_dist)
            ans = cons(list(term_name inst_term_xy) ans)
        )
        sortcar(ans nil)
    )
)
; get all the wire objects connected to terminals of the given instance.
; we assume each terminal has exactly one pin with 1 wire connected, with a
; single label on the wire. The wire doesn't connect to anything else.
; sch  : schematic cellview containing the instance.
; inst : instance whose terminals are examined.
; Returns a two-element list (ans net_map): ans is an association list
; from terminal name to connected wire figure, and net_map is an
; association list from terminal name to connected net name.
; Fix: `bbox` and `points` are now declared in the let list; they were
; previously assigned without declaration, making them unintended globals.
procedure( get_instance_terminal_wires(sch inst "gg")
    let( (snap_dist term_name pin_fig bbox points xval yval inst_term_xy net_name ans net_map)
        ans = nil
        net_map = nil
        snap_dist = schGetEnv("schSnapSpacing")
        foreach( inst_term inst~>instTerms
            term_name = inst_term~>name
            ; close the schematic before raising errors so no edit lock remains
            when( inst_term~>term~>pinCount != 1
                dbClose(sch)
                error("Terminal %s must have exactly one pin." term_name)
            )
            unless( pin_fig = car(inst_term~>term~>pins)~>fig
                dbClose(sch)
                error("Cannot find pin figure for terminal %s" term_name)
            )
            ; get instance terminal coordinate in schematic
            bbox = dbTransformBBox(pin_fig~>bBox inst~>transform)
            xval = xCoord(car(bbox)) + (xCoord(cadr(bbox)) - xCoord(car(bbox))) / 2.0
            yval = yCoord(car(bbox)) + (yCoord(cadr(bbox)) - yCoord(car(bbox))) / 2.0
            ; quantize to schematic snap spacing to avoid floating point rounding error.
            inst_term_xy = round2(xval / snap_dist) * snap_dist:round2(yval / snap_dist) * snap_dist
            net_name = inst_term~>net~>name
            net_map = cons(list(term_name net_name) net_map)
            ; find the wire figure on this net whose endpoints touch the pin
            foreach( fig inst_term~>net~>figs
                points = fig~>points
                when( member(inst_term_xy points)
                    when( length(points) != 2
                        error("pin for terminal %s must be connected to a single wire with label" term_name)
                    )
                    ans = cons(list(term_name fig) ans)
                )
            )
        )
        list(ans net_map)
    )
)
; Modify the instance terminal connections of the given instance.
; we assume each terminal to modify has at most 1 wire connected,
; if it exists, the wire connects to nothing else, and it has a label.
; In this way, this function just has to change the label text.
;
; if wire_list is not empty, then that means each terminal has exactly one
; wire connected. This function will update the label on the wires according
; to term_mapping.
;
; if wire_list is empty, then that means no wires are connected to terminals.
; this function will attach labels directly to each terminal. The labels are
; determined first from term_mapping, then from net_map
;
; sch is the schematic database object. Must be opened in append/write mode.
; inst is the instance object to modify.
; wire_list is an association list from terminal name to wire figure
; (as returned by get_instance_terminal_wires).
; net_map is an association list from terminal name to current net name.
; term_mapping is a list of key-value pairs, where keys are old net names,
; and values are new net names.
; Returns 't.
procedure( modify_instance_terminal(sch inst wire_list net_map term_mapping "gglll")
let( (snap_dist key_val old_name new_name fig points mid_point new_wire inst_term inst_pin
bbox xval yval term_map_final db_term)
; get schematic snap distance spacing.
snap_dist = schGetEnv("schSnapSpacing")
if( wire_list then
foreach( wire_info wire_list
old_name = car(wire_info)
; only touch wires whose terminal appears in term_mapping
when(key_val = assoc(old_name term_mapping)
new_name = cadr(key_val)
fig = cadr(wire_info)
points = fig~>points
; midpoint of the two wire endpoints; used to place the new label
mid_point = foreach(mapcar (c1 c2) car(points) cadr(points) (c1 + c2) / 2.0)
; delete old wire, then add wire back with new label.
schDelete(fig)
new_wire = car(schCreateWire(sch "draw" "full" points snap_dist snap_dist 0))
schCreateWireLabel(sch new_wire mid_point new_name "lowerCenter" "R0" "stick" 0.0625 nil)
)
)
't
else
; combine net_map and term_mapping
term_map_final = copy(term_mapping)
foreach( net_info net_map
old_name = car(net_info)
unless( assoc(old_name term_map_final)
; add net mapping only if it's not in term_mapping
term_map_final = cons(net_info term_map_final)
)
)
foreach( net_info term_map_final
old_name = car(net_info)
new_name = cadr(net_info)
when(db_term = dbFindTermByName(inst->master old_name)
; only create terminal that's present in the current master
inst_term = dbCreateInstTerm(nil inst db_term)
inst_pin = car(inst_term~>term~>pins)~>fig
; compute the snapped pin center in schematic coordinates, then draw
; a short stub wire ending at the pin with the net label on it
bbox = dbTransformBBox(inst_pin~>bBox inst~>transform)
xval = (xCoord(car(bbox)) + xCoord(cadr(bbox))) / 2.0
yval = (yCoord(car(bbox)) + yCoord(cadr(bbox))) / 2.0
xval = round2(xval / snap_dist) * snap_dist
yval = round2(yval / snap_dist) * snap_dist
new_wire = car(schCreateWire(sch "draw" "full" list(xval-snap_dist:yval-snap_dist xval:yval)
snap_dist snap_dist 0))
schCreateWireLabel(sch new_wire xval:yval new_name "lowerCenter" "R0" "stick" 0.0625 nil)
)
)
't
)
)
)
; Run a hierarchical check-and-save on the given schematic cellview,
; warn about any cellviews that have check errors, then release it.
procedure( check_and_save_schematic(sch "g")
    let( (check_results)
        ; configure the checker to save as it walks the hierarchy
        schSetEnv( "checkHierSave" 't)
        schSetEnv( "saveAction" "Save")
        check_results = schCheckHier(sch "schematic symbol" "")
        ; each result entry pairs a cellview with its error count
        foreach( entry check_results
            warn( "%s__%s (%s) has %d errors." car(entry)~>lib~>name car(entry)~>cellName car(entry)~>viewName cadr(entry))
        )
        ; make sure all edit locks are gone by reopening in read mode
        dbReopen(sch, "r")
        dbClose(sch)
    )
)
; modify a schematic cell. Used to convert copied template cells into concrete instantiation.
;
; inst_list is an association list of (inst_name, rinst_list) pairs. Where:
;
; inst_name : name of the instance in the template cell.
; rinst_list : a list of rinsts, which are instances to replace the original instance by.
;              If this list is empty, the original instance should be deleted. If the list
;              has more than one element, we should array the original instance.
;
; Each rinst is a disembodied property list, with the properties:
;
; rinst->name : the name of this rinst.
; rinst->lib_name : the instance master library.
; rinst->cell_name : the instance master cell.
; rinst->params : an association list of the CDF params of this rinst. The values are always string.
; rinst->term_mapping : an association list of the modified terminal connections of this rinst.
;                       if no connections are changed, this list should be empty.
;
; (You can read more about disembodied property lists and association lists in the skill
; language user guide).
;
; For each instance, this function does the following:
; 1. Find the instance with the given name.
; 2. If rinst_list is nil, delete this instance.
; 3. If rinst_list has exactly one element:
;    i. rename the instance name to rinst's name.
;    ii. change the instance master of the instance.
;    iii. change the CDF parameters (this should only happen with BAG primitives).
;    iv. change the port connections of this instance.
; 4. If rinst_list has more than one element, for each additional element,
;    copy the original instance and perform step 3 on that instance.
;
; This procedure allows one to delete or array any instances in the schematic template.
; Fixes: removed the dead `last_inst = nil` assignment (it was never read and,
; being undeclared, leaked a global variable) and the unused `errmsg` local.
procedure( modify_schematic_content(sch_cv inst_list "gl")
    let( (inst_obj inst_name rinst_list rinst_len cur_inst wire_list net_map par_val xl xr transform
          snap_dist pin_info tmp_result)
        snap_dist = schGetEnv("schSnapSpacing")
        foreach( inst inst_list
            inst_name = car(inst)
            unless( inst_obj = dbFindAnyInstByName(sch_cv inst_name)
                dbClose(sch_cv)
                error( "Cannot find instance %s" inst_name )
            )
            rinst_list = cadr(inst)
            rinst_len = length(rinst_list)
            if( rinst_len == 0 then
                ; no instances to replace by, delete.
                wire_list = car(get_instance_terminal_wires(sch_cv inst_obj))
                ; delete wires connected to instance
                foreach( wire_info wire_list
                    schDelete(cadr(wire_info))
                )
                ; delete instance
                dbDeleteObject(inst_obj)
            else
                cur_inst = nil
                pin_info = nil
                foreach( rinst rinst_list
                    if( !cur_inst then
                        ; the first rinst reuses the original instance in place
                        cur_inst = inst_obj
                        tmp_result = get_instance_terminal_wires(sch_cv cur_inst)
                        net_map = cadr(tmp_result)
                        wire_list = car(tmp_result)
                        pin_info = get_instance_pin_info(cur_inst)
                        ; figure out bounding box for potential future array:
                        ; span the instance plus all of its connected wires
                        xl = xCoord(car(cur_inst~>bBox))
                        xr = xCoord(cadr(cur_inst~>bBox))
                        foreach( wire_info wire_list
                            xl = min(xl xCoord(car(cadr(wire_info)~>bBox)))
                            xr = max(xr xCoord(cadr(cadr(wire_info)~>bBox)))
                        )
                        ; snapped horizontal offset applied to each extra copy
                        transform = list(round2((xr - xl + snap_dist) / snap_dist) * snap_dist:0 "R0" 1.0)
                    else
                        ; more than 1 rinst, copy cur_inst, do not copy wires
                        wire_list = nil
                        ; copy instance
                        cur_inst = dbCopyFig(cur_inst nil transform)
                    )
                    ; change instance name and master
                    when(cur_inst->name != rinst->name
                        cur_inst->name = rinst->name
                    )
                    schReplaceProperty(list(cur_inst) "master" sprintf(nil "%s %s %s" rinst->lib_name
                        rinst->cell_name cur_inst->viewName))
                    ; set parameters
                    foreach( cdf_par cdfGetInstCDF(cur_inst)~>parameters
                        par_val = cadr(assoc(cdf_par->name rinst->params))
                        ; change CDF parameter value only if specified in given parameters
                        when( par_val != nil
                            cdf_par->value = par_val
                        )
                    )
                    when( wire_list
                        ; if wire_list is not empty, check that the pins match. If so, keep wires around,
                        ; otherwise, delete wires
                        unless( equal(pin_info get_instance_pin_info(cur_inst))
                            ; delete wires connected to instance
                            foreach( wire_info wire_list
                                schDelete(cadr(wire_info))
                            )
                            wire_list = nil
                        )
                    )
                    ; modify connections, keeping old wires around
                    modify_instance_terminal(sch_cv cur_inst wire_list net_map rinst->term_mapping)
                )
            )
        )
    )
)
; given a copied template cell, modify it to a concrete schematic.
; lib_name/cell_name : the cell to convert.
; pin_map   : list of (old_pin new_pin) pairs.
; new_pins  : list of (name type) pins to add.
; inst_list : instance modification list; see modify_schematic_content.
; sympin/ipin/opin/iopin : pin master cellview specs (lib cell view).
; simulators : simulator names whose termOrder CDF should be updated.
; Fix: the symbol cellview opened here to probe for existence is now
; closed (the original implementation leaked the handle).
procedure( convert_template_cells(lib_name cell_name pin_map new_pins inst_list sympin ipin opin iopin simulators)
    let( (sym_cv sch)
        ; update symbol view first.
        if( sym_cv = dbOpenCellViewByType(lib_name cell_name "symbol" nil "r") then
            ; release the probe handle; update_symbol_pin reopens the symbol itself
            dbClose(sym_cv)
            printf("*INFO* Updating %s__%s symbol pins.\n" lib_name cell_name)
            update_symbol_pin(lib_name cell_name pin_map new_pins sympin simulators)
        else
            warn("Did not find symbol for %s__%s. Skipping. Is it testbench?" lib_name cell_name)
        )
        ; attempt to open schematic in append mode
        unless( sch = dbOpenCellViewByType(lib_name cell_name "schematic" nil "a")
            error("Cannot open %s__%s (schematic) in append mode." lib_name cell_name)
        )
        ; update schematic content
        printf("*INFO* Updating %s__%s instances and connections.\n" lib_name cell_name)
        modify_schematic_content(sch inst_list)
        ; update schematic pins
        printf("*INFO* Updating %s__%s schematic pins.\n" lib_name cell_name)
        update_schematic_pin(sch pin_map new_pins ipin opin iopin)
        ; check_and_save_schematic closes sch for us
        check_and_save_schematic(sch)
    )
)
; create concrete schematics from template cells.
; lib_name    : target library name.
; tech_lib    : technology library used if the target library must be created.
; lib_path    : directory for a newly created target library.
; temp_file   : file containing the template list (parse_data_from_file format).
; change_file : file containing the per-cell change list.
; sympin/ipin/opin/iopin : pin master cellview specs (lib cell view).
; simulators  : simulator names whose termOrder CDF should be updated.
; copy        : when non-nil, create the library and copy templates first.
; Returns 't.
; Fix: `new_pins` is now declared in the let list; it was previously
; assigned without declaration, making it an unintended global in SKILL.
procedure( create_concrete_schematic( lib_name tech_lib lib_path temp_file change_file
        sympin ipin opin iopin simulators copy "tttttlllllg" )
    let( (template_list change_list cell_name pin_map new_pins inst_list)
        printf("*INFO* Reading template and change list from file\n")
        template_list = parse_data_from_file( temp_file )
        change_list = parse_data_from_file( change_file )
        when( copy
            printf("*INFO* Creating library: %s\n" lib_name)
            create_or_erase_library( lib_name tech_lib lib_path nil )
            printf("*INFO* Copying templates to library: %s\n" lib_name)
            copy_templates_to_library( lib_name template_list )
        )
        ; apply each cell's recorded pin and instance changes
        foreach( change change_list
            cell_name = change->name
            pin_map = change->pin_map
            new_pins = change->new_pins
            inst_list = change->inst_list
            printf("*INFO* Updating cell %s__%s\n" lib_name cell_name)
            convert_template_cells( lib_name cell_name pin_map new_pins inst_list
                sympin ipin opin iopin simulators )
        )
        't
    )
)
; create a new layout view then instantiate a single pcell instance.
; this method also copy all the labels in the pcell top level. In this way LVS/PEX will
; work correctly.
; params is a list of (variable_name type_string value) lists.
; pin_mapping is a list of (old_pin new_pin) lists.
; params_f / pin_mapping_f are file names; both are parsed with
; parse_data_from_file before use.
procedure( create_layout_with_pcell(lib_name cell_name view_name inst_lib inst_cell params_f pin_mapping_f "ttttttt")
let( (lay_cv inst_master inst inst_shapes label_location label_orientation label_lpp
label_just label_font label_height label_type label_text params pin_mapping)
; open the target layout in write mode (creates/overwrites it)
unless( lay_cv = dbOpenCellViewByType(lib_name cell_name view_name "maskLayout" "w")
error("Cannot open cellview %s__%s (%s)." lib_name cell_name view_name)
)
; open the pcell master; close the target first on failure to avoid a leak
unless( inst_master = dbOpenCellViewByType(inst_lib inst_cell "layout" "maskLayout" "r")
dbClose(lay_cv)
error("Cannot open cellview %s__%s (layout)." inst_lib inst_cell)
)
params = parse_data_from_file(params_f)
pin_mapping = parse_data_from_file(pin_mapping_f)
; place the single pcell instance at the origin, unrotated
inst = dbCreateParamInst(lay_cv inst_master "XTOP" '(0 0) "R0" 1 params)
inst_shapes = inst~>master~>shapes
; copy every top-level label of the pcell into the new view, renaming
; labels according to pin_mapping where a mapping entry exists
foreach(shape inst_shapes
when( shape->objType == "label"
label_location = shape~>xy
label_orientation = shape~>orient
label_lpp = shape~>lpp
label_just = shape~>justify
label_font = shape~>font
label_height = shape~>height
label_type = shape~>labelType
label_text = shape~>theLabel
; rename the label only if pin_mapping has an entry for it
when( cadr(assoc(label_text pin_mapping))
label_text = cadr(assoc(label_text pin_mapping))
)
dbCreateLabel(lay_cv label_lpp label_location label_text label_just label_orientation label_font label_height )
)
)
dbClose(inst_master)
dbSave(lay_cv)
dbClose(lay_cv)
)
)
; helper for creating a path segment
; cv      : layout cellview to draw in.
; lay     : layer of the segment.
; p0 / p1 : segment endpoints.
; width   : path width.
; start_s / end_s : end styles; "round" is mapped to "custom" with the
;                   extension info list, "truncate" zeroes the extension.
procedure( create_path_seg_helper(cv lay p0 p1 width start_s end_s)
let( (diag_ext info_list bext eext)
; diagonal segment (neither purely horizontal nor vertical): the drawn
; width is scaled by sqrt(2) -- presumably to keep the perpendicular
; width constant on 45-degree segments; TODO confirm
if( and(car(p0) != car(p1) cadr(p0) != cadr(p1)) then
diag_ext = width / 2
width = width * sqrt(2)
else
diag_ext = width * sqrt(2) / 2
)
; default begin/end extensions: half the width
bext = width/2
eext = width/2
if( start_s == "round" then
start_s = "custom"
else
when( start_s == "truncate"
bext = 0
)
)
if( end_s == "round" then
end_s = "custom"
else
when( end_s == "truncate"
eext = 0
)
)
; custom-extension info list: (beginExt endExt diagonal-extension list)
info_list = list(bext eext list(diag_ext diag_ext width/2 diag_ext diag_ext width/2))
dbCreatePathSeg(cv lay p0 p1 width start_s end_s info_list)
)
)
; helper for creating a path
; Draws the path described by the disembodied property list `path` as a
; sequence of individual path segments: the first and last segment use the
; path's end style, interior joints use the join style.
; path fields read: layer, width, points, end_style, join_style.
procedure( create_path_helper( cv path )
let( (lay width points estyle jstyle p0 p1 plen idx start_s end_s)
lay = path->layer
width = path->width
points = path->points
estyle = path->end_style
jstyle = path->join_style
p0 = nil
plen = length(points)
idx = 0
; walk consecutive point pairs (p0, p1); idx counts points seen so far
foreach( cur_point points
p1 = cur_point
when( idx > 0
; first segment starts with the end style, later ones with join style
if( idx == 1 then
start_s = estyle
else
start_s = jstyle
)
; last segment ends with the end style, earlier ones with join style
if( idx == plen - 1 then
end_s = estyle
else
end_s = jstyle
)
create_path_seg_helper(cv lay p0 p1 width start_s end_s)
)
p0 = p1
idx = idx + 1
)
)
)
; helper for creating a single layout view
; Populates cellview `cv` with instances, rectangles, paths, polygons,
; blockages, boundaries, vias, and pins, in that order. Each *_list argument
; is a list of disembodied property lists describing one object each.
; tech_file is the technology file used to look up via definitions.
procedure( create_layout_helper( cv tech_file inst_list rect_list via_list pin_list path_list
blockage_list boundary_list polygon_list "ggllllllll" )
let( (inst_cv obj via_def via_enc1 via_enc2 enc1 enc2 off1 off2 via_params make_pin_rect
pin_bb pin_w pin_h pin_xc pin_yc pin_orient label_h param_order orig_shape arr_dx arr_dy)
; create instances
foreach( inst inst_list
if( inst_cv = dbOpenCellViewByType( inst->lib inst->cell inst->view nil "r" ) then
; single instance vs. mosaic, pcell vs. plain -- four combinations
if( and( inst->num_rows==1 inst->num_cols==1) then
if( inst->params != nil then
; create pcell instance
obj = dbCreateParamInst(cv inst_cv inst->name inst->loc inst->orient 1 inst->params)
; execute parameter callbacks
when( obj
; callback order: explicit param_order if given, otherwise the
; order the parameters appear in inst->params
if( inst->param_order != nil then
param_order = inst->param_order
else
param_order = mapcar( lambda( (x) car(x) ) inst->params )
)
CCSinvokeCdfCallbacks(obj ?order param_order)
)
else
obj = dbCreateInst(cv inst_cv inst->name inst->loc inst->orient)
)
else
if( inst->params != nil then
; create pcell mosaic
obj = dbCreateParamSimpleMosaic(cv inst_cv inst->name inst->loc inst->orient
inst->num_rows inst->num_cols inst->sp_rows inst->sp_cols
inst->params)
; execute parameter callbacks
when( obj
if( inst->param_order != nil then
param_order = inst->param_order
else
param_order = mapcar( lambda( (x) car(x) ) inst->params )
)
CCSinvokeCdfCallbacks(obj ?order param_order)
)
else
obj = dbCreateSimpleMosaic(cv inst_cv inst->name inst->loc inst->orient
inst->num_rows inst->num_cols inst->sp_rows inst->sp_cols)
)
)
unless( obj
warn("Error creating instance %s of %s__%s (%s). Skipping." inst->name inst->lib inst->cell inst->view)
)
else
warn("Cannot find instance %s__%s (%s). Skipping." inst->lib inst->cell inst->view)
)
)
; create rectangles
foreach( rect rect_list
orig_shape = dbCreateRect(cv rect->layer rect->bbox)
if( not(orig_shape) then
warn("Error creating rectangle of layer %A. Skipping." rect->layer)
else
; optional array replication: copy the shape on an arr_nx x arr_ny
; grid with pitch (arr_spx, arr_spy); the original shape is (1, 1)
when( rect->arr_nx != nil
for(icol 2 rect->arr_nx
arr_dx = rect->arr_spx * (icol - 1)
for(irow 1 rect->arr_ny
arr_dy = rect->arr_spy * (irow - 1)
dbCopyFig(orig_shape nil list(arr_dx:arr_dy "R0" 1))
)
)
; first column (icol == 1) only needs rows 2..arr_ny copied
for(irow 2 rect->arr_ny
arr_dy = rect->arr_spy * (irow - 1)
dbCopyFig(orig_shape nil list(0:arr_dy "R0" 1))
)
)
)
)
; create paths
foreach( path path_list
create_path_helper(cv path)
)
; create polygons
foreach( poly polygon_list
dbCreatePolygon(cv poly->layer poly->points)
)
; create blockages
foreach( block blockage_list
if( block->btype == "placement" then
dbCreateAreaBlockage(cv block->points)
else
dbCreateLayerBlockage(cv block->layer block->btype block->points)
)
)
; create boundaries
foreach( bound boundary_list
cond( (bound->btype == "PR"
dbCreatePRBoundary(cv bound->points))
(bound->btype == "snap"
dbCreateSnapBoundary(cv bound->points))
(bound->btype == "area"
dbCreateAreaBoundary(cv bound->points))
('t
warn("Unknown boundary type %s. Skipping." bound->btype))
)
)
; create vias
foreach( via via_list
if( via_def = techFindViaDefByName(tech_file via->id) then
; compute via parameter list
; enc lists appear to be (left right top bottom) per layer; enc* is
; the average enclosure per axis, off* the center offset -- TODO
; confirm the field ordering against the via info producer
via_enc1 = via->enc1
via_enc2 = via->enc2
enc1 = list( (car(via_enc1) + cadr(via_enc1)) / 2.0
(caddr(via_enc1) + cadr(cddr(via_enc1))) / 2.0 )
enc2 = list( (car(via_enc2) + cadr(via_enc2)) / 2.0
(caddr(via_enc2) + cadr(cddr(via_enc2))) / 2.0 )
off1 = list( (cadr(via_enc1) - car(via_enc1)) / 2.0
(caddr(via_enc1) - cadr(cddr(via_enc1))) / 2.0 )
off2 = list( (cadr(via_enc2) - car(via_enc2)) / 2.0
(caddr(via_enc2) - cadr(cddr(via_enc2))) / 2.0 )
via_params = list( list("cutRows" via->num_rows)
list("cutColumns" via->num_cols)
list("cutSpacing" list(via->sp_cols via->sp_rows))
list("layer1Enc" enc1)
list("layer2Enc" enc2)
list("layer1Offset" off1)
list("layer2Offset" off2) )
; if via width and height given, add to via_params
when( via->cut_width != nil
via_params = cons( list("cutWidth" via->cut_width) via_params)
)
when( via->cut_height != nil
via_params = cons( list("cutHeight" via->cut_height) via_params)
)
; create actual via
orig_shape = dbCreateVia(cv via_def via->loc via->orient via_params)
if( not(orig_shape) then
warn("Error creating via %s. Skipping." via->id)
else
; same optional array replication scheme as for rectangles above
when( via->arr_nx != nil
for(icol 2 via->arr_nx
arr_dx = via->arr_spx * (icol - 1)
for(irow 1 via->arr_ny
arr_dy = via->arr_spy * (irow - 1)
dbCopyFig(orig_shape nil list(arr_dx:arr_dy "R0" 1))
)
)
for(irow 2 via->arr_ny
arr_dy = via->arr_spy * (irow - 1)
dbCopyFig(orig_shape nil list(0:arr_dy "R0" 1))
)
)
)
else
warn("Via %s not found. Skipping." via->id)
)
)
; create pins
foreach( pin pin_list
pin_bb = pin->bbox
; bbox format: ((xl yl) (xr yr)); derive size and center
pin_w = caadr(pin_bb) - caar(pin_bb)
pin_h = cadr(cadr(pin_bb)) - cadr(car(pin_bb))
pin_xc = (caar(pin_bb) + caadr(pin_bb)) / 2.0
pin_yc = (cadr(car(pin_bb)) + cadr(cadr(pin_bb))) / 2.0
; orient the label along the longer side; label height is the short side
if( pin_w >= pin_h then
pin_orient = "R0"
label_h = pin_h
else
pin_orient = "R90"
label_h = pin_w
)
; get make_pin_rect, true if both net_name and pin_name are non-empty
make_pin_rect = pin->net_name != "" && pin->pin_name != ""
; an explicit make_rect field overrides the default decision
when( pin->make_rect != nil
make_pin_rect = pin->make_rect
)
; printf("make_pin_rect: %A\n" make_pin_rect)
; create pin object only if make_pin_rect is True.
when( make_pin_rect != 0 && make_pin_rect != nil
; printf("making pin.\n")
dbCreatePin( dbMakeNet(cv pin->net_name) dbCreateRect(cv pin->layer pin_bb) pin->pin_name )
)
; printf("%A %A %A %A\n" pin->label pin->layer pin_xc pin_yc)
; the label is always created, even when no pin rect is made
dbCreateLabel( cv pin->layer list(pin_xc pin_yc) pin->label "centerCenter" pin_orient "roman" label_h )
)
)
)
; create a new layout view with the given geometries
; inst_f, rect_f, via_f, and pin_f are files containing list of disembodied property lists.
; lib_name : library in which the layout cells are created.
; view_name: name of the layout view to create for every cell.
; via_tech : library whose technology file defines the vias.
; layout_f : file containing, per cell, a list of
;            (name insts rects vias pins paths blockages boundaries polygons).
procedure( create_layout( lib_name view_name via_tech layout_f "ttt" )
let( (tech_file layout_info cell_name inst_list rect_list via_list pin_list
path_list blockage_list boundary_list polygon_list cv)
unless( tech_file = techGetTechFile(ddGetObj(via_tech))
error("Via technology file %s not found." via_tech)
)
layout_info = parse_data_from_file(layout_f)
; create one layout cellview per entry in layout_f
foreach( info layout_info
; unpack the 9 positional fields of each cell description
cell_name = nthelem(1 info)
inst_list = nthelem(2 info)
rect_list = nthelem(3 info)
via_list = nthelem(4 info)
pin_list = nthelem(5 info)
path_list = nthelem(6 info)
blockage_list = nthelem(7 info)
boundary_list = nthelem(8 info)
polygon_list = nthelem(9 info)
unless( cv = dbOpenCellViewByType( lib_name cell_name view_name "maskLayout" "w" )
error("Cannot create new layout cell %s__%s (%s)." lib_name cell_name view_name)
)
printf("Creating %s__%s (%s)\n" lib_name cell_name view_name)
create_layout_helper(cv tech_file inst_list rect_list via_list pin_list path_list
blockage_list boundary_list polygon_list)
dbSave(cv)
dbClose(cv)
)
t
)
)
; release write locks from all the given cellviews
; cell_view_list_f is a file containing a list of (cell_name view_name)
; pairs; each matching open cellview in lib_name is reopened read-only
; (dropping its write lock) and then closed.
procedure( release_write_locks( lib_name cell_view_list_f "tt" )
let( (cell_view_list lib_obj cv)
cell_view_list = parse_data_from_file(cell_view_list_f)
when( lib_obj = ddGetObj(lib_name nil nil nil nil "r")
foreach( info cell_view_list
; only act on cellviews that are currently open in this session
when( cv = dbFindOpenCellView( lib_obj car(info) cadr(info) )
dbReopen(cv, "r")
dbClose(cv)
)
)
ddReleaseObj(lib_obj)
)
t
)
)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Simulation/Testbench related functions ;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; set an entry in an association list
; returns the modified association list.
; NOTE: the entry is updated in place (destructively, via rplacd) only when
; mykey already exists in mylist; if the key is absent the list is returned
; unchanged -- no new entry is added.
procedure( set_assoc_list(mylist mykey myval)
let( (tmp)
when( tmp = assoc(mykey mylist)
; print("replacing")
rplacd(tmp list(myval))
)
)
mylist
)
; Copy the schematic of a testbench, and replace the DUT instance.
;
; This procedure copies the schematic of a testbench to a new library and cell, then finds all
; instances with the name prefix "XDUT", then change their instance master to dut_lib and dut_cell.
;
; master_lib/master_cell : testbench template to copy.
; targ_lib               : destination library (created if missing).
; dut_lib/dut_cell       : new master for every XDUT* instance.
; tech_lib               : technology library attached to a newly created targ_lib.
; new_lib_path           : directory in which to create targ_lib.
procedure( copy_testbench(master_lib master_cell targ_lib
dut_lib dut_cell tech_lib new_lib_path "ttttttt")
; fix: src_gdm and targ_gdm are now declared locally instead of leaking
; into the global scope.
let( (tlib_obj sch replace_count inst_prefix new_master src_gdm targ_gdm)
inst_prefix = "XDUT"
printf("Copying testbench %s__%s to %s__%s\n" master_lib master_cell targ_lib master_cell)
; create target library if does not exist
unless( tlib_obj = ddGetObj(targ_lib nil nil nil nil "r")
when( and(new_lib_path (new_lib_path != "."))
; fix: this previously referenced the undefined variable lib_name;
; the library directory is named after the target library.
new_lib_path = strcat(new_lib_path "/" targ_lib)
)
tlib_obj = ddCreateLib(targ_lib new_lib_path)
; attach technology file
techBindTechFile(tlib_obj tech_lib)
)
; copy testbench to new library
src_gdm = gdmCreateSpecList()
gdmAddSpecToSpecList(gdmCreateSpec(master_lib master_cell nil nil "CDBA") src_gdm)
targ_gdm = gdmCreateSpecList()
gdmAddSpecToSpecList(gdmCreateSpec(targ_lib master_cell nil nil "CDBA") targ_gdm)
ccpCopy(src_gdm targ_gdm 't 'CCP_EXPAND_COMANAGED)
; open copied schematic
unless( sch = dbOpenCellViewByType(tlib_obj master_cell "schematic" nil "a")
ddReleaseObj(tlib_obj)
error("Cannot open testbench schematic %s__%s" targ_lib master_cell)
)
; replace instances whose names start with the XDUT prefix
replace_count = 0
sprintf(new_master "%s %s symbol" dut_lib dut_cell)
foreach( inst sch~>instances
when( strncmp( inst~>name inst_prefix strlen(inst_prefix) ) == 0
replace_count = replace_count + 1
schReplaceProperty(list(inst) "master" new_master)
)
)
; save and close resources
check_and_save_schematic(sch)
ddReleaseObj(tlib_obj)
; error if nothing is replaced
when( replace_count == 0
error("Cannot find any instances in %s__%s with name prefix %s" targ_lib master_cell inst_prefix)
)
't
)
)
; opens an adexl session. Returns a list of session name and setup database handle.
; tb_lib/tb_cell/tb_view : the cellview holding the ADE XL setup database.
; session_name           : name for the new adexl session.
; mode                   : open mode passed to axlSetMainSetupDBLCV ("r"/"a"/...).
; On failure the partially created session is closed before erroring.
procedure( open_adexl_session(tb_lib tb_cell tb_view session_name mode "ttttt")
let( (session sdb)
unless( session = axlCreateSession(session_name)
error("Cannot create temporary adexl session: %s" session_name)
)
unless( sdb = axlSetMainSetupDBLCV(session tb_lib tb_cell tb_view ?mode mode)
axlCloseSession(session)
error("Cannot load adexl database from %s__%s (%s)" tb_lib tb_cell tb_view)
)
list(session sdb)
)
)
; Enables only the given corners in the simulation setup database.
; sdb            : adexl setup database handle.
; corner_list    : names of the corners to enable; every other corner in the
;                  database is disabled.
; env_param_list : list of (corner_name ((param value) ...)) entries; each
;                  (param value) pair is written into that corner.
procedure( enable_adexl_corners( sdb corner_list env_param_list "gll")
let( (env_name par_val_list corner)
; enable exactly the corners named in corner_list, disable the rest
foreach(cur_name cadr(axlGetCorners(sdb))
axlSetEnabled( axlGetCorner(sdb cur_name) member(cur_name corner_list) )
)
; apply per-corner environment parameter overrides
foreach(env_par_obj env_param_list
env_name = car(env_par_obj)
par_val_list = cadr(env_par_obj)
corner = axlGetCorner(sdb env_name)
foreach(par_val par_val_list
axlPutVar(corner car(par_val) cadr(par_val))
)
)
)
)
; Set testbench parameters
; val_list is an association list from variable names to variable values as string, which
; could be a constant value or a parametric sweep string
; Each (name value) pair in par_val_list is written into the setup database.
procedure( set_adexl_parameters(sdb par_val_list "gl")
foreach( var_spec par_val_list
axlPutVar(sdb car(var_spec) cadr(var_spec))
)
)
; Create a new config view for a testbench.
;
; lib_name : testbench library name.
; cell_name : testbench cell name.
; view_name : name of the config view (a testbench can have multiple config views)
; libs : a string of global libraries, separated by spaces.
; views : a string of cellviews to use, separated by spaces.
; stops : a string of cellviews to stop at, separated by spaces.
; The config's top cell is always the schematic view of lib_name/cell_name.
procedure( create_config_view(lib_name cell_name view_name libs views stops "tttttt")
let( (conf conf_bag)
printf("Creating config view %s__%s (%s)\n" lib_name cell_name view_name)
unless( conf = hdbOpen(lib_name cell_name view_name "w")
error("Cannot open config view %s__%s (%s)." lib_name cell_name view_name)
)
hdbSetTopCellViewName(conf lib_name cell_name "schematic")
hdbSetDefaultLibListString(conf libs)
hdbSetDefaultViewListString(conf views)
hdbSetDefaultStopListString(conf stops)
hdbSaveAs(conf lib_name cell_name view_name)
; close configuration
conf_bag = hdbCreateConfigBag()
hdbAddConfigToBag(conf_bag conf)
hdbCloseConfigsInBag(conf_bag)
)
)
; edit the config view of a testbench. Use to control whether we're simulating with
; schematic or post-extraction.
;
; lib_name : testbench library name.
; cell_name : testbench cell name.
; view_name : name of the config view (a testbench can have multiple config views)
; conf_list : a list of (lib, cell, view) configurations. Where each entry
; means that view should be used for the cell in library lib.
; Entries whose view is the literal string "netlist" are collected and
; handled separately by edit_config_source_files (extracted-netlist hack).
procedure( edit_config_view(lib_name cell_name view_name conf_list "tttl")
let( (conf lib cell view conf_bag netlist_list)
unless( conf = hdbOpen(lib_name cell_name view_name "a")
error("Cannot open config view %s__%s (%s)." lib_name cell_name view_name)
)
netlist_list = '()
foreach( cell_config conf_list
lib = car(cell_config)
cell = cadr(cell_config)
view = caddr(cell_config)
if( view == "netlist" then
; set to use extracted netlist
netlist_list = cons(list(lib cell) netlist_list)
else
; bind the given cellview for this (lib, cell) in the config
hdbSetObjBindRule(conf list(list(lib cell nil nil))
list('hdbcBindingRule list(nil nil view)))
)
)
hdbSaveAs(conf lib_name cell_name view_name)
; close configuration
conf_bag = hdbCreateConfigBag()
hdbAddConfigToBag(conf_bag conf)
hdbCloseConfigsInBag(conf_bag)
; update netlist source files
edit_config_source_files(lib_name cell_name view_name netlist_list)
)
)
; HACKERMAN FUNCTION:
; so as usual, cadence is so terrible they don't have skill API to set source files.
; instead, spice/spectre source files are defined in a secret ASCII prop.cfg file.
; this hacky method will create the right prop.cfg file for you.
; lib_name/cell_name/view_name : config view whose prop.cfg is (re)written.
; netlist_list                 : list of (lib cell) pairs that should use an
;                                extracted netlist as their source file.
procedure( edit_config_source_files(lib_name cell_name view_name netlist_list "tttl")
; fix: lib and cell are now declared in the let() variable list; previously
; they were assigned without declaration and silently became globals.
let( (p lib_dir cell_lib_dir lib cell)
lib_dir = get_lib_directory(lib_name)
; prop.cfg lives inside the config view's directory
p = outfile( sprintf(nil "%s/%s/%s/%s" lib_dir cell_name view_name "prop.cfg") "w" )
; common header
fprintf( p "file-format-id 1.1;\ndefault\n{\n}\n" )
; one cell section per extracted-netlist entry
foreach( lib_cell netlist_list
lib = car(lib_cell)
cell = cadr(lib_cell)
cell_lib_dir = get_lib_directory(lib)
fprintf( p "cell %s.%s\n{\n" lib cell )
fprintf( p " non-inherited string prop sourcefile = \"%s/%s/netlist/netlist\";\n}\n"
cell_lib_dir cell )
)
close(p)
)
)
; Write testbench information to file.
; Emits a YAML-like summary of the setup database to result_file:
; all corners, the enabled corners, global parameters, and outputs.
; sdb            : adexl setup database handle.
; result_file    : path of the file to write.
; output_list    : list of output objects (with ->name and ->expression).
; en_corner_list : list of enabled corner names.
procedure( write_testbench_info_to_file(sdb result_file output_list en_corner_list)
; fix: var_list is now declared locally; previously it was assigned without
; declaration and silently became a global variable.
let( (p output_count var_list)
; write testbench information to result_file
p = outfile(result_file "w")
fprintf(p "corners:\n")
foreach( corn cadr(axlGetCorners(sdb))
fprintf(p " - %s\n" corn)
)
fprintf(p "enabled_corners:\n")
foreach( corn en_corner_list
fprintf(p " - %s\n" corn)
)
fprintf(p "parameters:\n")
if( var_list = cadr(axlGetVars(sdb)) then
foreach( var_name var_list
fprintf(p " %s: \"%s\"\n" var_name axlGetVarValue(axlGetVar(sdb var_name)))
)
else
; no parameters: emit an empty YAML mapping
fprintf(p " {}\n")
)
fprintf(p "outputs:\n")
output_count = 0
foreach( out_obj output_list
; quotes in a name would corrupt the YAML output, so skip those entries
if( rexMatchp( "\"" out_obj->name) then
warn("Output expression name (%s) have quotes, skipping" out_obj->name)
else
fprintf(p " \"%s\": !!str %A\n" out_obj->name out_obj->expression)
output_count = output_count + 1
)
)
when( output_count == 0
fprintf(p " {}\n")
)
close(p)
)
)
; Instantiates a testbench.
;
; Copy a testbench template to the desired location, replace instances, make config view,
; and also setup corner settings in adexl.
; this method will also record list of corners, global variables, and output expressions
; to result_file
; tb_cell        : testbench cell (already copied into targ_lib).
; targ_lib       : library holding the testbench.
; config_libs/config_views/config_stops : config view settings.
; default_corner : the single corner enabled by default.
; corner_file    : adexl corner definition file to load.
; def_files      : simulator definition files for the ADE session.
; tech_lib       : technology library (unused here; kept for interface
;                  compatibility -- TODO confirm with callers).
; result_file    : file to which testbench info is written.
procedure( instantiate_testbench(tb_cell targ_lib
config_libs config_views config_stops
default_corner corner_file def_files
tech_lib result_file
"tttttttltt")
let( (session_name session_sdb session sdb test_names test_name test tool_args corner_list
ade_symbol ade_session output_list tmp_state_name state_obj success)
tmp_state_name = "orig_state"
; check if temporary ADE session state already exists, if so, delete it
state_obj = ddGetObj(targ_lib tb_cell tmp_state_name)
when( state_obj
success = ddDeleteObj(state_obj)
unless( success
error("Cannot delete orig_state cellview.")
)
)
; create config view
create_config_view(targ_lib tb_cell "config" config_libs config_views config_stops)
; session_name = "modify_adexl"
; unique session name per call, using the global counter
session_name = sprintf(nil "modify_adexl_%d" bag_modify_adexl_counter)
bag_modify_adexl_counter = bag_modify_adexl_counter + 1
session_sdb = open_adexl_session(targ_lib tb_cell "adexl" session_name "a")
session = car(session_sdb)
sdb = cadr(session_sdb)
; check that only one test is defined
test_names = cadr(axlGetTests(sdb))
when(length(test_names) != 1
axlCommitSetupDB(sdb)
axlCloseSetupDB(sdb)
axlCloseSession(session)
error("ADEXL testbench must have exactly 1 test defined.")
)
; save current test setup state
axlSaveSetupState(session "adexl_default" "All")
; change all tests to use config view, and set all test's definition files
; also get a list of defined output expressions
; step 1: get ADE session
test_name = car(test_names)
ade_symbol = axlGetToolSession(session_name test_name)
ade_session = asiGetSession(ade_symbol)
; step 2: save original ADE session
asiSaveState(ade_session ?name tmp_state_name ?option 'cellview ?lib targ_lib ?cell tb_cell)
; step 3: change test library
test = axlGetTest(sdb test_name)
tool_args = axlGetTestToolArgs(test)
set_assoc_list(tool_args "view" "config")
set_assoc_list(tool_args "lib" targ_lib)
set_assoc_list(tool_args "cell" tb_cell)
axlSetTestToolArgs(test tool_args)
; step 4: reopen ADE session, then load original ADE state
ade_symbol = axlGetToolSession(session_name test_name)
ade_session = asiGetSession(ade_symbol)
asiLoadState(ade_session ?name tmp_state_name ?option 'cellview)
asiSetEnvOptionVal(ade_session 'definitionFiles def_files)
output_list = setof(ele asiGetOutputList(ade_session) ele->name)
; step 5: delete temporary ADE session state
state_obj = ddGetObj(targ_lib tb_cell tmp_state_name)
ddDeleteObj(state_obj)
axlMainAppSaveSetup(session_name)
; load corner
unless(axlLoadCorners(sdb corner_file)
axlCommitSetupDB(sdb)
axlCloseSetupDB(sdb)
axlCloseSession(session)
; fix: this error message previously referenced the undefined variables
; lib_name/cell_name/view_name, which would itself fail; use the actual
; testbench location instead.
error("Error loading corner file %s to %s__%s (adexl)" corner_file targ_lib tb_cell)
)
; set default corner
corner_list = list(default_corner)
enable_adexl_corners(sdb corner_list nil)
; write testbench information to file
write_testbench_info_to_file(sdb result_file output_list corner_list)
; save and close
axlSaveSetupState(session "adexl_default" "All")
axlSaveSetupState(session "ocean_default" "All")
axlMainAppSaveSetup(session_name)
axlCommitSetupDB(sdb)
axlCloseSetupDB(sdb)
axlCloseSession(session)
)
)
; Returns parameter and corner information of a testbench.
; Opens the testbench's adexl setup database read-only, collects its output
; expressions and enabled corners, and writes them to result_file via
; write_testbench_info_to_file.
procedure( get_testbench_info(tb_lib tb_cell result_file "ttt")
let( (session_name session_sdb session sdb test_names test_name ade_symbol asi_sess
output_list corner_list en_list success)
session_name = "read_adexl"
session_sdb = open_adexl_session(tb_lib tb_cell "adexl" session_name "r")
session = car(session_sdb)
sdb = cadr(session_sdb)
; check that only one test is defined
test_names = cadr(axlGetTests(sdb))
when(length(test_names) != 1
axlCommitSetupDB(sdb)
axlCloseSetupDB(sdb)
axlCloseSession(session)
error("ADEXL testbench must have exactly 1 test defined.")
)
; get output list
test_name = car(test_names)
ade_symbol = axlGetToolSession(session_name test_name)
; NOTE(review): uses sevEnvironment here, while instantiate_testbench uses
; asiGetSession for the same purpose -- confirm both yield an ASI session.
asi_sess = sevEnvironment(ade_symbol)
output_list = setof(ele asiGetOutputList(asi_sess) ele->name)
; get enabled corners
corner_list = cadr(axlGetCorners(sdb))
en_list = setof(corner corner_list axlGetEnabled(axlGetCorner(sdb corner)))
; write testbench information to file
write_testbench_info_to_file(sdb result_file output_list en_list)
; close
axlCommitSetupDB(sdb)
axlCloseSetupDB(sdb)
axlCloseSession(session)
)
)
; Configure run options. Used to setup monte carlo parameters.
; run_params is an association list of run options and their values. The key "mode"
; corresponds to the run mode.
; Supported modes: "Single Run, Sweeps and Corners" (no extra options) and
; "Monte Carlo Sampling" (reads "mcnumpoints" and "mcmethod" from run_params).
; Any other mode closes the session and errors.
procedure( set_run_options(session sdb run_params "ggl")
; fix: opt_val is now declared in the let() variable list; previously it
; was assigned without declaration and silently became a global variable.
let( (run_mode opt_list opt_val run_opt)
when( run_mode = cadr(assoc("mode" run_params))
; no options for single run/sweep mode.
cond( (run_mode == "Single Run, Sweeps and Corners"
opt_list = nil)
(run_mode == "Monte Carlo Sampling"
opt_list = '("mcnumpoints" "mcmethod") )
('t
axlCloseSession(session)
error("Unsupported run mode: %s" run_mode) )
)
; copy each recognized option value from run_params into the setup db
foreach( opt_name opt_list
when( opt_val = cadr(assoc(opt_name run_params))
run_opt = axlPutRunOption(sdb run_mode opt_name)
axlSetRunOptionValue(run_opt opt_val)
)
)
axlSetCurrentRunMode(sdb run_mode)
)
)
)
; modify the given testbench.
; tb_lib and tb_cell describes the library and cell of the testbench to simulate.
; conf_file contains the config view settings.
; opt_file contains the association list of run mode options.
; corner_file contains a list of corners to simulate.
; param_file contains the association list of parameter values.
; env_params_file contains per-corner environment parameter overrides.
procedure( modify_testbench(tb_lib tb_cell conf_file opt_file corner_file param_file env_params_file "ttttttt")
let( (tmp_list session sdb conf_list run_params corner_list param_values env_param_values session_name)
; unique session name based on the current time
sprintf(session_name "bag_sim_adexl_%s" getCurrentTime())
; read inputs from file.
conf_list = parse_data_from_file(conf_file)
run_params = parse_data_from_file(opt_file)
corner_list = parse_data_from_file(corner_file)
param_values = parse_data_from_file(param_file)
env_param_values = parse_data_from_file(env_params_file)
; modify config view
when( conf_list
edit_config_view(tb_lib tb_cell "config" conf_list)
)
tmp_list = open_adexl_session(tb_lib tb_cell "adexl" session_name "a")
session = car(tmp_list)
sdb = cadr(tmp_list)
; change corners, parameters, and run options
enable_adexl_corners( sdb corner_list env_param_values)
set_adexl_parameters( sdb param_values )
set_run_options( session sdb run_params )
; save and close
axlSaveSetupState(session "adexl_default" "All")
axlSaveSetupState(session "ocean_default" "All")
axlMainAppSaveSetup(session_name)
axlCommitSetupDB(sdb)
axlCloseSetupDB(sdb)
axlCloseSession(session)
)
)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; BAG server related functions ;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; IPC stdout handler for the BAG server child process.
; Before the server announces itself, this just watches for the startup
; banner and sets the global bag_server_started flag. Afterwards, each stdout
; line is treated as a SKILL expression: it is evaluated with errsetstring,
; and the result (or the error message) is written back to the process,
; preceded by a line giving the reply's byte length.
procedure( stdoutHandler(ipcId data)
let( (result result_str)
if( bag_server_started > 0 then
printf("*INFO* Evaluate expression from BAG process: %s\n" data)
if( result = errsetstring(data 't) then
; evaluation succeeded; format the (car'd) result value
sprintf(result_str "%A\n" car(result))
else
; evaluation failed; extract the message from the errset error info
sprintf(result_str "%s\n" car(nthelem(5 errset.errset)))
)
printf("*INFO* Sending result to BAG process: %s" result_str)
; protocol: first send the reply length, then the reply itself
ipcWriteProcess(ipcId sprintf(nil "%d\n" strlen(result_str)))
ipcWriteProcess(ipcId result_str)
't
else
; server not up yet: wait for the exact startup banner
if( data == "BAG skill server has started. Yay!\n" then
bag_server_started = 1
printf("*INFO* BAG skill server started.\n")
else
printf("*INFO* Waiting for BAG skill server. Message: %s\n" data)
)
)
)
)
; IPC stderr handler for the BAG server child process.
; Any stderr output is treated as fatal: log it and kill the server process.
procedure( stderrHandler(ipcId data)
warn("BAG server process error: %s\n" data)
warn("Shutting down BAG server.")
ipcKillProcess(ipcId)
't
)
; IPC exit handler for the BAG server child process; just logs the status.
procedure( exitHandler(ipcId exitId)
printf("*INFO* BAG server process exited with status: %d\n" exitId)
't
)
; Launch the BAG server child process via the virt_server.sh wrapper script,
; wiring its stdout/stderr/exit events to the handlers above. Resets the
; global bag_server_started flag; returns the ipc process handle.
procedure( start_bag_server()
bag_server_started = 0
printf("*INFO* Starting BAG server process.\n")
ipcBeginProcess("bash virt_server.sh" "" 'stdoutHandler 'stderrHandler 'exitHandler "")
)
; module-level initialization: globals used by the handlers above, then
; launch the BAG server child process and keep its handle.
bag_server_started = 0
bag_modify_adexl_counter = 0
bag_proc = start_bag_server()
================================================
FILE: run_scripts/virt_server.sh
================================================
#!/usr/bin/env bash
# Launch the BAG skill server: runs `$BAG_PYTHON -m bag.virtuoso
# run_skill_server` with the port range, port file, and log file below,
# with the BAG framework on PYTHONPATH.
export PYTHONPATH="${BAG_FRAMEWORK}"
# module invocation fragment; expanded into the full command line below
export cmd="-m bag.virtuoso run_skill_server"
export min_port=5000
export max_port=9999
export port_file="BAG_server_port.txt"
export log="skill_server.log"
export cmd="${BAG_PYTHON} ${cmd} ${min_port} ${max_port} ${port_file} ${log}"
# $cmd is intentionally unquoted so the shell word-splits it into the
# program and its arguments; exec replaces this shell with the server.
exec $cmd
================================================
FILE: setup.py
================================================
# -*- coding: utf-8 -*-
# Packaging metadata for the Berkeley Analog Generator (BAG) framework.
# Template files under bag/interface and bag/verification are shipped as
# package data; the openmdao integration is an optional extra.
from setuptools import setup, find_packages
setup(
name='bag',
version='2.0',
description='Berkeley Analog Generator',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
author='Eric Chang',
author_email='pkerichang@berkeley.edu',
packages=find_packages(),
python_requires='>=3.5',
install_requires=[
'setuptools>=18.5',
'PyYAML>=3.11',
'Jinja2>=2.9',
'numpy>=1.10',
'networkx>=1.11',
'pexpect>=4.0',
'pyzmq>=15.2.0',
'scipy>=0.17',
'matplotlib>=1.5',
'rtree',
'h5py',
'Shapely',
],
extras_require={
'mdao': ['openmdao']
},
tests_require=[
'openmdao',
'pytest',
],
package_data={
'bag.interface': ['templates/*'],
'bag.verification': ['templates/*'],
},
)
================================================
FILE: tests/LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2018, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: tests/__init__.py
================================================
================================================
FILE: tests/layout/LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2018, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: tests/layout/__init__.py
================================================
================================================
FILE: tests/layout/routing/LICENSE
================================================
BSD 3-Clause License
Copyright (c) 2018, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: tests/layout/routing/__init__.py
================================================
================================================
FILE: tests/layout/routing/test_fill.py
================================================
from itertools import product
import pytest
from bag.layout.routing.fill import fill_symmetric_helper
def check_disjoint_union(outer_list, inner_list, start, stop):
    """Assert outer/inner intervals interleave, abut, and exactly tile [start, stop]."""
    # outer list must contain exactly one more interval than the inner list
    assert len(outer_list) == len(inner_list) + 1
    first, last = outer_list[0], outer_list[-1]
    if inner_list:
        # outer intervals must extend at least as far as the inner ones on both sides
        assert first[0] <= inner_list[0][0]
        assert last[1] >= inner_list[-1][1]
    # combined span must touch both boundaries exactly
    assert first[0] == start
    assert last[1] == stop
    # walk the interleaved sequence: every interval is non-negative, and
    # outer[idx] -> inner[idx] -> outer[idx + 1] must chain with no gaps
    for idx, outer in enumerate(outer_list):
        assert outer[0] <= outer[1]
        if idx < len(inner_list):
            inner = inner_list[idx]
            assert inner[0] <= inner[1]
            assert outer[1] == inner[0]
            assert inner[1] == outer_list[idx + 1][0]
def check_symmetric(intv_list, start, stop):
    """Assert that intv_list is mirror-symmetric about the midpoint of [start, stop]."""
    total = start + stop
    # reflecting (a, b) about the center yields (total - b, total - a); reversing
    # the reflected list must reproduce the original list exactly
    mirrored = [(total - hi, total - lo) for lo, hi in reversed(intv_list)]
    assert mirrored == list(intv_list)
def check_props(fill_list, space_list, num_diff_sp1, num_diff_sp2, n, tot_intv, inc_sp, sp,
                eq_sp_parity, num_diff_sp_max, num_fill, fill_first, start, stop, n_flen_max,
                sp_edge_tweak=False):
    """Assert all structural invariants of a symmetric fill/space solution.

    Checks the odd-space counts agree, the fill count is correct, fill and
    space intervals interleave and tile [start, stop], both lists are
    symmetric about the center of tot_intv, and fill/space block lengths stay
    within the allowed spread around sp.
    """
    # the two helper invocations (invert=False/True) must agree on the count
    assert num_diff_sp1 == num_diff_sp2
    if n % 2 == eq_sp_parity and not sp_edge_tweak:
        # equal-space parity case: every space block must have the same length
        assert num_diff_sp1 == 0
    else:
        # otherwise at most num_diff_sp_max space blocks may differ
        assert num_diff_sp1 <= num_diff_sp_max
    # correct number of fill blocks
    assert len(fill_list) == num_fill
    # fill and space are disjoint, abutting, and tile [start, stop] exactly
    outer, inner = (fill_list, space_list) if fill_first else (space_list, fill_list)
    check_disjoint_union(outer, inner, start, stop)
    # both lists are symmetric about the center of the total interval
    check_symmetric(fill_list, tot_intv[0], tot_intv[1])
    check_symmetric(space_list, tot_intv[0], tot_intv[1])
    # fill blocks take at most n_flen_max distinct lengths, spread <= n_flen_max - 1
    fill_lens = sorted({b - a for a, b in fill_list})
    assert len(fill_lens) <= n_flen_max
    assert fill_lens[-1] - fill_lens[0] <= n_flen_max - 1
    if space_list:
        # space blocks take at most two distinct lengths, differing by at most 1
        sp_lens = sorted({b - a for a, b in space_list})
        assert len(sp_lens) <= (2 if num_diff_sp1 > 0 else 1)
        assert sp_lens[-1] - sp_lens[0] <= 1
        if len(sp_lens) == 1:
            # a single space length: it equals sp unless every space was adjusted
            expected = (sp + 1 if inc_sp else sp - 1) if num_diff_sp1 > 0 else sp
            assert sp_lens[0] == expected
        else:
            # two lengths present: the unadjusted one must equal sp
            if inc_sp:
                assert sp_lens[0] == sp
            else:
                assert sp_lens[-1] == sp
def test_fill_symmetric_non_cyclic():
    """Exhaustively exercise fill_symmetric_helper in the non-cyclic configuration."""
    space_vals = [3, 4, 5]
    inc_vals = [True, False]
    off_vals = [0, 4, 7]
    edge_vals = [True, False]
    max_area = 50
    for sp, inc_sp, offset, fill_edge in product(space_vals, inc_vals, off_vals, edge_vals):
        for area in range(sp + 1, max_area + 1):
            tot_intv = (offset, offset + area)
            for nfill in range(1, area - sp + 1):
                # number of space blocks depends on whether fill sits on the edges
                nsp = nfill - 1 if fill_edge else nfill + 1
                # minimum achievable footprint; an even fill count with a
                # shrinkable middle space saves one extra unit
                footprint = nfill + nsp * sp
                if nfill % 2 == 0 and not inc_sp:
                    footprint -= 1
                kwargs = dict(offset=offset, inc_sp=inc_sp, fill_on_edge=fill_edge, cyclic=False)
                if footprint > area:
                    # infeasible configuration: both invert settings must raise
                    with pytest.raises(ValueError):
                        fill_symmetric_helper(area, nfill, sp, invert=False, **kwargs)
                    with pytest.raises(ValueError):
                        fill_symmetric_helper(area, nfill, sp, invert=True, **kwargs)
                else:
                    # obtain fill intervals and their space complement
                    fill_list, nd1 = fill_symmetric_helper(area, nfill, sp, invert=False, **kwargs)
                    space_list, nd2 = fill_symmetric_helper(area, nfill, sp, invert=True, **kwargs)
                    check_props(fill_list, space_list, nd1, nd2, nfill, tot_intv, inc_sp, sp,
                                1, 1, nfill, fill_edge, tot_intv[0], tot_intv[1], 2)
def test_fill_symmetric_cyclic_edge_fill():
    """Exhaustively exercise fill_symmetric_helper for cyclic fill with fill on the edge."""
    space_vals = [3, 4, 5]
    inc_vals = [True, False]
    off_vals = [0, 4, 7]
    max_area = 50
    for sp, inc_sp, offset in product(space_vals, inc_vals, off_vals):
        for area in range(sp + 1, max_area + 1):
            tot_intv = (offset, offset + area)
            for nfill in range(1, area - sp + 1):
                nsp = nfill
                # the edge fill block must be even, costing one extra unit; an odd
                # fill count with a shrinkable middle space saves that unit back
                footprint = nfill + 1 + nsp * sp
                if nfill % 2 == 1 and not inc_sp:
                    footprint -= 1
                kwargs = dict(offset=offset, inc_sp=inc_sp, fill_on_edge=True, cyclic=True)
                if footprint > area:
                    # infeasible configuration: both invert settings must raise
                    with pytest.raises(ValueError):
                        fill_symmetric_helper(area, nfill, sp, invert=False, **kwargs)
                    with pytest.raises(ValueError):
                        fill_symmetric_helper(area, nfill, sp, invert=True, **kwargs)
                else:
                    # obtain fill intervals and their space complement
                    fill_list, nd1 = fill_symmetric_helper(area, nfill, sp, invert=False, **kwargs)
                    space_list, nd2 = fill_symmetric_helper(area, nfill, sp, invert=True, **kwargs)
                    # boundary fill blocks must be centered on the interval edges
                    first, last = fill_list[0], fill_list[-1]
                    assert (first[0] + first[1]) % 2 == 0
                    assert (last[0] + last[1]) % 2 == 0
                    assert (first[0] + first[1]) // 2 == tot_intv[0]
                    assert (last[0] + last[1]) // 2 == tot_intv[1]
                    # check the remaining solution properties
                    check_props(fill_list, space_list, nd1, nd2, nfill, tot_intv, inc_sp, sp,
                                0, 1, nfill + 1, True, first[0], last[1], 3)
def test_fill_symmetric_cyclic_edge_space():
    """Exhaustively exercise fill_symmetric_helper for cyclic fill with space on the edge.

    For every combination of space, inc_sp, offset, area, and fill count,
    either verify that infeasible configurations raise ValueError, or check
    all symmetry/tiling properties of the returned fill and space intervals.
    """
    # Fix: removed a leftover debug print() between the two pytest.raises blocks.
    sp_list = [3, 4, 5]
    inc_sp_list = [True, False]
    offset_list = [0, 4, 7]
    area_max = 50
    for sp, inc_sp, offset in product(sp_list, inc_sp_list, offset_list):
        for area in range(sp + 1, area_max + 1):
            tot_intv = offset, offset + area
            for nfill in range(1, area - sp + 1):
                nsp = nfill
                adj_sp = 1 if inc_sp else -1
                # an odd edge space must be adjusted to even so it can center on the boundary
                sp_edge_tweak = sp % 2 == 1
                if sp_edge_tweak:
                    # minimum possible footprint; edge space block must be even (hence + adj_sp)
                    min_footprint = nfill * 1 + nsp * sp + adj_sp
                else:
                    min_footprint = nfill * 1 + nsp * sp
                if nfill % 2 == 0 and not inc_sp:
                    # a middle space block exists, so we can subtract one more from it
                    min_footprint -= 1
                if min_footprint > area:
                    # no solution exists when the minimum footprint exceeds the area:
                    # both invert settings must raise
                    with pytest.raises(ValueError):
                        fill_symmetric_helper(area, nfill, sp, offset=offset, inc_sp=inc_sp,
                                              invert=False, fill_on_edge=False, cyclic=True)
                    with pytest.raises(ValueError):
                        fill_symmetric_helper(area, nfill, sp, offset=offset, inc_sp=inc_sp,
                                              invert=True, fill_on_edge=False, cyclic=True)
                else:
                    # get fill and space interval lists
                    fill_list, num_diff_sp1 = fill_symmetric_helper(
                        area, nfill, sp, offset=offset, inc_sp=inc_sp,
                        invert=False, fill_on_edge=False, cyclic=True)
                    space_list, num_diff_sp2 = fill_symmetric_helper(
                        area, nfill, sp, offset=offset, inc_sp=inc_sp,
                        invert=True, fill_on_edge=False, cyclic=True)
                    # boundary space blocks must be centered on the interval edges
                    sintv, eintv = space_list[0], space_list[-1]
                    assert (sintv[1] + sintv[0]) % 2 == 0 and (eintv[1] + eintv[0]) % 2 == 0
                    assert (sintv[1] + sintv[0]) // 2 == tot_intv[0]
                    assert (eintv[1] + eintv[0]) // 2 == tot_intv[1]
                    # check the remaining solution properties
                    check_props(fill_list, space_list, num_diff_sp1, num_diff_sp2, nfill,
                                tot_intv, inc_sp, sp, 1, 2, nfill, False,
                                sintv[0], eintv[1], 2, sp_edge_tweak)