code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
# Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""description of revision
Revision ID: 38462bfd9dc
Revises: 544673ac99ab
Create Date: 2015-11-06 14:26:02.305436
"""
# revision identifiers, used by Alembic.
revision = '38462bfd9dc'          # this migration's ID
down_revision = '544673ac99ab'    # parent revision in the migration chain
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def downgrade():
    """Drop every table created by this migration's upgrade()."""
    # Drop order is the exact reverse of creation order, so the foreign key
    # from fortinet_vlink_ip_allocations to fortinet_vlink_vlan_allocations
    # is removed before its referenced table.
    for table_name in (
        'fortinet_firewall_vips',
        'fortinet_firewall_policies',
        'fortinet_ml2_subnets',
        'fortinet_firewall_ippools',
        'fortinet_static_routers',
        'fortinet_interface_subips',
        'fortinet_floatingip_allocations',
        'fortinet_vlink_ip_allocations',
        'fortinet_vlink_vlan_allocations',
        'fortinet_vdom_vlinks',
        'fortinet_ml2_namespaces',
        'fortinet_firewall_addresses',
        'fortinet_ml2_reservedips',
        'fortinet_interfaces',
    ):
        op.drop_table(table_name)
def upgrade():
    """Create all tables used by the Fortinet plugin.

    Auto-generated by Alembic against the MySQL dialect. Note that
    fortinet_vlink_vlan_allocations is created before
    fortinet_vlink_ip_allocations, which holds a foreign key to it.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('fortinet_interfaces',
        sa.Column('name', mysql.VARCHAR(length=36), server_default=sa.text(u"''"), nullable=False),
        sa.Column('vdom', mysql.VARCHAR(length=11), nullable=True),
        sa.Column('vlanid', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
        sa.Column('interface', mysql.VARCHAR(length=11), nullable=True),
        sa.Column('type', mysql.VARCHAR(length=32), server_default=sa.text(u"'vlan'"), nullable=True),
        sa.Column('ip', mysql.VARCHAR(length=32), nullable=True),
        sa.Column('secondary_ip', mysql.VARCHAR(length=11), server_default=sa.text(u"'enable'"), nullable=True),
        sa.Column('alias', mysql.VARCHAR(length=32), nullable=True),
        sa.PrimaryKeyConstraint('name'),
        mysql_default_charset=u'utf8',
        mysql_engine=u'InnoDB'
    )
    op.create_table('fortinet_ml2_reservedips',
        sa.Column('port_id', mysql.VARCHAR(length=36), nullable=False),
        sa.Column('subnet_id', mysql.VARCHAR(length=36), nullable=False),
        sa.Column('mac', mysql.VARCHAR(length=32), nullable=False),
        sa.Column('ip', mysql.VARCHAR(length=32), nullable=False),
        sa.Column('vdom', mysql.VARCHAR(length=11), nullable=True),
        sa.Column('edit_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
        sa.PrimaryKeyConstraint('port_id'),
        mysql_default_charset=u'utf8',
        mysql_engine=u'InnoDB'
    )
    op.create_table('fortinet_firewall_addresses',
        sa.Column('name', mysql.VARCHAR(length=36), server_default=sa.text(u"''"), nullable=False),
        sa.Column('vdom', mysql.VARCHAR(length=11), nullable=True),
        sa.Column('subnet', mysql.VARCHAR(length=32), nullable=True),
        sa.Column('associated_interface', mysql.VARCHAR(length=11), nullable=True),
        sa.Column('group', mysql.VARCHAR(length=32), nullable=True),
        sa.PrimaryKeyConstraint('name', 'vdom'),
        mysql_default_charset=u'utf8',
        mysql_engine=u'InnoDB'
    )
    op.create_table('fortinet_ml2_namespaces',
        sa.Column('id', mysql.INTEGER(display_width=11), nullable=False),
        sa.Column('tenant_id', mysql.VARCHAR(length=36), nullable=False),
        sa.Column('vdom', mysql.VARCHAR(length=11), nullable=True),
        sa.PrimaryKeyConstraint('id', 'tenant_id'),
        mysql_default_charset=u'utf8',
        mysql_engine=u'InnoDB'
    )
    op.create_table('fortinet_vdom_vlinks',
        sa.Column('name', mysql.VARCHAR(length=11)),
        sa.Column('vdom', mysql.VARCHAR(length=11)),
        sa.PrimaryKeyConstraint('name', 'vdom'),
        mysql_default_charset=u'utf8',
        mysql_engine=u'InnoDB'
    )
    # Must exist before fortinet_vlink_ip_allocations (FK target below).
    op.create_table('fortinet_vlink_vlan_allocations',
        sa.Column('id', mysql.VARCHAR(length=36), server_default=sa.text(u"''"), nullable=False),
        sa.Column('vdom', mysql.VARCHAR(length=11), nullable=True),
        sa.Column('inf_name_int_vdom', mysql.VARCHAR(length=11), nullable=True),
        sa.Column('inf_name_ext_vdom', mysql.VARCHAR(length=11), nullable=True),
        sa.Column('vlanid', mysql.INTEGER(display_width=11)),
        sa.Column('allocated', mysql.TINYINT(display_width=1), autoincrement=False, nullable=True),
        sa.PrimaryKeyConstraint('id'),
        mysql_default_charset=u'utf8',
        mysql_engine=u'InnoDB'
    )
    op.create_table('fortinet_vlink_ip_allocations',
        sa.Column('vlink_ip_subnet', mysql.VARCHAR(length=32), nullable=False),
        sa.Column('vdom', mysql.VARCHAR(length=11), nullable=True),
        sa.Column('vlink_id', mysql.VARCHAR(length=36), nullable=True),
        sa.Column('allocated', mysql.TINYINT(display_width=1), autoincrement=False, nullable=True),
        sa.ForeignKeyConstraint(['vlink_id'], ['fortinet_vlink_vlan_allocations.id'],
            ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('vlink_ip_subnet'),
        mysql_default_charset=u'utf8',
        mysql_engine=u'InnoDB'
    )
    op.create_table('fortinet_floatingip_allocations',
        sa.Column('ip_subnet', mysql.VARCHAR(length=32), server_default=sa.text(u"''"), nullable=False),
        sa.Column('vdom', mysql.VARCHAR(length=11), nullable=True),
        sa.Column('floating_ip_address', mysql.VARCHAR(length=36), nullable=True),
        sa.Column('vip_name', mysql.VARCHAR(length=50), nullable=True),
        sa.Column('allocated', mysql.TINYINT(display_width=1), autoincrement=False, nullable=True),
        sa.PrimaryKeyConstraint('ip_subnet'),
        mysql_default_charset=u'utf8',
        mysql_engine=u'InnoDB'
    )
    op.create_table('fortinet_interface_subips',
        sa.Column('ip', mysql.VARCHAR(length=32), server_default=sa.text(u"''"), nullable=False),
        sa.Column('name', mysql.VARCHAR(length=11), nullable=True),
        sa.Column('vdom', mysql.VARCHAR(length=11), nullable=True),
        sa.PrimaryKeyConstraint('ip'),
        mysql_default_charset=u'utf8',
        mysql_engine=u'InnoDB'
    )
    op.create_table('fortinet_static_routers',
        sa.Column('id', mysql.VARCHAR(length=36), server_default=sa.text(u"''"), nullable=False),
        sa.Column('subnet_id', mysql.VARCHAR(length=36), nullable=True),
        sa.Column('vdom', mysql.VARCHAR(length=11), nullable=True),
        sa.Column('dst', mysql.VARCHAR(length=32), nullable=True),
        sa.Column('device', mysql.VARCHAR(length=32), nullable=True),
        sa.Column('gateway', mysql.VARCHAR(length=32), nullable=True),
        sa.Column('edit_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
        sa.PrimaryKeyConstraint('id'),
        mysql_default_charset=u'utf8',
        mysql_engine=u'InnoDB'
    )
    op.create_table('fortinet_firewall_ippools',
        sa.Column('name', mysql.VARCHAR(length=36), server_default=sa.text(u"''"), nullable=False),
        sa.Column('vdom', mysql.VARCHAR(length=11), nullable=True),
        sa.Column('startip', mysql.VARCHAR(length=32), nullable=True),
        sa.Column('endip', mysql.VARCHAR(length=32), nullable=True),
        sa.Column('type', mysql.VARCHAR(length=32), server_default=sa.text(u"'one-to-one'"), nullable=True),
        sa.Column('comments', mysql.VARCHAR(length=32), nullable=True),
        sa.PrimaryKeyConstraint('name'),
        mysql_default_charset=u'utf8',
        mysql_engine=u'InnoDB'
    )
    op.create_table('fortinet_ml2_subnets',
        sa.Column('subnet_id', mysql.VARCHAR(length=36), nullable=False),
        sa.Column('vdom', mysql.VARCHAR(length=11), nullable=True),
        sa.Column('interface', mysql.VARCHAR(length=32), nullable=True),
        sa.Column('gateway', mysql.VARCHAR(length=32), nullable=True),
        sa.Column('netmask', mysql.VARCHAR(length=32), nullable=True),
        sa.Column('start_ip', mysql.VARCHAR(length=32), nullable=True),
        sa.Column('end_ip', mysql.VARCHAR(length=32), nullable=True),
        sa.Column('edit_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
        sa.PrimaryKeyConstraint('subnet_id'),
        mysql_default_charset=u'utf8',
        mysql_engine=u'InnoDB'
    )
    op.create_table('fortinet_firewall_policies',
        sa.Column('id', mysql.INTEGER(display_width=11), nullable=False),
        sa.Column('vdom', mysql.VARCHAR(length=11), nullable=True),
        sa.Column('srcintf', mysql.VARCHAR(length=11), nullable=True),
        sa.Column('dstintf', mysql.VARCHAR(length=11), nullable=True),
        sa.Column('srcaddr', mysql.VARCHAR(length=32), server_default=sa.text(u"'all'"), nullable=True),
        sa.Column('dstaddr', mysql.VARCHAR(length=32), server_default=sa.text(u"'all'"), nullable=True),
        sa.Column('poolname', mysql.VARCHAR(length=32), nullable=True),
        sa.Column('nat', mysql.VARCHAR(length=7), server_default=sa.text(u"'disable'"), nullable=True),
        sa.Column('edit_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
        sa.PrimaryKeyConstraint('id'),
        mysql_default_charset=u'utf8',
        mysql_engine=u'InnoDB'
    )
    op.create_table('fortinet_firewall_vips',
        sa.Column('name', mysql.VARCHAR(length=36), nullable=False),
        sa.Column('vdom', mysql.VARCHAR(length=32), nullable=False),
        sa.Column('extip', mysql.VARCHAR(length=32), nullable=False),
        sa.Column('extintf', mysql.VARCHAR(length=32), nullable=False),
        sa.Column('mappedip', mysql.VARCHAR(length=32), nullable=True),
        sa.PrimaryKeyConstraint('name', 'vdom'),
        mysql_default_charset=u'utf8',
        mysql_engine=u'InnoDB'
    )
    ### end Alembic commands ###
| samsu/neutron | db/migration/alembic_migrations/versions/38462bfd9dc_fortinet_plugin_database.py | Python | apache-2.0 | 9,904 |
#!/usr/bin/env python3
#
# Copyright 2014 Ryan Peck
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast functions for gathering info on a group of IPv4 or IPv6 Networks.
Library contains functions used to learn details and generalize about a list of
IPv4 and IPv6 addresses and networks.
Based almost exclusively on the capbilities of the ipaddress module.
"""
import ipaddress
from collections import defaultdict
from itertools import combinations
__version__ = '0.0.5'
class _BaseGroup:
"""A generic group of IP Addresses/Networks
This class will containt version indepenent methods for grouping.
"""
def __init__(self, ip_objs, net_bits=None, t=None, cache=True):
self.IPVersion = t
self.addrs = self._listify_params(ip_objs)
if cache:
self.addrs_cache = self.addrs.copy()
self.net_bits = net_bits
if net_bits:
self.group = self._group_IPs(self.net_bits)
def reGroup(self, bits):
"""Regroup the IP addresses according to a new CIDR Prefix"""
self.old_group = self.group
self.addrs = self.addrs_cache
new_group = self._group_IPs(bits)
self.group = dict(new_group)
def _group_IPs(self, bits):
""" Group IPs by the bits that match """
self.super_nets = set([i.supernet(new_prefix=bits)
for i in self.addrs])
ip_objs = self.addrs
group = defaultdict(int)
while ip_objs != []:
n = ip_objs.pop()
for x in self.super_nets:
if x.overlaps(n):
group[str(x)] += 1
break
# Return it to a normal dictionary
return dict(group)
def _listify_params(self, args):
"""
Create a list of IP Network Objects from parameters, must be either
IPv4 or IPv6...
"""
assert(self._validate_ips_param(args))
if isinstance(args, str):
args = [ipaddress.ip_network(args, strict=False)]
new_args = []
for i in args:
n = ipaddress.ip_network(i, strict=False)
# If the IP Type is unset, use whatever comes along first
if self.IPVersion is not None:
assert(isinstance(n, self.IPVersion))
else:
self.IPVersion == type(n)
new_args.append(n)
return new_args
# TODO Write tests for this
def _validate_ips_param(self, ips):
"""
Validate that the parameters passed are types we accept.
"""
# Acceptable inputs
assert(isinstance(ips, (str, list, self.IPVersion)))
# Unpack a list
if isinstance(ips, list):
for i in ips:
assert(isinstance(i, (str, ipaddress._IPAddressBase)))
if isinstance(i, str):
assert(self._validate_IPNetwork_str(i))
return True
# TODO Write tests for this
# Should use ipaddress.ip_network here
def _validate_IPNetwork_str(self, string):
""" Validate that a valid IP Network string was passed """
if isinstance(string, str):
temp = ipaddress.ip_network(string, strict=False)
del temp
return True
def _overlapping_bits(self, ips):
overlapping_bit = False
# Networks that contain others.
master_networks = set()
two_pair_combinations = combinations(ips, 2)
for a, b in two_pair_combinations:
if a.prefixlen == b.prefixlen:
if a == b:
master_networks.add(a)
elif a.prefixlen < b.prefixlen:
if a.overlaps(b):
master_networks.add(a)
else:
if b.overlaps(a):
master_networks.add(b)
# Check if there is any overlap in master_networks
for a, b in combinations(master_networks, 2):
if a.overlaps(b):
overlapping_bit = True
break
if overlapping_bit:
return self._overlapping_bits(master_networks)
else:
return master_networks
def totalAddresses(self, ip_objs):
""" Returns the number of total unique addresses in a list of
networks """
ips = self._listify_params(ip_objs)
total = 0
overlapping_bit = False
# If networks overlap - handle differently
for a, b in combinations(ips, 2):
if a.overlaps(b):
overlapping_bit = True
break
if overlapping_bit:
ips = self._overlapping_bits(ips)
for i in ips:
total += i.num_addresses
return total
class IPv4Group(_BaseGroup):
    """Group of IPv4 addresses, bucketed by /24 unless told otherwise."""

    def __init__(self, ip_objs, net_bits=24):
        super().__init__(ip_objs, net_bits, ipaddress._BaseV4)
class IPv6Group(_BaseGroup):
    """Group of IPv6 addresses, bucketed by /48 unless told otherwise."""

    def __init__(self, ip_objs, net_bits=48):
        super().__init__(ip_objs, net_bits, ipaddress._BaseV6)
def totalAddresses(ips):
    """Module-level convenience wrapper: total unique addresses in *ips*."""
    return _BaseGroup(ips).totalAddresses(ips)
| RyPeck/python-ipgroup | ipgroup.py | Python | apache-2.0 | 5,777 |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
# Alias Python 2 names on Python 3 (and vice versa) so the generated code
# below can use __builtin__ and long unconditionally.
if six.PY3:
    import builtins as __builtin__

    long = int
elif six.PY2:
    import __builtin__
from . import config
from . import state
class timers(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/timers. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Timers relating to OSPFv2 on the interface
    """

    # Fixed attribute set: pyangbind containers carry no per-instance __dict__.
    __slots__ = ("_path_helper", "_extmethods", "__config", "__state")

    _yang_name = "timers"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        """Instantiate the container, eagerly creating the 'config' and
        'state' children; optionally copy element values from a single
        compatible object passed positionally."""
        self._path_helper = False
        self._extmethods = False
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            # Copy-constructor path: the argument must expose every element.
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    # Skip elements still at their default value.
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        """Return this container's YANG path as a list of node names."""
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            # No parent registered: fall back to the absolute schema path.
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "ospfv2",
                "areas",
                "area",
                "interfaces",
                "interface",
                "timers",
            ]

    def _get_config(self):
        """
        Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/timers/config (container)

        YANG Description: Configuration parameters for OSPFv2 timers on the
        interface
        """
        return self.__config

    def _set_config(self, v, load=False):
        """
        Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/timers/config (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_config is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_config() directly.

        YANG Description: Configuration parameters for OSPFv2 timers on the
        interface
        """
        if hasattr(v, "_utype"):
            # Unwrap an already-typed value back to its underlying type
            # before re-wrapping it below.
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=config.config,
                is_container="container",
                yang_name="config",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """config must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )

        self.__config = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_config(self):
        """Reset 'config' to a fresh default container."""
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/timers/state (container)

        YANG Description: Operational state parameters for OSPFv2 timers on
        the interface
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/timers/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.

        YANG Description: Operational state parameters for OSPFv2 timers on
        the interface
        """
        if hasattr(v, "_utype"):
            # Unwrap an already-typed value back to its underlying type
            # before re-wrapping it below.
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )

        self.__state = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_state(self):
        """Reset 'state' to a fresh default container."""
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    # Expose the private getter/setter pairs as ordinary properties.
    config = __builtin__.property(_get_config, _set_config)
    state = __builtin__.property(_get_state, _set_state)

    _pyangbind_elements = OrderedDict([("config", config), ("state", state)])
from . import config
from . import state
# NOTE: this module defines 'timers' twice (generated once per YANG module
# variant); this later definition shadows the earlier one on import.
class timers(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/timers. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Timers relating to OSPFv2 on the interface
    """

    # Fixed attribute set: pyangbind containers carry no per-instance __dict__.
    __slots__ = ("_path_helper", "_extmethods", "__config", "__state")

    _yang_name = "timers"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        """Instantiate the container, eagerly creating the 'config' and
        'state' children; optionally copy element values from a single
        compatible object passed positionally."""
        self._path_helper = False
        self._extmethods = False
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            # Copy-constructor path: the argument must expose every element.
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    # Skip elements still at their default value.
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        """Return this container's YANG path as a list of node names."""
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            # No parent registered: fall back to the absolute schema path.
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "ospfv2",
                "areas",
                "area",
                "interfaces",
                "interface",
                "timers",
            ]

    def _get_config(self):
        """
        Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/timers/config (container)

        YANG Description: Configuration parameters for OSPFv2 timers on the
        interface
        """
        return self.__config

    def _set_config(self, v, load=False):
        """
        Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/timers/config (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_config is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_config() directly.

        YANG Description: Configuration parameters for OSPFv2 timers on the
        interface
        """
        if hasattr(v, "_utype"):
            # Unwrap an already-typed value back to its underlying type
            # before re-wrapping it below.
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=config.config,
                is_container="container",
                yang_name="config",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """config must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )

        self.__config = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_config(self):
        """Reset 'config' to a fresh default container."""
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/timers/state (container)

        YANG Description: Operational state parameters for OSPFv2 timers on
        the interface
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/timers/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.

        YANG Description: Operational state parameters for OSPFv2 timers on
        the interface
        """
        if hasattr(v, "_utype"):
            # Unwrap an already-typed value back to its underlying type
            # before re-wrapping it below.
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )

        self.__state = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_state(self):
        """Reset 'state' to a fresh default container."""
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    # Expose the private getter/setter pairs as ordinary properties.
    config = __builtin__.property(_get_config, _set_config)
    state = __builtin__.property(_get_state, _set_state)

    _pyangbind_elements = OrderedDict([("config", config), ("state", state)])
| napalm-automation/napalm-yang | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/timers/__init__.py | Python | apache-2.0 | 18,500 |
#/***********************************************************************
# * Licensed Materials - Property of IBM
# *
# * IBM SPSS Products: Statistics Common
# *
# * (C) Copyright IBM Corp. 1989, 2020
# *
# * US Government Users Restricted Rights - Use, duplication or disclosure
# * restricted by GSA ADP Schedule Contract with IBM Corp.
# ************************************************************************/
# post parameters for script
"""This module provides a parameter passing mechanism for Python scripts run with the SCRIPT command via a PROGRAM
and a mechanism for the script to return a value.
The PROGRAM calls the runscript function with a parameter dictionary and script name.
The script call the getscriptparams function to retrieve a parameter dictionary"""
__version__ = '1.0.2'
__author__ = 'SPSS, JKP'
# history
#22-Oct-2008 Change mmap setup to support *nix os's.
import mmap, pickle, os, tempfile
import spss, spssaux
if os.sys.platform == "darwin":
    # Pin the temp dir on macOS — presumably so both sides of the SCRIPT
    # handoff resolve the same parameter-file path; TODO confirm.
    tempfile.tempdir="/tmp"
def runscript(scriptname, params=None):
    """Construct a parameter dictionary and run a Python script.

    scriptname is the path to run.
    params is a Python dictionary of parameter names and values.
    The total size of the parameter dictionary is limited to 4K (after pickling).
    This function returns a dictionary of values set by the script via setreturnvalue.
    If the script sets no return value, the result is an empty dictionary."""
    # Bug fix: the original used a mutable default argument (params={}).
    if params is None:
        params = {}
    fnparams = tempfile.gettempdir() + os.sep + "__SCRIPT__"
    fnreturn = tempfile.gettempdir() + os.sep + "__SCRIPTRETURN__"
    f = open(fnparams, "w+")
    # ensure file size is 4096 for *nix os's.
    f.write(1024*"0000")
    f.flush()
    shmem = mmap.mmap(f.fileno(), 4096, access=mmap.ACCESS_WRITE)
    shmem.write(pickle.dumps(params))
    f.close()
    try:
        os.remove(fnreturn)  # ensure that no stale returns file exists
    except OSError:
        pass
    spss.Submit("SCRIPT " + spssaux._smartquote(scriptname))
    shmem.close()
    # The _SYNC command is required in order to ensure that the script has completed
    spss.Submit("_SYNC")
    # The parameter file will be removed by the script if it calls getscriptparam, but
    # the following code will clean up in case the script doesn't make that call.
    try:
        os.remove(fnparams)
    except OSError:
        pass
    # get the return value, if any; best-effort — absence of the return
    # file (or any read failure) simply means the script set no value.
    try:
        f = open(fnreturn, "r")
        shmem = mmap.mmap(f.fileno(), 4096, access=mmap.ACCESS_READ)
        ret = pickle.loads(shmem.read(4096))
        shmem.close()
        f.close()
        os.remove(fnreturn)
    except Exception:
        ret = {}
    return ret
def getscriptparams():
    """Return the script parameters, if any.

    The parameters are assumed to be set by the runscript function.
    The return value is a dictionary of parameter names and values.
    The parameter set is read-once. Calling this function again will return
    an empty dictionary.
    If no parameters were set, the return value is an empty dictionary.
    """
    fnparams = tempfile.gettempdir() + os.sep + "__SCRIPT__"
    fnreturn = tempfile.gettempdir() + os.sep + "__SCRIPTRETURN__"
    try:
        f = open(fnparams, "r")
        shmem = mmap.mmap(f.fileno(), 4096, access=mmap.ACCESS_READ)
        ps = shmem.read(4096)
        # Best-effort cleanup: consume (delete) both handoff files.
        try:
            f.close()
            shmem.close()
            os.remove(fnparams)
        except OSError:
            pass
        try:
            os.remove(fnreturn)
        except OSError:
            pass
    except (OSError, ValueError):
        # No parameter file (or it could not be mapped): no parameters.
        return {}
    # An all-zero buffer means runscript never wrote parameters.
    # Bug fix: compare the first byte directly instead of ord(ps[0]) —
    # on Python 3 mmap.read returns bytes, so ps[0] is already an int and
    # ord() raised TypeError.
    if ps[:1] in (b"\x00", "\x00"):
        return {}
    return pickle.loads(ps)
def setreturnvalue(returns):
    """Create a dictionary of return values for use by the program that invoked the script.
    returns is a dictionary to be made available to the calling program.
    The pickled dictionary must fit in 4096 bytes; a larger one raises
    ValueError from the mmap write (same limit as the original code).
    If no return value is set, retrieving it will produce an empty dictionary."""
    fnreturn = tempfile.gettempdir() + os.sep + "__SCRIPTRETURN__"
    # open() instead of the Python-2-only file() builtin; binary mode so the
    # padding and the mmap below operate on exactly 4096 raw bytes.
    f = open(fnreturn, "w+b")
    try:
        # ensure file size is 4096 for *nix os's.
        f.write(1024 * b"0000")
        f.flush()
        shmem = mmap.mmap(f.fileno(), 4096, access=mmap.ACCESS_WRITE)
        try:
            shmem.write(pickle.dumps(returns))
        finally:
            shmem.close()
    finally:
        f.close()
# -*- coding: utf-8 -*-
'''
Manage ini files
================
:maintainer: <akilesh1597@gmail.com>
:maturity: new
:depends: re
:platform: all
use section as DEFAULT_IMPLICIT if your ini file does not have any section
for example /etc/sysctl.conf
'''
__virtualname__ = 'ini'
def __virtual__():
    '''
    Only load if the ini execution module is available
    (the old docstring said "mysql module", a copy-paste error).
    '''
    return __virtualname__ if 'ini.set_option' in __salt__ else False
def options_present(name, sections=None):
    '''
    Ensure the given options are present in the ini file with the given
    values.
    .. code-block:: yaml
        /home/saltminion/api-paste.ini:
          ini.options_present:
            - sections:
                test:
                  testkey: 'testval'
                  secondoption: 'secondvalue'
                test1:
                  testkey1: 'testval121'
    options present in file and not specified in sections
    dict will be untouched
    changes dict will contain the list of changes made
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': 'No anomaly detected'
           }
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = ('ini file {0} shall be validated for presence of '
                          'given options under their respective '
                          'sections').format(name)
        return ret
    # Check whether any requested option differs from the current file
    # contents. The previous implementation invoked ini.set_option once per
    # differing key (each time with the *full* sections dict), repeating the
    # write and letting later no-op calls overwrite ret['changes'].
    update_needed = False
    for section in sections or {}:
        for key in sections[section]:
            current_value = __salt__['ini.get_option'](name,
                                                       section,
                                                       key)
            if current_value != str(sections[section][key]):
                update_needed = True
                break
        if update_needed:
            break
    if update_needed:
        # Apply all requested options in a single call
        ret['changes'] = __salt__['ini.set_option'](name, sections)
        if 'error' in ret['changes']:
            ret['result'] = False
            ret['comment'] = 'Errors encountered. {0}'.\
                format(ret['changes'])
            ret['changes'] = {}
        else:
            ret['comment'] = 'Changes take effect'
    return ret
def options_absent(name, sections=None):
    '''
    Ensure the given options are absent from the ini file.
    .. code-block:: yaml
        /home/saltminion/api-paste.ini:
          ini.options_absent:
            - sections:
                test:
                  - testkey
                  - secondoption
                test1:
                  - testkey1
    (The previous docstring example mistakenly said ``ini.options_present``.)
    options present in file and not specified in sections
    dict will be untouched
    changes dict will contain the list of changes made
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': 'No anomaly detected'
           }
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = ('ini file {0} shall be validated for absence of '
                          'given options under their respective '
                          'sections').format(name)
        return ret
    for section in sections or {}:
        for key in sections[section]:
            # ini.remove_option returns the value the option had before
            # removal, or a falsy value when the option was not present.
            current_value = __salt__['ini.remove_option'](name,
                                                          section,
                                                          key)
            if not current_value:
                continue
            if section not in ret['changes']:
                ret['changes'].update({section: {}})
            ret['changes'][section].update({key: {'before': current_value,
                                                  'after': None}})
            ret['comment'] = 'Changes take effect'
    return ret
def sections_present(name, sections=None):
    '''
    Ensure each given section exists in the ini file with exactly the given
    contents.
    .. code-block:: yaml
        /home/saltminion/api-paste.ini:
          ini.sections_present:
            - sections:
                test:
                  testkey: testval
                  secondoption: secondvalue
                test1:
                  testkey1: 'testval121'
    Options present in the file but not specified in the sections dict are
    deleted: every section that differs is removed and rewritten in full.
    The changes dict contains the sections that changed.
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': 'No anomaly detected'
           }
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = ('ini file {0} shall be validated for '
                          'presence of given sections with the '
                          'exact contents').format(name)
        return ret
    for section in sections or {}:
        existing = __salt__['ini.get_section'](name, section)
        if not _same(existing, sections[section]):
            # Rewrite the whole section so that stale options disappear
            __salt__['ini.remove_section'](name, section)
            applied = __salt__['ini.set_option'](
                name, {section: sections[section]}, summary=False)
            if 'error' in applied:
                ret['result'] = False
                ret['changes'] = 'Errors encountered'
                return ret
            ret['changes'][section] = {'before': {section: existing},
                                       'after': applied['changes']}
            ret['comment'] = 'Changes take effect'
    return ret
def sections_absent(name, sections=None):
    '''
    Ensure the given sections are absent from the ini file.
    .. code-block:: yaml
        /home/saltminion/api-paste.ini:
          ini.sections_absent:
            - sections:
                - test
                - test1
    The changes dict contains the sections that were actually removed,
    mapped to their previous contents.
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': 'No anomaly detected'
           }
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = ('ini file {0} shall be validated for absence of '
                          'given sections').format(name)
        return ret
    for section in sections or []:
        # ini.remove_section returns the removed section's former contents,
        # or a falsy value when the section did not exist.
        removed = __salt__['ini.remove_section'](name, section)
        if removed:
            ret['changes'][section] = {'before': removed,
                                       'after': None}
            ret['comment'] = 'Changes take effect'
    return ret
def _same(dict1, dict2):
    '''Return True when the two dicts contain identical keys and values.'''
    delta = _DictDiffer(dict1, dict2)
    if delta.added() or delta.removed() or delta.changed():
        return False
    return True
class _DictDiffer(object):
def __init__(self, current_dict, past_dict):
self.current_dict = current_dict
self.past_dict = past_dict
self.set_current = set(current_dict)
self.set_past = set(past_dict)
self.intersect = self.set_current.intersection(self.set_past)
def added(self):
return self.set_current - self.intersect
def removed(self):
return self.set_past - self.intersect
def changed(self):
return set(o for o in self.intersect if
self.past_dict[o] != self.current_dict[o])
| smallyear/linuxLearn | salt/salt/states/ini_manage.py | Python | apache-2.0 | 7,011 |
import os, sys, subprocess, shutil
import unittest
sys.path.append(os.path.realpath(os.path.join(
os.path.dirname(os.path.realpath(__file__)), '..', 'bench')))
from vard import Client
import time
VARDLOG = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'vardlog.native')
class TestVard(unittest.TestCase):
    """End-to-end tests against a local three-node vard cluster."""
    client = None
    processes = None
    def startProcesses(self):
        """Launch the three vard nodes and give them time to elect a leader."""
        self.processes = []
        for i in range(3):
            port = 8000 + i
            args = [VARDLOG,
                    '-dbpath', 'db-%d' % i,
                    '-port', '%d' % port,
                    '-node', '0,localhost:9000',
                    '-node', '1,localhost:9001',
                    '-node', '2,localhost:9002',
                    '-me', '%d' % i]
            # subprocess.DEVNULL instead of open(os.devnull, 'w'): the old
            # code opened a fresh devnull handle per node and never closed it.
            proc = subprocess.Popen(args, stdout=subprocess.DEVNULL,
                                    stderr=subprocess.STDOUT, close_fds=True)
            self.processes.append(proc)
        time.sleep(1)
    def connectClient(self):
        """Locate the current cluster leader and connect a client to it."""
        cluster = [('localhost', 8000),
                   ('localhost', 8001),
                   ('localhost', 8002)]
        host, port = Client.find_leader(cluster)
        self.client = Client(host, port)
    def terminateProcesses(self):
        """Stop all nodes and wait for them to exit.
        Waiting avoids zombie processes and ensures the db directories are
        released before removeProcessDirs() or a restart touches them.
        """
        for proc in self.processes:
            proc.terminate()
        for proc in self.processes:
            proc.wait()
        self.client = None
        self.processes = None
    def removeProcessDirs(self):
        """Delete each node's on-disk database directory."""
        for i in range(3):
            shutil.rmtree('db-%d' % i)
    def setUp(self):
        """Start up a cluster"""
        self.startProcesses()
        self.connectClient()
    def tearDown(self):
        self.terminateProcesses()
        self.removeProcessDirs()
    def test_put_get(self):
        """A stored value can be read back."""
        self.client.put('answer', '42')
        self.assertEqual(self.client.get('answer'), '42')
    def test_crash(self):
        """Stored data survives a full-cluster stop and restart."""
        self.client.put('answer', '42')
        self.client.put('plse', 'lab')
        self.client.put('average', 'joe')
        self.terminateProcesses()
        self.startProcesses()
        self.connectClient()
        self.assertEqual(self.client.get('answer'), '42')
        self.assertEqual(self.client.get('plse'), 'lab')
        self.assertEqual(self.client.get('average'), 'joe')
    def test_put_delete_get(self):
        """Deleting a key makes a subsequent get return None."""
        self.client.put('answer', '42')
        self.client.delete('answer')
        self.assertEqual(self.client.get('answer'), None)
if __name__ == '__main__':
    # Run the integration suite when this file is executed directly.
    unittest.main()
| uwplse/verdi-raft | extraction/vard-log/test/integration.py | Python | bsd-2-clause | 2,450 |
# Copyright (c) 2013 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
from u2flib_host import u2f, exc, __version__
from u2flib_host.constants import APDU_USE_NOT_SATISFIED
from u2flib_host.utils import u2str
from u2flib_host.yubicommon.compat import text_type
import time
import json
import argparse
import sys
def register(devices, params, facet):
    """
    Interactively registers a single U2F device, given the RegistrationRequest.
    devices: list of U2F device objects; devices that fail to open or that
        report an error are dropped (and closed).
    params: the RegistrationRequest parameters.
    facet: the facet (application origin) to register against.
    Returns the RegistrationResponse from the first device that completes
    registration; exits the process with status 1 when no device succeeds.
    """
    # Drop devices that cannot be opened at all. Iterate over a copy since
    # the list is mutated during iteration.
    for device in devices[:]:
        try:
            device.open()
        except Exception:  # was a bare except: don't swallow SystemExit/KeyboardInterrupt
            devices.remove(device)
    sys.stderr.write('\nTouch the U2F device you wish to register...\n')
    try:
        while devices:
            removed = []
            for device in devices:
                try:
                    return u2f.register(device, params, facet)
                except exc.APDUError as e:
                    if e.code == APDU_USE_NOT_SATISFIED:
                        # Device is waiting for a user-presence touch; retry.
                        pass
                    else:
                        removed.append(device)
                except exc.DeviceError:
                    removed.append(device)
            devices = [d for d in devices if d not in removed]
            for d in removed:
                d.close()
            time.sleep(0.25)
    finally:
        for device in devices:
            device.close()
    sys.stderr.write('\nUnable to register with any U2F device.\n')
    sys.exit(1)
def parse_args():
    """Build the command-line parser for the register tool and parse argv."""
    arg_parser = argparse.ArgumentParser(
        description="Registers a U2F device.\n"
        "Takes a JSON formatted RegisterRequest object on stdin, and returns "
        "the resulting RegistrationResponse on stdout.",
        add_help=True
    )
    arg_parser.add_argument('-v', '--version', action='version',
                            version='%(prog)s ' + __version__)
    arg_parser.add_argument('facet', help='the facet for registration')
    arg_parser.add_argument('-i', '--infile', help='specify a file to read '
                            'RegistrationRequest from, instead of stdin')
    arg_parser.add_argument('-o', '--outfile', help='specify a file to write '
                            'the RegistrationResponse to, instead of stdout')
    arg_parser.add_argument('-s', '--soft', help='Specify a soft U2F device file '
                            'to use')
    return arg_parser.parse_args()
def _read_request(args):
    """Return the RegistrationRequest JSON text from the input file or stdin."""
    if args.infile:
        with open(args.infile, 'r') as f:
            return f.read()
    if sys.stdin.isatty():
        sys.stderr.write('Enter RegistrationRequest JSON data...\n')
    return sys.stdin.read()
def _write_response(args, result):
    """Emit the RegistrationResponse to the output file or stdout."""
    if args.outfile:
        with open(args.outfile, 'w') as f:
            json.dump(result, f)
        sys.stderr.write('Output written to %s\n' % args.outfile)
    else:
        sys.stderr.write('\n---Result---\n')
        print(json.dumps(result))
def main():
    """Entry point: parse args, read a request, register, emit the response."""
    args = parse_args()
    facet = text_type(args.facet)
    params = json.loads(_read_request(args))
    if args.soft:
        from u2flib_host.soft import SoftU2FDevice
        devices = [SoftU2FDevice(args.soft)]
    else:
        devices = u2f.list_devices()
    result = register(devices, params, facet)
    _write_response(args, result)
if __name__ == '__main__':
    # Allow running this module directly as a command-line tool.
    main()
| Yubico/python-u2flib-host | u2flib_host/register.py | Python | bsd-2-clause | 4,482 |
"""
Utility classes
---------------
"""
from collections import namedtuple
from collections.abc import Set
__all__ = ['NameTitle', 'LabeledEnum', 'InspectableSet', 'classmethodproperty']
# (name, title) pair used as the label for LabeledEnum three-tuple members
NameTitle = namedtuple('NameTitle', ['name', 'title'])
class _LabeledEnumMeta(type):
    """Construct labeled enumeration.
    Rewrites the class attributes at class-creation time: tuple-valued
    attributes become plain values with their labels recorded in
    ``__labels__``, and attribute names are recorded in ``__names__``.
    """
    def __new__(cls, name, bases, attrs, **kwargs):
        labels = {}  # enum value -> label (str, or NameTitle for 3-tuples)
        names = {}  # attribute name -> enum value (or value set for groups)
        def pop_name_by_value(value):
            # Reverse lookup: remove and return the attribute name bound to
            # ``value`` (returns None when no attribute maps to it).
            for k, v in list(names.items()):
                if v == value:
                    names.pop(k)
                    return k
        for key, value in tuple(attrs.items()):
            if key != '__order__' and isinstance(value, tuple):
                # value = tuple of actual value (0), label/name (1), optional title (2)
                if len(value) == 2:
                    labels[value[0]] = value[1]
                    attrs[key] = names[key] = value[0]
                elif len(value) == 3:
                    labels[value[0]] = NameTitle(value[1], value[2])
                    attrs[key] = names[key] = value[0]
                else:  # pragma: no cover
                    raise AttributeError(f"Unprocessed attribute {key}")
            elif key != '__order__' and isinstance(value, set):
                # value = set of other unprocessed values
                # (grouped values get no label; raw tuples are reduced to
                # their first element, the actual enum value)
                attrs[key] = names[key] = {
                    v[0] if isinstance(v, tuple) else v for v in value
                }
        if '__order__' in attrs:
            # Rebuild labels/names in the order listed in __order__ so that
            # .items()/.keys()/.values() iterate in declaration order.
            ordered_labels = {}
            ordered_names = {}
            for value in attrs['__order__']:
                ordered_labels[value[0]] = labels.pop(value[0])
                attr_name = pop_name_by_value(value[0])
                if attr_name is not None:
                    ordered_names[attr_name] = value[0]
            for (
                key,
                value,
            ) in (
                labels.items()
            ):  # Left over items after processing the list in __order__
                ordered_labels[key] = value
                attr_name = pop_name_by_value(value)
                if attr_name is not None:
                    ordered_names[attr_name] = value
            ordered_names.update(names)  # Left over names that don't have a label
        else:  # This enum doesn't care about ordering, or is using Py3 with __prepare__
            ordered_labels = labels
            ordered_names = names
        attrs['__labels__'] = ordered_labels
        attrs['__names__'] = ordered_names
        return type.__new__(cls, name, bases, attrs)
    def __getitem__(cls, key):
        # Dictionary-style access on the enum class returns the label
        return cls.__labels__[key]
    def __contains__(cls, key):
        # ``value in ENUM`` tests whether the value has a label
        return key in cls.__labels__
class LabeledEnum(metaclass=_LabeledEnumMeta):
    """
    Labeled enumerations. Declarate an enumeration with values and labels
    (for use in UI)::
        >>> class MY_ENUM(LabeledEnum):
        ...     FIRST = (1, "First")
        ...     THIRD = (3, "Third")
        ...     SECOND = (2, "Second")
    :class:`LabeledEnum` will convert any attribute that is a 2-tuple into
    a value and label pair. Access values as direct attributes of the enumeration::
        >>> MY_ENUM.FIRST
        1
        >>> MY_ENUM.SECOND
        2
        >>> MY_ENUM.THIRD
        3
    Access labels via dictionary lookup on the enumeration::
        >>> MY_ENUM[MY_ENUM.FIRST]
        'First'
        >>> MY_ENUM[2]
        'Second'
        >>> MY_ENUM.get(3)
        'Third'
        >>> MY_ENUM.get(4) is None
        True
    Retrieve a full list of values and labels with ``.items()``. Definition order is
    preserved in Python 3.x, but not in 2.x::
        >>> sorted(MY_ENUM.items())
        [(1, 'First'), (2, 'Second'), (3, 'Third')]
        >>> sorted(MY_ENUM.keys())
        [1, 2, 3]
        >>> sorted(MY_ENUM.values())
        ['First', 'Second', 'Third']
    However, if you really want ordering in Python 2.x, add an __order__ list.
    Anything not in it will default to Python's ordering::
        >>> class RSVP(LabeledEnum):
        ...     RSVP_Y = ('Y', "Yes")
        ...     RSVP_N = ('N', "No")
        ...     RSVP_M = ('M', "Maybe")
        ...     RSVP_U = ('U', "Unknown")
        ...     RSVP_A = ('A', "Awaiting")
        ...     __order__ = (RSVP_Y, RSVP_N, RSVP_M, RSVP_A)
        >>> RSVP.items()
        [('Y', 'Yes'), ('N', 'No'), ('M', 'Maybe'), ('A', 'Awaiting'), ('U', 'Unknown')]
    Three value tuples are assumed to be (value, name, title) and the name and
    title are converted into NameTitle(name, title)::
        >>> class NAME_ENUM(LabeledEnum):
        ...     FIRST = (1, 'first', "First")
        ...     THIRD = (3, 'third', "Third")
        ...     SECOND = (2, 'second', "Second")
        ...     __order__ = (FIRST, SECOND, THIRD)
        >>> NAME_ENUM.FIRST
        1
        >>> NAME_ENUM[NAME_ENUM.FIRST]
        NameTitle(name='first', title='First')
        >>> NAME_ENUM[NAME_ENUM.SECOND].name
        'second'
        >>> NAME_ENUM[NAME_ENUM.THIRD].title
        'Third'
    To make it easier to use with forms and to hide the actual values, a list of
    (name, title) pairs is available::
        >>> NAME_ENUM.nametitles()
        [('first', 'First'), ('second', 'Second'), ('third', 'Third')]
    Given a name, the value can be looked up::
        >>> NAME_ENUM.value_for('first')
        1
        >>> NAME_ENUM.value_for('second')
        2
    Values can be grouped together using a set, for performing "in" operations.
    These do not have labels and cannot be accessed via dictionary access::
        >>> class RSVP_EXTRA(LabeledEnum):
        ...     RSVP_Y = ('Y', "Yes")
        ...     RSVP_N = ('N', "No")
        ...     RSVP_M = ('M', "Maybe")
        ...     RSVP_U = ('U', "Unknown")
        ...     RSVP_A = ('A', "Awaiting")
        ...     __order__ = (RSVP_Y, RSVP_N, RSVP_M, RSVP_U, RSVP_A)
        ...     UNCERTAIN = {RSVP_M, RSVP_U, 'A'}
        >>> isinstance(RSVP_EXTRA.UNCERTAIN, set)
        True
        >>> sorted(RSVP_EXTRA.UNCERTAIN)
        ['A', 'M', 'U']
        >>> 'N' in RSVP_EXTRA.UNCERTAIN
        False
        >>> 'M' in RSVP_EXTRA.UNCERTAIN
        True
        >>> RSVP_EXTRA.RSVP_U in RSVP_EXTRA.UNCERTAIN
        True
    Labels are stored internally in a dictionary named ``__labels__``, mapping
    the value to the label. Symbol names are stored in ``__names__``, mapping
    name to the value. The label dictionary will only contain values processed
    using the tuple syntax, which excludes grouped values, while the names
    dictionary will contain both, but will exclude anything else found in the
    class that could not be processed (use ``__dict__`` for everything)::
        >>> list(RSVP_EXTRA.__labels__.keys())
        ['Y', 'N', 'M', 'U', 'A']
        >>> list(RSVP_EXTRA.__names__.keys())
        ['RSVP_Y', 'RSVP_N', 'RSVP_M', 'RSVP_U', 'RSVP_A', 'UNCERTAIN']
    """
    @classmethod
    def get(cls, key, default=None):
        """Return the label for ``key``, or ``default`` when absent."""
        return cls.__labels__.get(key, default)
    @classmethod
    def keys(cls):
        """Return all enum values (the keys of ``__labels__``)."""
        return list(cls.__labels__.keys())
    @classmethod
    def values(cls):
        """Return all labels (strings, or :class:`NameTitle` pairs)."""
        return list(cls.__labels__.values())
    @classmethod
    def items(cls):
        """Return (value, label) pairs."""
        return list(cls.__labels__.items())
    @classmethod
    def value_for(cls, name):
        """Return the value whose label has the given ``name`` (None if absent)."""
        for key, value in list(cls.__labels__.items()):
            if isinstance(value, NameTitle) and value.name == name:
                return key
    @classmethod
    def nametitles(cls):
        """Return (name, title) pairs; meaningful only for three-tuple enums."""
        return [(name, title) for name, title in cls.values()]
class InspectableSet(Set):
    """
    Read-only wrapper over a set that answers membership questions through
    ``in``, item access and attribute access, making template code concise::
        >>> myset = InspectableSet({'member', 'other'})
        >>> 'member' in myset
        True
        >>> 'random' in myset
        False
        >>> myset.member
        True
        >>> myset.random
        False
        >>> myset['member']
        True
        >>> myset['random']
        False
        >>> len(InspectableSet())
        0
    Set operations inherited from :class:`~collections.abc.Set` (union,
    intersection, etc.) return :class:`InspectableSet` instances::
        >>> joinset = myset | {'added'}
        >>> isinstance(joinset, InspectableSet)
        True
        >>> 'added' in joinset
        True
    """
    def __init__(self, members=()):
        contents = members if isinstance(members, Set) else set(members)
        # Bypass our own __setattr__, which rejects every attribute write
        object.__setattr__(self, '_members', contents)
    def __repr__(self):
        return f'InspectableSet({self._members!r})'
    def __len__(self):
        return len(self._members)
    def __contains__(self, key):
        return key in self._members
    def __iter__(self):
        return iter(self._members)
    def __getitem__(self, key):
        # Item access is a membership test, not a lookup
        return key in self._members
    def __getattr__(self, attr):
        # Attribute access is likewise a membership test
        return attr in self._members
    def __setattr__(self, attr, value):
        # Instances are immutable from the outside
        raise AttributeError(attr)
class classmethodproperty:  # noqa: N801
    """
    Descriptor that makes a class method readable like a property, on both
    the class and its instances (the function always receives the class)::
        >>> class Foo:
        ...     @classmethodproperty
        ...     def test(cls):
        ...         return repr(cls)
        ...
        >>> Foo.test
        "<class 'coaster.utils.classes.Foo'>"
        >>> Foo().test
        "<class 'coaster.utils.classes.Foo'>"
        >>> class Bar(Foo):
        ...     pass
        ...
        >>> Bar.test
        "<class 'coaster.utils.classes.Bar'>"
    Instance-level writes and deletes are blocked (class-level writes are
    not — Python's descriptor protocol cannot intercept those)::
        >>> Foo().test = 'bar'
        Traceback (most recent call last):
        AttributeError: test is read-only
        >>> del Foo().test
        Traceback (most recent call last):
        AttributeError: test is read-only
        >>> Foo.test = 'bar'
        >>> Foo.test
        'bar'
    """
    def __init__(self, func):
        self.func = func
    def __get__(self, obj, cls=None):
        # Accessed via an instance: recover the class from the instance
        target = cls if cls is not None else type(obj)
        return self.func(target)
    def __set__(self, obj, value):
        raise AttributeError(f"{self.func.__name__} is read-only")
    def __delete__(self, obj):
        raise AttributeError(f"{self.func.__name__} is read-only")
| hasgeek/coaster | coaster/utils/classes.py | Python | bsd-2-clause | 10,952 |
from django.conf import settings
from django.db.models import F
from geotrek.api.v2 import serializers as api_serializers, \
viewsets as api_viewsets, filters as api_filters
from geotrek.signage import models as signage_models
from geotrek.api.v2.functions import Transform
class SignageViewSet(api_viewsets.GeotrekGeometricViewset):
    """API viewset for Signage objects; each row is annotated with its geom_3d reprojected to settings.API_SRID."""
    filter_backends = api_viewsets.GeotrekGeometricViewset.filter_backends + (api_filters.NearbyContentFilter, api_filters.UpdateOrCreateDateFilter)
    serializer_class = api_serializers.SignageSerializer
    queryset = signage_models.Signage.objects.existing() \
        .select_related('topo_object', 'type', ) \
        .annotate(geom3d_transformed=Transform(F('geom_3d'), settings.API_SRID)) \
        .prefetch_related('topo_object__aggregations', 'attachments') \
        .order_by('pk')
class SignageTypeViewSet(api_viewsets.GeotrekViewSet):
    """API viewset for SignageType records, filterable by portal via SignageRelatedPortalFilter."""
    filter_backends = api_viewsets.GeotrekViewSet.filter_backends + (api_filters.SignageRelatedPortalFilter, )
    serializer_class = api_serializers.SignageTypeSerializer
    queryset = signage_models.SignageType.objects.all().order_by('pk')
class DirectionViewSet(api_viewsets.GeotrekViewSet):
    """API viewset listing all signage Direction records."""
    filter_backends = api_viewsets.GeotrekViewSet.filter_backends
    serializer_class = api_serializers.DirectionSerializer
    queryset = signage_models.Direction.objects.all().order_by('pk')
class SealingViewSet(api_viewsets.GeotrekViewSet):
    """API viewset listing all signage Sealing records."""
    filter_backends = api_viewsets.GeotrekViewSet.filter_backends
    serializer_class = api_serializers.SealingSerializer
    queryset = signage_models.Sealing.objects.all().order_by('pk')
class ColorViewSet(api_viewsets.GeotrekViewSet):
    """API viewset listing all blade Color records."""
    filter_backends = api_viewsets.GeotrekViewSet.filter_backends
    serializer_class = api_serializers.ColorSerializer
    queryset = signage_models.Color.objects.all().order_by('pk')
class BladeTypeViewSet(api_viewsets.GeotrekViewSet):
    """API viewset listing all BladeType records."""
    filter_backends = api_viewsets.GeotrekViewSet.filter_backends
    serializer_class = api_serializers.BladeTypeSerializer
    queryset = signage_models.BladeType.objects.all().order_by('pk')
| makinacorpus/Geotrek | geotrek/api/v2/views/signage.py | Python | bsd-2-clause | 2,113 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# -------------------------------------------------------------------------------
class MorphologyImportError(IOError):
    """Raised when importing a morphology fails."""
class MorphologyExportError(IOError):
    """Raised when exporting a morphology fails."""
class MorphologyFrameworkRegistrationError(RuntimeError):
    """Raised when registration with the morphology framework fails."""
| mikehulluk/morphforge | src/morphforge/morphology/errors/__init__.py | Python | bsd-2-clause | 1,704 |
import pickle
import pytest
from praw.models import Draft, Subreddit
from ... import UnitTest
class TestDraft(UnitTest):
    """Unit tests for praw.models.Draft construction, equality and repr."""
    def test_construct_failure(self):
        """Draft() must receive exactly one of `id` or `_data`."""
        message = "Exactly one of `id` or `_data` must be provided."
        with pytest.raises(TypeError) as excinfo:
            Draft(self.reddit)
        assert str(excinfo.value) == message
        with pytest.raises(TypeError) as excinfo:
            Draft(self.reddit, "dummy", _data={"id": "dummy"})
        assert str(excinfo.value) == message
        with pytest.raises(TypeError) as excinfo:
            Draft(self.reddit, id="dummy", _data={"id": "dummy"})
        assert str(excinfo.value) == message
    def test_create_failure(self):
        """drafts.create rejects supplying both `selftext` and `url`."""
        with pytest.raises(TypeError) as excinfo:
            self.reddit.drafts.create(url="url", selftext="selftext")
        assert (
            str(excinfo.value) == "Exactly one of `selftext` or `url` must be provided."
        )
    def test_equality(self):
        """Drafts compare equal by id (and to the bare id string)."""
        draft1 = Draft(self.reddit, _data={"id": "dummy1"})
        draft2 = Draft(self.reddit, _data={"id": "dummy1"})
        draft3 = Draft(self.reddit, _data={"id": "dummy3"})
        assert draft1 == draft1
        assert draft2 == draft2
        assert draft3 == draft3
        assert draft1 == draft2
        assert draft2 != draft3
        assert draft1 != draft3
        assert "dummy1" == draft1
        assert draft2 == "dummy1"
        # Same checks for markdown (selftext) drafts
        draft1 = Draft(
            self.reddit, _data={"id": "dummy1", "body": "body1", "kind": "markdown"}
        )
        draft2 = Draft(
            self.reddit, _data={"id": "dummy1", "body": "body1", "kind": "markdown"}
        )
        draft3 = Draft(
            self.reddit, _data={"id": "dummy3", "body": "body2", "kind": "markdown"}
        )
        assert draft1 == draft1
        assert draft2 == draft2
        assert draft3 == draft3
        assert draft1 == draft2
        assert draft2 != draft3
        assert draft1 != draft3
        # Same checks for link drafts
        draft1 = Draft(
            self.reddit, _data={"id": "dummy1", "body": "url1", "kind": "link"}
        )
        draft2 = Draft(
            self.reddit, _data={"id": "dummy1", "body": "url1", "kind": "link"}
        )
        draft3 = Draft(
            self.reddit, _data={"id": "dummy3", "body": "url3", "kind": "link"}
        )
        assert draft1 == draft1
        assert draft2 == draft2
        assert draft3 == draft3
        assert draft1 == draft2
        assert draft2 != draft3
        assert draft1 != draft3
    def test_hash(self):
        """Drafts hash by id; differing bodies with the same id hash alike."""
        draft1 = Draft(
            self.reddit, _data={"id": "dummy1", "body": "body1", "kind": "markdown"}
        )
        draft2 = Draft(
            self.reddit, _data={"id": "dummy1", "body": "body2", "kind": "markdown"}
        )
        draft3 = Draft(
            self.reddit, _data={"id": "dummy3", "body": "body2", "kind": "markdown"}
        )
        assert hash(draft1) == hash(draft1)
        assert hash(draft2) == hash(draft2)
        assert hash(draft3) == hash(draft3)
        assert hash(draft1) == hash(draft2)
        assert hash(draft2) != hash(draft3)
        assert hash(draft1) != hash(draft3)
    def test_pickle(self):
        """A Draft survives a pickle round-trip at every protocol level."""
        draft = Draft(self.reddit, _data={"id": "dummy"})
        for level in range(pickle.HIGHEST_PROTOCOL + 1):
            other = pickle.loads(pickle.dumps(draft, protocol=level))
            assert draft == other
    def test_repr(self):
        """repr() includes id, and subreddit/title when they are set."""
        draft = Draft(self.reddit, id="draft_id")
        assert repr(draft) == "Draft(id='draft_id')"
        data = {"id": "draft_id", "body": "body", "kind": "markdown"}
        subreddit = Subreddit(None, "subreddit")
        draft = Draft(
            self.reddit, _data={**data, "subreddit": subreddit, "title": None}
        )
        assert repr(draft) == "Draft(id='draft_id' subreddit='subreddit')"
        draft = Draft(self.reddit, _data={**data, "subreddit": None, "title": "title"})
        assert repr(draft) == "Draft(id='draft_id' title='title')"
        draft = Draft(
            self.reddit, _data={**data, "subreddit": subreddit, "title": "title"}
        )
        assert repr(draft) == "Draft(id='draft_id' subreddit='subreddit' title='title')"
    def test_str(self):
        """str() of a Draft is its id."""
        draft = Draft(self.reddit, _data={"id": "dummy"})
        assert str(draft) == "dummy"
    def test_submit_failure(self):
        """submit() requires a subreddit on the Draft or as an argument."""
        draft = Draft(
            self.reddit,
            _data={
                "id": "draft_id",
                "body": "body",
                "kind": "markdown",
                "subreddit": None,
            },
        )
        with pytest.raises(ValueError) as excinfo:
            draft.submit()
        assert (
            str(excinfo.value)
            == "`subreddit` must be set on the Draft or passed as a keyword argument."
        )
| praw-dev/praw | tests/unit/models/reddit/test_draft.py | Python | bsd-2-clause | 4,823 |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """
    def setUp(self):
        # Show full diffs when the workbook comparison fails.
        self.maxDiff = None
        filename = 'button05.xlsx'
        test_dir = 'xlsxwriter/test/comparison/'
        # File produced by this test run vs. the Excel-generated reference.
        self.got_filename = test_dir + '_test_' + filename
        self.exp_filename = test_dir + 'xlsx_files/' + filename
        self.ignore_files = []
        self.ignore_elements = {}
    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        # Insert a macro button scaled 2x horizontally and 1.5x vertically.
        worksheet.insert_button('C2', {'macro': 'my_macro',
                                       'x_scale': 2,
                                       'y_scale': 1.5
                                       })
        workbook.close()
        self.assertExcelEqual()
| jkyeung/XlsxWriter | xlsxwriter/test/comparison/test_button05.py | Python | bsd-2-clause | 1,181 |
import webracer
from wolis import utils
from wolis.test_case import WolisTestCase
class PruneGroupTestCase(WolisTestCase):
    """Integration test: delete a group's members via the phpBB ACP prune tool."""
    def test_prune_group(self):
        """Prune (delete) all users in testgroup1 and verify removal in the DB."""
        username = 'prune1'
        group_name = 'testgroup1'
        self.login_and_nav()
        db = utils.current.db or utils.instantiate_db(self.conf)
        # Sanity check: the target user exists before pruning.
        with db.cursor() as c:
            c.execute('select user_id from phpbb_users where username=%s',
                (username,))
            row = c.fetchone()
        assert row is not None
        form = self.response.form()
        elements = form.elements.mutable
        doc = self.response.lxml_etree
        # Pick the <option> whose text matches the group name to get its id.
        elt = webracer.utils.xpath_first_check(doc, '//option[text()="%s"]' % group_name)
        elements.set_value('group_id', elt.attrib['value'])
        elements.set_value('action', 'delete')
        self.post(form.computed_action, body=elements.params.list)
        self.assert_successish()
        assert 'Users to be pruned' in self.response.body
        assert username in self.response.body
        self.submit_confirm_form()
        #assert 'The selected users have been deactivated successfully' in self.response.body
        assert 'The selected users have been deleted successfully' in self.response.body
        # The user row must be gone after the prune completes.
        with db.cursor() as c:
            c.execute('select user_id from phpbb_users where username=%s',
                (username,))
            row = c.fetchone()
        assert row is None
    def login_and_nav(self):
        """Log in, enter the ACP, and open Users and Groups -> Prune users."""
        self.login('morpheus', 'morpheus')
        self.acp_login('morpheus', 'morpheus')
        start_url = '/adm/index.php'
        self.get_with_sid(start_url)
        self.assert_successish()
        assert 'Board statistics' in self.response.body
        url = self.response.urljoin(self.link_href_by_acp_tab_title('Users and Groups'))
        self.get(url)
        self.assert_successish()
        url = self.response.urljoin(self.link_href_by_text('Prune users'))
        assert 'i=acp_prune' in url
        self.get(url)
if __name__ == '__main__':
    # Run this test case directly without an external test runner.
    import unittest
    unittest.main()
| p/wolis-phpbb | tests/prune_group.py | Python | bsd-2-clause | 2,176 |
import numpy as np
import random
import json
import os
import copy
import progressbar
params = {'step': 10,  # number of frames to advance after drawing one sample
          'interval': 30,  # maximum number of consecutive frames concatenated into one sample
          'threshold': 30,  # label a sample as "fighting" when it contains at least this many fighting poses
          'posi_label': 1  # label value used for positive (fighting) samples
          }
# Base paths for the PoseTrack keypoint inputs and the generated action-sample outputs.
kBasePath = "C:/Users/JM/Desktop/Data/ETRIrelated/BMVC/posetrack/"
kKeypointBasePath = os.path.join(kBasePath, "posetrack_coco_processed")
kSaveActionPath = os.path.join(kBasePath, "posetrack_action_data")
kNumKeypointTypes = 14  # keypoints per person; each pose vector holds 3 values (x, y, conf) per keypoint
kOriginCoord = 0
class MakeAction():
    def __init__(self, _save_dir_path, _track_root_path):
        """Store the output directory and the root directory of track files."""
        # Directory where generated action samples will be written.
        self.save_dir_path = _save_dir_path
        # Root directory containing the per-video pose track files.
        self.track_root_path = _track_root_path
        # self.ground_truth_path = _gt_path
        # self.ground_truth = None
def read_track_data(self, _cur_file):
data = {}
track_data_path = self.track_root_path + '/' + _cur_file
f = open(track_data_path, 'r')
for line in f.readlines():
split_line = line.split(" ")
if not int(split_line[0]) in data.keys():
data[int(split_line[0])] = {}
data[int(split_line[0])][int(split_line[1])] = []
split_data = split_line[2:]
for i, dat in enumerate(split_data):
data[int(split_line[0])][int(split_line[2])].append(float(dat))
return data
def make_action_data(self, _file_name, pose_data, n_channel=3):
action_data = []
sample_info = []
for person_id in pose_data.keys():
cur_person = pose_data[person_id]
frame_key = list(cur_person.keys())
frame_key.sort()
# 액션을 만들만큼 충분한 pose가 있지 않은 경우
if len(frame_key) < params['interval']:
continue
start = 0
end = params['interval']
# print(frame_key)
while 1:
# print(frame_key[start])
# 액션의 끝 frame number가 존재하는 frame number 범위를 벗어나는 경우
if end >= len(frame_key):
break
if frame_key[end] != frame_key[start] + params['interval']:
start += 1
end += 1
continue
# break
# sample 정보 저장(file number, pose 시작 frame number, pose 끝 frame number
sample_info.append([_file_name, person_id, frame_key[start], frame_key[end]])
# first frame info
first_frame_neck = [cur_person[frame_key[start]][3 * 1 + 0], cur_person[frame_key[start]][3 * 1 + 1]]
right_point = [cur_person[frame_key[start]][3 * 8 + 0], cur_person[frame_key[start]][3 * 8 + 1]]
left_point = [cur_person[frame_key[start]][3 * 11 + 0], cur_person[frame_key[start]][3 * 11 + 1]]
dist1 = distance(first_frame_neck, right_point)
dist2 = distance(first_frame_neck, left_point)
first_frame_dist = (dist1 + dist2) / 2
label_check = 0
# action_data.append([])
#if n_channel == 3:
x_channel = y_channel = c_channel = []
action = []
for i in frame_key[start:end]:
# print(len(cur_person[i]))
tmp_list = np.array(copy.deepcopy(cur_person[i]))
# 첫프레임 목좌표 0,0으로 만드는 좌표계로 변환!
# print("prev:", tmp_list)
tmp_list = self.normalize_pose(tmp_list, first_frame_neck, first_frame_dist)
# print("next:", tmp_list)
if n_channel == 3:
for j in range(kNumKeypointTypes):
x_channel.append(tmp_list[3 * j + 0])
y_channel.append(tmp_list[3 * j + 1])
c_channel.append(tmp_list[3 * j + 2])
else:
action.append([])
for j in range(kNumKeypointTypes):
# action_data[-1].append(tmp_list[j])
action[-1].append(tmp_list[3 * j + 0])
action[-1].append(tmp_list[3 * j + 1])
if n_channel == 3:
x_channel = np.array(x_channel).reshape(params['interval'], kNumKeypointTypes)
y_channel = np.array(y_channel).reshape(params['interval'], kNumKeypointTypes)
c_channel = np.array(c_channel).reshape(params['interval'], kNumKeypointTypes)
action = np.dstack((x_channel, y_channel, c_channel))
# action frame동안 투기로 labeled 된 pose가 몇갠지 세는 것
if cur_person[i][-1] == 1:
label_check += 1
else:
action = np.asarray(action)
# print("shape", action.shape)
# print(action)
class_label = None
# labeled 된것이 threshold 값보다 높다면 action을 투기action으로 labeling
if label_check > params['threshold']:
# action_data[-1].append(1)
class_label = 1
else:
# action_data[-1].append(0)
class_label = 0
str_neck_x = format(first_frame_neck[0] + kOriginCoord, '4.3f')
str_neck_y = format(first_frame_neck[1] + kOriginCoord, '4.3f')
str_dist = format(first_frame_dist, '4.3f')
str_neck_x = str_neck_x.replace('.', '_')
str_neck_y = str_neck_y.replace('.', '_')
str_dist = str_dist.replace('.', '_')
save_file_name = "%s-%02d-%04d-%03d-%02d-%s-%s-%s-%d.npy" \
% (_file_name, person_id, frame_key[start], params['interval'], params['step'],
str_neck_x, str_neck_y, str_dist, class_label)
self.save_action_npy(action, save_file_name)
start += params['step']
end += params['step']
return action_data, sample_info
@staticmethod
def normalize_pose(_pose_data, _neck, norm_constant):
kXIdx = 0
kYIdx = 1
kConfidencIdx = 2
if isinstance(_neck, list):
_neck = np.array(_neck)
if isinstance(_pose_data, list):
_pose_data = np.array(_pose_data)
rescaled_origin = _neck[0:2] / norm_constant
for base_index in range(kNumKeypointTypes):
# 좌표가 (0,0) 인 것들을 가려내기 위해서 confidence 값을 사용한 것.
pos_offset = 3 * base_index
if _pose_data[pos_offset + kConfidencIdx] == 0:
continue
cur_point = _pose_data[pos_offset + kXIdx:pos_offset + kYIdx + 1]
_pose_data[pos_offset + kXIdx:pos_offset + kYIdx + 1] = \
cur_point / norm_constant - rescaled_origin + [kOriginCoord, kOriginCoord]
return _pose_data
def save_action_npy(self, _action, _save_file_name):
save_file = self.save_dir_path + "\\" + _save_file_name
np.save(save_file, _action)
def read_labeled_data(self, _file_name):
file_path = self.track_root_path + "\\" + _file_name
data = {}
f = open(file_path, 'r')
for line in f.readlines():
split_line = line.split(' ')
if not int(split_line[0]) in data.keys():
data[int(split_line[0])] = {}
data[int(split_line[0])][int(split_line[2])] = []
split_data = split_line[3:]
for i, dat in enumerate(split_data):
if len(split_data) == i + 1:
data[int(split_line[0])][int(split_line[2])].append(int(dat))
continue
data[int(split_line[0])][int(split_line[2])].append(float(dat))
return data
def run(self):
action = []
info = []
file_list = os.listdir(self.track_root_path)
num_of_file = len(file_list)
for i in progressbar.progressbar(range(num_of_file)):
file_name = file_list[i]
# file_number = int(file_name.split(".")[0])
labeled_data = self.read_labeled_data(file_name)
file_name = file_name.split(".")[0] # .replace("_", "-")
tmp_action, tmp_info = self.make_action_data(file_name, labeled_data)
if not action:
action = tmp_action
info = tmp_info
continue
action.extend(tmp_action)
info.extend(tmp_info)
return action, info
def distance(v1, v2):
    """Euclidean distance between two equal-length coordinate sequences."""
    squared_diffs = [(a - b) ** 2 for a, b in zip(v1, v2)]
    return sum(squared_diffs) ** 0.5
if __name__ == "__main__":
action_loader = MakeAction(kSaveActionPath, kKeypointBasePath)
data, info = action_loader.run()
# print(data[0])
| neohanju/GarbageDumping | EventEncoder/make_sample.py | Python | bsd-2-clause | 9,327 |
from rex.exploit.cgc import CGCType1Exploit
class CGCType1RopExploit(CGCType1Exploit):
    """CGC type-1 exploit that sets the target register through a ROP chain."""

    def __init__(self, crash, register, reg_bitmask, ip_bitmask, ch_mem, value_var, ip_var):
        """
        :param crash: crash object already shaped to trigger the vulnerability
        :param register: the register set by the exploit
        :param reg_bitmask: bitmask applied to the register value
        :param ip_bitmask: bitmask applied to the instruction pointer value
        :param ch_mem: memory image representing the ROP chain
        :param value_var: claripy variable for the value to set
        :param ip_var: claripy variable for the instruction pointer to set
        """
        # A ROP chain reuses existing executable code and needs no fixed
        # payload address, so it bypasses both NX and ASLR.
        super(CGCType1RopExploit, self).__init__(
            crash, register,
            bypasses_nx=True, bypasses_aslr=True,
            reg_bitmask=reg_bitmask, ip_bitmask=ip_bitmask)
        self.method_name = 'rop'
        self._mem = ch_mem
        self._arg_vars = [value_var, ip_var]
        self._generate_formula()

    def __str__(self):
        return "<%s> rop type1" % self.register
| shellphish/rex | rex/exploit/cgc/type1/cgc_type1_rop_exploit.py | Python | bsd-2-clause | 1,157 |
#
# Incremental request parser.
#
# Author: Max Kellermann <mk@cm4all.com>
#
from __future__ import print_function
import six
import array, struct
try:
from urllib.parse import unquote
except ImportError:
from urllib import unquote
from .protocol import *
import beng_proxy.translation.uri
def _parse_port(address):
if address[0] == '[':
i = address.find(']', 1)
if i < 0 or len(address) <= i + 2 or address[i + 1] != ':':
return None
port = address[i + 2:]
else:
i = address.find(':')
if i < 0 or address.find(':', i + 1) > i:
# more than one colon: IPv6 address without port
return None
port = address[i + 1:]
try:
return int(port)
except ParserError:
return None
class Request:
    """An OO wrapper for a translation request. This object is empty
    when created, and is completed incrementally by calling
    packetReceived() until it returns true.

    Never ever access the 'args' property."""

    def __init__(self):
        # Every attribute mirrors one translation packet type; it keeps its
        # "not received" default until packetReceived() sees that packet.
        self.protocol_version = 0
        self.host = None
        self.alt_host = None
        self.raw_uri = None
        self.args = None
        self.query_string = None
        self.widget_type = None
        self.session = None
        self.check = None
        self.auth = None
        self.http_auth = None
        self.token_auth = None
        self.auth_token = None
        self.recover_session = None
        self.want_full_uri = None
        self.chain = None
        self.chain_header = None
        self.param = None
        self.layout = None
        self.listener_tag = None
        self.local_address = None
        self.local_port = None
        self.remote_host = None
        self.user_agent = None
        self.ua_class = None
        self.accept_language = None
        self.authorization = None
        self.status = None
        self.want = None
        self.file_not_found = None
        self.directory_index = None
        self.internal_redirect = None
        self.enotdir = None
        self.content_type_lookup = None
        self.suffix = None
        self.error_document = False
        self.error_document_payload = None
        self.probe_path_suffixes = None
        self.probe_suffix = None
        self.read_file = None
        self.pool = None
        self.user = None
        self.login = False
        self.password = None
        self.service = None
        self.cron = False
        self.base = None
        self.regex = None

    def __getattr__(self, name):
        # Only synthesizes the legacy 'uri' attribute; everything else is a
        # genuine AttributeError.
        if name == 'uri':
            # compatibility with pre-0.7: return the unquoted URI
            if self.raw_uri is None:
                return None
            return unquote(self.raw_uri)
        else:
            raise AttributeError(name)

    def packetReceived(self, packet):
        """Feed a packet into this object. Returns true when the
        request is finished.

        Dispatches on packet.command; most packets simply store their payload
        (decoded where the protocol defines text) in one attribute.
        """
        if packet.command == TRANSLATE_BEGIN:
            if len(packet.payload) > 0:
                # this "struct" kludge is for Python 2/3 compatibility
                self.protocol_version = struct.unpack('B', packet.payload[:1])[0]
        elif packet.command == TRANSLATE_END:
            # End of request: tell the caller we are complete.
            return True
        elif packet.command == TRANSLATE_HOST:
            self.host = packet.payload.decode('ascii')
        elif packet.command == TRANSLATE_ALT_HOST:
            self.alt_host = packet.payload.decode('ascii')
        elif packet.command == TRANSLATE_URI:
            self.raw_uri = packet.payload.decode('ascii')
        elif packet.command == TRANSLATE_ARGS:
            self.args = packet.payload.decode('ascii')
        elif packet.command == TRANSLATE_QUERY_STRING:
            self.query_string = packet.payload.decode('ascii')
        elif packet.command == TRANSLATE_WIDGET_TYPE:
            self.widget_type = packet.payload.decode('ascii')
        elif packet.command == TRANSLATE_SESSION:
            self.session = packet.payload.decode('ascii')
        elif packet.command == TRANSLATE_CHECK:
            # Opaque binary payloads are stored verbatim (no decode).
            self.check = packet.payload
        elif packet.command == TRANSLATE_AUTH:
            self.auth = packet.payload
        elif packet.command == TRANSLATE_HTTP_AUTH:
            self.http_auth = packet.payload
        elif packet.command == TRANSLATE_TOKEN_AUTH:
            self.token_auth = packet.payload
        elif packet.command == TRANSLATE_AUTH_TOKEN:
            self.auth_token = packet.payload
        elif packet.command == TRANSLATE_RECOVER_SESSION:
            self.recover_session = packet.payload
        elif packet.command == TRANSLATE_WANT_FULL_URI:
            self.want_full_uri = packet.payload
        elif packet.command == TRANSLATE_CHAIN:
            self.chain = packet.payload
        elif packet.command == TRANSLATE_CHAIN_HEADER:
            self.chain_header = packet.payload
        elif packet.command == TRANSLATE_PARAM:
            self.param = packet.payload.decode('ascii')
        elif packet.command == TRANSLATE_LAYOUT:
            self.layout = packet.payload
        elif packet.command == TRANSLATE_LISTENER_TAG:
            self.listener_tag = packet.payload.decode('ascii')
        elif packet.command == TRANSLATE_LOCAL_ADDRESS_STRING:
            # Also derive the numeric port from the address string.
            self.local_address = packet.payload.decode('ascii')
            self.local_port = _parse_port(self.local_address)
        elif packet.command == TRANSLATE_REMOTE_HOST:
            self.remote_host = packet.payload.decode('ascii')
        elif packet.command == TRANSLATE_USER_AGENT:
            self.user_agent = packet.payload.decode('ascii')
        elif packet.command == TRANSLATE_LANGUAGE:
            self.accept_language = packet.payload.decode('ascii')
        elif packet.command == TRANSLATE_AUTHORIZATION:
            self.authorization = packet.payload.decode('ascii')
        elif packet.command == TRANSLATE_STATUS:
            # Status is a 16-bit little-endian value; ignore malformed sizes.
            if len(packet.payload) == 2:
                self.status = struct.unpack('H', packet.payload)[0]
        elif packet.command == TRANSLATE_WANT:
            # Payload is an array of 16-bit command codes.
            self.want = array.array('H')
            if six.PY2:
                self.want.fromstring(packet.payload)
            else:
                self.want.frombytes(packet.payload)
        elif packet.command == TRANSLATE_FILE_NOT_FOUND:
            self.file_not_found = packet.payload
        elif packet.command == TRANSLATE_DIRECTORY_INDEX:
            self.directory_index = packet.payload
        elif packet.command == TRANSLATE_INTERNAL_REDIRECT:
            self.internal_redirect = packet.payload
        elif packet.command == TRANSLATE_ENOTDIR:
            self.enotdir = packet.payload
        elif packet.command == TRANSLATE_CONTENT_TYPE_LOOKUP:
            self.content_type_lookup = packet.payload
        elif packet.command == TRANSLATE_SUFFIX:
            self.suffix = packet.payload.decode('ascii')
        elif packet.command == TRANSLATE_ERROR_DOCUMENT:
            self.error_document = True
            self.error_document_payload = packet.payload
        elif packet.command == TRANSLATE_PROBE_PATH_SUFFIXES:
            self.probe_path_suffixes = packet.payload
        elif packet.command == TRANSLATE_PROBE_SUFFIX:
            self.probe_suffix = packet.payload.decode('utf-8')
        elif packet.command == TRANSLATE_READ_FILE:
            self.read_file = packet.payload
        elif packet.command == TRANSLATE_POOL:
            self.pool = packet.payload.decode('ascii')
        elif packet.command == TRANSLATE_USER:
            self.user = packet.payload.decode('ascii')
        elif packet.command == TRANSLATE_LOGIN:
            self.login = True
        elif packet.command == TRANSLATE_PASSWORD:
            self.password = packet.payload.decode('ascii')
        elif packet.command == TRANSLATE_SERVICE:
            self.service = packet.payload.decode('ascii')
        elif packet.command == TRANSLATE_CRON:
            # CRON may carry an optional schedule string; a bare packet means
            # "yes, this is a cron request".
            if packet.payload:
                self.cron = packet.payload.decode('ascii')
            else:
                self.cron = True
        elif packet.command == TRANSLATE_BASE:
            self.base = packet.payload.decode('ascii')
        elif packet.command == TRANSLATE_REGEX:
            self.regex = packet.payload.decode('ascii')
        elif packet.command != TRANSLATE_LOCAL_ADDRESS:
            # TRANSLATE_LOCAL_ADDRESS is deliberately ignored (the string
            # variant above is used instead); anything else is unexpected.
            print("Invalid command:", packet.command)
        return False

    def absolute_uri(self, scheme=None, host=None, uri=None, query_string=None,
                     param=None):
        """Returns the absolute URI of this request. You may override
        some of the attributes."""
        return beng_proxy.translation.uri.absolute_uri(self, scheme=scheme,
                                                       host=host, uri=uri,
                                                       query_string=query_string,
                                                       param=param)
| CM4all/beng-proxy | python/beng_proxy/translation/request.py | Python | bsd-2-clause | 8,915 |
import sys
import os
import subprocess as sp
import numpy
from distutils.core import setup, Extension
# Refuse to build from a dirty working tree.
git_version = sp.check_output('git rev-parse HEAD', shell=True)
git_changed = sp.check_output('git diff | wc -l', shell=True)
# Fix: check_output returns bytes (on Python 3) with a trailing newline from
# wc, so the original comparison "git_changed != '0'" was always true and the
# build aborted unconditionally.  Normalize before comparing.
if git_changed.strip() != b'0':
    print("Git has changed. Please build first!")
    sys.exit(0)
# Locate numpy's C headers for compiling the extension.
numpy_include = os.path.join(os.path.abspath(os.path.dirname(numpy.__file__)), 'core', 'include')
# Libraries linked statically into the extension (OpenCV, image codecs, boost, glog).
libraries = ['opencv_imgcodecs', 'opencv_imgproc', 'opencv_core', 'libjasper', 'libjpeg', 'libpng', 'libtiff', 'libwebp', 'zlib', 'boost_filesystem', 'boost_system', 'boost_python35', 'glog']
# The picpac C++ extension module.
picpac = Extension('picpac',
        language = 'c++',
        extra_compile_args = ['-O3', '-std=c++1y', '-I/opt/cbox/include'],
        include_dirs = ['/usr/local/include', 'pyboostcvconverter/include', 'json11', numpy_include],
        libraries = libraries,
        library_dirs = ['/usr/local/lib', '/opt/cbox/lib'],
        sources = ['python-api.cpp', 'picpac.cpp', 'picpac-image.cpp', 'shapes.cpp', 'transforms.cpp', 'picpac-cv.cpp', 'json11/json11.cpp'],
        depends = ['json11/json11.hpp', 'picpac.h', 'picpac-image.h', 'bachelor/bachelor.h'])
setup (name = 'picpac',
       version = '0.2.2',
       url = 'https://github.com/aaalgo/picpac',
       author = 'Wei Dong',
       author_email = 'wdong@wdong.org',
       license = 'BSD',
       description = 'This is a demo package',
       ext_modules = [picpac],
       )
| aaalgo/picpac | setup-static.py | Python | bsd-2-clause | 1,454 |
import time

# NOTE(review): sendOut and __prog_name are not defined in this file; they are
# presumably injected into the globals by the plugin host before this script
# runs -- confirm against the RemoteServer plugin loader.
sendOut("i am here to slow things down")
sendOut("i am also", __prog_name)
# Deliberately stall to exercise slow plugin loading.
time.sleep(10)
sendOut("now I am done")
| AsherGlick/Olympus-Server-Management | RemoteServer/plugins/delayLoad.py | Python | bsd-2-clause | 127 |
"""
App configuration
=================
"""
from typing import Callable, List, NamedTuple, Optional
import json
from flask import Flask
from flask.sessions import SecureCookieSessionInterface
import itsdangerous
import toml
import yaml
from . import logger
from .auth import current_auth
from .views import current_view
__all__ = [
'KeyRotationWrapper',
'RotatingKeySecureCookieSessionInterface',
'Flask',
'init_app',
]
class ConfigLoader(NamedTuple):
    """Describes how to load one config file format."""

    extn: str  # file extension, including the leading dot
    loader: Optional[Callable]  # parser callable, or None for Python files
# Maps FLASK_ENV values (and their abbreviations) to the base name of the
# additional, environment-specific config file to load.
_additional_config = {
    'dev': 'development',
    'development': 'development',
    'test': 'testing',
    'testing': 'testing',
    'prod': 'production',
    'production': 'production',
}
# Supported config file formats; Python files use Flask's own loader (None).
_config_loaders = {
    'py': ConfigLoader(extn='.py', loader=None),
    'json': ConfigLoader(extn='.json', loader=json.load),
    'toml': ConfigLoader(extn='.toml', loader=toml.load),
    'yaml': ConfigLoader(extn='.yaml', loader=yaml.safe_load),
    'yml': ConfigLoader(extn='.yml', loader=yaml.safe_load),
}
class KeyRotationWrapper:
    """
    Wrapper to support multiple secret keys in itsdangerous.

    The first secret key is used for all operations, but if it causes a
    BadSignature exception, the other secret keys are tried in order.

    :param cls: Signing class from itsdangerous (eg: URLSafeTimedSerializer)
    :param secret_keys: List of secret keys
    :param kwargs: Arguments to pass to each signer/serializer
    """

    def __init__(self, cls, secret_keys, **kwargs):
        """Init key rotation wrapper."""
        if isinstance(secret_keys, str):
            raise ValueError("Secret keys must be a list")
        self._engines = [cls(key, **kwargs) for key in secret_keys]

    def __getattr__(self, attr):
        """Read a wrapped attribute."""
        item = getattr(self._engines[0], attr)
        return self._make_wrapper(attr) if callable(item) else item

    def _make_wrapper(self, attr):
        def wrapper(*args, **kwargs):
            # Swallow BadSignature for all but the last engine; the final
            # attempt runs unguarded so its failure propagates to the caller.
            for engine in self._engines[:-1]:
                try:
                    return getattr(engine, attr)(*args, **kwargs)
                except itsdangerous.exc.BadSignature:
                    continue
            return getattr(self._engines[-1], attr)(*args, **kwargs)

        return wrapper
class RotatingKeySecureCookieSessionInterface(SecureCookieSessionInterface):
    """Replaces the serializer with key rotation support."""

    def get_signing_serializer(self, app):
        """Return serializers wrapped for key rotation."""
        secret_keys = app.config.get('SECRET_KEYS')
        if not secret_keys:
            return None
        # One serializer per key, all sharing the same salt/signing options.
        return KeyRotationWrapper(
            itsdangerous.URLSafeTimedSerializer,
            secret_keys,
            salt=self.salt,
            serializer=self.serializer,
            signer_kwargs={
                'key_derivation': self.key_derivation,
                'digest_method': self.digest_method,
            },
        )
def init_app(
    # Fix: the default of None requires Optional[...] (PEP 484 forbids the
    # implicit-Optional shorthand); behavior is unchanged.
    app: Flask, config: Optional[List[str]] = None, init_logging: bool = True
) -> None:
    """
    Configure an app depending on the environment.

    Loads settings from a file named ``settings.py`` in the instance folder, followed
    by additional settings from one of ``development.py``, ``production.py`` or
    ``testing.py``. Can also load from JSON, TOML or YAML files if requested. Typical
    usage::

        from flask import Flask
        import coaster.app

        app = Flask(__name__, instance_relative_config=True)
        # Any one of the following lines. Runtime environment will be as per FLASK_ENV
        coaster.app.init_app(app)  # Load config from Python files
        coaster.app.init_app(app, config=['json'])  # Load config from JSON files
        coaster.app.init_app(app, config=['toml'])  # Load config from TOML files
        coaster.app.init_app(app, config=['yaml'])  # Load config from YAML files
        coaster.app.init_app(app, config=['py', 'toml'])  # Both Python & TOML config

    :func:`init_app` also configures logging by calling
    :func:`coaster.logger.init_app`.

    :param app: App to be configured
    :param config: Types of config files, one or more of of ``py`` (default), ``json``,
        ``toml`` and ``yaml``
    :param bool init_logging: Call `coaster.logger.init_app` (default `True`)
    """
    if not config:
        config = ['py']
    # Make current_auth available to app templates
    app.jinja_env.globals['current_auth'] = current_auth
    # Make the current view available to app templates
    app.jinja_env.globals['current_view'] = current_view
    # Disable Flask-SQLAlchemy events.
    # Apps that want it can turn it back on in their config
    app.config.setdefault('SQLALCHEMY_TRACK_MODIFICATIONS', False)
    # Load config from the app's settings[.py]
    for config_option in config:
        if config_option not in _config_loaders:
            raise ValueError(f"{config_option} is not a recognized type of config")
        load_config_from_file(
            app,
            'settings' + _config_loaders[config_option].extn,
            load=_config_loaders[config_option].loader,
        )
    # Load additional settings from the app's environment-specific config file(s):
    # Flask sets ``ENV`` configuration variable based on ``FLASK_ENV`` environment
    # variable. So we can directly get it from ``app.config['ENV']``.
    # Lowercase because that's how flask defines it.
    # ref: https://flask.palletsprojects.com/en/1.1.x/config/#environment-and-debug-features
    additional = _additional_config.get(app.config['ENV'].lower())
    if additional:
        for config_option in config:
            load_config_from_file(
                app,
                additional + _config_loaders[config_option].extn,
                load=_config_loaders[config_option].loader,
            )

    if init_logging:
        logger.init_app(app)
def load_config_from_file(
    app: Flask, filepath: str, load: Optional[Callable] = None
) -> bool:
    """Load config from a specified file with a specified loader (default Python)."""
    try:
        # Python settings files use Flask's native loader; other formats are
        # parsed by the supplied callable.
        if load is None:
            app.config.from_pyfile(filepath)
        else:
            app.config.from_file(filepath, load=load)
    except OSError:
        app.logger.warning(
            "Did not find settings file %s for additional settings, skipping it",
            filepath,
        )
        return False
    return True
| hasgeek/coaster | coaster/app.py | Python | bsd-2-clause | 6,555 |
# LARC code from NVIDIA apex project, which was released under BSD 3-Clause "New" or "Revised" License
# Retrieved on 26 February 2019
# https://github.com/NVIDIA/apex/blob/d74fda260c403f775817470d87f810f816f3d615/apex/parallel/LARC.py
import torch
from torch.optim import Optimizer
class LARC(Optimizer):
    """
    :class:`LARC` is a pytorch implementation of both the scaling and clipping variants of LARC,
    in which the ratio between gradient and parameter magnitudes is used to calculate an adaptive
    local learning rate for each individual parameter. The algorithm is designed to improve
    convergence of large batch training.

    See https://arxiv.org/abs/1708.03888 for calculation of the local learning rate.

    In practice it modifies the gradients of parameters as a proxy for modifying the learning rate
    of the parameters. This design allows it to be used as a wrapper around any torch.optim Optimizer.

    ```
    model = ...
    optim = torch.optim.Adam(model.parameters(), lr=...)
    optim = LARC(optim)
    ```

    It can even be used in conjunction with apex.fp16_utils.FP16_optimizer.

    ```
    model = ...
    optim = torch.optim.Adam(model.parameters(), lr=...)
    optim = LARC(optim)
    optim = apex.fp16_utils.FP16_Optimizer(optim)
    ```

    Args:
        optimizer: Pytorch optimizer to wrap and modify learning rate for.
        trust_coefficient: Trust coefficient for calculating the lr. See https://arxiv.org/abs/1708.03888
        clip: Decides between clipping or scaling mode of LARC. If `clip=True` the learning rate is set to `min(optimizer_lr, local_lr)` for each parameter. If `clip=False` the learning rate is set to `local_lr*optimizer_lr`.
        eps: epsilon kludge to help with numerical stability while calculating adaptive_lr
        epsilon: fallback local lr used when either the parameter or gradient norm is zero
    """

    def __init__(self, optimizer, trust_coefficient=0.002, clip=True, eps=1e-8, epsilon=1.0/16000.0):
        self.param_groups = optimizer.param_groups
        self.optim = optimizer
        self.trust_coefficient = trust_coefficient
        self.eps = eps
        self.clip = clip
        self.epsilon = epsilon

    def __getstate__(self):
        return self.optim.__getstate__()

    def __setstate__(self, state):
        # Fix: removed a stray debug print(self) that ran on every unpickle.
        self.optim.__setstate__(state)

    def __repr__(self):
        return self.optim.__repr__()

    def state_dict(self):
        return self.optim.state_dict()

    def load_state_dict(self, state_dict):
        self.optim.load_state_dict(state_dict)

    def zero_grad(self):
        self.optim.zero_grad()

    def add_param_group(self, param_group):
        self.optim.add_param_group(param_group)

    def step(self):
        """Scale each parameter's gradient by its LARC-adaptive lr, then step."""
        with torch.no_grad():
            weight_decays = []
            for group in self.optim.param_groups:
                # absorb weight decay control from optimizer
                weight_decay = group['weight_decay'] if 'weight_decay' in group else 0
                weight_decays.append(weight_decay)
                group['weight_decay'] = 0
                for p in group['params']:
                    if p.grad is None:
                        continue
                    param_norm = torch.norm(p.data)
                    grad_norm = torch.norm(p.grad.data)

                    if param_norm != 0 and grad_norm != 0:
                        # calculate adaptive lr + weight decay
                        larc_local_lr = self.trust_coefficient * (param_norm) / (grad_norm + param_norm * weight_decay + self.eps)
                    else:
                        larc_local_lr = self.epsilon

                    # clip learning rate for LARC
                    if self.clip:
                        # calculation of adaptive_lr so that when multiplied by lr it equals `min(adaptive_lr, lr)`
                        adaptive_lr = min(larc_local_lr / group['lr'], 1)
                    else:  # scale mode
                        adaptive_lr = larc_local_lr

                    p.grad.data += weight_decay * p.data
                    p.grad.data *= adaptive_lr

        self.optim.step()
        # return weight decay control to optimizer
        for i, group in enumerate(self.optim.param_groups):
            group['weight_decay'] = weight_decays[i]
| probprog/pyprob | pyprob/nn/optimizer_larc.py | Python | bsd-2-clause | 4,264 |
from cno.admin import install
import nose
import logging
import sys
from nose.plugins.attrib import attr
@attr('skip_travis')
def test_install():
    # Exercises the full cellnopt dependency installation; this is slow and
    # network-bound, hence skipped on Travis CI via the nose attribute.
    install.install_all_cellnopt_dependencies(verbose=True)
| cellnopt/cellnopt | test/admin/test_install.py | Python | bsd-2-clause | 212 |
#!/usr/bin/python3
# coding: utf-8
import os
import string
import sys
import time
try:
    import serial
except ImportError:
    # pyserial is a hard requirement; bail out with an actionable message.
    print("Error importing pyserial. Please check if it is installed.")
    sys.exit(1)
class Uploader(object):
"""Uploads a XSVF file to the arduino board.
"""
# Create a translation array of printable characters
_printable_chars = string.printable
_translate_str = ''.join(
[(chr(x) in _printable_chars) and chr(x) or '.' for x in range(256)])
@staticmethod
def add_arguments(p):
"""Adds the necessary arguments to the parser."""
p.add_argument(
'-p', '--port',
default='/dev/ttyACM0',
help='Serial port device name'
' (default=%(default)s)')
p.add_argument(
'-b', '--baud',
default=115200,
type=int,
help='BAUD rate'
' (type %(type)s, default=%(default)s)')
def __init__(self, args):
self._args = args
self._serial = serial.Serial(port=args.port, baudrate=args.baud)
# Help printing new lines
self._need_lf = False
#
self._file_size = 0
# Hashes
self._sum = 0
# To compute the elapsed time
self._start_time = 0
# Error code
self._error_code = 0
@property
def error_code(self):
return self._error_code
@error_code.setter
def error_code(self, value):
self._error_code = value
def reset_arduino(self):
"""Resets the arduino and clear any garbage on the serial port."""
self._serial.setDTR(False)
time.sleep(1)
self._serial.flushInput()
self._serial.flushOutput()
self._serial.setDTR(True)
self._start_time = 0
def print_lf(self):
if self._need_lf:
self._need_lf = False
print
def initialize_hashes(self):
self._sum = 0
def update_hashes(self, s):
for c in s:
self._sum += ord(c)
def print_hashes(self):
cksum = (-self._sum) & 0xFF
if self._args.debug > 1:
print(' Expected checksum: 0x%02X/%lu.' %
(cksum, self._file_size))
print(' Expected sum: 0x%08lX/%lu.' %
(self._sum, self._file_size))
if self._start_time > 0:
print('Elapsed time: %.02f seconds.' %
(time.time() - self._start_time))
def upload_one_file(self, fd):
self.reset_arduino()
self._file_size = os.fstat(fd.fileno()).st_size
bytes_written = 0
while True:
line = self._serial.readline().strip()
if not line:
continue
command = line[0]
argument = line[1:]
if command == 'S':
num_bytes = int(argument)
xsvf_data = fd.read(num_bytes)
bytes_written += len(xsvf_data)
self.update_hashes(xsvf_data)
xsvf_data += chr(0xff) * (num_bytes - len(xsvf_data))
self._serial.write(xsvf_data)
if self._args.debug > 1:
print('\rSent: %8d bytes, %8d remaining' %
(bytes_written, self._file_size - bytes_written), end='')
sys.stdout.flush()
self._need_lf = True
elif command == 'R':
self.initialize_hashes()
if self._args.debug > 1:
print('File: %s' % os.path.realpath(fd.name))
print('Ready to send %d bytes.' % self._file_size)
self._start_time = time.time()
elif command == 'Q':
self.print_lf()
# Split the argument. The first field is the error code,
# the next field is the error message.
args = argument.split(',')
self.error_code = int(args[0])
if self._args.debug > 1:
print('Quit: {1:s} ({0:d}).'.format(
self.error_code, args[1]))
self.print_hashes()
return self.error_code == 0
elif command == 'D':
if self._args.debug > 0:
self.print_lf()
print('Device:', argument)
elif command == '!':
if self._args.debug > 0:
self.print_lf()
print('IMPORTANT:', argument)
else:
self.print_lf()
print('Unrecognized line:',
line.translate(Uploader._translate_str))
def upload_all_files(self, fd_list):
ok = True
for fd in fd_list:
with fd:
ok = self.upload_one_file(fd)
if not ok:
break
return ok
| mrjimenez/JTAG | extras/python/Uploader.py | Python | bsd-2-clause | 4,930 |
# -*- coding: UTF-8 -*-
# Copyright 2013-2018 by Luc Saffre.
# License: BSD, see file LICENSE for more details.
"""
This defines the :manage:`dump2py` management command.
.. management_command:: dump2py
Write a dump of your database to a set of Python modules. This dump
is useful for creating a daily backup or before an upgrade with data
migration.
Usage: cd to your project directory and say::
$ python manage.py dump2py TARGET
This will create a python dump of your database to the directory
`TARGET`.
The directory will contain a file :xfile:`restore.py` and a series of
`.py` files (one for every model) which are being :func:`execfile`\ d
from that :xfile:`restore.py`.
Options:
.. option:: --noinput
Do not prompt for user input of any kind.
.. option:: --tolerate
Tolerate database errors. This can help making a partial snapshot
of a database which is not (fully) synced with the application
code.
.. option:: --overwrite
Don't complain if the TARGET directory already exists. This will
potentially overwrite existing files.
.. option:: --max-row-count <NUM>
Change the maximum number of rows per source file from its default
value (50000) to NUM.
When a table contains many rows, the resulting :file:`.py` file
can become so large that it doesn't fit into memory, causing the
Python process to get killed when it tries to execute it. To
avoid this limitation, :xfile:`dump2py` distributes the content
over several files if a table contains more than NUM rows.
.. You might theoretically use Django's :manage:`dumpdata` command for
writing a Python fixture, but this possibility is currently
deactivated because a huge database would create a huge Python module
which might not fit into memory.
FILES
=====
.. xfile:: restore.py
The main script of a Python dump generated by the :manage:`dump2py`
command.
To restore a dump created using :manage:`dump2py` to your database,
simply run the `restore.py` script using the :manage:`run` management
command::
$ python manage.py run mydump/restore.py
SEE ALSO
========
- :doc:`/specs/dumps`
- :doc:`/dev/datamig`
- :doc:`/dev/dump2py`
"""
from __future__ import unicode_literals
from builtins import str
from io import open
import logging
logger = logging.getLogger(__name__)
import os
from decimal import Decimal
import argparse
from clint.textui import progress
from django.db import models
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.db.utils import DatabaseError
from django.utils.timezone import make_naive, is_aware, utc
from lino.utils import puts
from lino.core.utils import sorted_models_list, full_model_name
from lino.core.choicelists import ChoiceListField
from lino.utils.mldbc.fields import BabelCharField, BabelTextField
def is_pointer_to_contenttype(f):
    """Whether *f* is a ForeignKey pointing at Django's ContentType model."""
    if not settings.SITE.is_installed('contenttypes'):
        return False
    return (isinstance(f, models.ForeignKey)
            and f.remote_field.model
            is settings.SITE.models.contenttypes.ContentType)
def write_create_function(model, stream):
    """Write to *stream* the source of a ``create_<table>()`` function for
    *model*.

    The generated function takes one positional argument per concrete local
    field (babel sub-fields excluded), converts special values (Decimal,
    ContentType pointers) and returns an unsaved model instance (or, for MTI
    children, the result of ``create_mti_child()``).
    """
    # Only concrete fields declared on this model itself (not inherited).
    fields = [f for f in model._meta.get_fields()
              if f.concrete and f.model is model]
    for f in fields:
        if getattr(f, 'auto_now_add', False):
            # auto_now_add fields get overwritten on save, so the dumped
            # values cannot be restored faithfully.  Warn instead of failing.
            # raise Exception("%s.%s.auto_now_add is True : values will be lost!" % (
            # full_model_name(model), f.name))
            logger.warning(
                "%s.%s.auto_now_add is True : values will be lost!",
                full_model_name(model), f.name)
            # f.auto_now_add = False
    stream.write('def create_%s(%s):\n' % (
        model._meta.db_table, ', '.join([
            f.attname for f in fields
            if not getattr(f, '_lino_babel_field', False)])))
    # First pass: emit value-conversion lines.  Lines for fields that are
    # not local to this model are emitted as comments ('# ' prefix).
    for f in fields:
        if f.model is model:
            pre = ' '
        else:
            pre = '# '
        if isinstance(f, models.DecimalField):
            stream.write(
                pre+'if %s is not None: %s = Decimal(%s)\n' % (
                    f.attname, f.attname, f.attname))
        elif isinstance(f, ChoiceListField):
            lstname = 'settings.SITE.models.{0}.{1}'.format(
                f.choicelist.app_label, f.choicelist.__name__)
            ln = pre+'if {0}: {0} = {1}.get_by_value({0})\n'
            ln = '#' + ln  # no longer needed but maybe useful as a comment
            stream.write(ln.format(f.attname, lstname))
        elif is_pointer_to_contenttype(f):
            # ContentType ids differ between databases; the generated code
            # maps the model reference back to the local id at load time.
            stream.write(
                pre+'%s = new_content_type_id(%s)\n' % (
                    f.attname, f.attname))
    if model._meta.parents:
        # Multi-table inheritance: exactly one parent link is supported.
        if len(model._meta.parents) != 1:
            msg = "%s : model._meta.parents is %r" % (
                model, model._meta.parents)
            raise Exception(msg)
        pm, pf = list(model._meta.parents.items())[0]
        # The parent link field is handled by create_mti_child(), not kw.
        fields = [f for f in fields if f != pf]
    stream.write(" kw = dict()\n")
    # Second pass: collect keyword arguments for the constructor.
    for f in fields:
        if f.model is model:
            pre = ' '
        else:
            pre = '# '
        if getattr(f, '_lino_babel_field', False):
            continue
        elif isinstance(f, (BabelCharField, BabelTextField)):
            # Babel fields are dumped as a tuple of per-language values;
            # bv2kw() expands them for the languages of the target site.
            stream.write(
                pre + 'if %s is not None: kw.update(bv2kw(%r,%s))\n' % (
                    f.attname, f.attname, f.attname))
        else:
            stream.write(
                pre + 'kw.update(%s=%s)\n' % (f.attname, f.attname))
    if model._meta.parents:
        stream.write(
            ' return create_mti_child(%s, %s, %s, **kw)\n\n' % (
                full_model_name(pm, '_'), pf.attname,
                full_model_name(model, '_')))
    else:
        stream.write(' return %s(**kw)\n\n' %
                     full_model_name(model, '_'))
class Command(BaseCommand):
    """Django management command: write a Python dump of the database.

    Creates ``restore.py`` plus one data file per model (several files per
    model when a table has more rows than ``--max-row-count``).  The dump is
    restored by running ``restore.py`` with the ``run`` management command.
    """

    # tmpl_dir = ''
    # args = "output_dir"

    def add_arguments(self, parser):
        """Declare the command-line arguments of this command."""
        parser.add_argument(
            'output_dir',
            help='The directory where to write output files.')
        parser.add_argument('--noinput', action='store_false',
                            dest='interactive', default=True,
                            help='Do not prompt for input of any kind.')
        parser.add_argument('--tolerate', action='store_true',
                            dest='tolerate', default=False,
                            help='Tolerate database errors.')
        parser.add_argument('-o', '--overwrite', action='store_true',
                            dest='overwrite', default=False,
                            help='Overwrite existing files.')
        parser.add_argument('-m', '--max-row-count', type=int,
                            dest='max_row_count', default=50000,
                            help='Maximum number of rows per file.')
        # ~ make_option('--quick', action='store_true',
        # ~ dest='quick', default=False,
        # ~ help='Do not call full_clean() method on restored instances.'),

    def write_files(self):
        """Write ``restore.py`` and one data file per model (or chunk).

        Side effects: sets ``self.stream`` and ``self.models``, increments
        ``self.count_objects`` and ``self.database_errors``.
        """
        puts("Writing {0}...".format(self.main_file))
        self.stream = open(self.main_file, 'wt')
        current_version = settings.SITE.version
        # Header of the generated restore.py :
        self.stream.write('''\
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# This is a Python dump created using dump2py.
# DJANGO_SETTINGS_MODULE was %r, TIME_ZONE was %r.
''' % (settings.SETTINGS_MODULE, settings.TIME_ZONE))
        self.stream.write('''
from __future__ import unicode_literals
import logging
logger = logging.getLogger('%s')
''' % __name__)
        self.stream.write('SOURCE_VERSION = %r\n' % str(current_version))
        # Imports and helpers needed by the generated code :
        self.stream.write('''
import os
import six
from decimal import Decimal
from datetime import datetime
from datetime import time, date
from django.conf import settings
from django.utils.timezone import make_aware, utc
from django.core.management import call_command
# from django.contrib.contenttypes.models import ContentType
from lino.utils.dpy import create_mti_child
from lino.utils.dpy import DpyLoader
from lino.core.utils import resolve_model

if settings.USE_TZ:
    def dt(*args):
        return make_aware(datetime(*args), timezone=utc)
else:
    def dt(*args):
        return datetime(*args)

def new_content_type_id(m):
    if m is None: return m
    ct = settings.SITE.models.contenttypes.ContentType.objects.get_for_model(m)
    if ct is None: return None
    return ct.pk

def pmem():
    # Thanks to https://stackoverflow.com/questions/938733/total-memory-used-by-python-process
    process = psutil.Process(os.getpid())
    print(process.memory_info().rss)

def execfile(fn, *args):
    logger.info("Execute file %s ...", fn)
    six.exec_(compile(open(fn, "rb").read(), fn, 'exec'), *args)
    # pmem()  # requires pip install psutil
''')
        # bv2kw() maps the dumped per-language value tuples to the
        # languages configured on the *target* site.
        s = ','.join([
            '%s=values[%d]' % (lng.name, lng.index)
            for lng in settings.SITE.languages])
        self.stream.write('''
def bv2kw(fieldname, values):
    """
    Needed if `Site.languages` changed between dumpdata and loaddata
    """
    return settings.SITE.babelkw(fieldname, %s)
''' % s)
        self.models = sorted_models_list()

        # ContentType and Session rows are not dumped: the former are
        # re-created by Django, the latter are transient.
        if settings.SITE.is_installed('contenttypes'):
            from django.contrib.contenttypes.models import ContentType
            self.models = [m for m in self.models
                           if not issubclass(m, ContentType)]
        if settings.SITE.is_installed('sessions'):
            from django.contrib.sessions.models import Session
            self.models = [m for m in self.models
                           if not issubclass(m, Session)]

        for model in self.models:
            self.stream.write('%s = resolve_model("%s")\n' % (
                full_model_name(model, '_'), full_model_name(model)))
        self.stream.write('\n')
        self.models = self.sort_models(self.models)
        self.stream.write('\n')
        for model in self.models:
            write_create_function(model, self.stream)
        self.stream.write('\n')
        self.stream.write("""
def main(args):
    loader = DpyLoader(globals())
    from django.core.management import call_command
    call_command('initdb', interactive=args.interactive)
    os.chdir(os.path.dirname(__file__))
    loader.initialize()
    args = (globals(), locals())
""")
        max_row_count = self.options['max_row_count']
        for model in progress.bar(self.models):
            try:
                qs = model.objects.all()
                total_count = qs.count()
            except DatabaseError as e:
                self.database_errors += 1
                if not self.options['tolerate']:
                    raise
                self.stream.write('\n')
                logger.warning("Tolerating database error %s in %s",
                               e, model._meta.db_table)
                msg = ("The data of table {0} has not been dumped "
                       "because an error {1} occurred.").format(
                           model._meta.db_table, e)
                # The generated restore.py raises so the user cannot
                # silently restore an incomplete dump.
                self.stream.write('raise Exception("{0}")\n'.format(msg))
                continue
            fields = [f for f in model._meta.get_fields()
                      if f.concrete and f.model is model]
            fields = [
                f for f in fields
                if not getattr(f, '_lino_babel_field', False)]
            chunks = []  # list of tuples (i, filename, queryset)
            if total_count > max_row_count:
                # Distribute big tables over several files so that each
                # generated .py file fits into memory when executed.
                num_files = (total_count // max_row_count) + 1
                for i in range(num_files):
                    o1 = max_row_count * i
                    o2 = max_row_count * (i + 1)
                    t = (i + 1,
                         '%s_%d.py' % (model._meta.db_table, i + 1),
                         qs[o1:o2])
                    chunks.append(t)
            else:
                chunks.append((1, '%s.py' % model._meta.db_table, qs))
            for i, filename, qs in chunks:
                self.stream.write('    execfile("%s", *args)\n' % filename)
                filename = os.path.join(self.output_dir, filename)
                stream = open(filename, 'wt')
                stream.write('# -*- coding: UTF-8 -*-\n')
                txt = "%d objects" % total_count
                if len(chunks) > 1:
                    txt += " (part %d of %d)" % (i, len(chunks))
                stream.write(
                    'logger.info("Loading %s to table %s...")\n' % (
                        txt, model._meta.db_table))
                stream.write(
                    "# fields: %s\n" % ', '.join(
                        [f.name for f in fields]))
                for obj in qs:
                    self.count_objects += 1
                    stream.write('loader.save(create_%s(%s))\n' % (
                        obj._meta.db_table,
                        ','.join([self.value2string(obj, f) for f in fields])))
                stream.write('\n')
                if i == len(chunks):
                    # After the last chunk of this model, objects whose
                    # dependencies were deferred can now be saved.
                    stream.write('loader.flush_deferred_objects()\n')
                stream.close()
        self.stream.write(
            '    loader.finalize()\n')
        # Log a summary so that there is a log entry when the restore
        # process has finished.
        self.stream.write(
            '    logger.info("Loaded %d objects", loader.count_objects)\n')
        self.stream.write(
            "    call_command('resetsequences')\n")
        self.stream.write("""
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Restore the data.')
    parser.add_argument('--noinput', dest='interactive',
                        action='store_false', default=True,
                        help="Don't ask for confirmation before flushing the database.")
    args = parser.parse_args()
    main(args)
""")
        self.stream.close()

    def sort_models(self, unsorted):
        """Return the given list of models ordered so that every model comes
        after the models it depends upon (via FK).

        Mutates *unsorted* in place.  Models with circular dependencies are
        appended at the end (a message about them is logged).
        """
        ordered = []  # renamed from `sorted` which shadowed the builtin
        hope = True
        guilty = dict()
        # 20121120 : converting the list to a set would speed up the ``in``
        # tests, but would give a random order for independent models,
        # making the double-dump test less evident.
        while len(unsorted) and hope:
            hope = False
            guilty = dict()
            for model in unsorted:
                deps = set([f.remote_field.model
                            for f in model._meta.fields
                            if f.remote_field is not None
                            and f.remote_field.model is not model
                            and f.remote_field.model in unsorted])
                for m in ordered:
                    if m in deps:
                        deps.remove(m)
                if len(deps):
                    guilty[model] = deps
                else:
                    ordered.append(model)
                    unsorted.remove(model)
                    hope = True
                    break
        if unsorted:
            assert len(unsorted) == len(guilty)
            msg = "There are %d models with circular dependencies :\n" % len(
                unsorted)
            msg += "- " + '\n- '.join([
                full_model_name(m) + ' (depends on %s)' % ", ".join([
                    full_model_name(d) for d in deps])
                for m, deps in guilty.items()])
            # Not written to the .py file because the random order would
            # cause false double-dump-test failures.
            logger.info(msg)
            ordered.extend(unsorted)
        return ordered

    def value2string(self, obj, field):
        """Return a Python source representation of the value of *field* on
        *obj*, usable as an argument in a generated ``create_...()`` call."""
        if isinstance(field, (BabelCharField, BabelTextField)):
            return repr(settings.SITE.field2args(obj, field.name))
        value = field.value_from_object(obj)
        # Protected types (i.e., primitives like None, numbers, dates,
        # and Decimals) are passed through as is. All other values are
        # converted to string first.
        if value is None:
            return 'None'
        # NB: DateTimeField must be tested before DateField because it is a
        # subclass of it.
        if isinstance(field, models.DateTimeField):
            if is_aware(value):
                # Dump as naive UTC; the generated dt() re-attaches UTC
                # when settings.USE_TZ is set on the target site.
                d = make_naive(value, timezone=utc)
            else:
                d = value
            return 'dt(%d,%d,%d,%d,%d,%d)' % (
                d.year, d.month, d.day, d.hour, d.minute, d.second)
        if isinstance(field, models.TimeField):
            d = value
            return 'time(%d,%d,%d)' % (d.hour, d.minute, d.second)
        if is_pointer_to_contenttype(field):
            # ContentType ids are database-specific: dump a reference to the
            # model; new_content_type_id() maps it back at load time.
            ContentType = settings.SITE.models.contenttypes.ContentType
            ct = ContentType.objects.get(pk=value)
            return full_model_name(ct.model_class(), '_')
        if isinstance(field, models.DateField):
            d = value
            return 'date(%d,%d,%d)' % (d.year, d.month, d.day)
        if isinstance(value, (float, Decimal)):
            # Dump via str() to avoid binary-float repr noise; DecimalField
            # values are converted back by the generated create function.
            return repr(str(value))
        if isinstance(value, int):
            return str(value)
        return repr(field.value_to_string(obj))

    def handle(self, *args, **options):
        """Entry point of the management command."""
        output_dir = options['output_dir']
        self.output_dir = os.path.abspath(output_dir)
        self.main_file = os.path.join(self.output_dir, 'restore.py')
        self.count_objects = 0
        self.database_errors = 0
        if os.path.exists(self.output_dir):
            if options['overwrite']:
                pass
                # TODO: remove all files?
            else:
                raise CommandError(
                    "Specified output_dir %s already exists. "
                    "Delete it yourself if you dare!" % self.output_dir)
        else:
            os.makedirs(self.output_dir)
        self.options = options
        self.write_files()
        logger.info("Wrote %s objects to %s and siblings.",
                    self.count_objects, self.main_file)
        if self.database_errors:
            # Format the message eagerly: CommandError does not interpolate
            # extra positional arguments.
            raise CommandError(
                "There were %d database errors. "
                "The dump in %s is not complete." % (
                    self.database_errors, self.output_dir))
| khchine5/lino | lino/management/commands/dump2py.py | Python | bsd-2-clause | 19,997 |
'''
Created by auto_sdk on 2015.04.21
'''
from aliyun.api.base import RestApi
class Mts20140618QueryMediaAnalysisRequest(RestApi):
    """Aliyun MTS ``QueryMediaAnalysis`` request (API version 2014-06-18)."""

    # Full API name sent to the gateway.
    API_NAME = 'mts.aliyuncs.com.QueryMediaAnalysis.2014-06-18'

    def __init__(self, domain='mts.aliyuncs.com', port=80):
        RestApi.__init__(self, domain, port)
        # Identifier of the media item whose analysis result is queried.
        self.MediaId = None

    def getapiname(self):
        return self.API_NAME
| wanghe4096/website | aliyun/api/rest/Mts20140618QueryMediaAnalysisRequest.py | Python | bsd-2-clause | 339 |
#!/usr/bin/env python
"""Run a query on youtube and save the top x video search results with meta variables"""
__author__ = "Peter J Usherwood"
__python_version__ = "3.5"
import os
import time
from usherwood_ds.data_imports.youtube_api.api_class import YoutubeAPI
from usherwood_ds.data_imports.youtube_import import create_youtube_video_df
def video_query_to_excel(query='dogs',
                         max_videos=50,
                         root_dir='D:/documents/work/projects/api_data/'):
    """Run *query* on YouTube and save the top results to an Excel file.

    :param query: search term to run on YouTube
    :param max_videos: maximum number of videos to fetch
    :param root_dir: directory under which ``downloads/...`` is written
    :return: the filename (relative to *root_dir*) that was written
    """
    api = YoutubeAPI()
    videos = api.get_videos_by_search_term(query, max_videos=max_videos, location=None)
    print(str(len(videos)), 'videos found')

    # Convert each raw API record into the parsed video representation.
    parsed_videos = [api.parse_video_to_youtube_video(video) for video in videos]

    df_video = create_youtube_video_df(parsed_videos)
    # Timestamp in the name keeps successive runs from overwriting each other.
    filename = 'downloads/youtube_video_query_' + str(query) + '_' + str(time.time()) + '.xlsx'
    df_video.to_excel(os.path.join(root_dir, filename))
    return filename
if __name__ == "__main__":
    # Ad-hoc manual run: dump the default query under this local directory.
    video_query_to_excel(root_dir='C:/Users/UsherwoodP/Documents/youtube_app_fix/')
# Django settings for cigar_example project.
from os.path import dirname, abspath, join

DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
)

MANAGERS = ADMINS

# Project root = two levels above this settings file.
DJANGO_ROOT = dirname(dirname(abspath(__file__)))
# Build an absolute path below the project root.
root = lambda *x: abspath(join(abspath(DJANGO_ROOT), *x))

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',  # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': root('db.sql'),  # Or path to database file if using sqlite3.
        'USER': '',      # Not used with sqlite3.
        'PASSWORD': '',  # Not used with sqlite3.
        'HOST': '',      # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '',      # Set to empty string for default. Not used with sqlite3.
    }
}

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Montreal'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True

# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = join(dirname(__file__), "..", "static_root")

# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'

# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# Make this unique, and don't share it with anybody.
SECRET_KEY = ')(gpv*_6l+$vcscsox7=xwyfexxw^do!n8998a054wa450-tnl'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'cigar_example.urls'

# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'cigar_example.wsgi.application'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

# Run the test suite with django-nose; coverage targets the swagger app.
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
SOUTH_TESTS_MIGRATE = False
NOSE_ARGS = [
    '--nocapture',
    '--nologcapture',
    '--with-coverage',
    '--cover-package=rest_framework_swagger',
    'cigar_example',
]

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django_nose',
    'cigar_example.app',
    'cigar_example.restapi',
    'rest_framework',
    'rest_framework_swagger',
    'django.contrib.admin',
)

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}

# Configuration for django-rest-swagger's generated API documentation UI.
SWAGGER_SETTINGS = {
    "exclude_namespaces": [],  # List URL namespaces to ignore
    "api_version": '0.1.10',  # Specify your API's version (optional)
    "token_type": 'Bearer',
    "enabled_methods": [  # Methods to enable in UI
        'get',
        'post',
        'put',
        'patch',
        'delete'
    ],
    "is_authenticated": False
}

APPEND_SLASH = False
| 18F/django-rest-swagger | tests/cigar_example/cigar_example/settings.py | Python | bsd-2-clause | 6,008 |
"""
Set up the plot figures, axes, and items to be done for each frame.
This module is imported by the plotting routines and then the
function setplot is called to set the plot parameters.
This plotting function is for plotting when the adjoint-flagging method
is used.
The main difference is that the inner product plot is produced.
"""
from clawpack.geoclaw import topotools
import pylab
import glob
from numpy import loadtxt
# --------------------------
def setplot(plotdata):
# --------------------------
    """
    Specify what is to be plotted at each frame.
    Input:  plotdata, an instance of clawpack.visclaw.data.ClawPlotData.
    Output: a modified version of plotdata.
    """

    from clawpack.visclaw import colormaps, geoplot

    plotdata.clearfigures()  # clear any old figures,axes,items dat
    plotdata.format = 'binary'  # 'ascii', 'binary', 'netcdf'

    # Read the sea level from geoclaw.data so the color limits below can be
    # centered on it.
    # NOTE(review): the bare except silently maps *any* failure (missing
    # file, parse error) to sea_level = 0.
    try:
        tsudata = open(plotdata.outdir+'/geoclaw.data').readlines()
        for line in tsudata:
            if 'sea_level' in line:
                sea_level = float(line.split()[0])
                print "sea_level = ",sea_level
    except:
        print "Could not read sea_level, setting to 0."
        sea_level = 0.

    # Color ranges for the ocean-wide and Crescent City plots.
    clim_ocean = 0.3
    clim_CC = 0.5

    cmax_ocean = clim_ocean + sea_level
    cmin_ocean = -clim_ocean + sea_level
    cmax_CC = clim_CC + sea_level
    cmin_CC = -clim_CC + sea_level

    # To plot gauge locations on pcolor or contour plot, use this as
    # an afteraxis function:

    def addgauges(current_data):
        from clawpack.visclaw import gaugetools
        gaugetools.plot_gauge_locations(current_data.plotdata, \
            gaugenos='all', format_string='ko', add_labels=True)

    def timeformat(t):
        # Format a time in seconds as h:mm:ss.
        from numpy import mod
        hours = int(t/3600.)
        tmin = mod(t,3600.)
        min = int(tmin/60.)
        sec = int(mod(tmin,60.))
        timestr = '%s:%s:%s' % (hours,str(min).zfill(2),str(sec).zfill(2))
        return timestr

    def title(current_data):
        from pylab import title
        title(' ')

    def plotcc(current_data):
        # Mark Crescent City on the map.
        from pylab import plot,text
        plot([235.8162], [41.745616],'wo')
        text(235.8,41.9,'Cr.City',color='w',fontsize=10)

    #-----------------------------------------
    # Figure for big area
    #-----------------------------------------
    plotfigure = plotdata.new_plotfigure(name='Pacific', figno=0)
    plotfigure.kwargs = {'figsize': (8,7)}

    # Set up for axes in this figure:
    plotaxes = plotfigure.new_plotaxes()
    plotaxes.title = 'Pacific'
    plotaxes.scaled = True

    def aa(current_data):
        from pylab import ticklabel_format, xticks, gca, cos, pi, yticks
        plotcc(current_data)
        title(current_data)
        ticklabel_format(format='plain',useOffset=False)
        xticks([180, 200, 220, 240], rotation=20, fontsize = 28)
        yticks(fontsize = 28)
        a = gca()
        # Aspect ratio correct for this latitude.
        a.set_aspect(1./cos(41.75*pi/180.))

    plotaxes.afteraxes = aa

    # Water
    plotitem = plotaxes.new_plotitem(plot_type='2d_imshow')
    plotitem.plot_var = geoplot.surface_or_depth
    my_cmap = colormaps.make_colormap({-1.0: [0.0,0.0,1.0], \
                                       -0.5: [0.5,0.5,1.0], \
                                        0.0: [1.0,1.0,1.0], \
                                        0.5: [1.0,0.5,0.5], \
                                        1.0: [1.0,0.0,0.0]})
    plotitem.imshow_cmap = my_cmap
    plotitem.imshow_cmin = cmin_ocean
    plotitem.imshow_cmax = cmax_ocean
    plotitem.add_colorbar = False
    plotitem.colorbar_shrink = 0.7
    plotitem.amr_celledges_show = [0,0,0]
    plotitem.amr_patchedges_show = [0]

    # Land
    plotitem = plotaxes.new_plotitem(plot_type='2d_imshow')
    plotitem.plot_var = geoplot.land
    plotitem.imshow_cmap = geoplot.land_colors
    plotitem.imshow_cmin = 0.0
    plotitem.imshow_cmax = 100.0
    plotitem.add_colorbar = False
    plotitem.amr_celledges_show = [0,0,0]
    plotitem.amr_patchedges_show = [0]
    plotaxes.xlimits = [180,240]
    plotaxes.ylimits = [10,62]

    # Add contour lines of bathymetry:  (currently disabled, show = False)
    plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
    plotitem.show = False
    plotitem.plot_var = geoplot.topo
    from numpy import arange, linspace
    plotitem.contour_levels = linspace(-6000,0,7)
    plotitem.amr_contour_colors = ['g']  # color on each level
    plotitem.kwargs = {'linestyles':'solid'}
    plotitem.amr_contour_show = [0,0,1,0]  # show contours only on finest level
    plotitem.celledges_show = 0
    plotitem.patchedges_show = 0

    # Add contour lines of topography:  (currently disabled, show = False)
    plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
    plotitem.show = False
    plotitem.plot_var = geoplot.topo
    from numpy import arange, linspace
    plotitem.contour_levels = arange(0., 11., 1.)
    plotitem.amr_contour_colors = ['g']  # color on each level
    plotitem.kwargs = {'linestyles':'solid'}
    plotitem.amr_contour_show = [0,0,0,1]  # show contours only on finest level
    plotitem.celledges_show = 0
    plotitem.patchedges_show = 0

    #-----------------------------------------
    # Figure for inner product
    #-----------------------------------------
    plotfigure = plotdata.new_plotfigure(name='Inner Product', figno=1)
    plotfigure.kwargs = {'figsize': (8,7)}

    # Set up for axes in this figure:
    plotaxes = plotfigure.new_plotaxes()
    plotaxes.title = 'Inner Product'
    plotaxes.scaled = True

    def aa_innerprod(current_data):
        from pylab import ticklabel_format, xticks, gca, cos, pi, yticks
        plotcc(current_data)
        title(current_data)
        ticklabel_format(format='plain',useOffset=False)
        xticks([180, 200, 220, 240], rotation=20, fontsize = 28)
        yticks(fontsize = 28)
        a = gca()
        a.set_aspect(1./cos(41.75*pi/180.))

    plotaxes.afteraxes = aa_innerprod

    # Water: plot q component 4 (the forward/adjoint inner product).
    plotitem = plotaxes.new_plotitem(plot_type='2d_imshow')
    plotitem.plot_var = 4
    plotitem.imshow_cmap = colormaps.white_red
    plotitem.imshow_cmin = 0.0
    plotitem.imshow_cmax = 0.04
    plotitem.add_colorbar = False
    plotitem.amr_celledges_show = [0,0,0]
    plotitem.amr_patchedges_show = [0]

    # Land
    plotitem = plotaxes.new_plotitem(plot_type='2d_imshow')
    plotitem.plot_var = geoplot.land
    plotitem.imshow_cmap = geoplot.land_colors
    plotitem.imshow_cmin = 0.0
    plotitem.imshow_cmax = 100.0
    plotitem.add_colorbar = False
    plotitem.amr_celledges_show = [0,0,0]
    plotitem.amr_patchedges_show = [0]
    plotaxes.xlimits = [180,240]
    plotaxes.ylimits = [10,62]

    #-----------------------------------------
    # Figure for levels
    #-----------------------------------------
    plotfigure = plotdata.new_plotfigure(name='Grid patches', figno=10)
    plotfigure.kwargs = {'figsize': (8,7)}

    # Set up for axes in this figure:
    plotaxes = plotfigure.new_plotaxes()
    plotaxes.title = 'Grid patches'
    plotaxes.scaled = True

    def aa_patches(current_data):
        from pylab import ticklabel_format, xticks, gca, cos, pi, yticks
        plotcc(current_data)
        title(current_data)
        ticklabel_format(format='plain',useOffset=False)
        xticks([180, 200, 220, 240], rotation=20, fontsize = 28)
        yticks(fontsize = 28)
        a = gca()
        a.set_aspect(1./cos(41.75*pi/180.))

    # Water
    plotitem = plotaxes.new_plotitem(plot_type='2d_patch')
    plotitem.amr_patch_bgcolor = [[1,1,1], [.7,.7,1], [1,0.4,0.4]]
    plotitem.amr_patchedges_color = ['k','b','r']
    plotitem.amr_celledges_show = [0]
    plotitem.amr_patchedges_show = [0,1,1,0]

    # Land
    plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
    plotitem.plot_var = geoplot.land
    plotitem.pcolor_cmap = geoplot.land_colors
    plotitem.pcolor_cmin = 0.0
    plotitem.pcolor_cmax = 100.0
    plotitem.add_colorbar = False
    plotitem.amr_celledges_show = [0]
    plotitem.amr_patchedges_show = [0]
    plotaxes.afteraxes = aa_patches
    plotaxes.xlimits = [180,240]
    plotaxes.ylimits = [10,62]

    #-----------------------------------------
    # Figures for gauges
    #-----------------------------------------

    def fix_gauge(current_data):
        # Relabel ticks in hours and set per-gauge y ranges.
        from pylab import plot, legend, xticks, floor, yticks,xlabel,savefig, title
        t = current_data.t
        gaugeno = current_data.gaugeno
        xticks([18000, 21600, 25200, 28800, 32400, 36000],\
               [str(180/36), str(216/36), str(252/36), str(288/36), \
                str(324/36), str(360/36)], fontsize=17)
        if (gaugeno == 1):
            yticks([-0.3, -0.15, 0, 0.15, 0.3],\
                   ['-0.3', '-0.15', '0', '0.15', '0.3'],fontsize=17)
        if (gaugeno == 2):
            yticks([-1.5, -0.75, 0, 0.75, 1.5],\
                   ['-1.5', '-0.75', '0', '0.75', '1.5'],fontsize=17)
        title('Surface at gauge ' + str(gaugeno), fontsize=17)
        xlabel(' ')

    plotfigure = plotdata.new_plotfigure(name='gauge plot', figno=300, \
                                         type='each_gauge')
    plotfigure.kwargs = {'figsize': (10.5,3)}

    # Set up for axes in this figure:
    plotaxes = plotfigure.new_plotaxes()
    plotaxes.axescmd = 'axes([0.12,0.12,0.79,0.79])'
    plotaxes.title = 'Surface'
    plotaxes.xlimits = [15000, 39600]
    plotaxes.afteraxes = fix_gauge

    # Plot surface as blue curve:
    plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
    plotitem.plot_var = 3
    plotitem.plotstyle = 'b-'

    # Plot q[0] from previous high tol run as red line:
    plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
    plotitem.plot_var = 3
    plotitem.plotstyle = 'r-'
    import os
    plotitem.outdir = os.path.join(os.getcwd(), '_output_sflag_14')

    # Plot q[0] from previous low tol run as green line:
    plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
    plotitem.plot_var = 3
    plotitem.plotstyle = 'g-'
    plotitem.outdir = os.path.join(os.getcwd(), '_output_sflag_09')

    #-----------------------------------------

    # Parameters used only when creating html and/or latex hardcopy
    # e.g., via clawpack.visclaw.frametools.printframes:

    plotdata.printfigs = True  # print figures
    plotdata.print_format = 'png'  # file format
    plotdata.print_framenos = 'all'  # list of frames to print
    plotdata.print_fignos = 'all'  # list of figures to print
    plotdata.print_gaugenos = 'all'  # list of gauges to print
    plotdata.html = True  # create html files of plots?
    plotdata.html_homelink = '../README.html'  # pointer for top of index
    plotdata.latex = True  # create latex file of plots?
    plotdata.latex_figsperline = 2  # layout of plots
    plotdata.latex_framesperline = 1  # layout of plots
    plotdata.latex_makepdf = False  # also run pdflatex?
    plotdata.html_movie = 'JSAnimation'  # new style, or "4.x" for old style

    return plotdata
| clawpack/adjoint | paper1_examples/tsunami_Alaska/compare/setplot_compare.py | Python | bsd-2-clause | 11,089 |
#!/usr/bin/env python3
import sys, os, signal, time, getopt
# Directory containing this script; make it importable.
BASE_DIR = os.path.dirname(sys.argv[0])
if not BASE_DIR: BASE_DIR = "."
sys.path.append(BASE_DIR)

# Runtime files of the daemon (pid, stdout log, stderr log) and the
# configuration file with the tunnel definitions.
PID_PATH = "/tmp/LANd.pid"
LOG_FILE = "/tmp/LANd.log"
ERR_FILE = "/tmp/LANd_err.log"
CFG_FILE = "%s/fdslight_etc/LANd.ini" % BASE_DIR
import pywind.evtframework.evt_dispatcher as dispatcher
import pywind.lib.configfile as cfg
import freenet.lib.proc as proc
import freenet.lib.cfg_check as cfg_check
import freenet.handlers.LANd_forward as lan_fwd
import freenet.handlers.wol_handler as wol_handler
import freenet.lib.logging as logging
class service(dispatcher.dispatcher):
__conns = None
__debug = None
__sessions = None
__configs = None
__time = None
__wol_fd = None
def init_func(self, wol_key, wol_port=5888, wol_bind_ip="0.0.0.0", debug=False):
self.__wol_fd = -1
self.__debug = debug
self.__sessions = {}
self.__configs = {}
self.__conns = {}
self.__time = time.time()
self.__debug = debug
if not cfg_check.is_port(wol_port):
sys.stderr.write("wrong wol port number %s\r\n")
return
self.create_poll()
self.create_wol(wol_key, wol_port, wol_bind_ip)
self.create_connections()
def create_wol(self, wol_key, wol_port, wol_bind_ip):
self.__wol_fd = self.create_handler(
-1, wol_handler.listener, ("127.0.0.1", wol_port), wol_bind_ip, wol_key
)
def __create_conn(self, name, configs):
if "host" not in configs:
sys.stderr.write("not found host from configure %s\r\n" % name)
return False
if "auth_id" not in configs:
sys.stderr.write("not found auth_id from configure %s\r\n" % name)
return False
if "URI" not in configs:
sys.stderr.write("not found URI from configure %s\r\n" % name)
return False
if not cfg_check.is_number(configs.get("force_ipv6", "0")):
sys.stderr.write("wrong force_ipv6 value from configure %s\r\n" % name)
return False
force_ipv6 = bool(int(configs.get("force_ipv6", "0")))
host = configs["host"]
is_ipv6 = False
if not cfg_check.is_ipv4(host) and not cfg_check.is_ipv6(host): is_ipv6 = force_ipv6
if cfg_check.is_ipv6(host): is_ipv6 = True
if not cfg_check.is_port(configs.get("port", "443")):
sys.stderr.write("wrong port value from configure %s\r\n" % name)
return False
port = int(configs.get("port", "443"))
auth_id = configs["auth_id"]
uri = configs["URI"]
if auth_id in self.__conns:
sys.stderr.write("auth id %s exists\r\n" % auth_id)
return False
fd = self.create_handler(-1, lan_fwd.client, (host, port,), uri, auth_id, is_ipv6=is_ipv6)
if fd < 0:
sys.stderr.write("create %s connection is failed\r\n" % name)
return False
self.__conns[auth_id] = fd
return True
def create_connections(self):
cfgs = cfg.ini_parse_from_file(CFG_FILE)
for name in cfgs:
rs = self.__create_conn(name, cfgs[name])
if not rs:
self.release()
break
''''''
self.__configs = cfgs
def release(self):
seq = []
for session_id in self.__sessions:
seq.append(self.__sessions[session_id])
for fd in seq:
self.delete_handler(fd)
seq = []
for auth_id in self.__conns:
fd = self.__conns[auth_id]
seq.append(fd)
for fd in seq:
self.delete_handler(fd)
if self.__wol_fd > 0:
self.delete_handler(self.__wol_fd)
    @property
    def debug(self):
        # Read-only view of the debug flag passed to init_func.
        return self.__debug
def delete_fwd_conn(self, auth_id):
if auth_id not in self.__conns: return
del self.__conns[auth_id]
    def handle_conn_request(self, address, path, auth_id, session_id, remote_addr, remote_port, is_ipv6):
        """Open (or re-open) a message-tunnel client for one session.

        address: (host, port,) of the relay server to connect to.
        path: HTTP URI used by the tunnel client.
        remote_addr/remote_port: LAN destination the tunnel forwards to.
        """
        # Tear down any existing handler for this session first so that the
        # session id is never bound to two fds at once.
        if session_id in self.__sessions:
            fd = self.__sessions[session_id]
            logging.print_general("delete %s,%s" % (auth_id, session_id,), (remote_addr, remote_port,))
            self.delete_handler(fd)
            del self.__sessions[session_id]
        fd = self.create_handler(-1, lan_fwd.client, address, path, auth_id, session_id=session_id,
                                 is_msg_tunnel=True, is_ipv6=is_ipv6)
        self.get_handler(fd).set_forwarding_addr((remote_addr, remote_port,), is_ipv6=is_ipv6)
        self.__sessions[session_id] = fd
def myloop(self):
"""检查哪些连接已经丢失,对于连接失败的重新建立连接
"""
# 每隔一段时间重新建立连接
t = time.time()
if t - self.__time < 10: return
self.__time = time.time()
names = []
for name in self.__configs:
config = self.__configs[name]
auth_id = config["auth_id"]
if auth_id not in self.__conns: names.append((name, config,))
''''''
for name, config in names:
self.__create_conn(name, config)
def update_configs():
    """Ask the running daemon to reload its configuration via SIGUSR1."""
    pid = proc.get_pid(PID_PATH)
    if pid >= 0:
        os.kill(pid, signal.SIGUSR1)
        return
    sys.stderr.write("not found process\r\n")
    sys.stderr.flush()
def start(debug, wol_key, wol_port, wol_bind_ip):
    """Run the service, daemonizing first unless debug mode is requested.

    Refuses to start when a pid file already exists.  On exit (including
    KeyboardInterrupt) all handlers are released and the pid file removed.
    """
    if not debug:
        if os.path.exists(PID_PATH):
            sys.stderr.write("the process exists\r\n")
            sys.exit(-1)
        # Classic double-fork daemonization: detach from the controlling
        # terminal, then fork again so the daemon can never re-acquire one.
        pid = os.fork()
        if pid != 0: sys.exit(0)
        os.setsid()
        os.umask(0)
        pid = os.fork()
        if pid != 0: sys.exit(0)
        # Redirect std streams to the log files in daemon mode.
        sys.stderr = open(ERR_FILE, "a")
        sys.stdout = open(LOG_FILE, "a")
    proc.write_pid(PID_PATH)
    cls = service()
    try:
        cls.ioloop(wol_key, wol_bind_ip=wol_bind_ip, debug=debug, wol_port=wol_port)
    except KeyboardInterrupt:
        cls.release()
    except:
        # Any other error: release resources and log the traceback.
        cls.release()
        logging.print_error()
    if os.path.isfile(PID_PATH): os.remove(PID_PATH)
    sys.exit(0)
def main():
    """Command-line entry point: parse arguments, then start or stop the
    service.

    Fixes relative to the original:
      * "please set wol bind ip" was missing its "\\r\\n" terminator;
      * getopt never raises IndexError, so that dead except clause is gone;
      * the option dispatch uses elif instead of independent ifs.
    """
    help_doc = """
    start | stop | debug
    start | debug --wol_listen_port=port --wol_key=key --wol_bind_ip=ip
    """
    if len(sys.argv) < 2:
        print(help_doc)
        return
    if sys.argv[1] not in ("start", "stop", "debug",):
        print(help_doc)
        return
    d = sys.argv[1]
    if d == "stop":
        pid = proc.get_pid(PID_PATH)
        if pid > 0: os.kill(pid, signal.SIGINT)
        return
    try:
        opts, args = getopt.getopt(sys.argv[2:], "", ["wol_listen_port=", "wol_key=", "wol_bind_ip="])
    except getopt.GetoptError:
        print(help_doc)
        return
    wol_port = 5888
    wol_key = None
    wol_bind_ip = None
    for k, v in opts:
        if k == "--wol_listen_port":
            if not cfg_check.is_port(v):
                sys.stderr.write("wrong port number\r\n")
                return
            wol_port = int(v)
        elif k == "--wol_key":
            wol_key = v
        elif k == "--wol_bind_ip":
            wol_bind_ip = v
    if not wol_key:
        sys.stderr.write("please set wol key\r\n")
        return
    if not wol_bind_ip:
        # Bug fix: this message previously lacked the "\r\n" terminator
        # every other message in this module uses.
        sys.stderr.write("please set wol bind ip\r\n")
        return
    start(d == "debug", wol_key, wol_port, wol_bind_ip)
if __name__ == '__main__': main()
| fdslight/fdslight | LANd_pass.py | Python | bsd-2-clause | 7,614 |
from dumptruck import DumpTruck
import datetime
import re
import os
def _connect(dbname = 'data.sqlite'):
    'Initialize the database (again). This is mainly for testing'
    # Rebinds the module-level DumpTruck handle used by every other
    # function in this module.
    global dt
    dt = DumpTruck(dbname = dbname, adapt_and_convert = False)
# Open the default database at import time, mirroring scraperwiki classic.
_connect()
def execute(sqlquery, data=[], verbose=1):
    """Emulate scraperwiki as much as possible by mangling dumptruck result.

    Returns a dict with u'data' (list of rows, each a list of values) and
    u'keys' (the column names).  datetime.date values are stringified for
    compatibility with scraperwiki classic.  ``verbose`` is accepted only
    for API parity.  The mutable default for ``data`` is safe here: it is
    only rebound, never mutated.
    """
    # Allow a bare (non-sequence) value to be passed as data.
    if not isinstance(data, (list, tuple)):
        data = [data]
    result = dt.execute(sqlquery, data, commit=False)
    # None (non-select) and empty list (select) results
    if not result:
        return {u'data': [], u'keys': []}
    dtos = lambda d: str(d) if isinstance(d, datetime.date) else d
    # Select statement with results
    return {u'data': [[dtos(value) for value in row.values()] for row in result],
            u'keys': result[0].keys()}
def save(unique_keys, data, table_name="swdata", verbose=2, date=None):
    """Create/extend table_name from data and upsert the rows.

    unique_keys, when a non-empty list, becomes a unique index that drives
    the upsert.  verbose and date are accepted only for API parity.
    """
    if not data:
        return
    dt.create_table(data, table_name = table_name, error_if_exists = False)
    if unique_keys != []:
        dt.create_index(unique_keys, table_name, unique = True, if_not_exists = True)
    return dt.upsert(data, table_name = table_name)
def commit(verbose=1):
    """Commit the current transaction (verbose is accepted for API parity)."""
    dt.commit()
def select(sqlquery, data=[], verbose=1):
    """Run ``select <sqlquery>`` and return the matching rows, with
    datetime.date values stringified to conform to scraperwiki classic."""
    sqlquery = "select %s" % sqlquery # maybe check if select or another command is there already?
    rows = dt.execute(sqlquery, data, commit = False)
    if rows != []:
        columns = rows[0].keys()
        for record in rows:
            for column in columns:
                value = record[column]
                if isinstance(value, datetime.date):
                    record[column] = str(value)
    return rows
def show_tables(dbname=""):
    """Map each table name to its CREATE statement, optionally reading the
    sqlite_master of an attached database."""
    source = "sqlite_master"
    if dbname:
        source = "`%s`.%s" % (dbname, source)
    listing = select('name, sql from %s where type = "table";' % source)
    return {entry['name']: entry['sql'] for entry in listing}
def save_var(name, value, verbose=2):
    """Persist a named variable scraperwiki-classic style.

    dumptruck writes its own private vars table; this mirrors it into the
    classic `swvariables` table (renaming value/type/key columns to
    value_blob/type/name) and then drops the private table.
    """
    data = dt.save_var(name, value)
    dt.execute(u"CREATE TABLE IF NOT EXISTS swvariables (`value_blob` blob, `type` text, `name` text PRIMARY KEY)", commit = False)
    # Copy dumptruck's private vars table into swvariables, then drop it so
    # only the classic-compatible table remains.
    dt.execute(u'INSERT OR REPLACE INTO swvariables SELECT `value`, `type`, `key` FROM `%s`' % dt._DumpTruck__vars_table, commit = False)
    dt.execute(u'DROP TABLE `%s`' % dt._DumpTruck__vars_table, commit = False)
    dt.commit()
    return data
def get_var(name, default=None, verbose=2):
    """Read a variable saved by save_var, returning `default` when the
    variable (or the whole swvariables table) does not exist."""
    if 'swvariables' not in show_tables(): # this check should be unnecessary
        return default
    dt.execute(u"CREATE TABLE IF NOT EXISTS swvariables (`value_blob` blob, `type` text, `name` text PRIMARY KEY)", commit = False)
    # Rebuild dumptruck's private vars table from swvariables so dt.get_var
    # can perform its type-aware decoding.
    dt.execute(u"CREATE TEMPORARY TABLE IF NOT EXISTS %s (`value` blob, `type` text, `key` text PRIMARY KEY)" % dt._DumpTruck__vars_table, commit = False)
    sql = u'INSERT INTO `%s` (value, type, key) SELECT `value_blob`, `type`, `name` FROM `swvariables`' % dt._DumpTruck__vars_table
    dt.execute(sql, commit = False)
    try:
        value = dt.get_var(name)
    except NameError:
        # dumptruck signals an unknown variable name with NameError.
        dt.connection.rollback()
        return default
    dt.execute(u'DROP TABLE `%s`' % dt._DumpTruck__vars_table, commit = False)
    dt.commit()
    return value
| abdulla16/yp_python_scraper | scraperwiki/sqlite.py | Python | bsd-2-clause | 3,270 |
import competition_utilities as cu
import csv
import datetime
import basic_features as features
import numpy as np
import pandas as pd
import re
def camel_to_underscores(name):
    """Convert a CamelCase identifier to snake_case."""
    partial = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', partial).lower()
def words(text):
    # Naive single-space split: "" yields [""] and consecutive spaces
    # yield empty-string tokens.
    return text.split(" ")
def nouns():
    # Placeholder: noun extraction is not implemented.
    pass
def word_count(text):
    """Return the number of space-separated tokens in *text*.

    Note: an empty string counts as one token, matching str.split(" ").
    """
    return len(text.split(" "))
##############################################################
###### FEATURE FUNCTIONS
##############################################################
def body_length(data):
    """Feature: character count of each question body."""
    lengths = data["BodyMarkdown"].apply(len)
    return pd.DataFrame({"BodyLength": lengths})
def num_tags(data):
    """Feature: how many of Tag1..Tag5 are actually set for each question.

    Bug fix: the original summed pd.isnull over the tag columns, i.e. it
    counted the *missing* tags -- the inverse of the feature's name.  It
    also returned a bare Series while every other feature function here
    returns a DataFrame; both are join-compatible, but the DataFrame form
    is consistent.
    """
    tag_columns = data[["Tag%d" % d for d in range(1, 6)]]
    counts = [sum(1 for tag in row if not pd.isnull(tag))
              for row in tag_columns.values]
    return pd.DataFrame.from_dict({"NumTags": counts})
def title_length(data):
    """Feature: character count of each question title."""
    return pd.DataFrame({"TitleLength": data["Title"].apply(len)})
def title_word_count(data):
    """Feature: space-separated word count of each question title."""
    word_counts = data["Title"].apply(word_count)
    return pd.DataFrame({"TitleWordCount": word_counts})
def user_age(data):
    """Feature: seconds between account creation and post creation."""
    age = data["PostCreationDate"] - data["OwnerCreationDate"]
    return pd.DataFrame({"UserAge": age.apply(lambda delta: delta.total_seconds())})
###########################################################
def extract_features(feature_names, data):
    """Build a feature DataFrame: names present as raw columns are copied
    through, anything else is computed by the matching snake_case function
    looked up on the `features` module."""
    extracted = pd.DataFrame(index=data.index)
    for feature in feature_names:
        if feature in data:
            column = data[feature]
        else:
            column = getattr(features, camel_to_underscores(feature))(data)
        extracted = extracted.join(column)
    return extracted
if __name__=="__main__":
    # Smoke run: extract the basic feature set from the training sample.
    feature_names = [ "BodyLength"
                    , "NumTags"
                    , "OwnerUndeletedAnswerCountAtPostTime"
                    , "ReputationAtPostCreation"
                    , "TitleLength"
                    , "TitleWordCount"
                    , "UserAge"
                    ]
    data = cu.get_dataframe("train-sample.csv")
    # NOTE(review): this rebinds the module-level name `features` (the
    # self-import used by extract_features) to a DataFrame; harmless here
    # only because extract_features is not called again afterwards.
    features = extract_features(feature_names, data)
print(features) | mattalcock/stack-kaggle | basic_features.py | Python | bsd-2-clause | 2,121 |
#!/usr/bin/env python
#
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Does google-lint on c++ files.
The goal of this script is to identify places in the code that *may*
be in non-compliance with google style. It does not attempt to fix
up these problems -- the point is to educate. It does also not
attempt to find all problems, or to ensure that everything it does
find is legitimately a problem.
In particular, we can get very confused by /* and // inside strings!
We do a small hack, which is to ignore //'s with "'s after them on the
same line, but it is far from perfect (in either direction).
"""
import codecs
import copy
import getopt
import math # for log
import os
import re
import sre_compile
import string
import sys
import unicodedata
# The allowed extensions for file names
# This is set by --extensions flag.
_valid_extensions = set(['c', 'cc', 'cpp', 'cxx', 'c++', 'h', 'hpp', 'hxx',
'h++'])
_USAGE = """
Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
[--counting=total|toplevel|detailed] [--root=subdir]
[--linelength=digits]
<file> [file] ...
The style guidelines this tries to follow are those in
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
Every problem is given a confidence score from 1-5, with 5 meaning we are
certain of the problem, and 1 meaning it could be a legitimate construct.
This will miss some errors, and is not a substitute for a code review.
To suppress false-positive errors of a certain category, add a
'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
suppresses errors of all categories on that line.
The files passed in will be linted; at least one file must be provided.
Default linted extensions are %s.
Other file types will be ignored.
Change the extensions with the --extensions flag.
Flags:
output=vs7
By default, the output is formatted to ease emacs parsing. Visual Studio
compatible output (vs7) may also be used. Other formats are unsupported.
verbose=#
Specify a number 0-5 to restrict errors to certain verbosity levels.
filter=-x,+y,...
Specify a comma-separated list of category-filters to apply: only
error messages whose category names pass the filters will be printed.
(Category names are printed with the message and look like
"[whitespace/indent]".) Filters are evaluated left to right.
"-FOO" and "FOO" means "do not print categories that start with FOO".
"+FOO" means "do print categories that start with FOO".
Examples: --filter=-whitespace,+whitespace/braces
--filter=whitespace,runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
To see a list of all the categories used in cpplint, pass no arg:
--filter=
counting=total|toplevel|detailed
The total number of errors found is always printed. If
'toplevel' is provided, then the count of errors in each of
the top-level categories like 'build' and 'whitespace' will
also be printed. If 'detailed' is provided, then a count
is provided for each category like 'build/class'.
root=subdir
The root directory used for deriving header guard CPP variable.
By default, the header guard CPP variable is calculated as the relative
path to the directory that contains .git, .hg, or .svn. When this flag
is specified, the relative path is calculated from the specified
directory. If the specified directory does not exist, this flag is
ignored.
Examples:
Assuming that src/.git exists, the header guard CPP variables for
src/chrome/browser/ui/browser.h are:
No flag => CHROME_BROWSER_UI_BROWSER_H_
--root=chrome => BROWSER_UI_BROWSER_H_
--root=chrome/browser => UI_BROWSER_H_
linelength=digits
This is the allowed line length for the project. The default value is
80 characters.
Examples:
--linelength=120
extensions=extension,extension,...
The allowed file extensions that cpplint will check
Examples:
--extensions=hpp,cpp
cpplint.py supports per-directory configurations specified in CPPLINT.cfg
files. CPPLINT.cfg file can contain a number of key=value pairs.
Currently the following options are supported:
set noparent
filter=+filter1,-filter2,...
exclude_files=regex
linelength=80
"set noparent" option prevents cpplint from traversing directory tree
upwards looking for more .cfg files in parent directories. This option
is usually placed in the top-level project directory.
The "filter" option is similar in function to --filter flag. It specifies
message filters in addition to the |_DEFAULT_FILTERS| and those specified
through --filter command-line flag.
"exclude_files" allows to specify a regular expression to be matched against
a file name. If the expression matches, the file is skipped and not run
through liner.
"linelength" allows to specify the allowed line length for the project.
CPPLINT.cfg has an effect on files in the same directory and all
sub-directories, unless overridden by a nested configuration file.
Example file:
filter=-build/include_order,+build/include_alpha
exclude_files=.*\.cc
The above example disables build/include_order warning and enables
build/include_alpha as well as excludes all .cc from being
processed by linter, in the current directory (where the .cfg
file is located) and all sub-directories.
""" % (list(_valid_extensions))
# We categorize each error message we print. Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here! cpplint_unittest.py should tell you if you forget to do this.
_ERROR_CATEGORIES = [
'build/class',
'build/c++11',
'build/deprecated',
'build/endif_comment',
'build/explicit_make_pair',
'build/forward_decl',
'build/header_guard',
'build/include',
'build/include_alpha',
'build/include_order',
'build/include_what_you_use',
'build/namespaces',
'build/printf_format',
'build/storage_class',
'legal/copyright',
'readability/alt_tokens',
'readability/braces',
'readability/casting',
'readability/check',
'readability/constructors',
'readability/fn_size',
'readability/function',
'readability/inheritance',
'readability/multiline_comment',
'readability/multiline_string',
'readability/namespace',
'readability/nolint',
'readability/nul',
'readability/strings',
'readability/todo',
'readability/utf8',
'runtime/arrays',
'runtime/casting',
'runtime/explicit',
'runtime/int',
'runtime/init',
'runtime/invalid_increment',
'runtime/member_string_references',
'runtime/memset',
'runtime/indentation_namespace',
'runtime/operator',
'runtime/printf',
'runtime/printf_format',
'runtime/references',
'runtime/string',
'runtime/threadsafe_fn',
'runtime/vlog',
'whitespace/blank_line',
'whitespace/braces',
'whitespace/comma',
'whitespace/comments',
'whitespace/empty_conditional_body',
'whitespace/empty_loop_body',
'whitespace/end_of_line',
'whitespace/ending_newline',
'whitespace/forcolon',
'whitespace/indent',
'whitespace/line_length',
'whitespace/newline',
'whitespace/operators',
'whitespace/parens',
'whitespace/semicolon',
'whitespace/tab',
'whitespace/todo',
]
# These error categories are no longer enforced by cpplint, but for backwards-
# compatibility they may still appear in NOLINT comments.
_LEGACY_ERROR_CATEGORIES = [
'readability/streams',
]
# The default state of the category filter. This is overridden by the --filter=
# flag. By default all errors are on, so only add here categories that should be
# off by default (i.e., categories that must be enabled by the --filter= flags).
# All entries here should start with a '-' or '+', as in the --filter= flag.
_DEFAULT_FILTERS = ['-build/include_alpha']
# We used to check for high-bit characters, but after much discussion we
# decided those were OK, as long as they were in UTF-8 and didn't represent
# hard-coded international strings, which belong in a separate i18n file.
# C++ headers
_CPP_HEADERS = frozenset([
# Legacy
'algobase.h',
'algo.h',
'alloc.h',
'builtinbuf.h',
'bvector.h',
'complex.h',
'defalloc.h',
'deque.h',
'editbuf.h',
'fstream.h',
'function.h',
'hash_map',
'hash_map.h',
'hash_set',
'hash_set.h',
'hashtable.h',
'heap.h',
'indstream.h',
'iomanip.h',
'iostream.h',
'istream.h',
'iterator.h',
'list.h',
'map.h',
'multimap.h',
'multiset.h',
'ostream.h',
'pair.h',
'parsestream.h',
'pfstream.h',
'procbuf.h',
'pthread_alloc',
'pthread_alloc.h',
'rope',
'rope.h',
'ropeimpl.h',
'set.h',
'slist',
'slist.h',
'stack.h',
'stdiostream.h',
'stl_alloc.h',
'stl_relops.h',
'streambuf.h',
'stream.h',
'strfile.h',
'strstream.h',
'tempbuf.h',
'tree.h',
'type_traits.h',
'vector.h',
# 17.6.1.2 C++ library headers
'algorithm',
'array',
'atomic',
'bitset',
'chrono',
'codecvt',
'complex',
'condition_variable',
'deque',
'exception',
'forward_list',
'fstream',
'functional',
'future',
'initializer_list',
'iomanip',
'ios',
'iosfwd',
'iostream',
'istream',
'iterator',
'limits',
'list',
'locale',
'map',
'memory',
'mutex',
'new',
'numeric',
'ostream',
'queue',
'random',
'ratio',
'regex',
'set',
'sstream',
'stack',
'stdexcept',
'streambuf',
'string',
'strstream',
'system_error',
'thread',
'tuple',
'typeindex',
'typeinfo',
'type_traits',
'unordered_map',
'unordered_set',
'utility',
'valarray',
'vector',
# 17.6.1.2 C++ headers for C library facilities
'cassert',
'ccomplex',
'cctype',
'cerrno',
'cfenv',
'cfloat',
'cinttypes',
'ciso646',
'climits',
'clocale',
'cmath',
'csetjmp',
'csignal',
'cstdalign',
'cstdarg',
'cstdbool',
'cstddef',
'cstdint',
'cstdio',
'cstdlib',
'cstring',
'ctgmath',
'ctime',
'cuchar',
'cwchar',
'cwctype',
])
# These headers are excluded from [build/include] and [build/include_order]
# checks:
# - Anything not following google file name conventions (containing an
# uppercase character, such as Python.h or nsStringAPI.h, for example).
# - Lua headers.
_THIRD_PARTY_HEADERS_PATTERN = re.compile(
r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$')
# Assertion macros. These are defined in base/logging.h and
# testing/base/gunit.h. Note that the _M versions need to come first
# for substring matching to work.
_CHECK_MACROS = [
'DCHECK', 'CHECK',
'EXPECT_TRUE_M', 'EXPECT_TRUE',
'ASSERT_TRUE_M', 'ASSERT_TRUE',
'EXPECT_FALSE_M', 'EXPECT_FALSE',
'ASSERT_FALSE_M', 'ASSERT_FALSE',
]
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
('>=', 'GE'), ('>', 'GT'),
('<=', 'LE'), ('<', 'LT')]:
_CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
_CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
('>=', 'LT'), ('>', 'LE'),
('<=', 'GT'), ('<', 'GE')]:
_CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
_CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
# Alternative tokens and their replacements. For full list, see section 2.5
# Alternative tokens [lex.digraph] in the C++ standard.
#
# Digraphs (such as '%:') are not included here since it's a mess to
# match those on a word boundary.
_ALT_TOKEN_REPLACEMENT = {
'and': '&&',
'bitor': '|',
'or': '||',
'xor': '^',
'compl': '~',
'bitand': '&',
'and_eq': '&=',
'or_eq': '|=',
'xor_eq': '^=',
'not': '!',
'not_eq': '!='
}
# Compile regular expression that matches all the above keywords. The "[ =()]"
# bit is meant to avoid matching these keywords outside of boolean expressions.
#
# False positives include C-style multi-line comments and multi-line strings
# but those have always been troublesome for cpplint.
_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
# These constants define types of headers for use with
# _IncludeState.CheckNextIncludeOrder().
_C_SYS_HEADER = 1
_CPP_SYS_HEADER = 2
_LIKELY_MY_HEADER = 3
_POSSIBLE_MY_HEADER = 4
_OTHER_HEADER = 5
# These constants define the current inline assembly state
_NO_ASM = 0 # Outside of inline assembly block
_INSIDE_ASM = 1 # Inside inline assembly block
_END_ASM = 2 # Last line of inline assembly block
_BLOCK_ASM = 3 # The whole block is an inline assembly block
# Match start of assembly blocks
_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
r'(?:\s+(volatile|__volatile__))?'
r'\s*[{(]')
_regexp_compile_cache = {}
# {str, set(int)}: a map from error categories to sets of linenumbers
# on which those errors are expected and should be suppressed.
_error_suppressions = {}
# The root directory used for deriving header guard CPP variable.
# This is set by --root flag.
_root = None
# The allowed line length of files.
# This is set by --linelength flag.
_line_length = 80
# Python 2/3 compatibility shims: define text-type aliases and dict/range
# iteration helpers so the rest of the module can be version-agnostic.
if sys.version_info < (3,):
    def u(x):
        # Decode escape sequences so literals behave the same under Python 2.
        return codecs.unicode_escape_decode(x)[0]
    TEXT_TYPE = unicode
    # BINARY_TYPE = str
    range = xrange
    itervalues = dict.itervalues
    iteritems = dict.iteritems
else:
    def u(x):
        # Python 3 strings are already text; no decoding needed.
        return x
    TEXT_TYPE = str
    # BINARY_TYPE = bytes
    xrange = range
    itervalues = dict.values
    iteritems = dict.items
def ParseNolintSuppressions(filename, raw_line, linenum, error):
  """Updates the global list of error-suppressions.

  Parses any NOLINT comments on the current line, updating the global
  error_suppressions store.  Reports an error if the NOLINT comment
  was malformed.

  Args:
    filename: str, the name of the input file.
    raw_line: str, the line of input text, with comments.
    linenum: int, the number of the current line.
    error: function, an error handler.
  """
  matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
  if not matched:
    return
  # NOLINTNEXTLINE suppresses the line after the comment; NOLINT this one.
  if matched.group(1):
    target_line = linenum + 1
  else:
    target_line = linenum
  category = matched.group(2)
  if category in (None, '(*)'):  # => "suppress all"
    _error_suppressions.setdefault(None, set()).add(target_line)
    return
  if category.startswith('(') and category.endswith(')'):
    category = category[1:-1]
    if category in _ERROR_CATEGORIES:
      _error_suppressions.setdefault(category, set()).add(target_line)
    elif category not in _LEGACY_ERROR_CATEGORIES:
      error(filename, linenum, 'readability/nolint', 5,
            'Unknown NOLINT error category: %s' % category)
def ResetNolintSuppressions():
  """Resets the set of NOLINT suppressions to empty."""
  # Mutates the module-level _error_suppressions map in place.
  _error_suppressions.clear()
def IsErrorSuppressedByNolint(category, linenum):
  """Returns true if the specified error category is suppressed on this line.

  Consults the global error_suppressions map populated by
  ParseNolintSuppressions/ResetNolintSuppressions.

  Args:
    category: str, the category of the error.
    linenum: int, the current line number.
  Returns:
    bool, True iff the error should be suppressed due to a NOLINT comment.
  """
  suppressed_for_category = _error_suppressions.get(category, set())
  suppressed_everywhere = _error_suppressions.get(None, set())
  return linenum in suppressed_for_category or linenum in suppressed_everywhere
def Match(pattern, s):
  """Matches the string with the pattern, caching the compiled regexp."""
  # The regexp compilation caching is inlined in both Match and Search for
  # performance reasons; factoring it out into a separate function turns out
  # to be noticeably expensive.
  if pattern not in _regexp_compile_cache:
    # re.compile rather than the internal sre_compile module (deprecated in
    # Python 3.11); both yield the same compiled pattern object.
    _regexp_compile_cache[pattern] = re.compile(pattern)
  return _regexp_compile_cache[pattern].match(s)
def ReplaceAll(pattern, rep, s):
  """Replaces instances of pattern in a string with a replacement.

  The compiled regex is kept in a cache shared by Match and Search.

  Args:
    pattern: regex pattern
    rep: replacement text
    s: search string

  Returns:
    string with replacements made (or original string if no replacements)
  """
  if pattern not in _regexp_compile_cache:
    # re.compile rather than the internal sre_compile module (deprecated in
    # Python 3.11); both yield the same compiled pattern object.
    _regexp_compile_cache[pattern] = re.compile(pattern)
  return _regexp_compile_cache[pattern].sub(rep, s)
def Search(pattern, s):
  """Searches the string for the pattern, caching the compiled regexp."""
  if pattern not in _regexp_compile_cache:
    # re.compile rather than the internal sre_compile module (deprecated in
    # Python 3.11); both yield the same compiled pattern object.
    _regexp_compile_cache[pattern] = re.compile(pattern)
  return _regexp_compile_cache[pattern].search(s)
class _IncludeState(object):
  """Tracks line numbers for includes, and the order in which includes appear.

  include_list contains list of lists of (header, line number) pairs.
  It's a lists of lists rather than just one flat list to make it
  easier to update across preprocessor boundaries.

  Call CheckNextIncludeOrder() once for each header in the file, passing
  in the type constants defined above. Calls in an illegal order will
  raise an _IncludeError with an appropriate error message.
  """
  # self._section will move monotonically through this set. If it ever
  # needs to move backwards, CheckNextIncludeOrder will raise an error.
  _INITIAL_SECTION = 0
  _MY_H_SECTION = 1
  _C_SECTION = 2
  _CPP_SECTION = 3
  _OTHER_H_SECTION = 4

  # Human-readable names for the header-type constants, used in messages.
  _TYPE_NAMES = {
      _C_SYS_HEADER: 'C system header',
      _CPP_SYS_HEADER: 'C++ system header',
      _LIKELY_MY_HEADER: 'header this file implements',
      _POSSIBLE_MY_HEADER: 'header this file may implement',
      _OTHER_HEADER: 'other header',
      }
  # Human-readable names for the section constants, used in messages.
  _SECTION_NAMES = {
      _INITIAL_SECTION: "... nothing. (This can't be an error.)",
      _MY_H_SECTION: 'a header this file implements',
      _C_SECTION: 'C system header',
      _CPP_SECTION: 'C++ system header',
      _OTHER_H_SECTION: 'other header',
      }

  def __init__(self):
    self.include_list = [[]]
    self.ResetSection('')

  def FindHeader(self, header):
    """Check if a header has already been included.

    Args:
      header: header to check.
    Returns:
      Line number of previous occurrence, or -1 if the header has not
      been seen before.
    """
    for section_list in self.include_list:
      for f in section_list:
        if f[0] == header:
          return f[1]
    return -1

  def ResetSection(self, directive):
    """Reset section checking for preprocessor directive.

    Args:
      directive: preprocessor directive (e.g. "if", "else").
    """
    # The name of the current section.
    self._section = self._INITIAL_SECTION
    # The path of last found header.
    self._last_header = ''
    # Update list of includes.  Note that we never pop from the
    # include list.
    if directive in ('if', 'ifdef', 'ifndef'):
      self.include_list.append([])
    elif directive in ('else', 'elif'):
      # Alternate preprocessor branch: restart the current include group.
      self.include_list[-1] = []

  def SetLastHeader(self, header_path):
    self._last_header = header_path

  def CanonicalizeAlphabeticalOrder(self, header_path):
    """Returns a path canonicalized for alphabetical comparison.

    - replaces "-" with "_" so they both cmp the same.
    - removes '-inl' since we don't require them to be after the main header.
    - lowercase everything, just in case.

    Args:
      header_path: Path to be canonicalized.
    Returns:
      Canonicalized path.
    """
    return header_path.replace('-inl.h', '.h').replace('-', '_').lower()

  def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
    """Check if a header is in alphabetical order with the previous header.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      header_path: Canonicalized header to be checked.
    Returns:
      Returns true if the header is in alphabetical order.
    """
    # If previous section is different from current section, _last_header will
    # be reset to empty string, so it's always less than current header.
    #
    # If previous line was a blank line, assume that the headers are
    # intentionally sorted the way they are.
    if (self._last_header > header_path and
        Match(r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])):
      return False
    return True

  def CheckNextIncludeOrder(self, header_type):
    """Returns a non-empty error message if the next header is out of order.

    This function also updates the internal state to be ready to check
    the next include.

    Args:
      header_type: One of the _XXX_HEADER constants defined above.

    Returns:
      The empty string if the header is in the right order, or an
      error message describing what's wrong.
    """
    error_message = ('Found %s after %s' %
                     (self._TYPE_NAMES[header_type],
                      self._SECTION_NAMES[self._section]))

    last_section = self._section

    # Sections may only advance; falling back to an earlier section is the
    # out-of-order condition that produces error_message.
    if header_type == _C_SYS_HEADER:
      if self._section <= self._C_SECTION:
        self._section = self._C_SECTION
      else:
        self._last_header = ''
        return error_message
    elif header_type == _CPP_SYS_HEADER:
      if self._section <= self._CPP_SECTION:
        self._section = self._CPP_SECTION
      else:
        self._last_header = ''
        return error_message
    elif header_type == _LIKELY_MY_HEADER:
      if self._section <= self._MY_H_SECTION:
        self._section = self._MY_H_SECTION
      else:
        self._section = self._OTHER_H_SECTION
    elif header_type == _POSSIBLE_MY_HEADER:
      if self._section <= self._MY_H_SECTION:
        self._section = self._MY_H_SECTION
      else:
        # This will always be the fallback because we're not sure
        # enough that the header is associated with this file.
        self._section = self._OTHER_H_SECTION
    else:
      assert header_type == _OTHER_HEADER
      self._section = self._OTHER_H_SECTION

    if last_section != self._section:
      self._last_header = ''

    return ''
class _CppLintState(object):
  """Maintains module-wide state.."""

  def __init__(self):
    self.verbose_level = 1  # global setting.
    self.error_count = 0    # global count of reported errors
    # filters to apply when emitting error messages
    self.filters = _DEFAULT_FILTERS[:]
    # backup of filter list. Used to restore the state after each file.
    self._filters_backup = self.filters[:]
    self.counting = 'total'  # In what way are we counting errors?
    self.errors_by_category = {}  # string to int dict storing error counts

    # output format:
    # "emacs" - format that emacs can parse (default)
    # "vs7" - format that Microsoft Visual Studio 7 can parse
    self.output_format = 'emacs'

  def SetOutputFormat(self, output_format):
    """Sets the output format for errors."""
    self.output_format = output_format

  def SetVerboseLevel(self, level):
    """Sets the module's verbosity, and returns the previous setting."""
    last_verbose_level = self.verbose_level
    self.verbose_level = level
    return last_verbose_level

  def SetCountingStyle(self, counting_style):
    """Sets the module's counting options."""
    self.counting = counting_style

  def SetFilters(self, filters):
    """Sets the error-message filters.

    These filters are applied when deciding whether to emit a given
    error message.

    Args:
      filters: A string of comma-separated filters (eg "+whitespace/indent").
               Each filter should start with + or -; else we die.

    Raises:
      ValueError: The comma-separated filters did not all start with '+' or '-'.
                  E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
    """
    # Default filters always have less priority than the flag ones.
    self.filters = _DEFAULT_FILTERS[:]
    self.AddFilters(filters)

  def AddFilters(self, filters):
    """ Adds more filters to the existing list of error-message filters. """
    for filt in filters.split(','):
      clean_filt = filt.strip()
      if clean_filt:
        self.filters.append(clean_filt)
    # Validate the whole list, not just the additions, so a bad default is
    # caught too.
    for filt in self.filters:
      if not (filt.startswith('+') or filt.startswith('-')):
        raise ValueError('Every filter in --filters must start with + or -'
                         ' (%s does not)' % filt)

  def BackupFilters(self):
    """ Saves the current filter list to backup storage."""
    self._filters_backup = self.filters[:]

  def RestoreFilters(self):
    """ Restores filters previously backed up."""
    self.filters = self._filters_backup[:]

  def ResetErrorCounts(self):
    """Sets the module's error statistic back to zero."""
    self.error_count = 0
    self.errors_by_category = {}

  def IncrementErrorCount(self, category):
    """Bumps the module's error statistic."""
    self.error_count += 1
    if self.counting in ('toplevel', 'detailed'):
      # 'detailed' keeps the full category; 'toplevel' collapses it to its
      # first path component (e.g. 'build/class' -> 'build').
      if self.counting != 'detailed':
        category = category.split('/')[0]
      if category not in self.errors_by_category:
        self.errors_by_category[category] = 0
      self.errors_by_category[category] += 1

  def PrintErrorCounts(self):
    """Print a summary of errors by category, and the total."""
    for category, count in iteritems(self.errors_by_category):
      sys.stderr.write('Category \'%s\' errors found: %d\n' %
                       (category, count))
    sys.stderr.write('Total errors found: %d\n' % self.error_count)
# Module-level singleton holding cpplint's shared configuration and error
# statistics; the module-level helper functions below delegate to it.
_cpplint_state = _CppLintState()
def _OutputFormat():
  """Returns the currently configured output format string."""
  return _cpplint_state.output_format
def _SetOutputFormat(output_format):
  """Stores the output format on the shared module state."""
  _cpplint_state.SetOutputFormat(output_format)
def _VerboseLevel():
  """Returns the current module-wide verbosity setting."""
  return _cpplint_state.verbose_level
def _SetVerboseLevel(level):
  """Updates the module's verbosity and returns the previous value."""
  return _cpplint_state.SetVerboseLevel(level)
def _SetCountingStyle(level):
  """Stores the error-counting style ('total', 'toplevel', 'detailed')."""
  _cpplint_state.SetCountingStyle(level)
def _Filters():
  """Returns the active list of error-message filters."""
  return _cpplint_state.filters
def _SetFilters(filters):
  """Resets the module's error-message filters to the given set.
  The filters decide which error messages are emitted.
  Args:
    filters: A string of comma-separated filters (eg "whitespace/indent").
      Each filter should start with + or -; else we die.
  """
  _cpplint_state.SetFilters(filters)
def _AddFilters(filters):
  """Appends extra filter overrides to the current filter list.
  Unlike _SetFilters, the existing filters are kept.
  Args:
    filters: A string of comma-separated filters (eg "whitespace/indent").
      Each filter should start with + or -; else we die.
  """
  _cpplint_state.AddFilters(filters)
def _BackupFilters():
  """Snapshots the current filter list so it can be restored later."""
  _cpplint_state.BackupFilters()
def _RestoreFilters():
  """Restores the filter list from the most recent backup."""
  _cpplint_state.RestoreFilters()
class _FunctionState(object):
  """Tracks current function name and the number of lines in its body."""
  _NORMAL_TRIGGER = 250  # for --v=0, 500 for --v=1, etc.
  _TEST_TRIGGER = 400    # about 50% more than _NORMAL_TRIGGER.
  def __init__(self):
    self.in_a_function = False
    self.lines_in_function = 0
    self.current_function = ''
  def Begin(self, function_name):
    """Start analyzing function body.
    Args:
      function_name: The name of the function being tracked.
    """
    self.in_a_function = True
    self.lines_in_function = 0
    self.current_function = function_name
  def Count(self):
    """Count line in current function body."""
    if self.in_a_function:
      self.lines_in_function += 1
  def Check(self, error, filename, linenum):
    """Report if too many lines in function body.
    Args:
      error: The function to call with any errors found.
      filename: The name of the current file.
      linenum: The number of the line to check.
    """
    # Test functions get a higher threshold before triggering.
    looks_like_test = Match(r'T(EST|est)', self.current_function)
    base_trigger = self._TEST_TRIGGER if looks_like_test else self._NORMAL_TRIGGER
    # Each verbosity level doubles the allowed function length.
    trigger = base_trigger * 2**_VerboseLevel()
    if self.lines_in_function > trigger:
      # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...
      error_level = min(
          int(math.log(self.lines_in_function / base_trigger, 2)), 5)
      error(filename, linenum, 'readability/fn_size', error_level,
            'Small and focused functions are preferred:'
            ' %s has %d non-comment lines'
            ' (error triggered by exceeding %d lines).' % (
                self.current_function, self.lines_in_function, trigger))
  def End(self):
    """Stop analyzing function body."""
    self.in_a_function = False
class _IncludeError(Exception):
  """Raised when the #include ordering in a file cannot be valid."""
  pass
class FileInfo(object):
  """Provides utility functions for filenames.
  FileInfo provides easy access to the components of a file's path
  relative to the project root.
  """
  def __init__(self, filename):
    # Path exactly as supplied by the caller; may be relative or absolute.
    self._filename = filename
  def FullName(self):
    """Make Windows paths like Unix."""
    # Absolute path with backslashes normalized to forward slashes.
    return os.path.abspath(self._filename).replace('\\', '/')
  def RepositoryName(self):
    """FullName after removing the local path to the repository.
    If we have a real absolute path name here we can try to do something smart:
    detecting the root of the checkout and truncating /path/to/checkout from
    the name so that we get header guards that don't include things like
    "C:\Documents and Settings\..." or "/home/username/..." in them and thus
    people on different computers who have checked the source out to different
    locations won't see bogus errors.
    """
    fullname = self.FullName()
    if os.path.exists(fullname):
      project_dir = os.path.dirname(fullname)
      if os.path.exists(os.path.join(project_dir, ".svn")):
        # If there's a .svn file in the current directory, we recursively look
        # up the directory tree for the top of the SVN checkout
        root_dir = project_dir
        one_up_dir = os.path.dirname(root_dir)
        while os.path.exists(os.path.join(one_up_dir, ".svn")):
          root_dir = os.path.dirname(root_dir)
          one_up_dir = os.path.dirname(one_up_dir)
        # Return the path relative to the directory containing the checkout.
        prefix = os.path.commonprefix([root_dir, project_dir])
        return fullname[len(prefix) + 1:]
      # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
      # searching up from the current path.
      root_dir = os.path.dirname(fullname)
      # Stop at the filesystem root, where dirname() is a fixed point.
      while (root_dir != os.path.dirname(root_dir) and
             not os.path.exists(os.path.join(root_dir, ".git")) and
             not os.path.exists(os.path.join(root_dir, ".hg")) and
             not os.path.exists(os.path.join(root_dir, ".svn"))):
        root_dir = os.path.dirname(root_dir)
      if (os.path.exists(os.path.join(root_dir, ".git")) or
          os.path.exists(os.path.join(root_dir, ".hg")) or
          os.path.exists(os.path.join(root_dir, ".svn"))):
        prefix = os.path.commonprefix([root_dir, project_dir])
        return fullname[len(prefix) + 1:]
    # Don't know what to do; header guard warnings may be wrong...
    return fullname
  def Split(self):
    """Splits the file into the directory, basename, and extension.
    For 'chrome/browser/browser.cc', Split() would
    return ('chrome/browser', 'browser', '.cc')
    Returns:
      A tuple of (directory, basename, extension).
    """
    googlename = self.RepositoryName()
    project, rest = os.path.split(googlename)
    return (project,) + os.path.splitext(rest)
  def BaseName(self):
    """File base name - text after the final slash, before the final period."""
    return self.Split()[1]
  def Extension(self):
    """File extension - text following the final period."""
    return self.Split()[2]
  def NoExtension(self):
    """File has no source file extension."""
    return '/'.join(self.Split()[0:2])
  def IsSource(self):
    """File has a source file extension."""
    # _valid_extensions is a module-level collection of allowed extensions.
    return self.Extension()[1:] in _valid_extensions
def _ShouldPrintError(category, confidence, linenum):
  """If confidence >= verbose, category passes filter and is not suppressed."""
  # Three reasons not to print: a "NOLINT(category)" comment on the source
  # line, insufficient confidence for the verbosity level, or the filters.
  if IsErrorSuppressedByNolint(category, linenum):
    return False
  if confidence < _cpplint_state.verbose_level:
    return False
  # Apply the +/- filters in order; the last matching filter wins.
  filtered_out = False
  for one_filter in _Filters():
    matches = category.startswith(one_filter[1:])
    if one_filter.startswith('-'):
      if matches:
        filtered_out = True
    elif one_filter.startswith('+'):
      if matches:
        filtered_out = False
    else:
      assert False  # should have been checked for in SetFilter.
  return not filtered_out
def Error(filename, linenum, category, confidence, message):
  """Logs the fact we've found a lint error.
  We log where the error was found, and also our confidence in the error,
  that is, how certain we are this is a legitimate style regression, and
  not a misidentification or a use that's sometimes justified.
  False positives can be suppressed by the use of
  "cpplint(category)" comments on the offending line.  These are
  parsed into _error_suppressions.
  Args:
    filename: The name of the file containing the error.
    linenum: The number of the line containing the error.
    category: A string used to describe the "category" this bug
      falls under: "whitespace", say, or "runtime".  Categories
      may have a hierarchy separated by slashes: "whitespace/indent".
    confidence: A number from 1-5 representing a confidence score for
      the error, with 5 meaning that we are certain of the problem,
      and 1 meaning that it could be a legitimate construct.
    message: The error message.
  """
  if not _ShouldPrintError(category, confidence, linenum):
    return
  _cpplint_state.IncrementErrorCount(category)
  # Pick the message template that matches the configured output format.
  if _cpplint_state.output_format == 'vs7':
    template = '%s(%s): %s  [%s] [%d]\n'
  elif _cpplint_state.output_format == 'eclipse':
    template = '%s:%s: warning: %s  [%s] [%d]\n'
  else:
    template = '%s:%s:  %s  [%s] [%d]\n'
  sys.stderr.write(template % (filename, linenum, message, category,
                               confidence))
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
    r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Match a single C style comment on the same line.
# NOTE: kept as a plain pattern string (not compiled) so it can be embedded
# in the composite pattern below.
_RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/'
# Matches multi-line C style comments.
# This RE is a little bit more complicated than one might expect, because we
# have to take care of space removals tools so we can handle comments inside
# statements better.
# The current rule is: We only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side,
# if this doesn't work we try on left side but only if there's a non-character
# on the right.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
    r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' +
    _RE_PATTERN_C_COMMENTS + r'\s+|' +
    r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' +
    _RE_PATTERN_C_COMMENTS + r')')
def IsCppString(line):
  """Does line terminate so, that the next symbol is in string constant.
  This function does not consider single-line nor multi-line comments.
  Args:
    line: is a partial line of code starting from the 0..n.
  Returns:
    True, if next character appended to 'line' is inside a
    string constant.
  """
  # Neutralize escaped backslashes first, so \\" is not mistaken for \".
  sanitized = line.replace(r'\\', 'XX')
  # Quotes that actually delimit strings: all quotes, minus escaped quotes,
  # minus quotes that appear as the character literal '"'.
  open_quotes = (sanitized.count('"') - sanitized.count(r'\"') -
                 sanitized.count("'\"'"))
  return (open_quotes & 1) == 1
def CleanseRawStrings(raw_lines):
  """Removes C++11 raw strings from lines.
  Before:
    static const char kData[] = R"(
        multi-line string
        )";
  After:
    static const char kData[] = ""
        (replaced by blank line)
        "";
  Args:
    raw_lines: list of raw lines.
  Returns:
    list of lines with C++11 raw strings replaced by empty strings.
  """
  # Closing delimiter (')<tag>"') of the raw string currently being
  # scanned, or None when we are not inside a raw string.
  delimiter = None
  lines_without_raw_strings = []
  for line in raw_lines:
    if delimiter:
      # Inside a raw string, look for the end
      end = line.find(delimiter)
      if end >= 0:
        # Found the end of the string, match leading space for this
        # line and resume copying the original lines, and also insert
        # a "" on the last line.
        leading_space = Match(r'^(\s*)\S', line)
        line = leading_space.group(1) + '""' + line[end + len(delimiter):]
        delimiter = None
      else:
        # Haven't found the end yet, append a blank line.
        line = '""'
    # Look for beginning of a raw string, and replace them with
    # empty strings.  This is done in a loop to handle multiple raw
    # strings on the same line.
    while delimiter is None:
      # Look for beginning of a raw string.
      # See 2.14.15 [lex.string] for syntax.
      matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
      if matched:
        # group(2) is the optional delimiter tag between '"' and '('.
        delimiter = ')' + matched.group(2) + '"'
        end = matched.group(3).find(delimiter)
        if end >= 0:
          # Raw string ended on same line
          line = (matched.group(1) + '""' +
                  matched.group(3)[end + len(delimiter):])
          delimiter = None
        else:
          # Start of a multi-line raw string
          line = matched.group(1) + '""'
      else:
        break
    lines_without_raw_strings.append(line)
  # TODO(unknown): if delimiter is not None here, we might want to
  # emit a warning for unterminated string.
  return lines_without_raw_strings
def FindNextMultiLineCommentStart(lines, lineix):
  """Find the beginning marker for a multiline comment."""
  for ix in range(lineix, len(lines)):
    stripped = lines[ix].strip()
    # Only report a '/*' that is not closed on the same line.
    if stripped.startswith('/*') and stripped.find('*/', 2) < 0:
      return ix
  return len(lines)
def FindNextMultiLineCommentEnd(lines, lineix):
"""We are inside a comment, find the end marker."""
while lineix < len(lines):
if lines[lineix].strip().endswith('*/'):
return lineix
lineix += 1
return len(lines)
def RemoveMultiLineCommentsFromRange(lines, begin, end):
  """Clears a range of lines for multi-line comments."""
  # Having // dummy comments makes the lines non-empty, so we will not get
  # unnecessary blank line warnings later in the code.
  index = begin
  while index < end:
    lines[index] = '/**/'
    index += 1
def RemoveMultiLineComments(filename, lines, error):
  """Removes multiline (c-style) comments from lines."""
  scan_from = 0
  while scan_from < len(lines):
    comment_begin = FindNextMultiLineCommentStart(lines, scan_from)
    if comment_begin >= len(lines):
      # No further multi-line comments in the file.
      return
    comment_end = FindNextMultiLineCommentEnd(lines, comment_begin)
    if comment_end >= len(lines):
      error(filename, comment_begin + 1, 'readability/multiline_comment', 5,
            'Could not find end of multi-line comment')
      return
    RemoveMultiLineCommentsFromRange(lines, comment_begin, comment_end + 1)
    scan_from = comment_end + 1
def CleanseComments(line):
  """Strips //-comments and single-line C-style /* */ comments.
  Args:
    line: A line of C++ source.
  Returns:
    The line with single-line comments removed.
  """
  slash_slash_pos = line.find('//')
  # Only treat '//' as a comment when it does not sit inside a string.
  if slash_slash_pos != -1 and not IsCppString(line[:slash_slash_pos]):
    line = line[:slash_slash_pos].rstrip()
  # Drop any complete /* ... */ comments remaining on the line.
  return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
class CleansedLines(object):
  """Holds 4 copies of all lines with different preprocessing applied to them.
  1) elided member contains lines without strings and comments.
  2) lines member contains lines without comments.
  3) raw_lines member contains all the lines without processing.
  4) lines_without_raw_strings member is same as raw_lines, but with C++11 raw
     strings removed.
  All these members are of <type 'list'>, and of the same length.
  """
  def __init__(self, lines):
    self.elided = []
    self.lines = []
    self.raw_lines = lines
    self.num_lines = len(lines)
    self.lines_without_raw_strings = CleanseRawStrings(lines)
    for linenum in range(len(self.lines_without_raw_strings)):
      # 'lines': comments stripped, string contents kept.
      self.lines.append(CleanseComments(
          self.lines_without_raw_strings[linenum]))
      # 'elided': strings collapsed to ""/'' first, then comments stripped.
      elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
      self.elided.append(CleanseComments(elided))
  def NumLines(self):
    """Returns the number of lines represented."""
    return self.num_lines
  @staticmethod
  def _CollapseStrings(elided):
    """Collapses strings and chars on a line to simple "" or '' blocks.
    We nix strings first so we're not fooled by text like '"http://"'
    Args:
      elided: The line being processed.
    Returns:
      The line with collapsed strings.
    """
    # #include lines may legitimately contain "..." or <...> paths; leave
    # them untouched.
    if _RE_PATTERN_INCLUDE.match(elided):
      return elided
    # Remove escaped characters first to make quote/single quote collapsing
    # basic.  Things that look like escaped characters shouldn't occur
    # outside of strings and chars.
    elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
    # Replace quoted strings and digit separators.  Both single quotes
    # and double quotes are processed in the same loop, otherwise
    # nested quotes wouldn't work.
    collapsed = ''
    while True:
      # Find the first quote character
      match = Match(r'^([^\'"]*)([\'"])(.*)$', elided)
      if not match:
        collapsed += elided
        break
      head, quote, tail = match.groups()
      if quote == '"':
        # Collapse double quoted strings
        second_quote = tail.find('"')
        if second_quote >= 0:
          collapsed += head + '""'
          elided = tail[second_quote + 1:]
        else:
          # Unmatched double quote, don't bother processing the rest
          # of the line since this is probably a multiline string.
          collapsed += elided
          break
      else:
        # Found single quote, check nearby text to eliminate digit separators.
        #
        # There is no special handling for floating point here, because
        # the integer/fractional/exponent parts would all be parsed
        # correctly as long as there are digits on both sides of the
        # separator.  So we are fine as long as we don't see something
        # like "0.'3" (gcc 4.9.0 will not allow this literal).
        if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
          match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail)
          collapsed += head + match_literal.group(1).replace("'", '')
          elided = match_literal.group(2)
        else:
          second_quote = tail.find('\'')
          if second_quote >= 0:
            collapsed += head + "''"
            elided = tail[second_quote + 1:]
          else:
            # Unmatched single quote
            collapsed += elided
            break
    return collapsed
def FindEndOfExpressionInLine(line, startpos, stack):
  """Find the position just after the end of current parenthesized expression.
  Args:
    line: a CleansedLines line.
    startpos: start searching at this position.
    stack: nesting stack at startpos.
  Returns:
    On finding matching end: (index just after matching end, None)
    On finding an unclosed expression: (-1, None)
    Otherwise: (-1, new stack at end of this line)
  """
  # NOTE: '<' handling is heuristic.  A '<' is tentatively pushed as a
  # template bracket and later popped as an operator if contradicted by a
  # closing paren/bracket/brace or a ';'.
  for i in xrange(startpos, len(line)):
    char = line[i]
    if char in '([{':
      # Found start of parenthesized expression, push to expression stack
      stack.append(char)
    elif char == '<':
      # Found potential start of template argument list
      if i > 0 and line[i - 1] == '<':
        # Left shift operator
        if stack and stack[-1] == '<':
          stack.pop()
          if not stack:
            return (-1, None)
      elif i > 0 and Search(r'\boperator\s*$', line[0:i]):
        # operator<, don't add to stack
        continue
      else:
        # Tentative start of template argument list
        stack.append('<')
    elif char in ')]}':
      # Found end of parenthesized expression.
      #
      # If we are currently expecting a matching '>', the pending '<'
      # must have been an operator.  Remove them from expression stack.
      while stack and stack[-1] == '<':
        stack.pop()
      if not stack:
        return (-1, None)
      if ((stack[-1] == '(' and char == ')') or
          (stack[-1] == '[' and char == ']') or
          (stack[-1] == '{' and char == '}')):
        stack.pop()
        if not stack:
          # This closer completed the outermost expression.
          return (i + 1, None)
      else:
        # Mismatched parentheses
        return (-1, None)
    elif char == '>':
      # Found potential end of template argument list.
      # Ignore "->" and operator functions
      if (i > 0 and
          (line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))):
        continue
      # Pop the stack if there is a matching '<'.  Otherwise, ignore
      # this '>' since it must be an operator.
      if stack:
        if stack[-1] == '<':
          stack.pop()
          if not stack:
            return (i + 1, None)
    elif char == ';':
      # Found something that look like end of statements.  If we are currently
      # expecting a '>', the matching '<' must have been an operator, since
      # template argument list should not contain statements.
      while stack and stack[-1] == '<':
        stack.pop()
      if not stack:
        return (-1, None)
  # Did not find end of expression or unbalanced parentheses on this line
  return (-1, stack)
def CloseExpression(clean_lines, linenum, pos):
  """If input points to ( or { or [ or <, finds the position that closes it.
  If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
  linenum/pos that correspond to the closing of the expression.
  TODO(unknown): cpplint spends a fair bit of time matching parentheses.
  Ideally we would want to index all opening and closing parentheses once
  and have CloseExpression be just a simple lookup, but due to preprocessor
  tricks, this is not so easy.
  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    pos: A position on the line.
  Returns:
    A tuple (line, linenum, pos) pointer *past* the closing brace, or
    (line, len(lines), -1) if we never find a close.  Note we ignore
    strings and comments when matching; and the line we return is the
    'cleansed' line at linenum.
  """
  line = clean_lines.elided[linenum]
  # Bail out on non-openers and on '<<' / '<=' which are operators.
  if line[pos] not in '({[<' or Match(r'<[<=]', line[pos:]):
    return (line, clean_lines.NumLines(), -1)
  # Try the starting line first.
  (close_pos, nesting) = FindEndOfExpressionInLine(line, pos, [])
  if close_pos > -1:
    return (line, linenum, close_pos)
  # Carry the nesting stack forward through subsequent lines.
  while nesting and linenum < clean_lines.NumLines() - 1:
    linenum += 1
    line = clean_lines.elided[linenum]
    (close_pos, nesting) = FindEndOfExpressionInLine(line, 0, nesting)
    if close_pos > -1:
      return (line, linenum, close_pos)
  # Ran out of lines without finding the matching closer.
  return (line, clean_lines.NumLines(), -1)
def FindStartOfExpressionInLine(line, endpos, stack):
  """Find position at the matching start of current expression.
  This is almost the reverse of FindEndOfExpressionInLine, but note
  that the input position and returned position differs by 1.
  Args:
    line: a CleansedLines line.
    endpos: start searching at this position.
    stack: nesting stack at endpos.
  Returns:
    On finding matching start: (index at matching start, None)
    On finding an unclosed expression: (-1, None)
    Otherwise: (-1, new stack at beginning of this line)
  """
  # Scans right-to-left; '>' is tentatively treated as a template bracket
  # and later discarded as an operator if contradicted.
  i = endpos
  while i >= 0:
    char = line[i]
    if char in ')]}':
      # Found end of expression, push to expression stack
      stack.append(char)
    elif char == '>':
      # Found potential end of template argument list.
      #
      # Ignore it if it's a "->" or ">=" or "operator>"
      if (i > 0 and
          (line[i - 1] == '-' or
           Match(r'\s>=\s', line[i - 1:]) or
           Search(r'\boperator\s*$', line[0:i]))):
        i -= 1
      else:
        stack.append('>')
    elif char == '<':
      # Found potential start of template argument list
      if i > 0 and line[i - 1] == '<':
        # Left shift operator
        i -= 1
      else:
        # If there is a matching '>', we can pop the expression stack.
        # Otherwise, ignore this '<' since it must be an operator.
        if stack and stack[-1] == '>':
          stack.pop()
          if not stack:
            return (i, None)
    elif char in '([{':
      # Found start of expression.
      #
      # If there are any unmatched '>' on the stack, they must be
      # operators.  Remove those.
      while stack and stack[-1] == '>':
        stack.pop()
      if not stack:
        return (-1, None)
      if ((char == '(' and stack[-1] == ')') or
          (char == '[' and stack[-1] == ']') or
          (char == '{' and stack[-1] == '}')):
        stack.pop()
        if not stack:
          # This opener completed the outermost expression.
          return (i, None)
      else:
        # Mismatched parentheses
        return (-1, None)
    elif char == ';':
      # Found something that look like end of statements.  If we are currently
      # expecting a '<', the matching '>' must have been an operator, since
      # template argument list should not contain statements.
      while stack and stack[-1] == '>':
        stack.pop()
      if not stack:
        return (-1, None)
    i -= 1
  return (-1, stack)
def ReverseCloseExpression(clean_lines, linenum, pos):
  """If input points to ) or } or ] or >, finds the position that opens it.
  If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
  linenum/pos that correspond to the opening of the expression.
  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    pos: A position on the line.
  Returns:
    A tuple (line, linenum, pos) pointer *at* the opening brace, or
    (line, 0, -1) if we never find the matching opening brace.  Note
    we ignore strings and comments when matching; and the line we
    return is the 'cleansed' line at linenum.
  """
  line = clean_lines.elided[linenum]
  if line[pos] not in ')}]>':
    return (line, 0, -1)
  # Try the closing line first.
  (open_pos, nesting) = FindStartOfExpressionInLine(line, pos, [])
  if open_pos > -1:
    return (line, linenum, open_pos)
  # Carry the nesting stack backward through the preceding lines.
  while nesting and linenum > 0:
    linenum -= 1
    line = clean_lines.elided[linenum]
    (open_pos, nesting) = FindStartOfExpressionInLine(line, len(line) - 1,
                                                      nesting)
    if open_pos > -1:
      return (line, linenum, open_pos)
  # Reached the top of the file without finding the matching opener.
  return (line, 0, -1)
def CheckForCopyright(filename, lines, error):
  """Logs an error if no Copyright message appears at the top of the file."""
  # The notice should occur within the first 10 real lines; lines[0] is a
  # dummy entry, so start scanning at index 1.
  limit = min(len(lines), 11)
  if not any(re.search(r'Copyright', lines[i], re.I) for i in range(1, limit)):
    error(filename, 0, 'legal/copyright', 5,
          'No copyright message found.  '
          'You should have a line: "Copyright [year] <Copyright Owner>"')
def GetIndentLevel(line):
  """Return the number of leading spaces in line.
  Args:
    line: A string to check.
  Returns:
    An integer count of leading spaces, possibly zero.
  """
  # Require a non-space character so blank/whitespace-only lines count as 0.
  indent_match = Match(r'^( *)\S', line)
  return len(indent_match.group(1)) if indent_match else 0
def GetHeaderGuardCPPVariable(filename):
  """Returns the CPP variable that should be used as a header guard.
  Args:
    filename: The name of a C++ header file.
  Returns:
    The CPP variable that should be used as a header guard in the
    named file.
  """
  # Undo Emacs flymake's temporary renaming so the guard matches the real
  # file name.
  filename = re.sub(r'_flymake\.h$', '.h', filename)
  filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
  # Replace 'c++' with 'cpp'.
  filename = filename.replace('C++', 'cpp').replace('c++', 'cpp')
  relative_path = FileInfo(filename).RepositoryName()
  if _root:
    # Strip the user-specified root prefix, if any.
    relative_path = re.sub('^' + _root + os.sep, '', relative_path)
  return re.sub(r'[^a-zA-Z0-9]', '_', relative_path).upper() + '_'
def CheckForHeaderGuard(filename, clean_lines, error):
  """Checks that the file contains a header guard.
  Logs an error if no #ifndef header guard is present.  For other
  headers, checks that the full pathname is used.
  Args:
    filename: The name of the C++ header file.
    clean_lines: A CleansedLines instance containing the file.
    error: The function to call with any errors found.
  """
  # Don't check for header guards if there are error suppression
  # comments somewhere in this file.
  #
  # Because this is silencing a warning for a nonexistent line, we
  # only support the very specific NOLINT(build/header_guard) syntax,
  # and not the general NOLINT or NOLINT(*) syntax.
  raw_lines = clean_lines.lines_without_raw_strings
  for i in raw_lines:
    if Search(r'//\s*NOLINT\(build/header_guard\)', i):
      return
  cppvar = GetHeaderGuardCPPVariable(filename)
  ifndef = ''
  ifndef_linenum = 0
  define = ''
  endif = ''
  endif_linenum = 0
  for linenum, line in enumerate(raw_lines):
    linesplit = line.split()
    if len(linesplit) >= 2:
      # find the first occurrence of #ifndef and #define, save arg
      if not ifndef and linesplit[0] == '#ifndef':
        # set ifndef to the header guard presented on the #ifndef line.
        ifndef = linesplit[1]
        ifndef_linenum = linenum
      if not define and linesplit[0] == '#define':
        define = linesplit[1]
    # find the last occurrence of #endif, save entire line
    if line.startswith('#endif'):
      endif = line
      endif_linenum = linenum
  # A valid guard requires matching #ifndef/#define macro names.
  if not ifndef or not define or ifndef != define:
    error(filename, 0, 'build/header_guard', 5,
          'No #ifndef header guard found, suggested CPP variable is: %s' %
          cppvar)
    return
  # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
  # for backward compatibility.
  if ifndef != cppvar:
    error_level = 0
    if ifndef != cppvar + '_':
      error_level = 5
    ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum,
                            error)
    error(filename, ifndef_linenum, 'build/header_guard', error_level,
          '#ifndef header guard has wrong style, please use: %s' % cppvar)
  # Check for "//" comments on endif line.
  ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum,
                          error)
  match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif)
  if match:
    if match.group(1) == '_':
      # Issue low severity warning for deprecated double trailing underscore
      error(filename, endif_linenum, 'build/header_guard', 0,
            '#endif line should be "#endif  // %s"' % cppvar)
    return
  # Didn't find the corresponding "//" comment.  If this file does not
  # contain any "//" comments at all, it could be that the compiler
  # only wants "/**/" comments, look for those instead.
  no_single_line_comments = True
  for i in xrange(1, len(raw_lines) - 1):
    line = raw_lines[i]
    if Match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line):
      no_single_line_comments = False
      break
  if no_single_line_comments:
    match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif)
    if match:
      if match.group(1) == '_':
        # Low severity warning for double trailing underscore
        error(filename, endif_linenum, 'build/header_guard', 0,
              '#endif line should be "#endif  /* %s */"' % cppvar)
      return
  # Didn't find anything
  error(filename, endif_linenum, 'build/header_guard', 5,
        '#endif line should be "#endif  // %s"' % cppvar)
def CheckHeaderFileIncluded(filename, include_state, error):
  """Logs an error if a .cc file does not include its header."""
  # Do not check test files
  if filename.endswith(('_test.cc', '_unittest.cc')):
    return
  fileinfo = FileInfo(filename)
  # Replace the trailing 'cc' with 'h' to get the companion header path.
  headerfile = filename[:-2] + 'h'
  if not os.path.exists(headerfile):
    return
  headername = FileInfo(headerfile).RepositoryName()
  first_include = 0
  for section_list in include_state.include_list:
    for entry in section_list:
      # entry[0] is the included path, entry[1] its line number.
      if headername in entry[0] or entry[0] in headername:
        return
      if not first_include:
        first_include = entry[1]
  error(filename, first_include, 'build/include', 5,
        '%s should include its header file %s' % (fileinfo.RepositoryName(),
                                                  headername))
def CheckForBadCharacters(filename, lines, error):
  """Logs an error for each line containing bad characters.
  Two kinds of bad characters:
  1. Unicode replacement characters: These indicate that either the file
     contained invalid UTF-8 (likely) or Unicode replacement characters (which
     it shouldn't).  Note that it's possible for this to throw off line
     numbering if the invalid UTF-8 occurred adjacent to a newline.
  2. NUL bytes.  These are problematic for some tools.
  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  for line_number, current_line in enumerate(lines):
    if u('\ufffd') in current_line:
      error(filename, line_number, 'readability/utf8', 5,
            'Line contains invalid UTF-8 (or Unicode replacement character).')
    if '\0' in current_line:
      error(filename, line_number, 'readability/nul', 5,
            'Line contains NUL byte.')
def CheckForNewlineAtEOF(filename, lines, error):
  """Logs an error if there is no newline char at the end of the file.
  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  # The array lines() was created by adding two newlines to the
  # original file (go figure), then splitting on \n.
  # To verify that the file ends in \n, we just have to make sure the
  # last-but-two element of lines() exists and is empty.
  if len(lines) >= 3 and not lines[-2]:
    return
  error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
        'Could not find a newline character at the end of the file.')
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
  """Logs an error if we see /* ... */ or "..." that extend past one line.

  /* ... */ comments are legit inside macros, for one line.  Otherwise
  //-style comments are preferred, so it is fine to warn about the former.
  Strings may legally span multiple lines when each line ends with a
  backslash continuation; although the C++ style guide does not currently
  prohibit that, it is ugly and unnecessary.  This lint program handles
  neither construct well, so both are flagged.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Escaped backslashes are harmless; stripping them first keeps the
  # second (escaped) slash from later being misread as part of a \".
  text = clean_lines.elided[linenum].replace('\\\\', '')

  # More comment openers than closers means a /* ... */ spills over.
  if text.count('/*') > text.count('*/'):
    error(filename, linenum, 'readability/multiline_comment', 5,
          'Complex multi-line /*...*/-style comment found. '
          'Lint may give bogus warnings. '
          'Consider replacing these with //-style comments, '
          'with #if 0...#endif, '
          'or with more clearly structured multi-line comments.')

  # An odd number of unescaped double quotes means a string is left open.
  unescaped_quotes = text.count('"') - text.count('\\"')
  if unescaped_quotes % 2 == 1:
    error(filename, linenum, 'readability/multiline_string', 5,
          'Multi-line string ("...") found. This lint script doesn\'t '
          'do well with such strings, and may give bogus warnings. '
          'Use C++11 raw strings or concatenation instead.')
# Table of thread-unsafe POSIX functions and their reentrant replacements,
# as (non-threadsafe name, thread-safe alternative, validation pattern)
# tuples.  Consumed by CheckPosixThreading below.
#
# The validation pattern is used to eliminate false positives such as:
#  _rand();               // false positive due to substring match.
#  ->rand();              // some member function rand().
#  ACMRandom rand(seed);  // some variable named rand.
#  ISAACRandom rand();    // another variable named rand.
#
# Basically we require the return value of these functions to be used
# in some expression context on the same line by matching on some
# operator before the function name.  This eliminates constructors and
# member function calls.
#
# Matches an operator (or an open paren / '>' with mandatory space)
# immediately preceding the function name.
_UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)'
_THREADING_LIST = (
    ('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'),
    ('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'),
    ('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'),
    ('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'),
    ('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'),
    ('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'),
    ('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'),
    ('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'),
    ('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'),
    ('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'),
    ('strtok(', 'strtok_r(',
     _UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'),
    ('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'),
    )
def CheckPosixThreading(filename, clean_lines, linenum, error):
  """Checks for calls to thread-unsafe functions.

  Much code has been originally written without consideration of
  multi-threading.  Also, engineers are relying on their old experience;
  they have learned posix before threading extensions were added.  These
  tests guide the engineers to use thread-safe functions (when using
  posix directly).

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  for unsafe_func, safe_func, validation_pattern in _THREADING_LIST:
    # The validation pattern requires the call's return value to be used
    # in an expression, which filters out same-named constructors,
    # variables and member functions.
    if not Search(validation_pattern, line):
      continue
    error(filename, linenum, 'runtime/threadsafe_fn', 2,
          'Consider using ' + safe_func + '...) instead of ' + unsafe_func +
          '...) for improved thread safety.')
def CheckVlogArguments(filename, clean_lines, linenum, error):
  """Checks that VLOG() is only used for defining a logging level.

  For example, VLOG(2) is correct.  VLOG(INFO), VLOG(WARNING), VLOG(ERROR),
  and VLOG(FATAL) are not.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # A severity name inside VLOG() means the author confused it with LOG().
  if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)',
            clean_lines.elided[linenum]):
    error(filename, linenum, 'runtime/vlog', 5,
          'VLOG() should be used with numeric verbosity level. '
          'Use LOG() if you want symbolic severity levels.')
# Matches an invalid increment such as *count++, which advances the
# pointer instead of incrementing the pointed-to value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
    r'^\s*\*\w+(\+\+|--);')


def CheckInvalidIncrement(filename, clean_lines, linenum, error):
  """Checks for invalid increment *count++.

  For example the following function:
    void increment_counter(int* count) {
      *count++;
    }
  is invalid, because it effectively does count++, moving the pointer,
  and should be replaced with ++*count, (*count)++ or *count += 1.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  if _RE_PATTERN_INVALID_INCREMENT.match(clean_lines.elided[linenum]):
    error(filename, linenum, 'runtime/invalid_increment', 5,
          'Changing pointer instead of value (or unused value of operator*).')
def IsMacroDefinition(clean_lines, linenum):
  """Returns True if the given line starts or continues a #define.

  A line continues a macro definition when the previous line ends
  with a backslash continuation character.

  Args:
    clean_lines: An array of lines of the file.
    linenum: The number of the line to check.

  Returns:
    True if this line belongs to a macro definition, False otherwise.
  """
  return bool(Search(r'^#define', clean_lines[linenum]) or
              (linenum > 0 and Search(r'\\$', clean_lines[linenum - 1])))
def IsForwardClassDeclaration(clean_lines, linenum):
  """Returns a truthy match if the line is a forward class declaration.

  Args:
    clean_lines: An array of lines of the file.
    linenum: The number of the line to check.

  Returns:
    The re match object if the line looks like e.g. "class Foo;" (with an
    optional template prefix), None otherwise.
  """
  forward_decl_pattern = r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$'
  return Match(forward_decl_pattern, clean_lines[linenum])
class _BlockInfo(object):
  """Stores information about a generic block of code."""

  def __init__(self, seen_open_brace):
    # Parsing state for this block, maintained by NestingState.Update().
    self.check_namespace_indentation = False  # only namespaces/classes opt in
    self.inline_asm = _NO_ASM                 # inline-assembly parsing state
    self.open_parentheses = 0                 # running count of unmatched '('
    self.seen_open_brace = seen_open_brace    # has the '{' been consumed yet?

  def CheckBegin(self, filename, clean_lines, linenum, error):
    """Run checks that apply to text up to the opening brace.

    This is mostly for checking the text after the class identifier
    and the "{", usually where the base class is specified.  For other
    blocks, there isn't much to check, so we always pass.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    pass

  def CheckEnd(self, filename, clean_lines, linenum, error):
    """Run checks that apply to text after the closing brace.

    This is mostly used for checking end-of-namespace comments.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    pass

  def IsBlockInfo(self):
    """Returns true if this block is a plain _BlockInfo.

    This is convenient for verifying that an object is an instance of
    _BlockInfo itself rather than of any derived class.

    Returns:
      True for this class, False for derived classes.
    """
    return self.__class__ == _BlockInfo
class _ExternCInfo(_BlockInfo):
  """Stores information about an 'extern "C"' block."""

  def __init__(self):
    # An extern "C" block has no separate head, so the opening brace is
    # considered already seen.
    super(_ExternCInfo, self).__init__(True)
class _ClassInfo(_BlockInfo):
  """Stores information about a class or struct declaration."""

  def __init__(self, name, class_or_struct, clean_lines, linenum):
    _BlockInfo.__init__(self, False)
    self.name = name
    self.starting_linenum = linenum  # line of the class declaration itself
    self.is_derived = False          # set by CheckBegin if a base class is seen
    self.check_namespace_indentation = True
    if class_or_struct == 'struct':
      self.access = 'public'   # structs default to public access
      self.is_struct = True
    else:
      self.access = 'private'  # classes default to private access
      self.is_struct = False

    # Remember initial indentation level for this class.  Using raw_lines here
    # instead of elided to account for leading comments.
    self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum])

    # Try to find the end of the class.  This will be confused by things like:
    #   class A {
    #   } *x = { ...
    #
    # But it's still good enough for CheckSectionSpacing.
    self.last_line = 0
    depth = 0
    # Scan forward counting brace depth until the class body closes.
    for i in range(linenum, clean_lines.NumLines()):
      line = clean_lines.elided[i]
      depth += line.count('{') - line.count('}')
      if not depth:
        self.last_line = i
        break

  def CheckBegin(self, filename, clean_lines, linenum, error):
    # Look for a bare ':' (not '::'), which indicates a base-class list.
    if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
      self.is_derived = True

  def CheckEnd(self, filename, clean_lines, linenum, error):
    # If there is a DISALLOW macro, it should appear near the end of
    # the class.  Scan backwards from the closing brace looking for it.
    seen_last_thing_in_class = False
    for i in xrange(linenum - 1, self.starting_linenum, -1):
      match = Search(
          r'\b(DISALLOW_COPY_AND_ASSIGN|DISALLOW_IMPLICIT_CONSTRUCTORS)\(' +
          self.name + r'\)',
          clean_lines.elided[i])
      if match:
        if seen_last_thing_in_class:
          error(filename, i, 'readability/constructors', 3,
                match.group(1) + ' should be the last thing in the class')
        break
      # Any non-blank line below the macro disqualifies it from being last.
      if not Match(r'^\s*$', clean_lines.elided[i]):
        seen_last_thing_in_class = True

    # Check that closing brace is aligned with beginning of the class.
    # Only do this if the closing brace is indented by only whitespaces.
    # This means we will not check single-line class definitions.
    indent = Match(r'^( *)\}', clean_lines.elided[linenum])
    if indent and len(indent.group(1)) != self.class_indent:
      if self.is_struct:
        parent = 'struct ' + self.name
      else:
        parent = 'class ' + self.name
      error(filename, linenum, 'whitespace/indent', 3,
            'Closing brace should be aligned with beginning of %s' % parent)
class _NamespaceInfo(_BlockInfo):
  """Stores information about a namespace block."""

  def __init__(self, name, linenum):
    _BlockInfo.__init__(self, False)
    self.name = name or ''  # empty string for anonymous namespaces
    self.starting_linenum = linenum
    self.check_namespace_indentation = True

  def CheckEnd(self, filename, clean_lines, linenum, error):
    """Check end of namespace comments."""
    line = clean_lines.raw_lines[linenum]

    # Check how many lines is enclosed in this namespace.  Don't issue
    # warning for missing namespace comments if there aren't enough
    # lines.  However, do apply checks if there is already an end of
    # namespace comment and it's incorrect.
    #
    # TODO(unknown): We always want to check end of namespace comments
    # if a namespace is large, but sometimes we also want to apply the
    # check if a short namespace contained nontrivial things (something
    # other than forward declarations).  There is currently no logic on
    # deciding what these nontrivial things are, so this check is
    # triggered by namespace size only, which works most of the time.
    if (linenum - self.starting_linenum < 10
        and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)):
      return

    # Look for matching comment at end of namespace.
    #
    # Note that we accept C style "/* */" comments for terminating
    # namespaces, so that code that terminate namespaces inside
    # preprocessor macros can be cpplint clean.
    #
    # We also accept stuff like "// end of namespace <name>." with the
    # period at the end.
    #
    # Besides these, we don't accept anything else, otherwise we might
    # get false negatives when existing comment is a substring of the
    # expected namespace.
    if self.name:
      # Named namespace
      if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) +
                    r'[\*/\.\\\s]*$'),
                   line):
        error(filename, linenum, 'readability/namespace', 5,
              'Namespace should be terminated with "// namespace %s"' %
              self.name)
    else:
      # Anonymous namespace
      if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
        # If "// namespace anonymous" or "// anonymous namespace (more text)",
        # mention "// anonymous namespace" as an acceptable form
        if Match(r'}.*\b(namespace anonymous|anonymous namespace)\b', line):
          error(filename, linenum, 'readability/namespace', 5,
                'Anonymous namespace should be terminated with "// namespace"'
                ' or "// anonymous namespace"')
        else:
          error(filename, linenum, 'readability/namespace', 5,
                'Anonymous namespace should be terminated with "// namespace"')
class _PreprocessorInfo(object):
  """Stores checkpoints of nesting stacks when #if/#else is seen.

  NestingState uses these checkpoints to restore its brace-tracking stack
  when a preprocessor conditional presents alternative block structures.
  """

  def __init__(self, stack_before_if):
    # The entire nesting stack before #if; restored when an #else branch
    # begins, since only one branch is considered "active".
    self.stack_before_if = stack_before_if

    # The entire nesting stack up to #else; restored at the matching
    # #endif so parsing continues from the first (taken) branch.
    self.stack_before_else = []

    # Whether we have already seen #else or #elif for this conditional.
    self.seen_else = False
class NestingState(object):
  """Holds states related to parsing braces.

  Tracks the stack of nested blocks (classes, namespaces, generic braces)
  seen so far, plus preprocessor-conditional checkpoints, so other checks
  can ask "what scope is this line in?".
  """

  def __init__(self):
    # Stack for tracking all braces.  An object is pushed whenever we
    # see a "{", and popped when we see a "}".  Only 3 types of
    # objects are possible:
    # - _ClassInfo: a class or struct.
    # - _NamespaceInfo: a namespace.
    # - _BlockInfo: some other type of block.
    self.stack = []

    # Top of the previous stack before each Update().
    #
    # Because the nesting_stack is updated at the end of each line, we
    # had to do some convoluted checks to find out what is the current
    # scope at the beginning of the line.  This check is simplified by
    # saving the previous top of nesting stack.
    #
    # We could save the full stack, but we only need the top.  Copying
    # the full nesting stack would slow down cpplint by ~10%.
    self.previous_stack_top = []

    # Stack of _PreprocessorInfo objects, one per open #if.
    self.pp_stack = []

  def SeenOpenBrace(self):
    """Check if we have seen the opening brace for the innermost block.

    Returns:
      True if we have seen the opening brace, False if the innermost
      block is still expecting an opening brace.
    """
    return (not self.stack) or self.stack[-1].seen_open_brace

  def InNamespaceBody(self):
    """Check if we are currently one level inside a namespace body.

    Returns:
      True if top of the stack is a namespace block, False otherwise.
    """
    return self.stack and isinstance(self.stack[-1], _NamespaceInfo)

  def InExternC(self):
    """Check if we are currently one level inside an 'extern "C"' block.

    Returns:
      True if top of the stack is an extern block, False otherwise.
    """
    return self.stack and isinstance(self.stack[-1], _ExternCInfo)

  def InClassDeclaration(self):
    """Check if we are currently one level inside a class or struct declaration.

    Returns:
      True if top of the stack is a class/struct, False otherwise.
    """
    return self.stack and isinstance(self.stack[-1], _ClassInfo)

  def InAsmBlock(self):
    """Check if we are currently one level inside an inline ASM block.

    Returns:
      True if the top of the stack is a block containing inline ASM.
    """
    return self.stack and self.stack[-1].inline_asm != _NO_ASM

  def InTemplateArgumentList(self, clean_lines, linenum, pos):
    """Check if current position is inside template argument list.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      pos: position just after the suspected template argument.
    Returns:
      True if (linenum, pos) is inside template arguments.
    """
    while linenum < clean_lines.NumLines():
      # Find the earliest character that might indicate a template argument
      line = clean_lines.elided[linenum]
      match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
      if not match:
        linenum += 1
        pos = 0
        continue
      token = match.group(1)
      pos += len(match.group(0))

      # These things do not look like template argument list:
      #   class Suspect {
      #   class Suspect x; }
      if token in ('{', '}', ';'): return False

      # These things look like template argument list:
      #   template <class Suspect>
      #   template <class Suspect = default_value>
      #   template <class Suspect[]>
      #   template <class Suspect...>
      if token in ('>', '=', '[', ']', '.'): return True

      # Check if token is an unmatched '<'.
      # If not, move on to the next character.
      if token != '<':
        pos += 1
        if pos >= len(line):
          linenum += 1
          pos = 0
        continue

      # We can't be sure if we just find a single '<', and need to
      # find the matching '>'.
      (_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1)
      if end_pos < 0:
        # Not sure if template argument list or syntax error in file
        return False
      linenum = end_line
      pos = end_pos
    return False

  def UpdatePreprocessor(self, line):
    """Update preprocessor stack.

    We need to handle preprocessors due to classes like this:
      #ifdef SWIG
      struct ResultDetailsPageElementExtensionPoint {
      #else
      struct ResultDetailsPageElementExtensionPoint : public Extension {
      #endif

    We make the following assumptions (good enough for most files):
    - Preprocessor condition evaluates to true from #if up to first
      #else/#elif/#endif.
    - Preprocessor condition evaluates to false from #else/#elif up
      to #endif.  We still perform lint checks on these lines, but
      these do not affect nesting stack.

    Args:
      line: current line to check.
    """
    if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
      # Beginning of #if block, save the nesting stack here.  The saved
      # stack will allow us to restore the parsing state in the #else case.
      self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
    elif Match(r'^\s*#\s*(else|elif)\b', line):
      # Beginning of #else block
      if self.pp_stack:
        if not self.pp_stack[-1].seen_else:
          # This is the first #else or #elif block.  Remember the
          # whole nesting stack up to this point.  This is what we
          # keep after the #endif.
          self.pp_stack[-1].seen_else = True
          self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)

        # Restore the stack to how it was before the #if
        self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
      else:
        # TODO(unknown): unexpected #else, issue warning?
        pass
    elif Match(r'^\s*#\s*endif\b', line):
      # End of #if or #else blocks.
      if self.pp_stack:
        # If we saw an #else, we will need to restore the nesting
        # stack to its former state before the #else, otherwise we
        # will just continue from where we left off.
        if self.pp_stack[-1].seen_else:
          # Here we can just use a shallow copy since we are the last
          # reference to it.
          self.stack = self.pp_stack[-1].stack_before_else
        # Drop the corresponding #if
        self.pp_stack.pop()
      else:
        # TODO(unknown): unexpected #endif, issue warning?
        pass

  # TODO(unknown): Update() is too long, but we will refactor later.
  def Update(self, filename, clean_lines, linenum, error):
    """Update nesting state with current line.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[linenum]

    # Remember top of the previous nesting stack.
    #
    # The stack is always pushed/popped and not modified in place, so
    # we can just do a shallow copy instead of copy.deepcopy.  Using
    # deepcopy would slow down cpplint by ~28%.
    if self.stack:
      self.previous_stack_top = self.stack[-1]
    else:
      self.previous_stack_top = None

    # Update pp_stack
    self.UpdatePreprocessor(line)

    # Count parentheses.  This is to avoid adding struct arguments to
    # the nesting stack.
    if self.stack:
      inner_block = self.stack[-1]
      depth_change = line.count('(') - line.count(')')
      inner_block.open_parentheses += depth_change

      # Also check if we are starting or ending an inline assembly block.
      if inner_block.inline_asm in (_NO_ASM, _END_ASM):
        if (depth_change != 0 and
            inner_block.open_parentheses == 1 and
            _MATCH_ASM.match(line)):
          # Enter assembly block
          inner_block.inline_asm = _INSIDE_ASM
        else:
          # Not entering assembly block.  If previous line was _END_ASM,
          # we will now shift to _NO_ASM state.
          inner_block.inline_asm = _NO_ASM
      elif (inner_block.inline_asm == _INSIDE_ASM and
            inner_block.open_parentheses == 0):
        # Exit assembly block
        inner_block.inline_asm = _END_ASM

    # Consume namespace declaration at the beginning of the line.  Do
    # this in a loop so that we catch same line declarations like this:
    #   namespace proto2 { namespace bridge { class MessageSet; } }
    while True:
      # Match start of namespace.  The "\b\s*" below catches namespace
      # declarations even if it weren't followed by a whitespace, this
      # is so that we don't confuse our namespace checker.  The
      # missing spaces will be flagged by CheckSpacing.
      namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
      if not namespace_decl_match:
        break

      new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
      self.stack.append(new_namespace)

      line = namespace_decl_match.group(2)
      if line.find('{') != -1:
        new_namespace.seen_open_brace = True
        line = line[line.find('{') + 1:]

    # Look for a class declaration in whatever is left of the line
    # after parsing namespaces.  The regexp accounts for decorated classes
    # such as in:
    #   class LOCKABLE API Object {
    #   };
    class_decl_match = Match(
        r'^(\s*(?:template\s*<[\w\s<>,:]*>\s*)?'
        r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))'
        r'(.*)$', line)
    if (class_decl_match and
        (not self.stack or self.stack[-1].open_parentheses == 0)):
      # We do not want to accept classes that are actually template arguments:
      #   template <class Ignore1,
      #             class Ignore2 = Default<Args>,
      #             template <Args> class Ignore3>
      #   void Function() {};
      #
      # To avoid template argument cases, we scan forward and look for
      # an unmatched '>'.  If we see one, assume we are inside a
      # template argument list.
      end_declaration = len(class_decl_match.group(1))
      if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration):
        self.stack.append(_ClassInfo(
            class_decl_match.group(3), class_decl_match.group(2),
            clean_lines, linenum))
      line = class_decl_match.group(4)

    # If we have not yet seen the opening brace for the innermost block,
    # run checks here.
    if not self.SeenOpenBrace():
      self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)

    # Update access control if we are inside a class/struct
    if self.stack and isinstance(self.stack[-1], _ClassInfo):
      classinfo = self.stack[-1]
      access_match = Match(
          r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
          r':(?:[^:]|$)',
          line)
      if access_match:
        classinfo.access = access_match.group(2)

        # Check that access keywords are indented +1 space.  Skip this
        # check if the keywords are not preceded by whitespaces.
        indent = access_match.group(1)
        if (len(indent) != classinfo.class_indent + 1 and
            Match(r'^\s*$', indent)):
          if classinfo.is_struct:
            parent = 'struct ' + classinfo.name
          else:
            parent = 'class ' + classinfo.name
          slots = ''
          if access_match.group(3):
            slots = access_match.group(3)
          error(filename, linenum, 'whitespace/indent', 3,
                '%s%s: should be indented +1 space inside %s' % (
                    access_match.group(2), slots, parent))

    # Consume braces or semicolons from what's left of the line
    while True:
      # Match first brace, semicolon, or closed parenthesis.
      matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
      if not matched:
        break

      token = matched.group(1)
      if token == '{':
        # If namespace or class hasn't seen a opening brace yet, mark
        # namespace/class head as complete.  Push a new block onto the
        # stack otherwise.
        if not self.SeenOpenBrace():
          self.stack[-1].seen_open_brace = True
        elif Match(r'^extern\s*"[^"]*"\s*\{', line):
          self.stack.append(_ExternCInfo())
        else:
          self.stack.append(_BlockInfo(True))
          if _MATCH_ASM.match(line):
            self.stack[-1].inline_asm = _BLOCK_ASM

      elif token == ';' or token == ')':
        # If we haven't seen an opening brace yet, but we already saw
        # a semicolon, this is probably a forward declaration.  Pop
        # the stack for these.
        #
        # Similarly, if we haven't seen an opening brace yet, but we
        # already saw a closing parenthesis, then these are probably
        # function arguments with extra "class" or "struct" keywords.
        # Also pop these stack for these.
        if not self.SeenOpenBrace():
          self.stack.pop()
      else:  # token == '}'
        # Perform end of block checks and pop the stack.
        if self.stack:
          self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
          self.stack.pop()
      line = matched.group(2)

  def InnermostClass(self):
    """Get class info on the top of the stack.

    Returns:
      A _ClassInfo object if we are inside a class, or None otherwise.
    """
    # Walk the stack from the top down; classes may be nested inside
    # namespaces or other blocks.
    for i in range(len(self.stack), 0, -1):
      classinfo = self.stack[i - 1]
      if isinstance(classinfo, _ClassInfo):
        return classinfo
    return None

  def CheckCompletedBlocks(self, filename, error):
    """Checks that all classes and namespaces have been completely parsed.

    Call this when all lines in a file have been processed.
    Args:
      filename: The name of the current file.
      error: The function to call with any errors found.
    """
    # Note: This test can result in false positives if #ifdef constructs
    # get in the way of brace matching.  See the testBuildClass test in
    # cpplint_unittest.py for an example of this.
    for obj in self.stack:
      if isinstance(obj, _ClassInfo):
        error(filename, obj.starting_linenum, 'build/class', 5,
              'Failed to find complete declaration of class %s' %
              obj.name)
      elif isinstance(obj, _NamespaceInfo):
        error(filename, obj.starting_linenum, 'build/namespaces', 5,
              'Failed to find complete declaration of namespace %s' %
              obj.name)
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
                                  nesting_state, error):
  r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.

  Complain about several constructs which gcc-2 accepts, but which are
  not standard C++.  Warning about these in lint is one way to ease the
  transition to new compilers.
  - put storage class first (e.g. "static const" instead of "const static").
  - "%lld" instead of %qd" in printf-type functions.
  - "%1$d" is non-standard in printf-type functions.
  - "\%" is an undefined character escape sequence.
  - text after #endif is not allowed.
  - invalid inner-style forward declaration.
  - >? and <? operators, and their >?= and <?= cousins.

  Additionally, check for constructor/destructor style violations and reference
  members, as it is very convenient to do so while checking for
  gcc-2 compliance.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
  """

  # Remove comments from the line, but leave in strings for now.
  line = clean_lines.lines[linenum]

  if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
    error(filename, linenum, 'runtime/printf_format', 3,
          '%q in format strings is deprecated. Use %ll instead.')

  if Search(r'printf\s*\(.*".*%\d+\$', line):
    error(filename, linenum, 'runtime/printf_format', 2,
          '%N$ formats are unconventional. Try rewriting to avoid them.')

  # Remove escaped backslashes before looking for undefined escapes.
  line = line.replace('\\\\', '')

  if Search(r'("|\').*\\(%|\[|\(|{)', line):
    error(filename, linenum, 'build/printf_format', 3,
          '%, [, (, and { are undefined character escapes. Unescape them.')

  # For the rest, work with both comments and strings removed.
  line = clean_lines.elided[linenum]

  if Search(r'\b(const|volatile|void|char|short|int|long'
            r'|float|double|signed|unsigned'
            r'|schar|u?int8|u?int16|u?int32|u?int64)'
            r'\s+(register|static|extern|typedef)\b',
            line):
    error(filename, linenum, 'build/storage_class', 5,
          'Storage class (static, extern, typedef, etc) should be first.')

  if Match(r'\s*#\s*endif\s*[^/\s]+', line):
    error(filename, linenum, 'build/endif_comment', 5,
          'Uncommented text after #endif is non-standard. Use a comment.')

  if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
    error(filename, linenum, 'build/forward_decl', 5,
          'Inner-style forward declarations are invalid. Remove this line.')

  if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
            line):
    error(filename, linenum, 'build/deprecated', 3,
          '>? and <? (max and min) operators are non-standard and deprecated.')

  if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
    # TODO(unknown): Could it be expanded safely to arbitrary references,
    # without triggering too many false positives? The first
    # attempt triggered 5 warnings for mostly benign code in the regtest, hence
    # the restriction.
    # Here's the original regexp, for the reference:
    # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
    # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
    error(filename, linenum, 'runtime/member_string_references', 2,
          'const string& members are dangerous. It is much better to use '
          'alternatives, such as pointers or simple constants.')

  # Everything else in this function operates on class declarations.
  # Return early if the top of the nesting stack is not a class, or if
  # the class head is not completed yet.
  classinfo = nesting_state.InnermostClass()
  if not classinfo or not classinfo.seen_open_brace:
    return

  # The class may have been declared with namespace or classname qualifiers.
  # The constructor and destructor will not have those qualifiers.
  base_classname = classinfo.name.split('::')[-1]

  # Look for single-argument constructors that aren't marked explicit.
  # Technically a valid construct, but against style. Also look for
  # non-single-argument constructors which are also technically valid, but
  # strongly suggest something is wrong.
  explicit_constructor_match = Match(
      r'\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*'
      r'\(((?:[^()]|\([^()]*\))*)\)'
      % re.escape(base_classname),
      line)

  if explicit_constructor_match:
    # Truthy (the matched 'explicit ' text) when the keyword is present.
    is_marked_explicit = explicit_constructor_match.group(1)

    if not explicit_constructor_match.group(2):
      constructor_args = []
    else:
      constructor_args = explicit_constructor_match.group(2).split(',')

    # collapse arguments so that commas in template parameter lists and function
    # argument parameter lists don't split arguments in two
    i = 0
    while i < len(constructor_args):
      constructor_arg = constructor_args[i]
      # Re-join pieces until brackets/parens balance within this argument.
      while (constructor_arg.count('<') > constructor_arg.count('>') or
             constructor_arg.count('(') > constructor_arg.count(')')):
        constructor_arg += ',' + constructor_args[i + 1]
        del constructor_args[i + 1]
      constructor_args[i] = constructor_arg
      i += 1

    defaulted_args = [arg for arg in constructor_args if '=' in arg]

    noarg_constructor = (not constructor_args or  # empty arg list
                         # 'void' arg specifier
                         (len(constructor_args) == 1 and
                          constructor_args[0].strip() == 'void'))
    onearg_constructor = ((len(constructor_args) == 1 and  # exactly one arg
                           not noarg_constructor) or
                          # all but at most one arg defaulted
                          (len(constructor_args) >= 1 and
                           not noarg_constructor and
                           len(defaulted_args) >= len(constructor_args) - 1))
    initializer_list_constructor = bool(
        onearg_constructor and
        Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
    copy_constructor = bool(
        onearg_constructor and
        Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
              % re.escape(base_classname), constructor_args[0].strip()))

    # Initializer-list and copy constructors are conventionally exempt
    # from the explicit requirement.
    if (not is_marked_explicit and
        onearg_constructor and
        not initializer_list_constructor and
        not copy_constructor):
      if defaulted_args:
        error(filename, linenum, 'runtime/explicit', 5,
              'Constructors callable with one argument '
              'should be marked explicit.')
      else:
        error(filename, linenum, 'runtime/explicit', 5,
              'Single-parameter constructors should be marked explicit.')
    elif is_marked_explicit and not onearg_constructor:
      if noarg_constructor:
        error(filename, linenum, 'runtime/explicit', 5,
              'Zero-parameter constructors should not be marked explicit.')
      else:
        error(filename, linenum, 'runtime/explicit', 0,
              'Constructors that require multiple arguments '
              'should not be marked explicit.')
def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
  """Checks for the correctness of various spacing around function calls.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Since function calls often occur inside if/for/while/switch
  # expressions - which have their own, more liberal conventions - we
  # first see if we should be looking inside such an expression for a
  # function call, to which we can apply more strict standards.
  fncall = line    # if there's no control flow construct, look at whole line
  for pattern in (r'\bif\s*\((.*)\)\s*{',
                  r'\bfor\s*\((.*)\)\s*{',
                  r'\bwhile\s*\((.*)\)\s*[{;]',
                  r'\bswitch\s*\((.*)\)\s*{'):
    match = Search(pattern, line)
    if match:
      fncall = match.group(1)    # look inside the parens for function calls
      break

  # Except in if/for/while/switch, there should never be space
  # immediately inside parens (eg "f( 3, 4 )").  We make an exception
  # for nested parens ( (a+b) + c ).  Likewise, there should never be
  # a space before a ( when it's a function argument.  I assume it's a
  # function argument when the char before the whitespace is legal in
  # a function name (alnum + _) and we're not starting a macro. Also ignore
  # pointers and references to arrays and functions coz they're too tricky:
  # we use a very simple way to recognize these:
  # " (something)(maybe-something)" or
  # " (something)(maybe-something," or
  # " (something)[something]"
  # Note that we assume the contents of [] to be short enough that
  # they'll never need to wrap.
  if (  # Ignore control structures.
      not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
                 fncall) and
      # Ignore pointers/references to functions.
      not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
      # Ignore pointers/references to arrays.
      not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
    if Search(r'\w\s*\(\s(?!\s*\\$)', fncall):      # a ( used for a fn call
      error(filename, linenum, 'whitespace/parens', 4,
            'Extra space after ( in function call')
    elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
      error(filename, linenum, 'whitespace/parens', 2,
            'Extra space after (')
    if (Search(r'\w\s+\(', fncall) and
        not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
        not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall) and
        not Search(r'\bcase\s+\(', fncall)):
      # TODO(unknown): Space after an operator function seem to be a common
      # error, silence those for now by restricting them to highest verbosity.
      if Search(r'\boperator_*\b', line):
        error(filename, linenum, 'whitespace/parens', 0,
              'Extra space before ( in function call')
      else:
        error(filename, linenum, 'whitespace/parens', 4,
              'Extra space before ( in function call')
    # If the ) is followed only by a newline or a { + newline, assume it's
    # part of a control statement (if/while/etc), and don't complain
    if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
      # If the closing parenthesis is preceded by only whitespaces,
      # try to give a more descriptive error message.
      if Search(r'^\s+\)', fncall):
        error(filename, linenum, 'whitespace/parens', 2,
              'Closing ) should be moved to the previous line')
      else:
        error(filename, linenum, 'whitespace/parens', 2,
              'Extra space before )')
def IsBlankLine(line):
  """Returns true if the given line is blank.

  A line counts as blank when it is empty or consists solely of
  whitespace characters.

  Args:
    line: A line of a string.

  Returns:
    True, if the given line is blank.
  """
  return line.strip() == ''
def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
                                 error):
  """Flags items that are indented directly inside a namespace body."""
  # The line of interest sits immediately inside a namespace whose
  # indentation checking is enabled: the top of the nesting stack asks for
  # the check, and the previously entered scope is that same namespace.
  stack = nesting_state.stack
  inside_checked_namespace = (
      len(stack) > 1 and
      stack[-1].check_namespace_indentation and
      isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and
      nesting_state.previous_stack_top == stack[-2])

  if not ShouldCheckNamespaceIndentation(nesting_state,
                                         inside_checked_namespace,
                                         clean_lines.elided, line):
    return
  CheckItemIndentationInNamespace(filename, clean_lines.elided, line, error)
def CheckForFunctionLengths(filename, clean_lines, linenum,
                            function_state, error):
  """Reports for long function bodies.

  For an overview why this is done, see:
  http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions

  Uses a simplistic algorithm assuming other style guidelines
  (especially spacing) are followed.
  Only checks unindented functions, so class members are unchecked.
  Trivial bodies are unchecked, so constructors with huge initializer lists
  may be missed.
  Blank/comment lines are not counted so as to avoid encouraging the removal
  of vertical space and comments just to get through a lint check.
  NOLINT *on the last line of a function* disables this check.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    function_state: Current function name and lines in body so far.
    error: The function to call with any errors found.
  """
  lines = clean_lines.lines
  line = lines[linenum]
  joined_line = ''

  starting_func = False
  regexp = r'(\w(\w|::|\*|\&|\s)*)\('  # decls * & space::name( ...
  match_result = Match(regexp, line)
  if match_result:
    # If the name is all caps and underscores, figure it's a macro and
    # ignore it, unless it's TEST or TEST_F.
    function_name = match_result.group(1).split()[-1]
    if function_name == 'TEST' or function_name == 'TEST_F' or (
        not Match(r'[A-Z_]+$', function_name)):
      starting_func = True

  if starting_func:
    body_found = False
    # Scan forward for the start of the body, joining continuation lines so
    # the parameter list can be recovered for TEST macros below.
    for start_linenum in range(linenum, clean_lines.NumLines()):
      start_line = lines[start_linenum]
      joined_line += ' ' + start_line.lstrip()
      if Search(r'(;|})', start_line):  # Declarations and trivial functions
        body_found = True
        break                           # ... ignore
      elif Search(r'{', start_line):
        body_found = True
        function = Search(r'((\w|:)*)\(', line).group(1)
        if Match(r'TEST', function):    # Handle TEST... macros
          parameter_regexp = Search(r'(\(.*\))', joined_line)
          if parameter_regexp:          # Ignore bad syntax
            function += parameter_regexp.group(1)
        else:
          function += '()'
        function_state.Begin(function)
        break
    if not body_found:
      # No body for the function (or evidence of a non-function) was found.
      error(filename, linenum, 'readability/fn_size', 5,
            'Lint failed to find start of function body.')
  elif Match(r'^\}\s*$', line):  # function end
    function_state.Check(error, filename, linenum)
    function_state.End()
  elif not Match(r'^\s*$', line):
    function_state.Count()  # Count non-blank/non-comment lines.
# Matches the body of a "//" comment that begins with TODO.  Captures:
#   group(1): whitespace between "//" and "TODO" (should be exactly one space),
#   group(2): the optional "(username)" part,
#   group(3): the character after the optional colon (space or end-of-string).
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
def CheckComment(line, filename, linenum, next_line_start, error):
  """Checks for common mistakes in comments.

  Args:
    line: The line in question.
    filename: The name of the current file.
    linenum: The number of the line to check.
    next_line_start: The first non-whitespace column of the next line.
    error: The function to call with any errors found.
  """
  commentpos = line.find('//')
  if commentpos != -1:
    # Check if the // may be in quotes.  If so, ignore it
    # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
    if (line.count('"', 0, commentpos) -
        line.count('\\"', 0, commentpos)) % 2 == 0:   # not in quotes
      # Allow one space for new scopes, two spaces otherwise:
      if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and
          ((commentpos >= 1 and
            line[commentpos-1] not in string.whitespace) or
           (commentpos >= 2 and
            line[commentpos-2] not in string.whitespace))):
        error(filename, linenum, 'whitespace/comments', 2,
              'At least two spaces is best between code and comments')

      # Checks for common mistakes in TODO comments.
      comment = line[commentpos:]
      match = _RE_PATTERN_TODO.match(comment)
      if match:
        # One whitespace is correct; zero whitespace is handled elsewhere.
        leading_whitespace = match.group(1)
        if len(leading_whitespace) > 1:
          error(filename, linenum, 'whitespace/todo', 2,
                'Too many spaces before TODO')

        username = match.group(2)
        if not username:
          error(filename, linenum, 'readability/todo', 2,
                'Missing username in TODO; it should look like '
                '"// TODO(my_username): Stuff."')

        middle_whitespace = match.group(3)
        # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
        if middle_whitespace != ' ' and middle_whitespace != '':
          error(filename, linenum, 'whitespace/todo', 2,
                'TODO(my_username) should be followed by a space')

      # If the comment contains an alphanumeric character, there
      # should be a space somewhere between it and the // unless
      # it's a /// or //! Doxygen comment.
      if (Match(r'//[^ ]*\w', comment) and
          not Match(r'(///|//\!)(\s+|$)', comment)):
        error(filename, linenum, 'whitespace/comments', 4,
              'Should have a space between // and comment')
def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
  """Checks for improper use of DISALLOW* macros.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]  # get rid of comments and strings

  macro_match = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
                       r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
  if not macro_match:
    return

  stack = nesting_state.stack
  if not (stack and isinstance(stack[-1], _ClassInfo)):
    # Found DISALLOW* macro outside a class declaration, or perhaps it
    # was used inside a function when it should have been part of the
    # class declaration.  We could issue a warning here, but it
    # probably resulted in a compiler error already.
    return

  # These macros belong in the private: section of the class.
  if stack[-1].access != 'private':
    error(filename, linenum, 'readability/constructors', 3,
          '%s must be in the private: section' % macro_match.group(1))
def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
  """Checks for the correctness of various spacing issues in the code.

  Things we check for: spaces around operators, spaces after
  if/for/while/switch, no spaces around parens in function calls, two
  spaces between code and comment, don't start a block with a blank
  line, don't end a function with a blank line, don't add a blank line
  after public/protected/private, don't have too many blank lines in a row.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # Don't use "elided" lines here, otherwise we can't check commented lines.
  # Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings,
  raw = clean_lines.lines_without_raw_strings
  line = raw[linenum]

  # Before nixing comments, check if the line is blank for no good
  # reason.  This includes the first line after a block is opened, and
  # blank lines at the end of a function (ie, right before a line like '}'
  #
  # Skip all the blank line checks if we are immediately inside a
  # namespace body.  In other words, don't issue blank line warnings
  # for this block:
  #   namespace {
  #
  #   }
  #
  # A warning about missing end of namespace comments will be issued instead.
  #
  # Also skip blank line checks for 'extern "C"' blocks, which are formatted
  # like namespaces.
  if (IsBlankLine(line) and
      not nesting_state.InNamespaceBody() and
      not nesting_state.InExternC()):
    elided = clean_lines.elided
    prev_line = elided[linenum - 1]
    prevbrace = prev_line.rfind('{')
    # TODO(unknown): Don't complain if line before blank line, and line after,
    #                both start with alnums and are indented the same amount.
    #                This ignores whitespace at the start of a namespace block
    #                because those are not usually indented.
    if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
      # OK, we have a blank line at the start of a code block.  Before we
      # complain, we check if it is an exception to the rule: The previous
      # non-empty line has the parameters of a function header that are indented
      # 4 spaces (because they did not fit in a 80 column line when placed on
      # the same line as the function name).  We also check for the case where
      # the previous line is indented 6 spaces, which may happen when the
      # initializers of a constructor do not fit into a 80 column line.
      exception = False
      if Match(r' {6}\w', prev_line):  # Initializer list?
        # We are looking for the opening column of initializer list, which
        # should be indented 4 spaces to cause 6 space indentation afterwards.
        search_position = linenum-2
        while (search_position >= 0
               and Match(r' {6}\w', elided[search_position])):
          search_position -= 1
        # Fix: the comparison literal is four spaces followed by a colon,
        # matching the 5-character slice; with a collapsed literal this
        # exception could never fire and every such blank line was flagged.
        exception = (search_position >= 0
                     and elided[search_position][:5] == '    :')
      else:
        # Search for the function arguments or an initializer list.  We use a
        # simple heuristic here: If the line is indented 4 spaces; and we have a
        # closing paren, without the opening paren, followed by an opening brace
        # or colon (for initializer lists) we assume that it is the last line of
        # a function header.  If we have a colon indented 4 spaces, it is an
        # initializer list.
        exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
                           prev_line)
                     or Match(r' {4}:', prev_line))

      if not exception:
        error(filename, linenum, 'whitespace/blank_line', 2,
              'Redundant blank line at the start of a code block '
              'should be deleted.')
    # Ignore blank lines at the end of a block in a long if-else
    # chain, like this:
    #   if (condition1) {
    #     // Something followed by a blank line
    #
    #   } else if (condition2) {
    #     // Something else
    #   }
    if linenum + 1 < clean_lines.NumLines():
      next_line = raw[linenum + 1]
      if (next_line
          and Match(r'\s*}', next_line)
          and next_line.find('} else ') == -1):
        error(filename, linenum, 'whitespace/blank_line', 3,
              'Redundant blank line at the end of a code block '
              'should be deleted.')

    matched = Match(r'\s*(public|protected|private):', prev_line)
    if matched:
      error(filename, linenum, 'whitespace/blank_line', 3,
            'Do not leave a blank line after "%s:"' % matched.group(1))

  # Next, check comments
  next_line_start = 0
  if linenum + 1 < clean_lines.NumLines():
    next_line = raw[linenum + 1]
    next_line_start = len(next_line) - len(next_line.lstrip())
  CheckComment(line, filename, linenum, next_line_start, error)

  # get rid of comments and strings
  line = clean_lines.elided[linenum]

  # You shouldn't have spaces before your brackets, except maybe after
  # 'delete []' or 'return []() {};'
  if Search(r'\w\s+\[', line) and not Search(r'(?:delete|return)\s+\[', line):
    error(filename, linenum, 'whitespace/braces', 5,
          'Extra space before [')

  # In range-based for, we wanted spaces before and after the colon, but
  # not around "::" tokens that might appear.
  if (Search(r'for *\(.*[^:]:[^: ]', line) or
      Search(r'for *\(.*[^: ]:[^:]', line)):
    error(filename, linenum, 'whitespace/forcolon', 2,
          'Missing space around colon in range-based for loop')
def CheckOperatorSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing around operators.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Don't try to do spacing checks for operator methods.  Do this by
  # replacing the troublesome characters with something else,
  # preserving column position for all other characters.
  #
  # The replacement is done repeatedly to avoid false positives from
  # operators that call operators.
  while True:
    match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
    if match:
      # Mask the operator symbol with underscores of the same width so the
      # later column-based checks are unaffected.
      line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
    else:
      break

  # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
  # Otherwise not.  Note we only check for non-spaces on *both* sides;
  # sometimes people put non-spaces on one side when aligning ='s among
  # many lines (not that this is behavior that I approve of...)
  if ((Search(r'[\w.]=', line) or
       Search(r'=[\w.]', line))
      and not Search(r'\b(if|while|for) ', line)
      # Operators taken from [lex.operators] in C++11 standard.
      and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line)
      and not Search(r'operator=', line)):
    error(filename, linenum, 'whitespace/operators', 4,
          'Missing spaces around =')

  # It's ok not to have spaces around binary operators like + - * /, but if
  # there's too little whitespace, we get concerned.  It's hard to tell,
  # though, so we punt on this one for now.  TODO.

  # You should always have whitespace around binary operators.
  #
  # Check <= and >= first to avoid false positives with < and >, then
  # check non-include lines for spacing around < and >.
  #
  # If the operator is followed by a comma, assume it's be used in a
  # macro context and don't do any checks.  This avoids false
  # positives.
  #
  # Note that && is not included here.  Those are checked separately
  # in CheckRValueReference
  match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around %s' % match.group(1))
  elif not Match(r'#.*include', line):
    # Look for < that is not surrounded by spaces.  This is only
    # triggered if both sides are missing spaces, even though
    # technically should should flag if at least one side is missing a
    # space.  This is done to avoid some false positives with shifts.
    match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
    if match:
      # Only complain if the < does not open a balanced <...> pair (which
      # would be a template argument list, not a comparison).
      (_, _, end_pos) = CloseExpression(
          clean_lines, linenum, len(match.group(1)))
      if end_pos <= -1:
        error(filename, linenum, 'whitespace/operators', 3,
              'Missing spaces around <')

    # Look for > that is not surrounded by spaces.  Similar to the
    # above, we only trigger if both sides are missing spaces to avoid
    # false positives with shifts.
    match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
    if match:
      (_, _, start_pos) = ReverseCloseExpression(
          clean_lines, linenum, len(match.group(1)))
      if start_pos <= -1:
        error(filename, linenum, 'whitespace/operators', 3,
              'Missing spaces around >')

  # We allow no-spaces around << when used like this: 10<<20, but
  # not otherwise (particularly, not when used as streams)
  #
  # We also allow operators following an opening parenthesis, since
  # those tend to be macros that deal with operators.
  match = Search(r'(operator|[^\s(<])(?:L|UL|ULL|l|ul|ull)?<<([^\s,=<])', line)
  if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and
      not (match.group(1) == 'operator' and match.group(2) == ';')):
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around <<')

  # We allow no-spaces around >> for almost anything.  This is because
  # C++11 allows ">>" to close nested templates, which accounts for
  # most cases when ">>" is not followed by a space.
  #
  # We still warn on ">>" followed by alpha character, because that is
  # likely due to ">>" being used for right shifts, e.g.:
  #   value >> alpha
  #
  # When ">>" is used to close templates, the alphanumeric letter that
  # follows would be part of an identifier, and there should still be
  # a space separating the template type and the identifier.
  #   type<type<type>> alpha
  match = Search(r'>>[a-zA-Z_]', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around >>')

  # There shouldn't be space around unary operators
  match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 4,
          'Extra space for operator %s' % match.group(1))
def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing around parentheses.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # No spaces after an if, while, switch, or for
  match = Search(r' (if\(|for\(|while\(|switch\()', line)
  if match:
    error(filename, linenum, 'whitespace/parens', 5,
          'Missing space before ( in %s' % match.group(1))

  # For if/for/while/switch, the left and right parens should be
  # consistent about how many spaces are inside the parens, and
  # there should either be zero or one spaces inside the parens.
  # We don't want: "if ( foo)" or "if ( foo   )".
  # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
  # group(2) is the run of spaces after '(', group(3) the first inner
  # character, and group(4) the run of spaces before ')'.
  match = Search(r'\b(if|for|while|switch)\s*'
                 r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
                 line)
  if match:
    if len(match.group(2)) != len(match.group(4)):
      if not (match.group(3) == ';' and
              len(match.group(2)) == 1 + len(match.group(4)) or
              not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
        error(filename, linenum, 'whitespace/parens', 5,
              'Mismatching spaces inside () in %s' % match.group(1))
    if len(match.group(2)) not in [0, 1]:
      error(filename, linenum, 'whitespace/parens', 5,
            'Should have zero or one spaces inside ( and ) in %s' %
            match.group(1))
def CheckCommaSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing near commas and semicolons.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  raw = clean_lines.lines_without_raw_strings
  line = clean_lines.elided[linenum]

  # A comma should be followed by a space, whether it separates function
  # arguments or operands of the comma operator.  A comma followed directly
  # by another comma is excluded, since that only happens for empty macro
  # arguments.
  #
  # The check runs in two passes: the elided line (with "operator,("
  # rewritten to "F(" so the operator name itself is not flagged) must show
  # a missing space, and so must the raw line, which rules out hits that
  # only exist because a comment was elided away.
  masked = ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)
  if Search(r',[^,\s]', masked) and Search(r',[^,\s]', raw[linenum]):
    error(filename, linenum, 'whitespace/comma', 3,
          'Missing space after ,')

  # A semicolon should also be followed by a space, with a few corner-case
  # characters excluded.
  # TODO(unknown): clarify if 'if (1) { return 1;}' is requires one more
  # space after ;
  if Search(r';[^\s};\\)/]', line):
    error(filename, linenum, 'whitespace/semicolon', 3,
          'Missing space after ;')
def CheckBracesSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing near commas.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Except after an opening paren, or after another opening brace (in case of
  # an initializer list, for instance), you should have spaces before your
  # braces. And since you should never have braces at the beginning of a line,
  # this is an easy test.
  match = Match(r'^(.*[^ ({>]){', line)
  if match:
    # Try a bit harder to check for brace initialization.  This
    # happens in one of the following forms:
    #   Constructor() : initializer_list_{} { ... }
    #   Constructor{}.MemberFunction()
    #   Type variable{};
    #   FunctionCall(type{}, ...);
    #   LastArgument(..., type{});
    #   LOG(INFO) << type{} << " ...";
    #   map_of_type[{...}] = ...;
    #   ternary = expr ? new type{} : nullptr;
    #   OuterTemplate<InnerTemplateConstructor<Type>{}>
    #
    # We check for the character following the closing brace, and
    # silence the warning if it's one of those listed above, i.e.
    # "{.;,)<>]:".
    #
    # To account for nested initializer list, we allow any number of
    # closing braces up to "{;,)<".  We can't simply silence the
    # warning on first sight of closing brace, because that would
    # cause false negatives for things that are not initializer lists.
    #   Silence this:         But not this:
    #     Outer{                if (...) {
    #       Inner{...}            if (...){  // Missing space before {
    #     };                    }
    #
    # There is a false negative with this approach if people inserted
    # spurious semicolons, e.g. "if (cond){};", but we will catch the
    # spurious semicolon with a separate check.
    (endline, endlinenum, endpos) = CloseExpression(
        clean_lines, linenum, len(match.group(1)))
    trailing_text = ''
    if endpos > -1:
      trailing_text = endline[endpos:]
    # Collect up to two following lines of context after the closing brace.
    for offset in xrange(endlinenum + 1,
                         min(endlinenum + 3, clean_lines.NumLines() - 1)):
      trailing_text += clean_lines.elided[offset]
    if not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text):
      error(filename, linenum, 'whitespace/braces', 5,
            'Missing space before {')

  # Make sure '} else {' has spaces.
  if Search(r'}else', line):
    error(filename, linenum, 'whitespace/braces', 5,
          'Missing space before else')

  # You shouldn't have a space before a semicolon at the end of the line.
  # There's a special case for "for" since the style guide allows space before
  # the semicolon there.
  if Search(r':\s*;\s*$', line):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Semicolon defining empty statement. Use {} instead.')
  elif Search(r'^\s*;\s*$', line):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Line contains only semicolon. If this should be an empty statement, '
          'use {} instead.')
  elif (Search(r'\s+;\s*$', line) and
        not Search(r'\bfor\b', line)):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Extra space before last semicolon. If this should be an empty '
          'statement, use {} instead.')
def IsDecltype(clean_lines, linenum, column):
  """Check if the token ending on (linenum, column) is decltype().

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: the number of the line to check.
    column: end column of the token to check.
  Returns:
    True if this token is decltype() expression, False otherwise.
  """
  # Walk back over the parenthesized expression ending here; if that fails,
  # there is nothing to inspect.
  (text, _, open_col) = ReverseCloseExpression(clean_lines, linenum, column)
  if open_col < 0:
    return False
  # A decltype keyword directly before the opening paren decides it.
  return bool(Search(r'\bdecltype\s*$', text[0:open_col]))
def IsTemplateParameterList(clean_lines, linenum, column):
  """Check if the token ending on (linenum, column) is the end of template<>.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: the number of the line to check.
    column: end column of the token to check.
  Returns:
    True if this token is end of a template parameter list, False otherwise.
  """
  # Find the matching opening angle bracket for the '>' ending here.
  (_, open_line, open_pos) = ReverseCloseExpression(
      clean_lines, linenum, column)
  if open_pos <= -1:
    return False
  # It is a template parameter list iff the opening '<' directly follows
  # the "template" keyword.
  preceding_text = clean_lines.elided[open_line][0:open_pos]
  return bool(Search(r'\btemplate\s*$', preceding_text))
def IsRValueType(typenames, clean_lines, nesting_state, linenum, column):
  """Check if the token ending on (linenum, column) is a type.

  Assumes that text to the right of the column is "&&" or a function
  name.

  Args:
    typenames: set of type names from template-argument-list.
    clean_lines: A CleansedLines instance containing the file.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    linenum: the number of the line to check.
    column: end column of the token to check.
  Returns:
    True if this token is a type, False if we are not sure.
  """
  prefix = clean_lines.elided[linenum][0:column]

  # Get one word to the left.  If we failed to do so, this is most
  # likely not a type, since it's unlikely that the type name and "&&"
  # would be split across multiple lines.
  match = Match(r'^(.*)(\b\w+|[>*)&])\s*$', prefix)
  if not match:
    return False

  # Check text following the token.  If it's "&&>" or "&&," or "&&...", it's
  # most likely a rvalue reference used inside a template.
  suffix = clean_lines.elided[linenum][column:]
  if Match(r'&&\s*(?:[>,]|\.\.\.)', suffix):
    return True

  # Check for known types and end of templates:
  #   int&& variable
  #   vector<int>&& variable
  #
  # Because this function is called recursively, we also need to
  # recognize pointer and reference types:
  #   int* Function()
  #   int& Function()
  if (match.group(2) in typenames or
      match.group(2) in ['char', 'char16_t', 'char32_t', 'wchar_t', 'bool',
                         'short', 'int', 'long', 'signed', 'unsigned',
                         'float', 'double', 'void', 'auto', '>', '*', '&']):
    return True

  # If we see a close parenthesis, look for decltype on the other side.
  # decltype would unambiguously identify a type, anything else is
  # probably a parenthesized expression and not a type.
  if match.group(2) == ')':
    return IsDecltype(
        clean_lines, linenum, len(match.group(1)) + len(match.group(2)) - 1)

  # Check for casts and cv-qualifiers.
  #   match.group(1)  remainder
  #   --------------  ---------
  #   const_cast<     type&&
  #   const           type&&
  #   type            const&&
  if Search(r'\b(?:const_cast\s*<|static_cast\s*<|dynamic_cast\s*<|'
            r'reinterpret_cast\s*<|\w+\s)\s*$',
            match.group(1)):
    return True

  # Look for a preceding symbol that might help differentiate the context.
  # These are the cases that would be ambiguous:
  #   match.group(1)  remainder
  #   --------------  ---------
  #   Call         (  expression &&
  #   Declaration  (  type&&
  #   sizeof       (  type&&
  #   if           (  expression &&
  #   while        (  expression &&
  #   for          (  type&&
  #   for(         ;  expression &&
  #   statement    ;  type&&
  #   block        {  type&&
  #   constructor  {  expression &&
  start = linenum
  line = match.group(1)
  match_symbol = None
  while start >= 0:
    # We want to skip over identifiers and commas to get to a symbol.
    # Commas are skipped so that we can find the opening parenthesis
    # for function parameter lists.
    match_symbol = Match(r'^(.*)([^\w\s,])[\w\s,]*$', line)
    if match_symbol:
      break
    start -= 1
    line = clean_lines.elided[start]

  if not match_symbol:
    # Probably the first statement in the file is an rvalue reference
    return True

  if match_symbol.group(2) == '}':
    # Found closing brace, probably an indicate of this:
    #   block{} type&&
    return True

  if match_symbol.group(2) == ';':
    # Found semicolon, probably one of these:
    #   for(; expression &&
    #   statement; type&&

    # Look for the previous 'for(' in the previous lines.
    before_text = match_symbol.group(1)
    for i in xrange(start - 1, max(start - 6, 0), -1):
      before_text = clean_lines.elided[i] + before_text
    if Search(r'for\s*\([^{};]*$', before_text):
      # This is the condition inside a for-loop
      return False

    # Did not find a for-init-statement before this semicolon, so this
    # is probably a new statement and not a condition.
    return True

  if match_symbol.group(2) == '{':
    # Found opening brace, probably one of these:
    #   block{ type&& = ... ; }
    #   constructor{ expression && expression }

    # Look for a closing brace or a semicolon.  If we see a semicolon
    # first, this is probably a rvalue reference.
    line = clean_lines.elided[start][0:len(match_symbol.group(1)) + 1]
    end = start
    depth = 1
    while True:
      for ch in line:
        if ch == ';':
          return True
        elif ch == '{':
          depth += 1
        elif ch == '}':
          depth -= 1
          if depth == 0:
            return False
      end += 1
      if end >= clean_lines.NumLines():
        break
      line = clean_lines.elided[end]
    # Incomplete program?
    return False

  if match_symbol.group(2) == '(':
    # Opening parenthesis.  Need to check what's to the left of the
    # parenthesis.  Look back one extra line for additional context.
    #
    # Fix: the previous-line context used to be computed and then
    # immediately thrown away by a duplicated
    # "before_text = match_symbol.group(1)" assignment, making the
    # lookback dead code.  The stray reassignment has been removed so
    # the extra context is actually used, as this comment describes.
    # NOTE(review): the lookback indexes linenum - 1 even though the
    # symbol may have been found on an earlier line (start) -- confirm.
    before_text = match_symbol.group(1)
    if linenum > 1:
      before_text = clean_lines.elided[linenum - 1] + before_text

    # Patterns that are likely to be types:
    #   [](type&&
    #   for (type&&
    #   sizeof(type&&
    #   operator=(type&&
    #
    if Search(r'(?:\]|\bfor|\bsizeof|\boperator\s*\S+\s*)\s*$', before_text):
      return True

    # Patterns that are likely to be expressions:
    #   if (expression &&
    #   while (expression &&
    #   : initializer(expression &&
    #   , initializer(expression &&
    #   ( FunctionCall(expression &&
    #   + FunctionCall(expression &&
    #   + (expression &&
    #
    # The last '+' represents operators such as '+' and '-'.
    if Search(r'(?:\bif|\bwhile|[-+=%^(<!?:,&*]\s*)$', before_text):
      return False

    # Something else.  Check that tokens to the left look like
    #   return_type function_name
    match_func = Match(r'^(.*\S.*)\s+\w(?:\w|::)*(?:<[^<>]*>)?\s*$',
                       match_symbol.group(1))
    if match_func:
      # Check for constructors, which don't have return types.
      if Search(r'\b(?:explicit|inline)$', match_func.group(1)):
        return True
      implicit_constructor = Match(r'\s*(\w+)\((?:const\s+)?(\w+)', prefix)
      if (implicit_constructor and
          implicit_constructor.group(1) == implicit_constructor.group(2)):
        return True
      return IsRValueType(typenames, clean_lines, nesting_state, linenum,
                          len(match_func.group(1)))

    # Nothing before the function name.  If this is inside a block scope,
    # this is probably a function call.
    return not (nesting_state.previous_stack_top and
                nesting_state.previous_stack_top.IsBlockInfo())

  if match_symbol.group(2) == '>':
    # Possibly a closing bracket, check that what's on the other side
    # looks like the start of a template.
    return IsTemplateParameterList(
        clean_lines, start, len(match_symbol.group(1)))

  # Some other symbol, usually something like "a=b&&c".  This is most
  # likely not a type.
  return False
def IsDeletedOrDefault(clean_lines, linenum):
  """Check if current constructor or operator is deleted or default.

  Finds the parameter list starting on this line, then looks just past
  its closing parenthesis for '= delete' or '= default'.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
  Returns:
    True if this is a deleted or default constructor.
  """
  line = clean_lines.elided[linenum]
  paren_pos = line.find('(')
  if paren_pos < 0:
    # No parameter list on this line, so nothing to check.
    return False
  end_line, _, end_pos = CloseExpression(clean_lines, linenum, paren_pos)
  if end_pos < 0:
    # Unbalanced parenthesis; give up.
    return False
  return Match(r'\s*=\s*(?:delete|default)\b', end_line[end_pos:])
def IsRValueAllowed(clean_lines, linenum, typenames):
  """Check if RValue reference is allowed on a particular line.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    typenames: set of type names from template-argument-list.
  Returns:
    True if line is within the region where RValue references are allowed.
  """
  # Scan backwards for the nearest GOOGLE_ALLOW_RVALUE_REFERENCES_* macro.
  # A PUSH above us paired with a POP at or below us brackets an allowed
  # region; a POP above us means we are outside one.
  for above in xrange(linenum, 0, -1):
    text = clean_lines.elided[above]
    if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', text):
      if not text.endswith('PUSH'):
        return False
      for below in xrange(linenum, clean_lines.NumLines(), 1):
        text = clean_lines.elided[below]
        if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', text):
          return text.endswith('POP')

  # operator= is allowed to take an rvalue reference when it is
  # deleted or defaulted.
  line = clean_lines.elided[linenum]
  if Search(r'\boperator\s*=\s*\(', line):
    return IsDeletedOrDefault(clean_lines, linenum)

  # Same for constructors: out-of-line definitions (Class::Class),
  # explicit/inline declarations, or a declaration that clearly starts a
  # new statement (blank line or {};: terminator on the previous line).
  match = Match(r'\s*(?:[\w<>]+::)*([\w<>]+)\s*::\s*([\w<>]+)\s*\(', line)
  if match and match.group(1) == match.group(2):
    return IsDeletedOrDefault(clean_lines, linenum)
  if Search(r'\b(?:explicit|inline)\s+[\w<>]+\s*\(', line):
    return IsDeletedOrDefault(clean_lines, linenum)

  if Match(r'\s*[\w<>]+\s*\(', line):
    previous_line = 'ReturnType'
    if linenum > 0:
      previous_line = clean_lines.elided[linenum - 1]
    if Match(r'^\s*$', previous_line) or Search(r'[{}:;]\s*$', previous_line):
      return IsDeletedOrDefault(clean_lines, linenum)

  # Strip every "name&&" whose name appears in the template-argument-list;
  # those are assumed to be forwarded and therefore allowed.  A name not in
  # typenames is immediately disallowed.
  remainder = line
  while remainder:
    match = Match(r'^.*?(\w+)\s*&&(.*)$', remainder)
    if not match:
      break
    if match.group(1) not in typenames:
      return False
    remainder = match.group(2)

  # Any '&&' surviving the loop above belongs to a type that was not in
  # the template-argument-list, which is not allowed.
  return remainder.find('&&') < 0
def GetTemplateArgs(clean_lines, linenum):
  """Find list of template arguments associated with this function declaration.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: Line number containing the start of the function declaration,
             usually one line after the end of the template-argument-list.
  Returns:
    Set of type names, or empty set if this does not appear to have
    any template parameters.
  """
  # Find start of function: walk backwards until we see the '(' that opens
  # the parameter list.  A blank line on the way means there is no
  # declaration here.
  func_line = linenum
  while func_line > 0:
    line = clean_lines.elided[func_line]
    if Match(r'^\s*$', line):
      return set()
    if line.find('(') >= 0:
      break
    func_line -= 1
  if func_line == 0:
    return set()

  # Collapse template-argument-list into a single string
  argument_list = ''
  match = Match(r'^(\s*template\s*)<', clean_lines.elided[func_line])
  if match:
    # template-argument-list on the same line as function name
    start_col = len(match.group(1))
    _, end_line, end_col = CloseExpression(clean_lines, func_line, start_col)
    if end_col > -1 and end_line == func_line:
      start_col += 1  # Skip the opening bracket
      argument_list = clean_lines.elided[func_line][start_col:end_col]

  elif func_line > 1:
    # template-argument-list one line before function name.  Find the
    # closing '>' at the end of the previous line, then walk back to the
    # matching '<' and concatenate every line in between.
    match = Match(r'^(.*)>\s*$', clean_lines.elided[func_line - 1])
    if match:
      end_col = len(match.group(1))
      _, start_line, start_col = ReverseCloseExpression(
          clean_lines, func_line - 1, end_col)
      if start_col > -1:
        start_col += 1  # Skip the opening bracket
        while start_line < func_line - 1:
          argument_list += clean_lines.elided[start_line][start_col:]
          start_col = 0  # Only the first line starts mid-line.
          start_line += 1
        argument_list += clean_lines.elided[func_line - 1][start_col:end_col]

  if not argument_list:
    return set()

  # Extract type names: consume one "typename Foo" / "class Foo" (optionally
  # variadic) parameter at a time.  Non-type parameters are skipped because
  # they do not match this pattern.
  typenames = set()
  while True:
    match = Match(r'^[,\s]*(?:typename|class)(?:\.\.\.)?\s+(\w+)(.*)$',
                  argument_list)
    if not match:
      break
    typenames.add(match.group(1))
    argument_list = match.group(2)
  return typenames
def CheckRValueReference(filename, clean_lines, linenum, nesting_state, error):
  """Check for rvalue references.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # Only '&&' that touches a non-space neighbor is examined; '&&' with
  # spaces on both sides is assumed to be a boolean expression, to avoid
  # false positives.
  line = clean_lines.elided[linenum]
  match = Match(r'^(.*\S)&&', line) or Match(r'(.*)&&\S', line)
  if not match:
    return
  if '(&&)' in line or Search(r'\boperator\s*$', match.group(1)):
    return

  # Either poorly formed && or an rvalue reference.  Decide by checking
  # whether what is left of the '&&' looks like a type.
  typenames = GetTemplateArgs(clean_lines, linenum)
  and_pos = len(match.group(1))
  if not IsRValueType(typenames, clean_lines, nesting_state, linenum, and_pos):
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around &&')
  elif not IsRValueAllowed(clean_lines, linenum, typenames):
    error(filename, linenum, 'build/c++11', 3,
          'RValue references are an unapproved C++ feature.')
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
  """Checks for additional blank line issues related to sections.

  Currently the only thing checked here is blank line before protected/private.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    class_info: A _ClassInfo objects.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Classes of 24 lines or fewer are exempt (roughly one terminal screen),
  # as is the class's own opening line (covers "class Foo { public: ... };").
  # A class whose end was never found has last_line == 0 and is skipped by
  # the size test.
  if (class_info.last_line - class_info.starting_linenum <= 24 or
      linenum <= class_info.starting_linenum):
    return

  matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
  if not matched:
    return

  # No warning when the previous line is blank, mentions class/struct
  # (beginning of the class, or a forward-declared inner class), or ends
  # with a backslash (class defined inside a C macro).
  prev_line = clean_lines.lines[linenum - 1]
  if (IsBlankLine(prev_line) or
      Search(r'\b(class|struct)\b', prev_line) or
      Search(r'\\$', prev_line)):
    return

  # Find where the class head actually ends, to tolerate multi-line
  # base-specifier lists, e.g.:
  #   class Derived
  #       : public Base {
  end_class_head = class_info.starting_linenum
  for i in range(class_info.starting_linenum, linenum):
    if Search(r'\{\s*$', clean_lines.lines[i]):
      end_class_head = i
      break
  if end_class_head < linenum - 1:
    error(filename, linenum, 'whitespace/blank_line', 3,
          '"%s:" should be preceded by a blank line' % matched.group(1))
def GetPreviousNonBlankLine(clean_lines, linenum):
  """Return the most recent non-blank line and its line number.

  Args:
    clean_lines: A CleansedLines instance containing the file contents.
    linenum: The number of the line to check.

  Returns:
    A tuple with two elements.  The first element is the contents of the last
    non-blank line before the current line, or the empty string if this is the
    first non-blank line.  The second is the line number of that line, or -1
    if this is the first non-blank line.
  """
  # Walk upwards from the line above until something non-blank is found.
  candidate = linenum - 1
  while candidate >= 0:
    contents = clean_lines.elided[candidate]
    if not IsBlankLine(contents):
      return (contents, candidate)
    candidate -= 1
  return ('', -1)
def CheckBraces(filename, clean_lines, linenum, error):
  """Looks for misplaced braces (e.g. at the end of line).

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]  # get rid of comments and strings

  if Match(r'\s*{\s*$', line):
    # We allow an open brace to start a line in the case where someone is using
    # braces in a block to explicitly create a new scope, which is commonly used
    # to control the lifetime of stack-allocated variables.  Braces are also
    # used for brace initializers inside function calls.  We don't detect this
    # perfectly: we just don't complain if the last non-whitespace character on
    # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
    # previous line starts a preprocessor block.
    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
    if (not Search(r'[,;:}{(]\s*$', prevline) and
        not Match(r'\s*#', prevline)):
      error(filename, linenum, 'whitespace/braces', 4,
            '{ should almost always be at the end of the previous line')

  # An else clause should be on the same line as the preceding closing brace.
  if Match(r'\s*else\b\s*(?:if\b|\{|$)', line):
    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
    if Match(r'\s*}\s*$', prevline):
      error(filename, linenum, 'whitespace/newline', 4,
            'An else should appear on the same line as the preceding }')

  # If braces come on one side of an else, they should be on both.
  # However, we have to worry about "else if" that spans multiple lines!
  if Search(r'else if\s*\(', line):       # could be multi-line if
    brace_on_left = bool(Search(r'}\s*else if\s*\(', line))
    # find the ( after the if
    pos = line.find('else if')
    pos = line.find('(', pos)
    if pos > 0:
      # Follow the condition to its closing ')' (may be on a later line),
      # then check for a '{' after it.
      (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
      brace_on_right = endline[endpos:].find('{') != -1
      if brace_on_left != brace_on_right:    # must be brace after if
        error(filename, linenum, 'readability/braces', 5,
              'If an else has a brace on one side, it should have it on both')
  elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
    error(filename, linenum, 'readability/braces', 5,
          'If an else has a brace on one side, it should have it on both')

  # Likewise, an else should never have the else clause on the same line
  if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
    error(filename, linenum, 'whitespace/newline', 4,
          'Else clause should never be on same line as else (use 2 lines)')

  # In the same way, a do/while should never be on one line
  if Match(r'\s*do [^\s{]', line):
    error(filename, linenum, 'whitespace/newline', 4,
          'do/while clauses should not be on a single line')

  # Check single-line if/else bodies. The style guide says 'curly braces are not
  # required for single-line statements'. We additionally allow multi-line,
  # single statements, but we reject anything with more than one semicolon in
  # it. This means that the first semicolon after the if should be at the end of
  # its line, and the line after that should have an indent level equal to or
  # lower than the if. We also check for ambiguous if/else nesting without
  # braces.
  if_else_match = Search(r'\b(if\s*\(|else\b)', line)
  if if_else_match and not Match(r'\s*#', line):
    if_indent = GetIndentLevel(line)
    endline, endlinenum, endpos = line, linenum, if_else_match.end()
    if_match = Search(r'\bif\s*\(', line)
    if if_match:
      # This could be a multiline if condition, so find the end first.
      pos = if_match.end() - 1
      (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos)
    # Check for an opening brace, either directly after the if or on the next
    # line. If found, this isn't a single-statement conditional.
    if (not Match(r'\s*{', endline[endpos:])
        and not (Match(r'\s*$', endline[endpos:])
                 and endlinenum < (len(clean_lines.elided) - 1)
                 and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))):
      # Scan forward to the first ';' that terminates the body.
      while (endlinenum < len(clean_lines.elided)
             and ';' not in clean_lines.elided[endlinenum][endpos:]):
        endlinenum += 1
        endpos = 0
      if endlinenum < len(clean_lines.elided):
        endline = clean_lines.elided[endlinenum]
        # We allow a mix of whitespace and closing braces (e.g. for one-liner
        # methods) and a single \ after the semicolon (for macros)
        endpos = endline.find(';')
        if not Match(r';[\s}]*(\\?)$', endline[endpos:]):
          # Semicolon isn't the last character, there's something trailing.
          # Output a warning if the semicolon is not contained inside
          # a lambda expression.
          if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$',
                       endline):
            error(filename, linenum, 'readability/braces', 4,
                  'If/else bodies with multiple statements require braces')
        elif endlinenum < len(clean_lines.elided) - 1:
          # Make sure the next line is dedented
          next_line = clean_lines.elided[endlinenum + 1]
          next_indent = GetIndentLevel(next_line)
          # With ambiguous nested if statements, this will error out on the
          # if that *doesn't* match the else, regardless of whether it's the
          # inner one or outer one.
          if (if_match and Match(r'\s*else\b', next_line)
              and next_indent != if_indent):
            error(filename, linenum, 'readability/braces', 4,
                  'Else clause should be indented at the same level as if. '
                  'Ambiguous nested if/else chains require braces.')
          elif next_indent > if_indent:
            error(filename, linenum, 'readability/braces', 4,
                  'If/else bodies with multiple statements require braces')
def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
  """Looks for redundant trailing semicolon.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Block bodies should not be followed by a semicolon.  Due to C++11
  # brace initialization, there are more places where semicolons are
  # required than not, so we use a whitelist approach to check these
  # rather than a blacklist.  These are the places where "};" should
  # be replaced by just "}":
  # 1. Some flavor of block following closing parenthesis:
  #    for (;;) {};
  #    while (...) {};
  #    switch (...) {};
  #    Function(...) {};
  #    if (...) {};
  #    if (...) else if (...) {};
  #
  # 2. else block:
  #    if (...) else {};
  #
  # 3. const member function:
  #    Function(...) const {};
  #
  # 4. Block following some statement:
  #    x = 42;
  #    {};
  #
  # 5. Block at the beginning of a function:
  #    Function(...) {
  #      {};
  #    }
  #
  #    Note that naively checking for the preceding "{" will also match
  #    braces inside multi-dimensional arrays, but this is fine since
  #    that expression will not contain semicolons.
  #
  # 6. Block following another block:
  #    while (true) {}
  #    {};
  #
  # 7. End of namespaces:
  #    namespace {};
  #
  #    These semicolons seems far more common than other kinds of
  #    redundant semicolons, possibly due to people converting classes
  #    to namespaces.  For now we do not warn for this case.
  #
  # Try matching case 1 first.
  match = Match(r'^(.*\)\s*)\{', line)
  if match:
    # Matched closing parenthesis (case 1).  Check the token before the
    # matching opening parenthesis, and don't warn if it looks like a
    # macro.  This avoids these false positives:
    #  - macro that defines a base class
    #  - multi-line macro that defines a base class
    #  - macro that defines the whole class-head
    #
    # But we still issue warnings for macros that we know are safe to
    # warn, specifically:
    #  - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
    #  - TYPED_TEST
    #  - INTERFACE_DEF
    #  - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
    #
    # We implement a whitelist of safe macros instead of a blacklist of
    # unsafe macros, even though the latter appears less frequently in
    # google code and would have been easier to implement.  This is because
    # the downside for getting the whitelist wrong means some extra
    # semicolons, while the downside for getting the blacklist wrong
    # would result in compile errors.
    #
    # In addition to macros, we also don't want to warn on
    #  - Compound literals
    #  - Lambdas
    #  - alignas specifier with anonymous structs:
    closing_brace_pos = match.group(1).rfind(')')
    opening_parenthesis = ReverseCloseExpression(
        clean_lines, linenum, closing_brace_pos)
    if opening_parenthesis[2] > -1:
      line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
      macro = Search(r'\b([A-Z0-9_]+)\s*$', line_prefix)
      func = Match(r'^(.*\])\s*$', line_prefix)
      if ((macro and
           macro.group(1) not in (
               'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
               'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
               'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
          (func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or
          Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or
          Search(r'\s+=\s*$', line_prefix)):
        # Clearing 'match' suppresses the warning for this line.
        match = None
      if (match and
          opening_parenthesis[1] > 1 and
          Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
        # Multi-line lambda-expression
        match = None

  else:
    # Try matching cases 2-3.
    match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
    if not match:
      # Try matching cases 4-6.  These are always matched on separate lines.
      #
      # Note that we can't simply concatenate the previous line to the
      # current line and do a single match, otherwise we may output
      # duplicate warnings for the blank line case:
      #   if (cond) {
      #     // blank line
      #   }
      prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
      if prevline and Search(r'[;{}]\s*$', prevline):
        match = Match(r'^(\s*)\{', line)

  # Check matching closing brace
  if match:
    (endline, endlinenum, endpos) = CloseExpression(
        clean_lines, linenum, len(match.group(1)))
    if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
      # Current {} pair is eligible for semicolon check, and we have found
      # the redundant semicolon, output warning here.
      #
      # Note: because we are scanning forward for opening braces, and
      # outputting warnings for the matching closing brace, if there are
      # nested blocks with trailing semicolons, we will get the error
      # messages in reversed order.
      error(filename, endlinenum, 'readability/braces', 4,
            "You don't need a ; after a }")
def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
  """Look for empty loop/conditional body with only a single semicolon.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Only keywords at the beginning of the line (modulo whitespace) are
  # considered; this also skips most do-while loops, since those lines
  # normally start with a closing brace.  "if" is included because an
  # empty conditional body is likely an error.
  line = clean_lines.elided[linenum]
  keyword = Match(r'\s*(for|while|if)\s*\(', line)
  if not keyword:
    return

  # Find the closing parenthesis of the condition expression.
  (end_line, end_linenum, end_pos) = CloseExpression(
      clean_lines, linenum, line.find('('))

  # Warn only when the condition is immediately followed by a semicolon.
  # Whitespace or newline after the condition is handled by a separate
  # check for semicolons preceded by whitespace.
  if end_pos < 0 or not Match(r';', end_line[end_pos:]):
    return
  if keyword.group(1) == 'if':
    error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
          'Empty conditional bodies should use {}')
  else:
    error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
          'Empty loop bodies should use {} or continue')
def FindCheckMacro(line):
  """Find a replaceable CHECK-like macro.

  Args:
    line: line to search on.
  Returns:
    (macro name, start position), or (None, -1) if no replaceable
    macro is found.
  """
  for macro in _CHECK_MACROS:
    start = line.find(macro)
    if start < 0:
      continue
    # Verify with a regular expression that this really is the expected
    # CHECK macro followed by '(' and not some other macro that merely
    # contains the CHECK substring.
    matched = Match(r'^(.*\b' + macro + r'\s*)\(', line)
    if matched:
      return (macro, len(matched.group(1)))
  return (None, -1)
def CheckCheck(filename, clean_lines, linenum, error):
  """Checks the use of CHECK and EXPECT macros.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Decide the set of replacement macros that should be suggested
  lines = clean_lines.elided
  (check_macro, start_pos) = FindCheckMacro(lines[linenum])
  if not check_macro:
    return

  # Find end of the boolean expression by matching parentheses
  (last_line, end_line, end_pos) = CloseExpression(
      clean_lines, linenum, start_pos)
  if end_pos < 0:
    return

  # If the check macro is followed by something other than a
  # semicolon, assume users will log their own custom error messages
  # and don't suggest any replacements.
  if not Match(r'\s*;', last_line[end_pos:]):
    return

  # Collect the argument of the macro into a single string, joining lines
  # when the expression spans more than one.
  if linenum == end_line:
    expression = lines[linenum][start_pos + 1:end_pos - 1]
  else:
    expression = lines[linenum][start_pos + 1:]
    for i in xrange(linenum + 1, end_line):
      expression += lines[i]
    expression += last_line[0:end_pos - 1]

  # Parse expression so that we can take parentheses into account.
  # This avoids false positives for inputs like "CHECK((a < 4) == b)",
  # which is not replaceable by CHECK_LE.
  lhs = ''
  rhs = ''
  operator = None
  while expression:
    matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
                    r'==|!=|>=|>|<=|<|\()(.*)$', expression)
    if matched:
      token = matched.group(1)
      if token == '(':
        # Parenthesized operand
        expression = matched.group(2)
        (end, _) = FindEndOfExpressionInLine(expression, 0, ['('])
        if end < 0:
          return  # Unmatched parenthesis
        lhs += '(' + expression[0:end]
        expression = expression[end:]
      elif token in ('&&', '||'):
        # Logical and/or operators.  This means the expression
        # contains more than one term, for example:
        #   CHECK(42 < a && a < b);
        #
        # These are not replaceable with CHECK_LE, so bail out early.
        return
      elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
        # Non-relational operator
        lhs += token
        expression = matched.group(2)
      else:
        # Relational operator
        operator = token
        rhs = matched.group(2)
        break
    else:
      # Unparenthesized operand.  Instead of appending to lhs one character
      # at a time, we do another regular expression match to consume several
      # characters at once if possible.  Trivial benchmark shows that this
      # is more efficient when the operands are longer than a single
      # character, which is generally the case.
      matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
      if not matched:
        matched = Match(r'^(\s*\S)(.*)$', expression)
        if not matched:
          break
      lhs += matched.group(1)
      expression = matched.group(2)

  # Only apply checks if we got all parts of the boolean expression
  if not (lhs and operator and rhs):
    return

  # Check that rhs do not contain logical operators.  We already know
  # that lhs is fine since the loop above parses out && and ||.
  if rhs.find('&&') > -1 or rhs.find('||') > -1:
    return

  # At least one of the operands must be a constant literal.  This is
  # to avoid suggesting replacements for unprintable things like
  # CHECK(variable != iterator)
  #
  # The following pattern matches decimal, hex integers, strings, and
  # characters (in that order).
  lhs = lhs.strip()
  rhs = rhs.strip()
  match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
  if Match(match_constant, lhs) or Match(match_constant, rhs):
    # Note: since we know both lhs and rhs, we can provide a more
    # descriptive error message like:
    #   Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
    # Instead of:
    #   Consider using CHECK_EQ instead of CHECK(a == b)
    #
    # We are still keeping the less descriptive message because if lhs
    # or rhs gets long, the error message might become unreadable.
    error(filename, linenum, 'readability/check', 2,
          'Consider using %s instead of %s(a %s b)' % (
              _CHECK_REPLACEMENT[check_macro][operator],
              check_macro, operator))
def CheckAltTokens(filename, clean_lines, linenum, error):
  """Check alternative keywords being used in boolean expressions.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Preprocessor lines are exempt.
  if Match(r'^\s*#', line):
    return

  # Last ditch effort to avoid multi-line comments.  This will not help
  # if the comment started before the current line or ended after the
  # current line, but it catches most of the false positives.  At least,
  # it provides a way to workaround this warning for people who use
  # multi-line comments in preprocessor macros.
  #
  # TODO(unknown): remove this once cpplint has better support for
  # multi-line comments.
  if '/*' in line or '*/' in line:
    return

  for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
    error(filename, linenum, 'readability/alt_tokens', 2,
          'Use operator %s instead of %s' % (
              _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
def GetLineWidth(line):
  """Determines the width of the line in column positions.

  Args:
    line: A string, which may be a Unicode string.

  Returns:
    The width of the line in column positions, accounting for Unicode
    combining characters and wide characters.
  """
  # Byte strings get no special treatment.
  if not isinstance(line, TEXT_TYPE):
    return len(line)
  width = 0
  for ch in unicodedata.normalize('NFC', line):
    if unicodedata.east_asian_width(ch) in ('W', 'F'):
      # Wide and Fullwidth characters occupy two columns.
      width += 2
    elif not unicodedata.combining(ch):
      # Combining characters occupy zero columns; everything else one.
      width += 1
  return width
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
               error):
  """Checks rules from the 'C++ style rules' section of cppguide.html.

  Most of these rules are hard to test (naming, comment style), but we
  do what we can.  In particular we check for 2-space indents, line lengths,
  tab usage, spaces inside code, etc.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    file_extension: The extension (without the dot) of the filename.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """

  # Don't use "elided" lines here, otherwise we can't check commented lines.
  # Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings,
  raw_lines = clean_lines.lines_without_raw_strings
  line = raw_lines[linenum]

  if line.find('\t') != -1:
    error(filename, linenum, 'whitespace/tab', 1,
          'Tab found; better to use spaces')

  # One or three blank spaces at the beginning of the line is weird; it's
  # hard to reconcile that with 2-space indents.
  # NOTE: here are the conditions rob pike used for his tests.  Mine aren't
  # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
  # if(RLENGTH > 20) complain = 0;
  # if(match($0, " +(error|private|public|protected):")) complain = 0;
  # if(match(prev, "&& *$")) complain = 0;
  # if(match(prev, "\\|\\| *$")) complain = 0;
  # if(match(prev, "[\",=><] *$")) complain = 0;
  # if(match($0, " <<")) complain = 0;
  # if(match(prev, " +for \\(")) complain = 0;
  # if(prevodd && match(prevprev, " +for \\(")) complain = 0;
  scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$'
  initial_spaces = 0
  cleansed_line = clean_lines.elided[linenum]
  while initial_spaces < len(line) and line[initial_spaces] == ' ':
    initial_spaces += 1
  if line and line[-1].isspace():
    error(filename, linenum, 'whitespace/end_of_line', 4,
          'Line ends in whitespace.  Consider deleting these extra spaces.')
  # There are certain situations we allow one space, notably for
  # section labels, and also lines containing multi-line raw strings.
  elif ((initial_spaces == 1 or initial_spaces == 3) and
        not Match(scope_or_label_pattern, cleansed_line) and
        not (clean_lines.raw_lines[linenum] != line and
             Match(r'^\s*""', line))):
    error(filename, linenum, 'whitespace/indent', 3,
          'Weird number of spaces at line-start.  '
          'Are you using a 2-space indent?')

  # Check if the line is a header guard.
  is_header_guard = False
  if file_extension == 'h':
    cppvar = GetHeaderGuardCPPVariable(filename)
    if (line.startswith('#ifndef %s' % cppvar) or
        line.startswith('#define %s' % cppvar) or
        line.startswith('#endif  // %s' % cppvar)):
      is_header_guard = True
  # #include lines and header guards can be long, since there's no clean way to
  # split them.
  #
  # URLs can be long too.  It's possible to split these, but it makes them
  # harder to cut&paste.
  #
  # The "$Id:...$" comment may also get very long without it being the
  # developers fault.
  if (not line.startswith('#include') and not is_header_guard and
      not Match(r'^\s*//.*http(s?)://\S*$', line) and
      not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
    line_width = GetLineWidth(line)
    extended_length = int((_line_length * 1.25))
    if line_width > extended_length:
      error(filename, linenum, 'whitespace/line_length', 4,
            'Lines should very rarely be longer than %i characters' %
            extended_length)
    elif line_width > _line_length:
      error(filename, linenum, 'whitespace/line_length', 2,
            'Lines should be <= %i characters long' % _line_length)

  if (cleansed_line.count(';') > 1 and
      # for loops are allowed two ;'s (and may run over two lines).
      cleansed_line.find('for') == -1 and
      (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
       GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
      # It's ok to have many commands in a switch case that fits in 1 line
      not ((cleansed_line.find('case ') != -1 or
            cleansed_line.find('default:') != -1) and
           cleansed_line.find('break;') != -1)):
    error(filename, linenum, 'whitespace/newline', 0,
          'More than one command on the same line')

  # Some more style checks
  CheckBraces(filename, clean_lines, linenum, error)
  CheckTrailingSemicolon(filename, clean_lines, linenum, error)
  CheckEmptyBlockBody(filename, clean_lines, linenum, error)
  CheckAccess(filename, clean_lines, linenum, nesting_state, error)
  CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
  CheckOperatorSpacing(filename, clean_lines, linenum, error)
  CheckParenthesisSpacing(filename, clean_lines, linenum, error)
  CheckCommaSpacing(filename, clean_lines, linenum, error)
  CheckBracesSpacing(filename, clean_lines, linenum, error)
  CheckSpacingForFunctionCall(filename, clean_lines, linenum, error)
  CheckRValueReference(filename, clean_lines, linenum, nesting_state, error)
  CheckCheck(filename, clean_lines, linenum, error)
  CheckAltTokens(filename, clean_lines, linenum, error)
  # Removed a dead store here: classinfo was previously assigned near the
  # top of this function but never read before this reassignment.
  classinfo = nesting_state.InnermostClass()
  if classinfo:
    CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
# Matches C/C++ #include directives, capturing the opening delimiter
# ('<' or '"') in group 1 and the included path in group 2.
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
# Matches the first component of a filename delimited by -s and _s. That is:
#  _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
#  _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
#  _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
#  _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _DropCommonSuffixes(filename):
"""Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
def _IsTestFilename(filename):
"""Determines if the given filename has a suffix that identifies it as a test.
Args:
filename: The input filename.
Returns:
True if 'filename' looks like a test, False otherwise.
"""
if (filename.endswith('_test.cc') or
filename.endswith('_unittest.cc') or
filename.endswith('_regtest.cc')):
return True
else:
return False
def _ClassifyInclude(fileinfo, include, is_system):
  """Figures out what kind of header 'include' is.

  Args:
    fileinfo: The current file cpplint is running over. A FileInfo instance.
    include: The path to a #included file.
    is_system: True if the #include used <> rather than "".

  Returns:
    One of the _XXX_HEADER constants.

  For example:
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
    _C_SYS_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
    _CPP_SYS_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
    _LIKELY_MY_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
    ...                  'bar/foo_other_ext.h', False)
    _POSSIBLE_MY_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
    _OTHER_HEADER
  """
  # System headers are either known C++ standard headers or plain C ones.
  if is_system:
    if include in _CPP_HEADERS:
      return _CPP_SYS_HEADER
    return _C_SYS_HEADER

  # If the target file and the include we're checking share a basename once
  # common suffixes are dropped, and the include lives in the same directory
  # (or the sibling '../public' directory), it's likely owned by the target.
  target_dir, target_base = os.path.split(
      _DropCommonSuffixes(fileinfo.RepositoryName()))
  include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
  if include_base == target_base:
    if include_dir == target_dir:
      return _LIKELY_MY_HEADER
    if include_dir == os.path.normpath(target_dir + '/../public'):
      return _LIKELY_MY_HEADER

  # If the target and include share some initial basename component, the
  # target may be implementing the include, so it's allowed to come first —
  # but we'll never complain if it's not there.
  target_first = _RE_FIRST_COMPONENT.match(target_base)
  include_first = _RE_FIRST_COMPONENT.match(include_base)
  if (target_first and include_first and
      target_first.group(0) == include_first.group(0)):
    return _POSSIBLE_MY_HEADER

  return _OTHER_HEADER
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
  """Check rules that are applicable to #include lines.

  Strings on #include lines are NOT removed from elided line, to make
  certain tasks easier. However, to prevent false positives, checks
  applicable to #include lines in CheckLanguage must be put here.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    include_state: An _IncludeState instance in which the headers are inserted.
    error: The function to call with any errors found.
  """
  fileinfo = FileInfo(filename)
  # Use .lines (not .elided) so the quoted include path is still present.
  line = clean_lines.lines[linenum]

  # "include" should use the new style "foo/bar.h" instead of just "bar.h"
  # Only do this check if the included header follows google naming
  # conventions. If not, assume that it's a 3rd party API that
  # requires special include conventions.
  #
  # We also make an exception for Lua headers, which follow google
  # naming convention but not the include convention.
  match = Match(r'#include\s*"([^/]+\.h)"', line)
  if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)):
    error(filename, linenum, 'build/include', 4,
          'Include the directory when naming .h files')

  # we shouldn't include a file more than once. actually, there are a
  # handful of instances where doing so is okay, but in general it's
  # not.
  match = _RE_PATTERN_INCLUDE.search(line)
  if match:
    include = match.group(2)
    is_system = (match.group(1) == '<')
    duplicate_line = include_state.FindHeader(include)
    if duplicate_line >= 0:
      error(filename, linenum, 'build/include', 4,
            '"%s" already included at %s:%s' %
            (include, filename, duplicate_line))
    elif (include.endswith('.cc') and
          os.path.dirname(fileinfo.RepositoryName()) != os.path.dirname(include)):
      error(filename, linenum, 'build/include', 4,
            'Do not include .cc files from other packages')
    elif not _THIRD_PARTY_HEADERS_PATTERN.match(include):
      # Record this include so later lines can be checked against it.
      include_state.include_list[-1].append((include, linenum))

      # We want to ensure that headers appear in the right order:
      # 1) for foo.cc, foo.h  (preferred location)
      # 2) c system files
      # 3) cpp system files
      # 4) for foo.cc, foo.h  (deprecated location)
      # 5) other google headers
      #
      # We classify each include statement as one of those 5 types
      # using a number of techniques. The include_state object keeps
      # track of the highest type seen, and complains if we see a
      # lower type after that.
      error_message = include_state.CheckNextIncludeOrder(
          _ClassifyInclude(fileinfo, include, is_system))
      if error_message:
        error(filename, linenum, 'build/include_order', 4,
              '%s. Should be: %s.h, c system, c++ system, other.' %
              (error_message, fileinfo.BaseName()))
      canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
      if not include_state.IsInAlphabeticalOrder(
          clean_lines, linenum, canonical_include):
        error(filename, linenum, 'build/include_alpha', 4,
              'Include "%s" not in alphabetical order' % include)
      include_state.SetLastHeader(canonical_include)
def _GetTextInside(text, start_pattern):
"""Retrieves all the text between matching open and close parentheses.
Given a string of lines and a regular expression string, retrieve all the text
following the expression and between opening punctuation symbols like
(, [, or {, and the matching close-punctuation symbol. This properly nested
occurrences of the punctuations, so for the text like
printf(a(), b(c()));
a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
start_pattern must match string having an open punctuation symbol at the end.
Args:
text: The lines to extract text. Its comments and strings must be elided.
It can be single line and can span multiple lines.
start_pattern: The regexp string indicating where to start extracting
the text.
Returns:
The extracted text.
None if either the opening string or ending punctuation could not be found.
"""
# TODO(unknown): Audit cpplint.py to see what places could be profitably
# rewritten to use _GetTextInside (and use inferior regexp matching today).
# Give opening punctuations to get the matching close-punctuations.
matching_punctuation = {'(': ')', '{': '}', '[': ']'}
closing_punctuation = set(itervalues(matching_punctuation))
# Find the position to start extracting text.
match = re.search(start_pattern, text, re.M)
if not match: # start_pattern not found in text.
return None
start_position = match.end(0)
assert start_position > 0, (
'start_pattern must ends with an opening punctuation.')
assert text[start_position - 1] in matching_punctuation, (
'start_pattern must ends with an opening punctuation.')
# Stack of closing punctuations we expect to have in text after position.
punctuation_stack = [matching_punctuation[text[start_position - 1]]]
position = start_position
while punctuation_stack and position < len(text):
if text[position] == punctuation_stack[-1]:
punctuation_stack.pop()
elif text[position] in closing_punctuation:
# A closing punctuation without matching opening punctuations.
return None
elif text[position] in matching_punctuation:
punctuation_stack.append(matching_punctuation[text[position]])
position += 1
if punctuation_stack:
# Opening punctuations left without matching close-punctuations.
return None
# punctuations match.
return text[start_position:position - 1]
# Patterns for matching call-by-reference parameters.
#
# Supports nested templates up to 2 levels deep using this messy pattern:
#   < (?: < (?: < [^<>]*
#             >
#         | [^<>] )*
#     >
#   | [^<>] )*
#   >
_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*'  # =~ [[:alpha:]][[:alnum:]]*
_RE_PATTERN_TYPE = (
    r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
    r'(?:\w|'
    r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
    r'::)+')
# A call-by-reference parameter ends with '& identifier'.
_RE_PATTERN_REF_PARAM = re.compile(
    r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
    r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
# A call-by-const-reference parameter either ends with 'const& identifier'
# or looks like 'const type& identifier' when 'type' is atomic.
_RE_PATTERN_CONST_REF_PARAM = (
    r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
    r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
def CheckLanguage(filename, clean_lines, linenum, file_extension,
                  include_state, nesting_state, error):
  """Checks rules from the 'C++ language rules' section of cppguide.html.

  Some of these rules are hard to test (function overloading, using
  uint32 inappropriately), but we do the best we can.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    file_extension: The extension (without the dot) of the filename.
    include_state: An _IncludeState instance in which the headers are inserted.
    nesting_state: A NestingState instance which maintains information about
        the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # If the line is empty or consists of entirely a comment, no need to
  # check it.
  line = clean_lines.elided[linenum]
  if not line:
    return

  # #include lines get their own dedicated set of checks.
  match = _RE_PATTERN_INCLUDE.search(line)
  if match:
    CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
    return

  # Reset include state across preprocessor directives. This is meant
  # to silence warnings for conditional includes.
  match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
  if match:
    include_state.ResetSection(match.group(1))

  # Make Windows paths like Unix.
  fullname = os.path.abspath(filename).replace('\\', '/')

  # Perform other checks now that we are sure that this is not an include line
  CheckCasts(filename, clean_lines, linenum, error)
  CheckGlobalStatic(filename, clean_lines, linenum, error)
  CheckPrintf(filename, clean_lines, linenum, error)

  if file_extension == 'h':
    # TODO(unknown): check that 1-arg constructors are explicit.
    #                How to tell it's a constructor?
    #                (handled in CheckForNonStandardConstructs for now)
    # TODO(unknown): check that classes declare or disable copy/assign
    #                (level 1 error)
    pass

  # Check if people are using the verboten C basic types. The only exception
  # we regularly allow is "unsigned short port" for port.
  if Search(r'\bshort port\b', line):
    if not Search(r'\bunsigned short port\b', line):
      error(filename, linenum, 'runtime/int', 4,
            'Use "unsigned short" for ports, not "short"')
  else:
    match = Search(r'\b(short|long(?! +double)|long long)\b', line)
    if match:
      error(filename, linenum, 'runtime/int', 4,
            'Use int16/int64/etc, rather than the C type %s' % match.group(1))

  # Check if some verboten operator overloading is going on
  # TODO(unknown): catch out-of-line unary operator&:
  #   class X {};
  #   int operator&(const X& x) { return 42; }  // unary operator&
  # The trick is it's hard to tell apart from binary operator&:
  #   class Y { int operator&(const Y& x) { return 23; } };  // binary operator&
  if Search(r'\boperator\s*&\s*\(\s*\)', line):
    error(filename, linenum, 'runtime/operator', 4,
          'Unary operator& is dangerous. Do not use it.')

  # Check for suspicious usage of "if" like
  # } if (a == b) {
  if Search(r'\}\s*if\s*\(', line):
    error(filename, linenum, 'readability/braces', 4,
          'Did you mean "else if"? If not, start a new line for "if".')

  # Check for potential format string bugs like printf(foo).
  # We constrain the pattern not to pick things like DocidForPrintf(foo).
  # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
  # TODO(unknown): Catch the following case. Need to change the calling
  # convention of the whole function to process multiple line to handle it.
  #   printf(
  #       boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
  printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
  if printf_args:
    match = Match(r'([\w.\->()]+)$', printf_args)
    if match and match.group(1) != '__VA_ARGS__':
      function_name = re.search(r'\b((?:string)?printf)\s*\(',
                                line, re.I).group(1)
      error(filename, linenum, 'runtime/printf', 4,
            'Potential format string bug. Do %s("%%s", %s) instead.'
            % (function_name, match.group(1)))

  # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
  match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
  if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
    error(filename, linenum, 'runtime/memset', 4,
          'Did you mean "memset(%s, 0, %s)"?'
          % (match.group(1), match.group(2)))

  if Search(r'\busing namespace\b', line):
    error(filename, linenum, 'build/namespaces', 5,
          'Do not use namespace using-directives. '
          'Use using-declarations instead.')

  # Detect variable-length arrays.
  match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
  if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
      match.group(3).find(']') == -1):
    # Split the size using space and arithmetic operators as delimiters.
    # If any of the resulting tokens are not compile time constants then
    # report the error.
    # NOTE(review): the trailing ']' makes the last alternative match the
    # literal text '>>]' rather than '>>' — looks like a long-standing typo;
    # confirm against upstream before changing.
    tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3))
    is_const = True
    skip_next = False
    for tok in tokens:
      if skip_next:
        skip_next = False
        continue

      if Search(r'sizeof\(.+\)', tok): continue
      if Search(r'arraysize\(\w+\)', tok): continue

      tok = tok.lstrip('(')
      tok = tok.rstrip(')')
      if not tok: continue
      if Match(r'\d+', tok): continue
      if Match(r'0[xX][0-9a-fA-F]+', tok): continue
      if Match(r'k[A-Z0-9]\w*', tok): continue
      if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
      if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
      # A catch all for tricky sizeof cases, including 'sizeof expression',
      # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
      # requires skipping the next token because we split on ' ' and '*'.
      if tok.startswith('sizeof'):
        skip_next = True
        continue
      is_const = False
      break
    if not is_const:
      error(filename, linenum, 'runtime/arrays', 1,
            'Do not use variable-length arrays. Use an appropriately named '
            "('k' followed by CamelCase) compile-time constant for the size.")

  # Check for use of unnamed namespaces in header files. Registration
  # macros are typically OK, so we allow use of "namespace {" on lines
  # that end with backslashes.
  if (file_extension == 'h'
      and Search(r'\bnamespace\s*{', line)
      and line[-1] != '\\'):
    error(filename, linenum, 'build/namespaces', 4,
          'Do not use unnamed namespaces in header files. See '
          'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
          ' for more information.')
def CheckGlobalStatic(filename, clean_lines, linenum, error):
  """Check for unsafe global or static objects.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Match two lines at a time to support multiline declarations
  if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
    line += clean_lines.elided[linenum + 1].strip()

  # Check for people declaring static/global STL strings at the top level.
  # This is dangerous because the C++ language does not guarantee that
  # globals with constructors are initialized before the first access.
  match = Match(
      r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
      line)

  # Remove false positives:
  # - String pointers (as opposed to values).
  #    string *pointer
  #    const string *pointer
  #    string const *pointer
  #    string *const pointer
  #
  # - Functions and template specializations.
  #    string Function<Type>(...
  #    string Class<Type>::Method(...
  #
  # - Operators. These are matched separately because operator names
  #   cross non-word boundaries, and trying to match both operators
  #   and functions at the same time would decrease accuracy of
  #   matching identifiers.
  #    string Class::operator*()
  if (match and
      not Search(r'\bstring\b(\s+const)?\s*\*\s*(const\s+)?\w', line) and
      not Search(r'\boperator\W', line) and
      not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(3))):
    error(filename, linenum, 'runtime/string', 4,
          'For a static/global string constant, use a C style string instead: '
          '"%schar %s[]".' %
          (match.group(1), match.group(2)))

  # Catch 'Type member_(member_)' — a member initialized with itself.
  if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
    error(filename, linenum, 'runtime/init', 4,
          'You seem to be initializing a member variable with itself.')
def CheckPrintf(filename, clean_lines, linenum, error):
  """Check for printf related issues.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # snprintf's size argument should normally be sizeof(buffer), not a numeric
  # literal.  A literal 0 is fine: that form computes the required size.
  snprintf_match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
  if snprintf_match and snprintf_match.group(2) != '0':
    error(filename, linenum, 'runtime/printf', 3,
          'If you can, use sizeof(%s) instead of %s as the 2nd arg '
          'to snprintf.' % (snprintf_match.group(1), snprintf_match.group(2)))

  # sprintf performs no bounds checking at all.
  if Search(r'\bsprintf\s*\(', line):
    error(filename, linenum, 'runtime/printf', 5,
          'Never use sprintf. Use snprintf instead.')

  # strcpy/strcat are similarly unbounded copies.
  unsafe_copy = Search(r'\b(strcpy|strcat)\s*\(', line)
  if unsafe_copy:
    error(filename, linenum, 'runtime/printf', 4,
          'Almost always, snprintf is better than %s' % unsafe_copy.group(1))
def IsDerivedFunction(clean_lines, linenum):
  """Check if current line contains an inherited function.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
  Returns:
    True if current line contains a function with "override"
    virt-specifier.
  """
  # Walk backwards over a bounded window looking for the line that opens the
  # current function's parameter list.
  scan_floor = max(-1, linenum - 10)
  for candidate in xrange(linenum, scan_floor, -1):
    declaration = Match(r'^([^()]*\w+)\(', clean_lines.elided[candidate])
    if not declaration:
      continue
    # Found the opening parenthesis; check for "override" after the
    # matching closing parenthesis.
    joined_line, _, close_pos = CloseExpression(
        clean_lines, candidate, len(declaration.group(1)))
    return (close_pos >= 0 and
            Search(r'\boverride\b', joined_line[close_pos:]))
  return False
def IsOutOfLineMethodDefinition(clean_lines, linenum):
  """Check if current line contains an out-of-line method definition.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
  Returns:
    True if current line contains an out-of-line method definition.
  """
  # Scan backwards over a bounded window for the start of the current
  # function; the first line that opens a parameter list decides the answer.
  for candidate in xrange(linenum, max(-1, linenum - 10), -1):
    text = clean_lines.elided[candidate]
    if Match(r'^([^()]*\w+)\(', text):
      # A qualified name ("Class::Method(") marks an out-of-line definition.
      return Match(r'^[^()]*\w+::\w+\(', text) is not None
  return False
def IsInitializerList(clean_lines, linenum):
  """Check if current line is inside constructor initializer list.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
  Returns:
    True if current line appears to be inside constructor initializer
    list, False otherwise.
  """
  # Scan backwards; the first structural clue encountered decides the answer.
  for i in xrange(linenum, 1, -1):
    line = clean_lines.elided[i]
    if i == linenum:
      remove_function_body = Match(r'^(.*)\{\s*$', line)
      if remove_function_body:
        # Strip a trailing '{' so the body opener isn't mistaken for a clue.
        line = remove_function_body.group(1)

    if Search(r'\s:\s*\w+[({]', line):
      # A lone colon tend to indicate the start of a constructor
      # initializer list. It could also be a ternary operator, which
      # also tend to appear in constructor initializer lists as
      # opposed to parameter lists.
      return True

    if Search(r'\}\s*,\s*$', line):
      # A closing brace followed by a comma is probably the end of a
      # brace-initialized member in constructor initializer list.
      return True

    if Search(r'[{};]\s*$', line):
      # Found one of the following:
      # - A closing brace or semicolon, probably the end of the previous
      #   function.
      # - An opening brace, probably the start of current class or namespace.
      #
      # Current line is probably not inside an initializer list since
      # we saw one of those things without seeing the starting colon.
      return False

  # Got to the beginning of the file without seeing the start of
  # constructor initializer list.
  return False
def CheckForNonConstReference(filename, clean_lines, linenum,
                              nesting_state, error):
  """Check for non-const references.

  Separate from CheckLanguage since it scans backwards from current
  line, instead of scanning forward.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
        the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # Do nothing if there is no '&' on current line.
  line = clean_lines.elided[linenum]
  if '&' not in line:
    return

  # If a function is inherited, current function doesn't have much of
  # a choice, so any non-const references should not be blamed on
  # derived function.
  if IsDerivedFunction(clean_lines, linenum):
    return

  # Don't warn on out-of-line method definitions, as we would warn on the
  # in-line declaration, if it isn't marked with 'override'.
  if IsOutOfLineMethodDefinition(clean_lines, linenum):
    return

  # Long type names may be broken across multiple lines, usually in one
  # of these forms:
  #   LongType
  #       ::LongTypeContinued &identifier
  #   LongType::
  #       LongTypeContinued &identifier
  #   LongType<
  #       ...>::LongTypeContinued &identifier
  #
  # If we detected a type split across two lines, join the previous
  # line to current line so that we can match const references
  # accordingly.
  #
  # Note that this only scans back one line, since scanning back
  # arbitrary number of lines would be expensive. If you have a type
  # that spans more than 2 lines, please use a typedef.
  if linenum > 1:
    previous = None
    if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
      # previous_line\n + ::current_line
      previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
                        clean_lines.elided[linenum - 1])
    elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
      # previous_line::\n + current_line
      previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
                        clean_lines.elided[linenum - 1])
    if previous:
      line = previous.group(1) + line.lstrip()
    else:
      # Check for templated parameter that is split across multiple lines
      endpos = line.rfind('>')
      if endpos > -1:
        (_, startline, startpos) = ReverseCloseExpression(
            clean_lines, linenum, endpos)
        if startpos > -1 and startline < linenum:
          # Found the matching < on an earlier line, collect all
          # pieces up to current line.
          line = ''
          for i in xrange(startline, linenum + 1):
            line += clean_lines.elided[i].strip()

  # Check for non-const references in function parameters. A single '&' may
  # found in the following places:
  #   inside expression: binary & for bitwise AND
  #   inside expression: unary & for taking the address of something
  #   inside declarators: reference parameter
  # We will exclude the first two cases by checking that we are not inside a
  # function body, including one that was just introduced by a trailing '{'.
  # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
  if (nesting_state.previous_stack_top and
      not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or
           isinstance(nesting_state.previous_stack_top, _NamespaceInfo))):
    # Not at toplevel, not within a class, and not within a namespace
    return

  # Avoid initializer lists. We only need to scan back from the
  # current line for something that starts with ':'.
  #
  # We don't need to check the current line, since the '&' would
  # appear inside the second set of parentheses on the current line as
  # opposed to the first set.
  if linenum > 0:
    for i in xrange(linenum - 1, max(0, linenum - 10), -1):
      previous_line = clean_lines.elided[i]
      if not Search(r'[),]\s*$', previous_line):
        break
      if Match(r'^\s*:\s+\S', previous_line):
        return

  # Avoid preprocessors
  if Search(r'\\\s*$', line):
    return

  # Avoid constructor initializer lists
  if IsInitializerList(clean_lines, linenum):
    return

  # We allow non-const references in a few standard places, like functions
  # called "swap()" or iostream operators like "<<" or ">>". Do not check
  # those function parameters.
  #
  # We also accept & in static_assert, which looks like a function but
  # it's actually a declaration expression.
  whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
                           r'operator\s*[<>][<>]|'
                           r'static_assert|COMPILE_ASSERT'
                           r')\s*\(')
  if Search(whitelisted_functions, line):
    return
  elif not Search(r'\S+\([^)]*$', line):
    # Don't see a whitelisted function on this line. Actually we
    # didn't see any function name on this line, so this is likely a
    # multi-line parameter list. Try a bit harder to catch this case.
    for i in xrange(2):
      if (linenum > i and
          Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
        return

  decls = ReplaceAll(r'{[^}]*}', ' ', line)  # exclude function body
  for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
    if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter):
      error(filename, linenum, 'runtime/references', 2,
            'Is this a non-const reference? '
            'If so, make const or use a pointer: ' +
            ReplaceAll(' *<', '<', parameter))
def CheckCasts(filename, clean_lines, linenum, error):
  """Various cast related checks.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Check to see if they're using an conversion function cast.
  # I just try to capture the most common basic types, though there are more.
  # Parameterless conversion functions, such as bool(), are allowed as they are
  # probably a member operator declaration or default constructor.
  match = Search(
      r'(\bnew\s+|\S<\s*(?:const\s+)?)?\b'
      r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
      r'(\([^)].*)', line)
  expecting_function = ExpectingFunctionArgs(clean_lines, linenum)
  if match and not expecting_function:
    matched_type = match.group(2)

    # matched_new_or_template is used to silence two false positives:
    # - New operators
    # - Template arguments with function types
    #
    # For template arguments, we match on types immediately following
    # an opening bracket without any spaces. This is a fast way to
    # silence the common case where the function type is the first
    # template argument. False negative with less-than comparison is
    # avoided because those operators are usually followed by a space.
    #
    #   function<double(double)>   // bracket + no space = false positive
    #   value < double(42)         // bracket + space = true positive
    matched_new_or_template = match.group(1)

    # Avoid arrays by looking for brackets that come after the closing
    # parenthesis.
    if Match(r'\([^()]+\)\s*\[', match.group(3)):
      return

    # Other things to ignore:
    # - Function pointers
    # - Casts to pointer types
    # - Placement new
    # - Alias declarations
    matched_funcptr = match.group(3)
    if (matched_new_or_template is None and
        not (matched_funcptr and
             (Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
                    matched_funcptr) or
              matched_funcptr.startswith('(*)'))) and
        not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and
        not Search(r'new\(\S+\)\s*' + matched_type, line)):
      error(filename, linenum, 'readability/casting', 4,
            'Using deprecated casting style. '
            'Use static_cast<%s>(...) instead' %
            matched_type)

  if not expecting_function:
    CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',
                    r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)

  # This doesn't catch all cases. Consider (const char * const)"hello".
  #
  # (char *) "foo" should always be a const_cast (reinterpret_cast won't
  # compile).
  if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast',
                     r'\((char\s?\*+\s?)\)\s*"', error):
    pass
  else:
    # Check pointer casts for other than string constants
    CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast',
                    r'\((\w+\s?\*+\s?)\)', error)

  # In addition, we look for people taking the address of a cast. This
  # is dangerous -- casts can assign to temporaries, so the pointer doesn't
  # point where you think.
  #
  # Some non-identifier character is required before the '&' for the
  # expression to be recognized as a cast. These are casts:
  #   expression = &static_cast<int*>(temporary());
  #   function(&(int*)(temporary()));
  #
  # This is not a cast:
  #   reference_type&(int* function_param);
  match = Search(
      r'(?:[^\w]&\(([^)*][^)]*)\)[\w(])|'
      r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line)
  if match:
    # Try a better error message when the & is bound to something
    # dereferenced by the casted pointer, as opposed to the casted
    # pointer itself.
    parenthesis_error = False
    match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line)
    if match:
      _, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1)))
      if x1 >= 0 and clean_lines.elided[y1][x1] == '(':
        _, y2, x2 = CloseExpression(clean_lines, y1, x1)
        if x2 >= 0:
          extended_line = clean_lines.elided[y2][x2:]
          if y2 < clean_lines.NumLines() - 1:
            extended_line += clean_lines.elided[y2 + 1]
          if Match(r'\s*(?:->|\[)', extended_line):
            parenthesis_error = True

    if parenthesis_error:
      error(filename, linenum, 'readability/casting', 4,
            ('Are you taking an address of something dereferenced '
             'from a cast? Wrapping the dereferenced expression in '
             'parentheses will make the binding more obvious'))
    else:
      error(filename, linenum, 'runtime/casting', 4,
            ('Are you taking an address of a cast? '
             'This is dangerous: could be a temp var. '
             'Take the address before doing the cast, rather than after'))
def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
  """Checks for a C-style cast by looking for the pattern.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    cast_type: The string for the C++ cast to recommend. This is either
        reinterpret_cast, static_cast, or const_cast, depending.
    pattern: The regular expression used to find C-style casts.
    error: The function to call with any errors found.

  Returns:
    True if an error was emitted.
    False otherwise.
  """
  line = clean_lines.elided[linenum]
  match = Search(pattern, line)
  if not match:
    return False

  # Exclude lines with keywords that tend to look like casts
  context = line[0:match.start(1) - 1]
  if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
    return False

  # Try expanding current context to see if we one level of
  # parentheses inside a macro.
  if linenum > 0:
    for i in xrange(linenum - 1, max(0, linenum - 5), -1):
      context = clean_lines.elided[i] + context
  if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
    return False

  # operator++(int) and operator--(int)
  if context.endswith(' operator++') or context.endswith(' operator--'):
    return False

  # A single unnamed argument for a function tends to look like old
  # style cast. If we see those, don't issue warnings for deprecated
  # casts, instead issue warnings for unnamed arguments where
  # appropriate.
  #
  # These are things that we want warnings for, since the style guide
  # explicitly require all parameters to be named:
  #   Function(int);
  #   Function(int) {
  #   ConstMember(int) const;
  #   ConstMember(int) const {
  #   ExceptionMember(int) throw (...);
  #   ExceptionMember(int) throw (...) {
  #   PureVirtual(int) = 0;
  #   [](int) -> bool {
  #
  # These are functions of some sort, where the compiler would be fine
  # if they had named parameters, but people often omit those
  # identifiers to reduce clutter:
  #   (FunctionPointer)(int);
  #   (FunctionPointer)(int) = value;
  #   Function((function_pointer_arg)(int))
  #   Function((function_pointer_arg)(int), int param)
  #   <TemplateArgument(int)>;
  #   <(FunctionPointerTemplateArgument)(int)>;
  remainder = line[match.end(0):]
  if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)',
           remainder):
    # Looks like an unnamed parameter.

    # Don't warn on any kind of template arguments.
    if Match(r'^\s*>', remainder):
      return False

    # Don't warn on assignments to function pointers, but keep warnings for
    # unnamed parameters to pure virtual functions. Note that this pattern
    # will also pass on assignments of "0" to function pointers, but the
    # preferred values for those would be "nullptr" or "NULL".
    matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder)
    if matched_zero and matched_zero.group(1) != '0':
      return False

    # Don't warn on function pointer declarations. For this we need
    # to check what came before the "(type)" string.
    if Match(r'.*\)\s*$', line[0:match.start(0)]):
      return False

    # Don't warn if the parameter is named with block comments, e.g.:
    #   Function(int /*unused_param*/);
    raw_line = clean_lines.raw_lines[linenum]
    if '/*' in raw_line:
      return False

    # Passed all filters, issue warning here.
    error(filename, linenum, 'readability/function', 3,
          'All parameters should be named in a function')
    return True

  # At this point, all that should be left is actual casts.
  error(filename, linenum, 'readability/casting', 4,
        'Using C-style cast. Use %s<%s>(...) instead' %
        (cast_type, match.group(1)))

  return True
def ExpectingFunctionArgs(clean_lines, linenum):
  """Checks whether where function type arguments are expected.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.

  Returns:
    True if the line at 'linenum' is inside something that expects arguments
    of function types.
  """
  current_line = clean_lines.elided[linenum]
  # MOCK_METHODn / MOCK_CONST_METHODn macros take function types directly.
  if Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', current_line):
    return True
  if linenum < 2:
    return False
  previous_line = clean_lines.elided[linenum - 1]
  two_lines_back = clean_lines.elided[linenum - 2]
  # Either a MOCK_* macro was opened on one of the two preceding lines, or
  # a std::function (or std::mfunction) template argument list was opened
  # at the end of the previous line.
  return bool(
      Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
            previous_line) or
      Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
            two_lines_back) or
      Search(r'\bstd::m?function\s*\<\s*$', previous_line))
# Maps a C++ standard (or gcc-extension) header to the template names it
# provides.  Consumed below to build _re_pattern_templates, which
# CheckForIncludeWhatYouUse uses to suggest the right #include for a
# template seen in the source.  NOTE(review): the <functional> entries are
# the pre-C++11 adaptors -- presumably intentional for this vintage of the
# linter.
_HEADERS_CONTAINING_TEMPLATES = (
    ('<deque>', ('deque',)),
    ('<functional>', ('unary_function', 'binary_function',
                      'plus', 'minus', 'multiplies', 'divides', 'modulus',
                      'negate',
                      'equal_to', 'not_equal_to', 'greater', 'less',
                      'greater_equal', 'less_equal',
                      'logical_and', 'logical_or', 'logical_not',
                      'unary_negate', 'not1', 'binary_negate', 'not2',
                      'bind1st', 'bind2nd',
                      'pointer_to_unary_function',
                      'pointer_to_binary_function',
                      'ptr_fun',
                      'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
                      'mem_fun_ref_t',
                      'const_mem_fun_t', 'const_mem_fun1_t',
                      'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
                      'mem_fun_ref',
                     )),
    ('<limits>', ('numeric_limits',)),
    ('<list>', ('list',)),
    ('<map>', ('map', 'multimap',)),
    ('<memory>', ('allocator',)),
    ('<queue>', ('queue', 'priority_queue',)),
    ('<set>', ('set', 'multiset',)),
    ('<stack>', ('stack',)),
    ('<string>', ('char_traits', 'basic_string',)),
    ('<tuple>', ('tuple',)),
    ('<utility>', ('pair',)),
    ('<vector>', ('vector',)),
    # gcc extensions.
    # Note: std::hash is their hash, ::hash is our hash
    ('<hash_map>', ('hash_map', 'hash_multimap',)),
    ('<hash_set>', ('hash_set', 'hash_multiset',)),
    ('<slist>', ('slist',)),
    )
# Matches the bare word "string"; used by CheckForIncludeWhatYouUse to
# suggest including <string>.
_RE_PATTERN_STRING = re.compile(r'\bstring\b')

# (compiled_pattern, template_name, header) triples for the <algorithm>
# entities we recognize.  Built once at import time.
_re_pattern_algorithm_header = []
for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
                  'transform'):
  # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
  # type::max().
  _re_pattern_algorithm_header.append(
      (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
       _template,
       '<algorithm>'))

# (compiled_pattern, "name<>", header) triples derived from
# _HEADERS_CONTAINING_TEMPLATES above.  Each pattern matches "name<" used
# as a template.
_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
  for _template in _templates:
    _re_pattern_templates.append(
        (re.compile(r'(\<|\b)' + _template + r'\s*\<'),
         _template + '<>',
         _header))
def FilesBelongToSameModule(filename_cc, filename_h):
  """Check if these two filenames belong to the same module.

  Two files form one 'module' when foo.h, foo-inl.h, foo.cc, foo_test.cc
  and foo_unittest.cc live in the same directory; /public/ and /internal/
  path components are treated as equivalent.

  Because the .cc path may be longer than the header path (e.g.
  '/absolute/path/to/base/sysinfo.cc' including 'base/sysinfo.h'), the
  extra prefix needed to open the header is also returned so callers can
  locate the header without knowing the real include paths.

  Known bug: tools/base/bar.cc and base/bar.h are considered the same
  module, so occasional false positives occur.

  Args:
    filename_cc: is the path for the .cc file
    filename_h: is the path for the header path

  Returns:
    Tuple with a bool and a string:
    bool: True if filename_cc and filename_h belong to the same module.
    string: the additional prefix needed to open the header file.
  """
  if not filename_cc.endswith('.cc'):
    return (False, '')
  stem_cc = filename_cc[:-len('.cc')]
  # Strip a test suffix, if any, so foo_test.cc matches foo.h.
  for test_suffix in ('_unittest', '_test'):
    if stem_cc.endswith(test_suffix):
      stem_cc = stem_cc[:-len(test_suffix)]
      break
  for visibility_dir in ('/public/', '/internal/'):
    stem_cc = stem_cc.replace(visibility_dir, '/')

  if not filename_h.endswith('.h'):
    return (False, '')
  stem_h = filename_h[:-len('.h')]
  if stem_h.endswith('-inl'):
    stem_h = stem_h[:-len('-inl')]
  for visibility_dir in ('/public/', '/internal/'):
    stem_h = stem_h.replace(visibility_dir, '/')

  if stem_cc.endswith(stem_h):
    # The prefix is whatever of the .cc path precedes the header stem.
    return (True, stem_cc[:-len(stem_h)])
  return (False, '')
def UpdateIncludeState(filename, include_dict, io=codecs):
  """Fill up the include_dict with new includes found from the file.

  Args:
    filename: the name of the header to read.
    include_dict: a dictionary in which the headers are inserted.
    io: The io factory to use to read the file. Provided for testability.

  Returns:
    True if a header was successfully added. False otherwise.
  """
  try:
    headerfile = io.open(filename, 'r', 'utf8', 'replace')
  except IOError:
    return False
  # BUGFIX: the handle was previously never closed, leaking a file
  # descriptor per header scanned.  Close it even if parsing raises.
  try:
    linenum = 0
    for line in headerfile:
      linenum += 1
      clean_line = CleanseComments(line)
      match = _RE_PATTERN_INCLUDE.search(clean_line)
      if match:
        include = match.group(2)
        # Keep the first line number at which each header was seen.
        include_dict.setdefault(include, linenum)
  finally:
    headerfile.close()
  return True
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
                              io=codecs):
  """Reports for missing stl includes.

  This function will output warnings to make sure you are including the headers
  necessary for the stl containers and functions that you use. We only give one
  reason to include a header. For example, if you use both equal_to<> and
  less<> in a .h file, only one (the latter in the file) of these will be
  reported as a reason to include the <functional>.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    include_state: An _IncludeState instance.
    error: The function to call with any errors found.
    io: The IO factory to use to read the header file. Provided for unittest
        injection.
  """
  required = {}  # A map of header name to linenumber and the template entity.
                 # Example of required: { '<functional>': (1219, 'less<>') }

  # Pass 1: scan every elided line for template/string usages and record
  # the last line that would justify each header.
  for linenum in range(clean_lines.NumLines()):
    line = clean_lines.elided[linenum]
    if not line or line[0] == '#':
      continue

    # String is special -- it is a non-templatized type in STL.
    matched = _RE_PATTERN_STRING.search(line)
    if matched:
      # Don't warn about strings in non-STL namespaces:
      # (We check only the first match per line; good enough.)
      prefix = line[:matched.start()]
      if prefix.endswith('std::') or not prefix.endswith('::'):
        required['<string>'] = (linenum, 'string')

    for pattern, template, header in _re_pattern_algorithm_header:
      if pattern.search(line):
        required[header] = (linenum, template)

    # The following function is just a speed up, no semantics are changed.
    if not '<' in line:  # Reduces the cpu time usage by skipping lines.
      continue

    for pattern, template, header in _re_pattern_templates:
      if pattern.search(line):
        required[header] = (linenum, template)

  # The policy is that if you #include something in foo.h you don't need to
  # include it again in foo.cc. Here, we will look at possible includes.
  # Let's flatten the include_state include_list and copy it into a dictionary.
  include_dict = dict([item for sublist in include_state.include_list
                       for item in sublist])

  # Did we find the header for this file (if any) and successfully load it?
  header_found = False

  # Use the absolute path so that matching works properly.
  abs_filename = FileInfo(filename).FullName()

  # For Emacs's flymake.
  # If cpplint is invoked from Emacs's flymake, a temporary file is generated
  # by flymake and that file name might end with '_flymake.cc'. In that case,
  # restore original file name here so that the corresponding header file can be
  # found.
  # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
  # instead of 'foo_flymake.h'
  abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)

  # include_dict is modified during iteration, so we iterate over a copy of
  # the keys.
  header_keys = list(include_dict.keys())
  # Pass 2: pull in the includes of this file's own header(s), since those
  # count toward satisfying the requirements below.
  for header in header_keys:
    (same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
    fullpath = common_path + header
    if same_module and UpdateIncludeState(fullpath, include_dict, io):
      header_found = True

  # If we can't find the header file for a .cc, assume it's because we don't
  # know where to look. In that case we'll give up as we're not sure they
  # didn't include it in the .h file.
  # TODO(unknown): Do a better job of finding .h files so we are confident that
  # not having the .h file means there isn't one.
  if filename.endswith('.cc') and not header_found:
    return

  # All the lines have been processed, report the errors found.
  for required_header_unstripped in required:
    template = required[required_header_unstripped][1]
    if required_header_unstripped.strip('<>"') not in include_dict:
      error(filename, required[required_header_unstripped][0],
            'build/include_what_you_use', 4,
            'Add #include ' + required_header_unstripped + ' for ' + template)
# Matches make_pair invoked with explicit template arguments ("make_pair<").
_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')


def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
  """Flags make_pair calls that spell out their template arguments.

  G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are
  specified explicitly, and such use isn't intended in any case; the
  arguments should be deduced.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  if _RE_PATTERN_EXPLICIT_MAKEPAIR.search(clean_lines.elided[linenum]):
    error(filename, linenum, 'build/explicit_make_pair',
          4,  # 4 = high confidence
          'For C++11-compatibility, omit template arguments from make_pair'
          ' OR use pair directly OR if appropriate, construct a pair directly')
def CheckDefaultLambdaCaptures(filename, clean_lines, linenum, error):
  """Flags lambdas that use a default capture ("[=" or "[&").

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # "[=" always introduces a default capture; "[&" does too unless the '&'
  # begins a named reference capture (i.e. is followed by an identifier).
  introducer = Match(r'^(.*)\[\s*(?:=|&[^\w])', line)
  if not introducer:
    return
  # Confirm this really is a lambda: after the (balanced) capture list we
  # must find either '(' (lambda-declarator) or '{' (compound-statement).
  closed_line, _, after_pos = CloseExpression(
      clean_lines, linenum, len(introducer.group(1)))
  if after_pos >= 0 and Match(r'^\s*[{(]', closed_line[after_pos:]):
    error(filename, linenum, 'build/c++11',
          4,  # 4 = high confidence
          'Default lambda captures are an unapproved C++ feature.')
def CheckRedundantVirtual(filename, clean_lines, linenum, error):
  """Check if line contains a redundant "virtual" function-specifier.

  A function already declared "override" or "final" is implicitly virtual,
  so spelling "virtual" as well is redundant.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Look for "virtual" on current line.
  line = clean_lines.elided[linenum]
  virtual = Match(r'^(.*)(\bvirtual\b)(.*)$', line)
  if not virtual: return

  # Ignore "virtual" keywords that are near access-specifiers.  These
  # are only used in class base-specifier and do not apply to member
  # functions.
  if (Search(r'\b(public|protected|private)\s+$', virtual.group(1)) or
      Match(r'^\s+(public|protected|private)\b', virtual.group(3))):
    return

  # Ignore the "virtual" keyword from virtual base classes.  Usually
  # there is a column on the same line in these cases (virtual base
  # classes are rare in google3 because multiple inheritance is rare).
  if Match(r'^.*[^:]:[^:].*$', line): return

  # Look for the next opening parenthesis.  This is the start of the
  # parameter list (possibly on the next line shortly after virtual).
  # TODO(unknown): doesn't work if there are virtual functions with
  # decltype() or other things that use parentheses, but csearch suggests
  # that this is rare.
  end_col = -1
  end_line = -1
  start_col = len(virtual.group(2))
  # BUGFIX: use range instead of the Python-2-only xrange; the rest of this
  # file (e.g. CheckForIncludeWhatYouUse, ProcessFileData) already uses
  # range, so xrange here would crash under Python 3.
  for start_line in range(linenum, min(linenum + 3, clean_lines.NumLines())):
    line = clean_lines.elided[start_line][start_col:]
    parameter_list = Match(r'^([^(]*)\(', line)
    if parameter_list:
      # Match parentheses to find the end of the parameter list
      (_, end_line, end_col) = CloseExpression(
          clean_lines, start_line, start_col + len(parameter_list.group(1)))
      break
    start_col = 0

  if end_col < 0:
    return  # Couldn't find end of parameter list, give up

  # Look for "override" or "final" after the parameter list
  # (possibly on the next few lines).
  for i in range(end_line, min(end_line + 3, clean_lines.NumLines())):
    line = clean_lines.elided[i][end_col:]
    match = Search(r'\b(override|final)\b', line)
    if match:
      error(filename, linenum, 'readability/inheritance', 4,
            ('"virtual" is redundant since function is '
             'already declared as "%s"' % match.group(1)))

    # Set end_col to check whole lines after we are done with the
    # first line.
    end_col = 0
    if Search(r'[^\w]\s*$', line):
      break
def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error):
  """Check if line contains a redundant "override" or "final" virt-specifier.

  "final" implies "override", so declaring both is redundant.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # The virt-specifiers can only legally follow the declarator's closing
  # parenthesis; locate that fragment to avoid false positives.
  line = clean_lines.elided[linenum]
  paren_pos = line.rfind(')')
  if paren_pos >= 0:
    fragment = line[paren_pos:]
  elif linenum > 1 and clean_lines.elided[linenum - 1].rfind(')') >= 0:
    # Declarator closed on the previous line; the whole line is candidate.
    fragment = line
  else:
    return

  # Flag declarations carrying both specifiers.
  if Search(r'\boverride\b', fragment) and Search(r'\bfinal\b', fragment):
    error(filename, linenum, 'readability/inheritance', 4,
          ('"override" is redundant since function is '
           'already declared as "final"'))
# Returns true if we are at a new block, and it is directly
# inside of a namespace.
def IsBlockInNameSpace(nesting_state, is_forward_declaration):
  """Checks that the new block is directly in a namespace.

  Args:
    nesting_state: The _NestingState object that contains info about our state.
    is_forward_declaration: If the class is a forward declared class.
  Returns:
    Whether or not the new block is directly in a namespace.
  """
  stack = nesting_state.stack
  if is_forward_declaration:
    # A forward declaration sits directly in a namespace when the innermost
    # enclosing scope is one.
    return bool(stack) and isinstance(stack[-1], _NamespaceInfo)
  # For a real block the block itself is on top of the stack, so the
  # namespace (if any) is one level down.
  return (len(stack) > 1 and
          stack[-1].check_namespace_indentation and
          isinstance(stack[-2], _NamespaceInfo))
def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
                                    raw_lines_no_comments, linenum):
  """This method determines if we should apply our namespace indentation check.

  Args:
    nesting_state: The current nesting state.
    is_namespace_indent_item: If we just put a new class on the stack, True.
        If the top of the stack is not a class, or we did not recently
        add the class, False.
    raw_lines_no_comments: The lines without the comments.
    linenum: The current line number we are processing.

  Returns:
    True if we should apply our namespace indentation check. Currently, it
    only works for classes and namespaces inside of a namespace.
  """
  is_forward_decl = IsForwardClassDeclaration(raw_lines_no_comments, linenum)
  relevant = is_namespace_indent_item or is_forward_decl
  # Lines inside macro definitions are intentionally exempt from the
  # namespace indentation check.
  if not relevant or IsMacroDefinition(raw_lines_no_comments, linenum):
    return False
  return IsBlockInNameSpace(nesting_state, is_forward_decl)
# Call this method if the line is directly inside of a namespace.
# If the line above is blank (excluding comments) or the start of
# an inner namespace, it cannot be indented.
def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum,
                                    error):
  """Reports an error when a line directly inside a namespace is indented."""
  if Match(r'^\s+', raw_lines_no_comments[linenum]):
    error(filename, linenum, 'runtime/indentation_namespace', 4,
          'Do not indent within a namespace')
def ProcessLine(filename, file_extension, clean_lines, line,
                include_state, function_state, nesting_state, error,
                extra_check_functions=None):
  """Processes a single line in the file.

  Args:
    filename: Filename of the file that is being processed.
    file_extension: The extension (dot not included) of the file.
    clean_lines: An array of strings, each representing a line of the file,
                 with comments stripped.
    line: Number of line being processed.
    include_state: An _IncludeState instance in which the headers are inserted.
    function_state: A _FunctionState instance which counts function lines, etc.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
    extra_check_functions: An optional array of additional check functions
                           that will be run on each source line. Each function
                           takes 4 arguments: filename, clean_lines, line,
                           error
  """
  raw_lines = clean_lines.raw_lines
  ParseNolintSuppressions(filename, raw_lines[line], line, error)
  nesting_state.Update(filename, clean_lines, line, error)
  CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
                               error)
  if nesting_state.InAsmBlock(): return
  CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
  CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
  CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
  CheckLanguage(filename, clean_lines, line, file_extension, include_state,
                nesting_state, error)
  CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
  CheckForNonStandardConstructs(filename, clean_lines, line,
                                nesting_state, error)
  CheckVlogArguments(filename, clean_lines, line, error)
  CheckPosixThreading(filename, clean_lines, line, error)
  CheckInvalidIncrement(filename, clean_lines, line, error)
  CheckMakePairUsesDeduction(filename, clean_lines, line, error)
  CheckDefaultLambdaCaptures(filename, clean_lines, line, error)
  CheckRedundantVirtual(filename, clean_lines, line, error)
  CheckRedundantOverrideOrFinal(filename, clean_lines, line, error)
  # BUGFIX: the default used to be a mutable list literal ([]), a classic
  # shared-state hazard; None is the sentinel now and is normalized here.
  for check_fn in extra_check_functions or []:
    check_fn(filename, clean_lines, line, error)
def FlagCxx11Features(filename, clean_lines, linenum, error):
  """Flag those c++11 features that we only allow in certain places.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Flag unapproved C++11 headers.
  unapproved_headers = ('cfenv', 'condition_variable', 'fenv.h', 'future',
                        'mutex', 'thread', 'chrono', 'ratio', 'regex',
                        'system_error')
  include_match = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
  if include_match and include_match.group(1) in unapproved_headers:
    error(filename, linenum, 'build/c++11', 5,
          '<%s> is an unapproved C++11 header.' % include_match.group(1))

  # C++11 keywords and library features in preprocessor directives only
  # matter inside macro definitions; skip all other directives.
  if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line):
    return

  # These are classes and free functions.  The classes are always mentioned
  # as std::*, but we only catch the free functions if they're not found by
  # ADL.  They're alphabetical by header.
  for top_name in ('alignment_of', 'aligned_union'):  # <type_traits>
    if Search(r'\bstd::%s\b' % top_name, line):
      error(filename, linenum, 'build/c++11', 5,
            ('std::%s is an unapproved C++11 class or function. Send c-style '
             'an example of where it would make your code more readable, and '
             'they may let you use it.') % top_name)
def ProcessFileData(filename, file_extension, lines, error,
                    extra_check_functions=None):
  """Performs lint checks and reports any errors to the given error function.

  Args:
    filename: Filename of the file that is being processed.
    file_extension: The extension (dot not included) of the file.
    lines: An array of strings, each representing a line of the file, with the
           last element being empty if the file is terminated with a newline.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
    extra_check_functions: An optional array of additional check functions
                           that will be run on each source line. Each function
                           takes 4 arguments: filename, clean_lines, line,
                           error
  """
  # BUGFIX: avoid the mutable-default-argument pitfall; normalize the None
  # sentinel to a fresh list so downstream callees always get a list.
  if extra_check_functions is None:
    extra_check_functions = []

  # Pad with markers so line numbers and list indices both start at 1.
  lines = (['// marker so line numbers and indices both start at 1'] + lines +
           ['// marker so line numbers end in a known way'])

  include_state = _IncludeState()
  function_state = _FunctionState()
  nesting_state = NestingState()

  ResetNolintSuppressions()

  CheckForCopyright(filename, lines, error)

  RemoveMultiLineComments(filename, lines, error)
  clean_lines = CleansedLines(lines)

  if file_extension == 'h':
    CheckForHeaderGuard(filename, clean_lines, error)

  for line in range(clean_lines.NumLines()):
    ProcessLine(filename, file_extension, clean_lines, line,
                include_state, function_state, nesting_state, error,
                extra_check_functions)
    FlagCxx11Features(filename, clean_lines, line, error)
  nesting_state.CheckCompletedBlocks(filename, error)

  CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)

  # Check that the .cc file has included its header if it exists.
  if file_extension == 'cc':
    CheckHeaderFileIncluded(filename, include_state, error)

  # We check here rather than inside ProcessLine so that we see raw
  # lines rather than "cleaned" lines.
  CheckForBadCharacters(filename, lines, error)

  CheckForNewlineAtEOF(filename, lines, error)
def ProcessConfigOverrides(filename):
  """ Loads the configuration files and processes the config overrides.

  Walks from the file's directory up to the filesystem root, reading each
  CPPLINT.cfg found, until a config sets "set noparent" (or the root is
  reached).  Recognized options: filter, exclude_files, linelength,
  set noparent.

  Args:
    filename: The name of the file being processed by the linter.

  Returns:
    False if the current |filename| should not be processed further.
  """
  abs_filename = os.path.abspath(filename)
  cfg_filters = []
  keep_looking = True
  while keep_looking:
    abs_path, base_name = os.path.split(abs_filename)
    if not base_name:
      break  # Reached the root directory.

    cfg_file = os.path.join(abs_path, "CPPLINT.cfg")
    abs_filename = abs_path
    if not os.path.isfile(cfg_file):
      continue

    try:
      with open(cfg_file) as file_handle:
        for line in file_handle:
          line, _, _ = line.partition('#')  # Remove comments.
          if not line.strip():
            continue

          name, _, val = line.partition('=')
          name = name.strip()
          val = val.strip()
          if name == 'set noparent':
            keep_looking = False
          elif name == 'filter':
            cfg_filters.append(val)
          elif name == 'exclude_files':
            # When matching exclude_files pattern, use the base_name of
            # the current file name or the directory name we are processing.
            # For example, if we are checking for lint errors in /foo/bar/baz.cc
            # and we found the .cfg file at /foo/CPPLINT.cfg, then the config
            # file's "exclude_files" filter is meant to be checked against "bar"
            # and not "baz" nor "bar/baz.cc".
            if base_name:
              pattern = re.compile(val)
              if pattern.match(base_name):
                sys.stderr.write('Ignoring "%s": file excluded by "%s". '
                                 'File path component "%s" matches '
                                 'pattern "%s"\n' %
                                 (filename, cfg_file, base_name, val))
                return False
          elif name == 'linelength':
            global _line_length
            try:
                _line_length = int(val)
            except ValueError:
                # NOTE(review): this message lacks a trailing newline,
                # unlike the other stderr writes -- presumably an oversight.
                sys.stderr.write('Line length must be numeric.')
          else:
            sys.stderr.write(
                'Invalid configuration option (%s) in file %s\n' %
                (name, cfg_file))

    except IOError:
      sys.stderr.write(
          "Skipping config file '%s': Can't open for reading\n" % cfg_file)
      keep_looking = False

  # Apply all the accumulated filters in reverse order (top-level directory
  # config options having the least priority).
  # BUGFIX: the loop variable used to be named "filter", shadowing the
  # builtin of the same name.
  for cfg_filter in reversed(cfg_filters):
    _AddFilters(cfg_filter)

  return True
def ProcessFile(filename, vlevel, extra_check_functions=None):
  """Does google-lint on a single file.

  Args:
    filename: The name of the file to parse.

    vlevel: The level of errors to report.  Every error of confidence
            >= verbose_level will be reported.  0 is a good default.

    extra_check_functions: An optional array of additional check functions
                           that will be run on each source line. Each function
                           takes 4 arguments: filename, clean_lines, line,
                           error
  """
  # BUGFIX: avoid the mutable-default-argument pitfall; normalize the None
  # sentinel before passing the list to ProcessFileData.
  if extra_check_functions is None:
    extra_check_functions = []

  _SetVerboseLevel(vlevel)
  _BackupFilters()

  if not ProcessConfigOverrides(filename):
    _RestoreFilters()
    return

  lf_lines = []
  crlf_lines = []
  try:
    # Support the UNIX convention of using "-" for stdin.  Note that
    # we are not opening the file with universal newline support
    # (which codecs doesn't support anyway), so the resulting lines do
    # contain trailing '\r' characters if we are reading a file that
    # has CRLF endings.
    # If after the split a trailing '\r' is present, it is removed
    # below.
    if filename == '-':
      lines = codecs.StreamReaderWriter(sys.stdin,
                                        codecs.getreader('utf8'),
                                        codecs.getwriter('utf8'),
                                        'replace').read().split('\n')
    else:
      lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')

    # Remove trailing '\r'.
    # The -1 accounts for the extra trailing blank line we get from split()
    for linenum in range(len(lines) - 1):
      if lines[linenum].endswith('\r'):
        lines[linenum] = lines[linenum].rstrip('\r')
        crlf_lines.append(linenum + 1)
      else:
        lf_lines.append(linenum + 1)

  except IOError:
    sys.stderr.write(
        "Skipping input '%s': Can't open for reading\n" % filename)
    _RestoreFilters()
    return

  # Note, if no dot is found, this will give the entire filename as the ext.
  file_extension = filename[filename.rfind('.') + 1:]

  # When reading from stdin, the extension is unknown, so no cpplint tests
  # should rely on the extension.
  if filename != '-' and file_extension not in _valid_extensions:
    sys.stderr.write('Ignoring %s; not a valid file name '
                     '(%s)\n' % (filename, ', '.join(_valid_extensions)))
  else:
    ProcessFileData(filename, file_extension, lines, Error,
                    extra_check_functions)

    # If end-of-line sequences are a mix of LF and CR-LF, issue
    # warnings on the lines with CR.
    #
    # Don't issue any warnings if all lines are uniformly LF or CR-LF,
    # since critique can handle these just fine, and the style guide
    # doesn't dictate a particular end of line sequence.
    #
    # We can't depend on os.linesep to determine what the desired
    # end-of-line sequence should be, since that will return the
    # server-side end-of-line sequence.
    if lf_lines and crlf_lines:
      # Warn on every line with CR.  An alternative approach might be to
      # check whether the file is mostly CRLF or just LF, and warn on the
      # minority, we bias toward LF here since most tools prefer LF.
      for linenum in crlf_lines:
        Error(filename, linenum, 'whitespace/newline', 1,
              'Unexpected \\r (^M) found; better to use only \\n')

  sys.stderr.write('Done processing %s\n' % filename)
  _RestoreFilters()
def PrintUsage(message):
  """Prints the usage string to stderr and exits.

  Exits via sys.exit with a fatal-error message when one is given,
  otherwise with status code 1.

  Args:
    message: The optional error message.
  """
  sys.stderr.write(_USAGE)
  if message:
    sys.exit('\nFATAL ERROR: ' + message)
  sys.exit(1)
def PrintCategories():
  """Prints a list of all the error-categories used by error messages.

  These are the categories used to filter messages via --filter.
  """
  listing = [' %s\n' % cat for cat in _ERROR_CATEGORIES]
  sys.stderr.write(''.join(listing))
  sys.exit(0)
def ParseArguments(args):
  """Parses the command line arguments.

  This may set the output format and verbosity level as side-effects.

  Args:
    args: The command line arguments:

  Returns:
    The list of filenames to lint.
  """
  try:
    (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
                                                 'counting=',
                                                 'filter=',
                                                 'root=',
                                                 'linelength=',
                                                 'extensions='])
  except getopt.GetoptError:
    PrintUsage('Invalid arguments.')

  verbosity = _VerboseLevel()
  output_format = _OutputFormat()
  filters = ''
  counting_style = ''

  for (opt, val) in opts:
    if opt == '--help':
      PrintUsage(None)
    elif opt == '--output':
      if val not in ('emacs', 'vs7', 'eclipse'):
        PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
      output_format = val
    elif opt == '--verbose':
      verbosity = int(val)
    elif opt == '--filter':
      filters = val
      # An explicitly empty --filter means "list the categories and exit".
      if not filters:
        PrintCategories()
    elif opt == '--counting':
      if val not in ('total', 'toplevel', 'detailed'):
        PrintUsage('Valid counting options are total, toplevel, and detailed')
      counting_style = val
    elif opt == '--root':
      global _root
      _root = val
    elif opt == '--linelength':
      global _line_length
      try:
        _line_length = int(val)
      except ValueError:
        PrintUsage('Line length must be digits.')
    elif opt == '--extensions':
      global _valid_extensions
      try:
        _valid_extensions = set(val.split(','))
      except ValueError:
        # NOTE(review): str.split never raises ValueError, so this branch is
        # unreachable; kept for parity with --linelength handling.
        # BUGFIX: corrected the "seperated" typo in the user-facing message.
        PrintUsage('Extensions must be comma separated list.')

  if not filenames:
    PrintUsage('No files were specified.')

  _SetOutputFormat(output_format)
  _SetVerboseLevel(verbosity)
  _SetFilters(filters)
  _SetCountingStyle(counting_style)

  return filenames
def main():
  """Entry point: lints every file named on the command line and exits
  with status 1 if any error was reported, 0 otherwise."""
  filenames = ParseArguments(sys.argv[1:])

  backup_err = sys.stderr
  try:
    # Change stderr to write with replacement characters so we don't die
    # if we try to print something containing non-ASCII characters.
    # NOTE(review): codecs.StreamReader defines no write() of its own, so
    # writes presumably reach the wrapped stream through attribute
    # delegation -- confirm this is intended rather than StreamReaderWriter.
    sys.stderr = codecs.StreamReader(sys.stderr,
                                     'replace')

    _cpplint_state.ResetErrorCounts()
    for filename in filenames:
      ProcessFile(filename, _cpplint_state.verbose_level)
    _cpplint_state.PrintErrorCounts()

  finally:
    # Always restore the real stderr, even if linting raised.
    sys.stderr = backup_err

  sys.exit(_cpplint_state.error_count > 0)


if __name__ == '__main__':
  main()
| mbitsnbites/viewer | tools/cpplint.py | Python | bsd-2-clause | 242,295 |
import unittest
from troposphere.route53 import AliasTarget
class TestAliasTarget(unittest.TestCase):
    """Tests construction of troposphere.route53.AliasTarget."""

    def test_bucket_template(self):
        """AliasTarget must be constructible with positional arguments,
        lowercase keyword names, and CloudFormation-cased keyword names
        interchangeably (all three calls below must not raise).

        NOTE(review): the method name suggests a bucket-template test but
        it exercises AliasTarget argument handling -- presumably historical.
        """
        AliasTarget("zone", "dnsname", True)
        AliasTarget(hostedzoneid="zone", dnsname="dnsname", evaluatetargethealth=True)
        AliasTarget(HostedZoneId="zone", DNSName="dnsname", EvaluateTargetHealth=True)


if __name__ == "__main__":
    unittest.main()
| cloudtools/troposphere | tests/test_route53.py | Python | bsd-2-clause | 409 |
from app import app
# Start the application server when this file is run directly, passing
# '0.0.0.0' as the first positional argument -- presumably the bind host so
# the app is reachable from outside a container/VM; confirm against
# app.run's signature.
if __name__ == "__main__":
    app.run('0.0.0.0')
| toolmakers/ahoy | application/web/manage.py | Python | bsd-2-clause | 71 |
"""Leetcode 929. Unique Email Addresses
Easy
URL: https://leetcode.com/problems/unique-email-addresses/
Every email consists of a local name and a domain name, separated by the @ sign.
For example, in alice@leetcode.com, alice is the local name, and
leetcode.com is the domain name.
Besides lowercase letters, these emails may contain '.'s or '+'s.
If you add periods ('.') between some characters in the local name part of an
email address, mail sent there will be forwarded to the same address without
dots in the local name. For example, "alice.z@leetcode.com" and
"alicez@leetcode.com" forward to the same email address.
(Note that this rule does not apply for domain names.)
If you add a plus ('+') in the local name, everything after the first plus sign
will be ignored. This allows certain emails to be filtered, for example
m.y+name@email.com will be forwarded to my@email.com.
(Again, this rule does not apply for domain names.)
It is possible to use both of these rules at the same time.
Given a list of emails, we send one email to each address in the list.
How many different addresses actually receive mails?
Example 1:
Input: ["test.email+alex@leetcode.com","test.e.mail+bob.cathy@leetcode.com","testemail+david@lee.tcode.com"]
Output: 2
Explanation: "testemail@leetcode.com" and "testemail@lee.tcode.com" actually receive mails
Note:
- 1 <= emails[i].length <= 100
- 1 <= emails.length <= 100
- Each emails[i] contains exactly one '@' character.
- All local and domain names are non-empty.
- Local names do not start with a '+' character.
"""
class SolutionSplitReplace(object):
    def numUniqueEmails(self, emails):
        """
        :type emails: List[str]
        :rtype: int

        Canonicalize each address (drop dots in the local name, truncate at
        the first '+') and count distinct results.

        Time complexity: O(m*n), where
        - m is the number of emails,
        - n is the length of email address.
        Space complexity: O(m*n).
        """
        canonical = set()
        for address in emails:
            local, domain = address.split('@')
            # Everything after the first '+' is ignored, then periods in
            # the remaining local name are dropped.
            filtered_local = local.split('+')[0].replace('.', '')
            canonical.add(filtered_local + '@' + domain)
        return len(canonical)
def main():
    """Demo driver; expected output is 2."""
    emails = ["test.email+alex@leetcode.com",
              "test.e.mail+bob.cathy@leetcode.com",
              "testemail+david@lee.tcode.com"]
    # print() call form works in both Python 2 and Python 3
    # (the original `print expr` statement is a SyntaxError on Python 3).
    print(SolutionSplitReplace().numUniqueEmails(emails))


if __name__ == '__main__':
    main()
| bowen0701/algorithms_data_structures | lc0929_unique_email_addresses.py | Python | bsd-2-clause | 2,625 |
"""
서비스 관리 도구 국제화 지원 모듈.
`django-modeltranslation` 을 이용하여 다국어 지원을 하는 모듈입니다.
"""
from modeltranslation.translator import TranslationOptions, register
from .models import Category, BaseService, Service, TopBanner
@register(Category)
class CategoryTranslationOptions(TranslationOptions):
    """
    Translation options for the :class:`Category` model.
    """
    fields = ('name',)
@register(BaseService)
class BaseServiceTranslationOptions(TranslationOptions):
    """
    Translation options for the :class:`BaseService` model.
    """
    fields = ('name',)
@register(Service)
class ServiceTranslationOptions(TranslationOptions):
    """
    Translation options for the :class:`Service` model.

    No extra translated fields of its own; translated fields are
    inherited from :class:`BaseService`.
    """
    fields = ()
@register(TopBanner)
class TopBannerTranslationOptions(TranslationOptions):
    """
    Translation options for the :class:`TopBanner` model.
    """
    fields = ('text',)
# -*- coding: utf-8 -*-
import os
import platform
import pytest
from grep import print_helper
from grep import file_helper
from grep.grep import Searcher
from tests.helper_for_tests import *
def test_run_with_empty_str_not_regex_line_by_line(with_f_write):
    """An empty plain-text term, matched line by line, hits every file."""
    with_f_write.write('docopt\nasdfwer')
    with_f_write.seek(0)
    searcher = Searcher(
        caller_dir=os.path.dirname(with_f_write.name),
        search_term='',
        specific_file='',
        is_recursive=False,
        is_abs_path=False,
        is_regex_pattern=False,
        is_search_line_by_line=True,
        is_from_stdin=False)
    assert Searcher.run(searcher) == [os.path.abspath(with_f_write.name)]
def test_run_with_empty_str_is_regex_line_by_line(with_f_write):
    """An empty regex term, matched line by line, hits every file."""
    with_f_write.write('docopt\nasdfwer')
    with_f_write.seek(0)
    searcher = Searcher(
        caller_dir=os.path.dirname(with_f_write.name),
        search_term='',
        specific_file='',
        is_recursive=False,
        is_abs_path=False,
        is_regex_pattern=True,
        is_search_line_by_line=True,
        is_from_stdin=False)
    assert Searcher.run(searcher) == [os.path.abspath(with_f_write.name)]
def test_run_with_empty_str_not_regex_file_level(with_f_write):
    """An empty plain-text term, matched at file level, hits every file."""
    with_f_write.write('docopt\nasdfwer')
    with_f_write.seek(0)
    searcher = Searcher(
        caller_dir=os.path.dirname(with_f_write.name),
        search_term="",
        specific_file='',
        is_recursive=False,
        is_abs_path=False,
        is_regex_pattern=False,
        is_search_line_by_line=False,
        is_from_stdin=False)
    assert Searcher.run(searcher) == [os.path.abspath(with_f_write.name)]
def test_run_with_empty_str_is_regex_file_level(with_f_write):
    """An empty regex term, matched at file level, hits every file."""
    with_f_write.write('docopt\nasdfwer')
    with_f_write.seek(0)
    searcher = Searcher(
        caller_dir=os.path.dirname(with_f_write.name),
        search_term="",
        specific_file='',
        is_recursive=False,
        is_abs_path=False,
        is_regex_pattern=True,
        is_search_line_by_line=False,
        is_from_stdin=False)
    assert Searcher.run(searcher) == [os.path.abspath(with_f_write.name)]
@pytest.mark.skipif("platform.system() == 'Windows'")
def test_ioerror_due_to_restricted_file(with_restricted_file):
caller_dir = with_restricted_file
Searcher.run(
Searcher(
caller_dir=caller_dir,
search_term="",
specific_file='',
is_recursive=False,
is_abs_path=False,
is_regex_pattern=False,
is_search_line_by_line=True,
is_from_stdin=False))
def test_regular_expression_error_file_level(with_f_read):
    """An invalid regex at file level must be handled without raising."""
    searcher = Searcher(
        caller_dir='',
        specific_file='',
        search_term="[\\]",
        is_recursive=False,
        is_abs_path=False,
        is_regex_pattern=True,
        is_search_line_by_line=False,
        is_from_stdin=False)
    Searcher.search_f(searcher, with_f_read.name)
def test_regular_expression_error_line_by_line(with_f_read):
    """An invalid regex in line-by-line mode must be handled without raising."""
    # Directory option is irrelevant for the test.
    searcher = Searcher(
        caller_dir='',
        search_term="[\\]",
        specific_file='',
        is_recursive=False,
        is_abs_path=False,
        is_regex_pattern=True,
        is_search_line_by_line=True,
        is_from_stdin=False)
    Searcher.search_f(searcher, with_f_read.name)
| florianbegusch/simple_grep | tests/test_grep_edge_cases.py | Python | bsd-2-clause | 4,515 |
# --------------------------------------------------------
# Theano @ Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
import sys
import copy
import numpy as np
from collections import OrderedDict
from six.moves import xrange
import dragon.core.mpi as mpi
import dragon.core.workspace as ws
import dragon.protos.dragon_pb2 as pb
from dragon.core.utils import MakeArgument
from dragon.core.gradient_maker import GraphGradientMaker
from dragon.core.scope import GetOperatorName, GetTensorName
from dragon.core.tensor import Tensor
def GraphDef_Grad(meta_graph, targets):
    """Inject the gradient targets into GraphDef.
    Parameters
    ----------
    meta_graph : dragon_pb2.GraphDef
        The definition of meta graph.
    targets : list
        The solving targets.
    Returns
    -------
    None
    See Also
    --------
    `T.grad(*args, **kwargs)`_ - How the generate gradient targets.
    """
    # Collect the unique (cost, wrt) pairs over all targets.
    pairs = set()
    for target in targets:
        for wrt in target.grad_wrts:
            pairs.add((target.name, wrt))
    # Emit one GradientTarget message per unique pair.
    for cost, wrt in pairs:
        g_target = pb.GradientTarget()
        g_target.cost = str(cost)
        g_target.wrt = str(wrt)
        meta_graph.g_target.extend([g_target])
def GraphDef_Phase(meta_graph, targets):
    """Inject the phase into GraphDef.
    If existing gradients, we assume it should be ``TRAIN``, and vice versa.
    Parameters
    ----------
    meta_graph : dragon_pb2.GraphDef
        The definition of meta graph.
    targets : list
        The solving targets.
    Returns
    -------
    None
    """
    from dragon.core.scope import _PHASE_SCOPE
    if _PHASE_SCOPE != '':
        # An explicitly opened phase scope always wins.
        phase = _PHASE_SCOPE.upper()
    else:
        # Otherwise infer the phase: any gradient target implies training.
        phase = 'TRAIN' if any(len(t.grad_wrts) > 0 for t in targets) else 'TEST'
    meta_graph.arg.extend([MakeArgument('phase', phase)])
def GraphDef_Update(meta_graph, updater):
    """Inject the update targets into GraphDef.
    The ``updater`` should generate update targets before.
    Parameters
    ----------
    meta_graph : dragon_pb2.GraphDef
        The definition of meta graph.
    updater : BaseUpdater
        The updater.
    Returns
    -------
    None
    """
    if updater is None: return
    # Prefix all updater tensors/domains with the graph name.
    updater._prefix = meta_graph.name + '_'
    extra_arguments = updater._extra_kwargs
    extra_arguments['domain'] = updater._prefix
    parallel_arguments = {}
    # wrap hyper-parameters as Tensor for CC
    for k, v in updater._hyper_params.items():
        ws.FeedTensor(updater._prefix + k, np.array([v], dtype=np.float32))
    # check data parallel if necessary
    if mpi.Is_Init():
        idx, group = mpi.AllowParallel()
        if idx != -1:
            parallel_arguments['parallel_mode'] = mpi.GetParallelMode()
            parallel_arguments['comm'], parallel_arguments['group'] \
                = mpi.CreateGroup(root=group[0], incl=group)
            parallel_arguments['root'] = group[0]
    # Always emitted; the dict is simply empty without MPI parallelism.
    for k, v in parallel_arguments.items():
        meta_graph.arg.add().CopyFrom(MakeArgument(k, v))
    # FIX: loop variable renamed from ``tuple`` (it shadowed the builtin).
    for entry in updater._tuples:
        tensors = entry[0]
        arguments = entry[1]
        kwargs = dict(arguments, **extra_arguments)
        u_target = pb.UpdateTarget()
        u_target.type = updater._type
        _, u_target.name = GetOperatorName()
        for tensor in tensors:
            u_target.tensor.append(tensor)
        for k, v in kwargs.items():
            u_target.arg.add().CopyFrom(MakeArgument(k, v))
        meta_graph.u_target.extend([u_target])
def GraphDef_Opt(meta_graph):
    """Inject the optimization options into GraphDef.
    Parameters
    ----------
    meta_graph : dragon_pb2.GraphDef
        The definition of meta graph.
    Returns
    -------
    None
    References
    ----------
    `config.SetDebugMode(*args, **kwargs)`_ - How the enable debug mode.
    `memonger.share_grads(*args, **kwargs)`_ - How the enable gradients sharing.
    """
    from dragon.config import option
    # Copy the two relevant global options onto the graph definition.
    meta_graph.share_grads = option['share_grads']
    meta_graph.debug_mode = option['debug_mode']
def GraphDef_Device(meta_graph):
    """Inject the device option into GraphDef.
    Parameters
    ----------
    meta_graph : dragon_pb2.GraphDef
        The definition of meta graph.
    Returns
    -------
    None
    References
    ----------
    `config.EnableCPU()`_ - How to use CPU device.
    `config.EnableCUDA(*args, **kwargs)`_ - How to use CUDA device.
    `config.SetRandomSeed(*args, **kwargs)`_ - How to set random seed.
    """
    from dragon.config import option
    # BUGFIX: compare by value. ``is not 'None'`` tested object identity
    # against a string literal, which is implementation-dependent (and a
    # SyntaxWarning in modern CPython).
    if option['device'] != 'None':
        supports = {'CPU': 0, 'CUDA': 1}
        device_option = pb.DeviceOption()
        device_option.device_type = supports[option['device']]
        device_option.gpu_id = option['gpu_id']
        device_option.random_seed = option['random_seed']
        if option['use_cudnn']: device_option.engine = 'CUDNN'
        meta_graph.device_option.CopyFrom(device_option)
def function(inputs=None, outputs=None, givens=None, updater=None):
    """Return a callable function that will compute ``outputs`` or apply ``updater``.
    Set ``inputs`` to feed inputs into this callable function.
    Set ``givens`` to substitute some tensors before making the computation graph.
    Set ``updater`` to make update graph, but the update targets should be generated before.
    Parameters
    ----------
    inputs : Tensor, list of Tensor or None
        The inputs to feed.
    outputs : Tensor, list of Tensor or None
        The outputs to solve.
    givens : dict or None
        The substitutions to use.
    updater : BaseUpdater
        The updater to use.
    Returns
    -------
    function
        The callable function.
    Examples
    --------
    >>> x = Tensor('x').Variable()
    >>> y = x * 2
    >>> f = theano.function(outputs=y)
    >>> x.set_value(np.ones((2, 3), dtype=np.float32))
    >>> print(f())
    >>> [[ 2. 2. 2.]
        [ 2. 2. 2.]]
    >>> f = theano.function(inputs=x, outputs=y)
    >>> print(f(np.ones((2, 3), dtype=np.float32)))
    >>> [[ 2. 2. 2.]
        [ 2. 2. 2.]]
    """
    # Normalize ``inputs`` and ``outputs`` to (possibly empty) lists.
    if not isinstance(inputs, list):
        if inputs is None:
            inputs = []
        else:
            inputs = [inputs]
    if not isinstance(outputs, list):
        if outputs is None:
            outputs = []
        else:
            outputs = [outputs]
    # ``outputs`` and ``updater`` are mutually exclusive graph modes.
    if len(outputs) > 0 and updater is not None:
        raise RuntimeError('You can specific either outputs or updater, not both.')
    all_exprs = {};
    all_extra_targets = set()
    if not isinstance(outputs, list): outputs = [outputs]
    # Each call creates a uniquely named graph.
    meta_graph = pb.GraphDef()
    meta_graph.name = 'Graph_' + str(ws.CURRENT_GRAPH_IDX)
    ws.CURRENT_GRAPH_IDX += 1
    # extract operators and targets from expressions
    existing_grads = False
    for output in outputs:
        meta_graph.target.extend([output.name])
        # OrderedDict keeps insertion order on Python 3 merges.
        if sys.version_info >= (3, 0):
            all_exprs = OrderedDict(all_exprs, **output.expressions)
        else:
            all_exprs = dict(all_exprs, **output.expressions)
        all_extra_targets = all_extra_targets.union(output.extra_targets)
        if len(output.grad_wrts) > 0: existing_grads = True
    # we should sort out the topology of these operators before using
    all_exprs = sorted(all_exprs.items(), key=lambda d: d[0])
    forward_ops = copy.deepcopy([v for k, v in all_exprs])
    # handle givens: substitute tensors before the graph is frozen
    if givens is not None:
        name_dict = {}
        external_input_exprs = {}
        # NOTE: ``old_tenosr`` is a historical typo for "old_tensor".
        for old_tenosr, new_tensor in givens.items():
            if isinstance(new_tensor, Tensor):
                name_dict[old_tenosr.name] = new_tensor._name
                if sys.version_info >= (3, 0):
                    external_input_exprs = OrderedDict(external_input_exprs, **new_tensor.expressions)
                else:
                    external_input_exprs = dict(external_input_exprs, **new_tensor.expressions)
                external_input_exprs = OrderedDict(sorted(external_input_exprs.items(), key=lambda A: A[0]))
            elif isinstance(new_tensor, np.ndarray):
                ws.FeedTensor(new_tensor, GetTensorName())
            all_extra_targets = all_extra_targets.union(new_tensor.extra_targets)
        external_input_ops = [v for k, v in external_input_exprs.items()]
        for op in forward_ops:
            # Append the renamed inputs, then drop the original first half,
            # leaving only the substituted names.
            op.input.extend([name_dict[input] if input in name_dict
                             else input for input in op.input])
            del op.input[:int(len(op.input) / 2)]
        forward_ops = external_input_ops + forward_ops
    # handle grads: generate backward ops when any target requires them
    if existing_grads:
        targets = [output.name for output in outputs]
        targets.extend(all_extra_targets)
        forward_ops, grad_ops = GraphGradientMaker.Make(forward_ops, targets)
    else:
        grad_ops = []
    # Write Ops
    meta_graph.op.extend(forward_ops + grad_ops)
    # Write Extra Targets
    for extra_target in all_extra_targets:
        meta_graph.target.extend([extra_target])
    # Write Misc (device/optimization/phase or update targets)
    if len(outputs) > 0:
        GraphDef_Device(meta_graph)
        GraphDef_Opt(meta_graph)
        GraphDef_Grad(meta_graph, outputs)
        GraphDef_Phase(meta_graph, outputs)
    elif updater is not None:
        GraphDef_Device(meta_graph)
        GraphDef_Opt(meta_graph)
        GraphDef_Update(meta_graph, updater)
    # call c api to create graph
    ws.CreateGraph(meta_graph)
    # return a lambda point to run this graph
    return lambda *args, **kwargs: \
        ws.RunGraph(meta_graph.name, (inputs, args), outputs, **kwargs)
def eval(self, feed_dict=None):
    # Evaluate this Tensor, lazily building and caching the graph function
    # on first use (cached on the instance as ``_eval_func``).
    if not hasattr(self, '_eval_func'):
        if feed_dict is not None:
            self._eval_func = function(inputs=feed_dict.keys(), outputs=self)
        else:
            self._eval_func = function(outputs=self)
    # cond.1: run by feeding
    if feed_dict is not None:
        # checking: each key must be a Tensor and each value must match
        # the Tensor's declared shape (None dims are wildcards)
        for key, value in feed_dict.items():
            if not isinstance(key, Tensor):
                raise TypeError('The key of feed_dict key should be a Tensor.')
            if key.shape is not None:
                if len(key.shape) != len(value.shape):
                    raise RuntimeError('The Tensor({}) was limited to {} dimensions, \
                                        while feed a value with {} dimensions.'.
                                       format(key.name, len(key.shape), len(value.shape)))
                for i in xrange(len(key.shape)):
                    if key.shape[i] is None: continue
                    if key.shape[i] != value.shape[i]:
                        raise RuntimeError('The shape of Tensor({}) was limited as ('.format(key.name) +
                                           ','.join([str(dim) for dim in key.shape]) + '), ' +
                                           'while feed a value with (' + ','.join([str(dim) for dim in value.shape]) + ').')
        return self._eval_func(*feed_dict.values())
    else:
        # cond.2: run without feeding
        return self._eval_func()
# Monkey-patch eval() onto Tensor.
Tensor.eval = eval
# Copyright 2008 Owen Taylor
#
# This file is part of Reinteract and distributed under the terms
# of the BSD license. See the file COPYING in the Reinteract
# distribution for full details.
#
########################################################################
from ConfigParser import RawConfigParser, ParsingError
import os
import time
def format_duration(past):
    """Return a human-readable "how long ago" string for timestamp *past*.

    Returns "" for timestamps before 1972 (sanity check) or in the future.
    """
    if past < 60:  # Sanity ... a date before 1972
        return ""

    now = time.time()
    elapsed = now - past
    if elapsed <= 0:
        return ""
    if elapsed < 90:
        return "A minute ago"
    if elapsed < 60 * 60:
        return "%.0f minutes ago" % (elapsed / 60.)
    if elapsed < 24 * 60 * 60:
        return "%.0f hours ago" % (elapsed / (60. * 60.))

    # Beyond a day, count whole days back from local midnight of today.
    ts = time.localtime(now)
    midnight = time.mktime((ts[0], ts[1], ts[2], 0, 0, 0, ts[6], ts[7], ts[8]))
    days = (midnight - past) / (60. * 60. * 24.)
    if days < 1:
        return "Yesterday"
    if days < 7:
        return "%.0f days ago" % (days)
    if days < 10.5:
        return "1 week ago"
    if days < 30:
        return "%.0f weeks ago" % (days / 7.)
    if days < 45:
        return "1 month ago"
    if days < 365:
        return "%.0f months ago" % (days / 30.)
    if days < 547.5:
        return "1 year ago"
    return "%.0f years ago" % (days / 365.)
class NotebookInfo(object):
    """Metadata for a notebook folder, persisted in its index.rnb file.

    The index.rnb file is an INI-style file with a single [Notebook]
    section holding ``last_modified`` (a float timestamp) and an optional
    ``description``.
    """
    def __init__(self, folder):
        self.folder = folder
        self.__load()
    def __load(self):
        """Populate the parser from index.rnb; seed defaults beforehand."""
        self.__parser = RawConfigParser()
        # Fallback with the modtime of the folder as "last_modified"
        st = os.stat(self.folder)
        self.__parser.add_section('Notebook')
        self.__parser.set('Notebook', 'last_modified', str(st.st_mtime))
        index_file = os.path.join(self.folder, "index.rnb")
        try:
            f = open(index_file, "r")
        except IOError:
            # If not readable, just ignore (defaults remain in effect)
            return
        try:
            self.__parser.readfp(f)
        except ParsingError:
            # If not parseable, just ignore
            return
        finally:
            f.close()
    def __save(self):
        """Write the current state back to index.rnb, bumping last_modified."""
        self.__parser.set('Notebook', 'last_modified', str(time.time()))
        index_file = os.path.join(self.folder, "index.rnb")
        f = open(index_file, "w")
        self.__parser.write(f)
        f.close()
    def update_last_modified(self):
        # last_modified is updated to the current time every time we save
        self.__save()
    @property
    def last_modified(self):
        """Timestamp (float) of the last save, or the folder mtime."""
        if self.__parser.has_option('Notebook', 'last_modified'):
            return self.__parser.getfloat('Notebook', 'last_modified')
        # BUGFIX: this fallback previously returned os.path.basename(folder)
        # (a string, copy-pasted from ``name``); return a timestamp instead.
        # Normally unreachable, since __load() always seeds the option.
        return os.stat(self.folder).st_mtime
    @property
    def last_modified_text(self):
        """Human-readable form of last_modified ("2 hours ago", ...)."""
        return format_duration(self.last_modified)
    @property
    def name(self):
        """The notebook's name: the folder's basename."""
        return os.path.basename(self.folder)
    def __get_description(self):
        if self.__parser.has_option('Notebook', 'description'):
            return self.__parser.get('Notebook', 'description')
        else:
            return ""
    def __set_description(self, description):
        # Setting the description persists immediately.
        self.__parser.set('Notebook', 'description', description)
        self.__save()
    description = property(__get_description, __set_description)
| rschroll/reinteract | lib/reinteract/notebook_info.py | Python | bsd-2-clause | 3,421 |
#
# Candelabra
#
# Copyright Alvaro Saurin 2013 - All right Reserved
#
"""
Plugins infrastructure for Candelabra..
You can add a plugin by defining an entry-point in your software distribution. For example, for a provider
for VMware, you should define an entry point like this in your `setup.py` file:
>>> entry_points={
>>> 'candelabra.provider': [
>>> 'vmware_provider = candelabra_vmware.plugin:register_me',
>>> ]
>>> }
Then, in your candelabra_vmware/plugin.py, there must be a register_me function like this:
>>> from candelabra.plugins import ProviderPlugin
>>>
>>> class VMWareProvider(ProviderPlugin):
>>> MACHINE = VMWareMachine
>>>
>>> provider_instance = VMWareProvider()
>>>
>>> def register_me(registry_instance):
>>> registry_instance.register('vmware', provider_instance)
"""
from logging import getLogger
import os
import sys
import pkg_resources
from candelabra.config import config
from candelabra.constants import CFG_DEFAULT_PROVIDER
from candelabra.errors import TopologyException, ComponentNotFoundException
logger = getLogger(__name__)
########################################################################################################################
class PluginsRegistry(object):
    """Holds plugin instances for one plugin family, keyed by name."""

    def __init__(self):
        self.plugins = {}

    def register(self, name, plugin):
        # Only keep plugins that pass validation (currently: all of them).
        if self.validate(plugin):
            self.plugins[name] = plugin

    def validate(self, plugin):
        """Validation hook for subclasses; accepts everything by default."""
        return True

    @property
    def names(self):
        """The registered plugin names (a dict keys view)."""
        return self.plugins.keys()
# One registry per plugin family; keys double as setuptools entry-point
# group names (see register_all below).
PLUGINS_REGISTRIES = {
    'candelabra.provider': PluginsRegistry(),
    'candelabra.provisioner': PluginsRegistry(),
    'candelabra.guest': PluginsRegistry(),
    'candelabra.command': PluginsRegistry(),
    'candelabra.communicator': PluginsRegistry(),
}
def register_all():
    """ Register all plugins we can find in the system.
    For each plugin, we will invoke the entry point with a :class:`PluginsRegistry` instance as the
    only parameter. Then, the entry point must call the instance :meth:`register: method in order
    to register it...
    """
    logger.debug('registering all plugins')
    # NOTE: dict.iteritems() is Python 2 only.
    for family_name, registry_instance in PLUGINS_REGISTRIES.iteritems():
        logger.debug('... looking for plugins for %s', family_name)
        # The entry-point group name is the family name itself.
        for entrypoint in pkg_resources.iter_entry_points(group=family_name):
            # Grab the function that is the actual plugin.
            plugin_entrypoint = entrypoint.load()
            # Call the plugin with the data
            plugin_entrypoint(registry_instance)
########################################################################################################################
class CommandPlugin(object):
    """ A command from command line.

    Subclasses set NAME/DESCRIPTION and implement :meth:`run`.
    """
    NAME = 'unknown'
    DESCRIPTION = 'unknown'
    def run(self, args, command):
        """ Run the command
        """
        raise NotImplementedError('must be implemented')
    def run_with_topology(self, args, topology_file, command=None, save_state=True):
        """ Run a command, managing the topology.

        Loads ``topology_file`` (guessing it when None), optionally schedules
        and runs the tasks for ``command``, and returns the TopologyRoot.
        Exits the process (sys.exit) on load errors.
        """
        if command:
            logger.info('running command "%s"', command)
        if topology_file is None:
            # No file given: try to locate one automatically.
            from candelabra.topology.root import guess_topology_file
            topology_file = guess_topology_file()
            if topology_file is None:
                logger.critical('no topology file provided')
                sys.exit(1)
        if not os.path.exists(topology_file):
            logger.critical('topology file %s does not exist', topology_file)
            sys.exit(1)
        from candelabra.errors import TopologyException, ProviderNotFoundException, CandelabraException
        from candelabra.scheduler.base import TasksScheduler
        # load the topology file and create a tree
        # (NOTE: ``except X, e`` is Python 2 syntax throughout)
        try:
            from candelabra.topology.root import TopologyRoot
            topology = TopologyRoot()
            topology.load(topology_file)
        except TopologyException, e:
            logger.critical(str(e))
            sys.exit(1)
        except ProviderNotFoundException, e:
            logger.critical(str(e))
            sys.exit(1)
        except KeyboardInterrupt:
            logger.critical('interrupted with Ctrl-C... bye!')
            sys.exit(0)
        scheduler = None
        try:
            if command:
                scheduler = TasksScheduler()
                tasks = topology.get_tasks(command)
                assert all(isinstance(t, tuple) for t in tasks)
                scheduler.append(tasks)
                scheduler.run()
        except CandelabraException:
            # Domain errors propagate unchanged to the caller.
            raise
        except KeyboardInterrupt:
            raise CandelabraException('interrupted with Ctrl-C... bye!')
        except Exception, e:
            logger.critical('uncaught exception')
            raise
        finally:
            # Persist state only if something actually completed.
            if save_state:
                if scheduler and scheduler.num_completed > 0:
                    topology.state.save()
        return topology
class ProviderPlugin(object):
    """ A provider.

    Subclasses declare the concrete classes to instantiate for each
    topology node kind.
    """
    NAME = 'unknown'
    DESCRIPTION = 'unknown'
    MACHINE = None  # the machine class that will be instantiated for each definition in the topology
    APPLIANCE = None  # the appliance class that will be instantiated for each definition in the topology
    INTERFACE = None  # the interface class (see build_interface_instance)
    SHARED = None  # the shared-folder class (see build_shared_instance)
    COMMUNICATORS = None  # communicators supported by this provider -- TODO confirm expected type
class ProvisionerPlugin(object):
    """ A provisioner.
    """
    NAME = 'unknown'
    DESCRIPTION = 'unknown'
    PROVISIONER = None  # the provisioner class that will be instantiated for each machine
    def run(self, command):
        """ Run a command
        """
        raise NotImplementedError('must be implemented')
class GuestPlugin(object):
    """ A guest definition (guest OS support).
    """
    NAME = 'unknown'
    DESCRIPTION = 'unknown'
class CommunicatorPlugin(object):
    """ A communicator.
    """
    NAME = 'unknown'
    DESCRIPTION = 'unknown'
    ONLY_PROVIDERS = []  # presumably restricts this communicator to given providers -- TODO confirm
    COMMUNICATOR = None  # the communicator class that will be instantiated for each machine
########################################################################################################################
def get_provider(name):
    """Return the registered ProviderPlugin called *name*."""
    registry = PLUGINS_REGISTRIES['candelabra.provider']
    return registry.plugins[name]
def _get_provider_class_from_dict(**kwargs):
    """Resolve the provider class name from *kwargs*.

    Looks first at the kwargs themselves, then at their ``_container`` or
    ``_parent`` object, and finally falls back to the configured default.
    """
    class_keys = ('class', 'cfg_class', '_class')
    for key in class_keys:
        if key in kwargs:
            return kwargs[key]
    for holder_key in ('_container', '_parent'):
        holder = kwargs.get(holder_key)
        if holder:
            for key in class_keys:
                if hasattr(holder, key):
                    return getattr(holder, key)
    return config.get_key(CFG_DEFAULT_PROVIDER)
def _get_instance_from_plugin(_family, _attr, _name, **kwargs):
    # Instantiate attribute ``_attr`` of the plugin resolved from ``kwargs``:
    # _family: key into PLUGINS_REGISTRIES; _attr: plugin class attribute
    # holding the class to build; _name: human-readable label for errors.
    _class_name = _get_provider_class_from_dict(**kwargs).lower()
    if not _class_name:
        raise TopologyException('internal: no %s class available' % (_name))
    try:
        _class = getattr(PLUGINS_REGISTRIES[_family].plugins[_class_name], _attr)
    except KeyError, e:
        # Python 2 ``except X, e`` syntax; unknown plugin name.
        m = 'cannot build a %s of class "%s"' % (_name, _class_name)
        logger.warning(m)
        raise ComponentNotFoundException(m)
    return _class(**kwargs)
def build_machine_instance(**kwargs):
    """The factory for machines: returns a subclass of MachineNode for the right provider.
    """
    # BUGFIX: the error label was 'communicator' (copy-paste), producing
    # misleading "cannot build a communicator" messages for machines.
    return _get_instance_from_plugin('candelabra.provider', 'MACHINE', 'machine', **kwargs)


def build_provisioner_instance(**kwargs):
    """The factory for provisioners: returns a subclass of Provisioner for the right node.
    """
    return _get_instance_from_plugin('candelabra.provisioner', 'PROVISIONER', 'provisioner', **kwargs)


def build_shared_instance(**kwargs):
    """The factory for shared folders: returns a subclass of SharedNode for the right node.
    """
    return _get_instance_from_plugin('candelabra.provider', 'SHARED', 'shared folder', **kwargs)


def build_network_instance(**kwargs):
    """The factory for networks: returns a subclass of NetworkNode for the right node.
    """
    # NOTE(review): ProviderPlugin declares no NETWORK attribute in this
    # file -- confirm concrete provider plugins define it.
    return _get_instance_from_plugin('candelabra.provider', 'NETWORK', 'network', **kwargs)


def build_interface_instance(**kwargs):
    """The factory for interfaces: returns a subclass of InterfaceNode for the right node.
    """
    return _get_instance_from_plugin('candelabra.provider', 'INTERFACE', 'interface', **kwargs)


def build_guest_instance(**kwargs):
    """The factory for guests: returns a subclass of Guest for the right node.
    """
    return _get_instance_from_plugin('candelabra.guest', 'GUEST', 'guest', **kwargs)


def build_communicator_instance(**kwargs):
    """The factory for communicators: returns a subclass of Communicator for the right node.
    """
    return _get_instance_from_plugin('candelabra.communicator', 'COMMUNICATOR', 'communicator', **kwargs)
| inercia/candelabra | candelabra/plugins.py | Python | bsd-2-clause | 9,282 |
"""
Cache interface for Google Cloud Store
Copyright (c) 2013 Clarinova. This file is licensed under the terms of the
Revised BSD License, included in this distribution as LICENSE.txt
"""
from . import Cache
from s3 import S3Cache
from . import copy_file_or_flo, get_logger
import os
global_logger = get_logger(__name__)
# logger.setLevel(logging.DEBUG)
class GcsCache(S3Cache):
'''A cache that transfers files to and from GCS
'''
def __init__(
self,
bucket=None,
prefix=None,
account=None,
upstream=None,
**kwargs):
'''Init a new S3Cache Cache
'''
from boto.gs.connection import GSConnection
super(
S3Cache,
self).__init__(
upstream=upstream) # Skip parent __init__
self.is_remote = False
self.access_key = account['access']
self.secret = account['secret']
self.project = account['project']
self.bucket_name = bucket
self.prefix = prefix
self.conn = GSConnection(self.access_key, self.secret, is_secure=False)
self.bucket = self.conn.get_bucket(self.bucket_name)
def __repr__(self):
return "GcsCache: bucket={} prefix={} access={} ".format(
self.bucket, self.prefix, self.access_key, self.upstream)
| CivicKnowledge/ckcache | ckcache/gcs.py | Python | bsd-2-clause | 1,357 |
#coding: utf-8
from sua import *
# Spanish (español) language pack for the sua framework.
# Reuses the Latin word order; defines noun pluralization and the suffix
# tables for regular -ar verbs, keyed by person constants (JE, TU, ELLE,
# IT, IL, NOUS, VOUS, ILS imported from sua).
data = {
    'order': latin['order'],
    'order_triple': latin['order_triple'],
    'plural': {
        'suffix': {'':'s'}, # Noun classes, key-length = 1
    },
    'conjugate': {
        'suffix': {
            'ar': {
                'inf': 'ar',
                # tdy = "today" (present tense endings)
                'tdy': {JE:'o', TU:'as', ELLE:'a', IT:'a', IL:'a', NOUS:'amos', VOUS:'avos', ILS:'an'},
                # tmw = "tomorrow" (future tense endings)
                'tmw': {JE:u'aré', TU:'arás', ELLE:u'ará', IT:u'ará', IL:u'ará', NOUS:'aremos', VOUS:'aréis', ILS:'arán'},
                # ydy = "yesterday" (past tense endings)
                'ydy': {JE:u'é', TU:'aste', ELLE:u'ó', IT:u'ó', IL:u'ó', NOUS:'amos', VOUS:'asteis', ILS:'aron'},
            },
        },
    },
    # Literal post-processing substitutions applied to generated text.
    'subs': [
        ('[a]', ''),
        ('_', ''),
        ('la libro', 'el libro'),
    ],
}
| kasahorow/kwl | data/sua_espanyol.py | Python | bsd-2-clause | 713 |
#! /usr/bin/env python
import abc
def ident(x):
    """Identity morphism: return the argument unchanged.

    Named ``ident`` because ``id`` is already a Python builtin with
    different behavior.
    """
    return x
def compose(*functions):
    """Compose any number of single-argument functions, right to left.

    ``compose(f, g)(x) == f(g(x))``; with no arguments it is the identity.
    Taken from:
    https://mathieularose.com/function-composition-in-python/#solution
    """
    def _chain(outer, inner):
        return lambda x: outer(inner(x))
    return reduce(_chain, functions, lambda x: x)
class Bifunctor(object):
    # Abstract base: a type mapped over by two functions at once.
    # NOTE: ``__metaclass__`` is the Python 2 way to set a metaclass.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def bimap(self, g, h):
        """Map g over the first component and h over the second."""
        pass
class Eq(object):
    # Abstract base: types supporting value equality via equals().
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def equals(self, other):
        """Return True if self and other are equal by value."""
        pass
class Pair(Bifunctor, Eq):
    """Product bifunctor: a pair whose components are mapped independently."""
    def __init__(self, x, y):
        self._x = x
        self._y = y
    def GetX(self):
        return self._x
    def SetX(self, val):
        self._x = val
    x = property(GetX, SetX)
    def GetY(self):
        return self._y
    def SetY(self, val):
        self._y = val
    y = property(GetY, SetY)
    def bimap(self, g, h):
        # g acts on the first component, h on the second.
        return Pair(g(self.x), h(self.y))
    def equals(self, other):
        # Component-wise value equality.
        return bool(self.x == other.x and self.y == other.y)
class K2(Bifunctor, Eq):
    """Constant bifunctor: bimap ignores both functions."""
    def __init__(self, val):
        self._val = val
    def GetVal(self):
        return self._val
    def SetVal(self, val):
        self._val = val
    val = property(GetVal, SetVal)
    def bimap(self, g, h):
        # Constant functor: mapping leaves the object untouched.
        return self
    def equals(self, other):
        return bool(self.val == other.val)
class Fst(Bifunctor, Eq):
    """First applicative bifunctor: bimap applies only g."""
    def __init__(self, val):
        self._val = val
    def GetVal(self):
        return self._val
    def SetVal(self, val):
        self._val = val
    val = property(GetVal, SetVal)
    def bimap(self, g, h):
        # Only the first function acts on the stored value.
        return Fst(g(self.val))
    def equals(self, other):
        return bool(self.val == other.val)
class Snd(Bifunctor, Eq):
    """Second applicative bifunctor: bimap applies only h."""
    def __init__(self, val):
        self._val = val
    def GetVal(self):
        return self._val
    def SetVal(self, val):
        self._val = val
    val = property(GetVal, SetVal)
    def bimap(self, g, h):
        # Only the second function acts on the stored value.
        return Snd(h(self.val))
    def equals(self, other):
        return bool(self.val == other.val)
# Sample morphisms used by main()'s bifunctor-law checks.
def g1(x):
    """Add 1."""
    return x + 1
def g2(x):
    """Add 2."""
    return x + 2
def h1(x):
    """Multiply by 2."""
    return x * 2
def h2(x):
    """Multiply by 3."""
    return x * 3
def main():
    """Verify the bifunctor laws (identity and composition) for each type."""
    assert Pair(1, 2).bimap(ident, ident).equals(ident(Pair(1, 2)))
    assert Pair(1, 2).bimap(g1, h1).bimap(g2, h2).equals(Pair(1, 2).bimap(compose(g1, g2), compose(h1, h2)))
    assert K2(1).bimap(ident, ident).equals(ident(K2(1)))
    assert K2(1).bimap(g1, h1).bimap(g2, h2).equals(K2(1).bimap(compose(g1, g2), compose(h1, h2)))
    assert Fst(1).bimap(ident, ident).equals(ident(Fst(1)))
    assert Fst(1).bimap(g1, h1).bimap(g2, h2).equals(Fst(1).bimap(compose(g1, g2), compose(h1, h2)))
    assert Snd(1).bimap(ident, ident).equals(ident(Snd(1)))
    assert Snd(1).bimap(g1, h1).bimap(g2, h2).equals(Snd(1).bimap(compose(g1, g2), compose(h1, h2)))
    # print() call form is valid in both Python 2 and Python 3
    # (the original `print "..."` statement is a SyntaxError on Python 3).
    print("All assertions passed.")


if __name__ == '__main__':
    main()
| capn-freako/Haskell_Misc | Category_Theory_for_Programmers/bifunctor.py | Python | bsd-3-clause | 3,475 |
##
# @package RAMS.NXT
# @file NXTLightSensor.py
# @author Brian Kim
# @date 7/24/14
# @brief a python wrapper around the LightSensorAssembly to interface with
# the NXT light sensors
from NXTSensor import NXTSensor
class NXTLightSensor( NXTSensor ):
    """Python wrapper for the NXT light sensor assembly."""
    def getIntensity( self ):
        # The assembly's getIntensity() returns a sequence; the reading
        # is its first element.
        return self.asm().getIntensity()[0]
    def getData( self ):
        """Generic accessor: delegates to getIntensity()."""
        return self.getIntensity()
| briansan/rams | RAMS/nxt/NXTLightSensor.py | Python | bsd-3-clause | 415 |
import os
import tempfile
import pytest
from astropy.io.fits.tests import FitsTestCase
from stsci.tools import fileutil as F
from stsci.tools import stpyfits
class TestIsFits(FitsTestCase):

    def setup(self):
        # Point the FitsTestCase fixtures at this package's data directory
        # and a fresh scratch directory.
        self.data_dir = os.path.join(os.path.dirname(__file__), 'data')
        self.temp_dir = tempfile.mkdtemp(prefix='isfits-test-')

    def test_isFits_fname(self):
        """isFits classifies files given as path names."""
        expectations = (('cdva2.fits', (True, 'simple')),
                        ('o4sp040b0_raw.fits', (True, 'mef')),
                        ('waivered.fits', (True, 'waiver')))
        for fname, verdict in expectations:
            assert F.isFits(self.data(fname)) == verdict

        # isFits only verifies files with valid FITS extensions (.fits,...)
        # Does not matter if file does not exist.
        assert F.isFits(self.data('simple')) == (False, None)

        # But if it has FITS extension, should raise error if nonexistent.
        with pytest.raises(IOError):
            F.isFits('isfits/no_such_file.fits')

    def test_isFits_file_object(self):
        """isFits also classifies already-open HDU list objects."""
        expectations = (('cdva2.fits', (True, 'simple')),
                        ('o4sp040b0_raw.fits', (True, 'mef')),
                        ('waivered.fits', (True, 'waiver')))
        for fname, verdict in expectations:
            with stpyfits.open(self.data(fname)) as f:
                assert F.isFits(f) == verdict
| spacetelescope/stsci.tools | lib/stsci/tools/tests/test_isfits.py | Python | bsd-3-clause | 1,326 |
"""empty message
Revision ID: 232a843e636b
Revises: 427922082575
Create Date: 2015-10-26 14:35:17.299786
"""
# revision identifiers, used by Alembic.
revision = '232a843e636b'
down_revision = '427922082575'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply this revision: create the ``patient_referral_comment`` table
    and link ``patient_screening_result`` rows to referrals via a new
    nullable FK column."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('patient_referral_comment',
    sa.Column('created', sa.DateTime(), nullable=True),
    sa.Column('last_modified', sa.DateTime(), nullable=True),
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('patient_referral_id', sa.Integer(), nullable=True),
    sa.Column('notes', sa.Text(), nullable=True),
    sa.Column('last_modified_by_id', sa.Integer(), nullable=True),
    sa.Column('created_by_id', sa.Integer(), nullable=True),
    # use_alter defers FK creation to a later ALTER statement, avoiding
    # circular-dependency issues with the app_user table.
    sa.ForeignKeyConstraint(['created_by_id'], ['app_user.id'], name='fk_created_by_id', use_alter=True),
    sa.ForeignKeyConstraint(['last_modified_by_id'], ['app_user.id'], name='fk_last_modified_by_id', use_alter=True),
    sa.ForeignKeyConstraint(['patient_referral_id'], ['patient_referral.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.add_column(u'patient_screening_result', sa.Column('patient_referral_id', sa.Integer(), nullable=True))
    op.create_foreign_key(None, 'patient_screening_result', 'patient_referral', ['patient_referral_id'], ['id'])
    ### end Alembic commands ###
def downgrade():
    """Revert this revision: drop the FK and column added to
    ``patient_screening_result``, then drop ``patient_referral_comment``."""
    ### commands auto generated by Alembic - please adjust! ###
    # Constraint name is None because create_foreign_key above used an
    # auto-generated name.
    op.drop_constraint(None, 'patient_screening_result', type_='foreignkey')
    op.drop_column(u'patient_screening_result', 'patient_referral_id')
    op.drop_table('patient_referral_comment')
    ### end Alembic commands ###
| codeforamerica/rva-screening | migrations/versions/232a843e636b_.py | Python | bsd-3-clause | 1,712 |
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'ppurge'.
"""
from primaires.interpreteur.commande.commande import Commande
class CmdPpurge(Commande):

    """Command 'ppurge'.

    Removes from the current room every PNJ (NPC) built on the given
    prototype.  The deletion is permanent; the prototype itself is kept.

    """

    def __init__(self):
        """Command constructor."""
        Commande.__init__(self, "ppurge", "ppurge")
        self.groupe = "administrateur"
        self.nom_categorie = "batisseur"
        self.schema = "<ident_prototype_pnj>"
        self.aide_courte = "retire un PNJ de la salle"
        self.aide_longue = \
            "Cette commande permet de faire disparaître un PNJ " \
            "de la salle. Vous devez préciser en paramètre l'identifiant " \
            "du prototype et tous les PNJ de ce prototype présents " \
            "dans la salle seront effacés. Cette suppression est " \
            "définitive. Le prototype ne sera, en revanche, pas supprimé."

    def interpreter(self, personnage, dic_masques):
        """Interpret the command for *personnage* (the issuing admin)."""
        prototype = dic_masques["ident_prototype_pnj"].prototype
        salle = personnage.salle
        nb_det = 0
        # Iterate over a copy: retirer_personnage mutates the room's
        # character collection while we are walking it.  Also use a
        # distinct loop variable -- the original code rebound
        # 'personnage', so the feedback below was sent to the last
        # (already deleted) PNJ instead of the command issuer.
        for pnj in list(salle.personnages):
            if hasattr(pnj, "prototype") and pnj.prototype is prototype:
                salle.retirer_personnage(pnj)
                pnj.salle = None
                importeur.pnj.supprimer_PNJ(pnj.identifiant)
                nb_det += 1

        if nb_det == 1:
            personnage << "{} PNJ a été retiré de cette salle.".format(
                    nb_det)
        elif nb_det > 1:
            personnage << "{} PNJs ont été retirés de cette salle.".format(
                    nb_det)
        else:
            personnage << "|att|Aucun PNJ modelé sur ce prototype n'est " \
                    "présent dans cette salle.|ff|"
| vlegoff/tsunami | src/primaires/pnj/commandes/ppurge/__init__.py | Python | bsd-3-clause | 3,426 |
"""
PowerSwitch graphics tests
"""
cases = [
('Generate FSM from PowerSwitch model program',
'pma PowerSwitch'),
('Generate dot graphics commands from generated PowerSwitchFSM',
'pmg PowerSwitchFSM'),
('Generate SVG file from dot commands',
'dotsvg PowerSwitchFSM'),
('Generate dot commands from SpeedControl FSM',
'pmg SpeedControl'),
('Generate SVG file from dot commands',
'dotsvg SpeedControl'),
('Generate FSM from composition of PowerSwitch and SpeedControl, show interleaving',
'pma SpeedControl PowerSwitch -o PowerSpeed'),
('Generate dot commands from composed FSM',
'pmg PowerSpeed'),
('Generate SVG from dot',
'dotsvg PowerSpeed')
# Now you can display PowerSwitch.svg, SpeedControl.svg and PowerSpeed.svg
# in three browser tabs
]
| jon-jacky/PyModel | samples/PowerSwitch/test_graphics.py | Python | bsd-3-clause | 843 |
#!/usr/bin/env python
import os
import sys
try:
from setuptools import setup
from setuptools.extension import Extension
except ImportError:
raise RuntimeError('setuptools is required')
import versioneer
# Package metadata shown on PyPI.
DESCRIPTION = ('A set of functions and classes for simulating the ' +
               'performance of photovoltaic energy systems.')
LONG_DESCRIPTION = """
PVLIB Python is a community supported tool that provides a set of
functions and classes for simulating the performance of photovoltaic
energy systems. PVLIB Python was originally ported from the PVLIB MATLAB
toolbox developed at Sandia National Laboratories and it implements many
of the models and methods developed at the Labs. More information on
Sandia Labs PV performance modeling programs can be found at
https://pvpmc.sandia.gov/. We collaborate with the PVLIB MATLAB project,
but operate independently of it.
We need your help to make pvlib-python a great tool!
Documentation: http://pvlib-python.readthedocs.io
Source code: https://github.com/pvlib/pvlib-python
"""

DISTNAME = 'pvlib'
LICENSE = 'BSD 3-Clause'
AUTHOR = 'pvlib python Developers'
MAINTAINER_EMAIL = 'holmgren@email.arizona.edu'
URL = 'https://github.com/pvlib/pvlib-python'

# Hard runtime dependencies (minimum supported versions).
INSTALL_REQUIRES = ['numpy >= 1.16.0',
                    'pandas >= 0.22.0',
                    'pytz',
                    'requests',
                    'scipy >= 1.2.0',
                    'h5py']

# include dataclasses as a dependency only on python 3.6
if sys.version_info.major == 3 and sys.version_info.minor == 6:
    INSTALL_REQUIRES.append('dataclasses')

TESTS_REQUIRE = ['nose', 'pytest', 'pytest-cov', 'pytest-mock',
                 'requests-mock', 'pytest-timeout', 'pytest-rerunfailures',
                 'pytest-remotedata']

# Optional dependency groups installable as pvlib[optional], pvlib[doc], ...
EXTRAS_REQUIRE = {
    'optional': ['cython', 'ephem', 'netcdf4', 'nrel-pysam', 'numba',
                 'pvfactors', 'siphon', 'statsmodels',
                 'cftime >= 1.1.1'],
    'doc': ['ipython', 'matplotlib', 'sphinx == 3.1.2',
            'pydata-sphinx-theme == 0.8.0', 'sphinx-gallery',
            'docutils == 0.15.2', 'pillow', 'netcdf4', 'siphon',
            'sphinx-toggleprompt >= 0.0.5'],
    'test': TESTS_REQUIRE
}
# 'all' aggregates (deduplicated, sorted) every optional extra above.
EXTRAS_REQUIRE['all'] = sorted(set(sum(EXTRAS_REQUIRE.values(), [])))

CLASSIFIERS = [
    'Development Status :: 4 - Beta',
    'License :: OSI Approved :: BSD License',
    'Operating System :: OS Independent',
    'Intended Audience :: Science/Research',
    'Programming Language :: Python',
    'Programming Language :: Python :: 3',
    'Topic :: Scientific/Engineering',
]

setuptools_kwargs = {
    'zip_safe': False,
    'scripts': [],
    'include_package_data': True,
    'python_requires': '>=3.6'
}

PROJECT_URLS = {
    "Bug Tracker": "https://github.com/pvlib/pvlib-python/issues",
    "Documentation": "https://pvlib-python.readthedocs.io/",
    "Source Code": "https://github.com/pvlib/pvlib-python",
}
# set up pvlib packages to be installed and extensions to be compiled
PACKAGES = ['pvlib']
extensions = []

spa_sources = ['pvlib/spa_c_files/spa.c', 'pvlib/spa_c_files/spa_py.c']
spa_depends = ['pvlib/spa_c_files/spa.h']

# The C extension is only registered when every SPA source and header is
# present next to this setup script.
_setup_dir = os.path.dirname(__file__)
if all(os.path.exists(os.path.join(_setup_dir, relpath))
       for relpath in spa_sources + spa_depends):
    print('all spa_c files found')
    PACKAGES.append('pvlib.spa_c_files')
    extensions.append(Extension('pvlib.spa_c_files.spa_py',
                                sources=spa_sources, depends=spa_depends))
else:
    print('WARNING: spa_c files not detected. ' +
          'See installation instructions for more information.')
# Version string and build commands are managed by versioneer.
setup(name=DISTNAME,
      version=versioneer.get_version(),
      cmdclass=versioneer.get_cmdclass(),
      packages=PACKAGES,
      install_requires=INSTALL_REQUIRES,
      extras_require=EXTRAS_REQUIRE,
      tests_require=TESTS_REQUIRE,
      ext_modules=extensions,
      description=DESCRIPTION,
      long_description=LONG_DESCRIPTION,
      author=AUTHOR,
      maintainer_email=MAINTAINER_EMAIL,
      license=LICENSE,
      url=URL,
      project_urls=PROJECT_URLS,
      classifiers=CLASSIFIERS,
      **setuptools_kwargs)
| pvlib/pvlib-python | setup.py | Python | bsd-3-clause | 4,223 |
"""The user resource."""
from __future__ import unicode_literals
from . import Resource
class User(Resource):
    """An authorized user of the Helium API.

    A user represents a single developer using the Helium API. Each
    user gets their own API key, which gives them access to all the
    resources in the :class:`Organization` that the user belongs to.
    """
    pass  # All behavior is inherited from Resource.
| helium/helium-python | helium/user.py | Python | bsd-3-clause | 383 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import mock
from django.core import mail
from django.utils import timezone
from exam import fixture
from mock import Mock
from sentry.digests.notifications import (
build_digest,
event_to_record,
)
from sentry.interfaces.stacktrace import Stacktrace
from sentry.models import (
Event, Group, OrganizationMember, OrganizationMemberTeam, Rule
)
from sentry.plugins import Notification
from sentry.plugins.sentry_mail.models import MailPlugin
from sentry.testutils import TestCase
from sentry.utils.email import MessageBuilder
class MailPluginTest(TestCase):
    """Tests for the built-in Sentry mail notification plugin."""

    @fixture
    def plugin(self):
        return MailPlugin()

    @mock.patch('sentry.models.ProjectOption.objects.get_value', Mock(side_effect=lambda p, k, d: d))
    @mock.patch('sentry.plugins.sentry_mail.models.MailPlugin.get_sendable_users', Mock(return_value=[]))
    def test_should_notify_no_sendable_users(self):
        assert not self.plugin.should_notify(group=Mock(), event=Mock())

    def test_simple_notification(self):
        group = self.create_group(message='Hello world')
        event = self.create_event(group=group, message='Hello world')

        rule = Rule.objects.create(project=self.project, label='my rule')

        notification = Notification(event=event, rule=rule)

        with self.settings(SENTRY_URL_PREFIX='http://example.com'):
            self.plugin.notify(notification)

        msg = mail.outbox[0]
        assert msg.subject == '[Sentry] [foo Bar] ERROR: Hello world'
        # (Removed a leftover debug `print dir(msg)` that polluted test
        # output and tied this file to the Python 2 print statement.)
        assert 'my rule' in msg.alternatives[0][0]

    @mock.patch('sentry.plugins.sentry_mail.models.MailPlugin._send_mail')
    def test_notify_users_renders_interfaces_with_utf8(self, _send_mail):
        group = Group(
            id=2,
            first_seen=timezone.now(),
            last_seen=timezone.now(),
            project=self.project,
        )

        stacktrace = Mock(spec=Stacktrace)
        stacktrace.to_email_html.return_value = u'רונית מגן'
        stacktrace.get_title.return_value = 'Stacktrace'

        event = Event()
        event.group = group
        event.project = self.project
        event.message = 'hello world'
        event.interfaces = {'sentry.interfaces.Stacktrace': stacktrace}

        notification = Notification(event=event)

        with self.settings(SENTRY_URL_PREFIX='http://example.com'):
            self.plugin.notify(notification)

        stacktrace.get_title.assert_called_once_with()
        stacktrace.to_email_html.assert_called_once_with(event)

    @mock.patch('sentry.plugins.sentry_mail.models.MailPlugin._send_mail')
    def test_notify_users_renders_interfaces_with_utf8_fix_issue_422(self, _send_mail):
        group = Group(
            id=2,
            first_seen=timezone.now(),
            last_seen=timezone.now(),
            project=self.project,
        )

        stacktrace = Mock(spec=Stacktrace)
        stacktrace.to_email_html.return_value = u'רונית מגן'
        stacktrace.get_title.return_value = 'Stacktrace'

        event = Event()
        event.group = group
        event.project = self.project
        event.message = 'Soubor ji\xc5\xbe existuje'
        event.interfaces = {'sentry.interfaces.Stacktrace': stacktrace}

        notification = Notification(event=event)

        with self.settings(SENTRY_URL_PREFIX='http://example.com'):
            self.plugin.notify(notification)

        stacktrace.get_title.assert_called_once_with()
        stacktrace.to_email_html.assert_called_once_with(event)

    @mock.patch('sentry.plugins.sentry_mail.models.MailPlugin._send_mail')
    def test_notify_users_does_email(self, _send_mail):
        group = Group(
            id=2,
            first_seen=timezone.now(),
            last_seen=timezone.now(),
            project=self.project,
            message='hello world',
            logger='root',
        )

        event = Event(
            group=group,
            message=group.message,
            project=self.project,
            datetime=group.last_seen,
        )

        notification = Notification(event=event)

        with self.settings(SENTRY_URL_PREFIX='http://example.com'):
            self.plugin.notify(notification)

        # Fixed: compare ints with ==, not `is` (identity of small ints is
        # a CPython implementation detail).
        assert _send_mail.call_count == 1
        args, kwargs = _send_mail.call_args
        self.assertEquals(kwargs.get('project'), self.project)
        self.assertEquals(kwargs.get('group'), group)
        assert kwargs.get('subject') == u"[{0} {1}] ERROR: hello world".format(
            self.team.name, self.project.name)

    @mock.patch('sentry.plugins.sentry_mail.models.MailPlugin._send_mail')
    def test_multiline_error(self, _send_mail):
        group = Group(
            id=2,
            first_seen=timezone.now(),
            last_seen=timezone.now(),
            project=self.project,
            message='hello world\nfoo bar',
            logger='root',
        )

        event = Event(
            group=group,
            message=group.message,
            project=self.project,
            datetime=group.last_seen,
        )

        notification = Notification(event=event)

        with self.settings(SENTRY_URL_PREFIX='http://example.com'):
            self.plugin.notify(notification)

        assert _send_mail.call_count == 1
        args, kwargs = _send_mail.call_args
        # Only the first line of a multiline message ends up in the subject.
        assert kwargs.get('subject') == u"[{0} {1}] ERROR: hello world".format(
            self.team.name, self.project.name)

    def test_get_sendable_users(self):
        from sentry.models import UserOption, User

        user = self.create_user(email='foo@example.com', is_active=True)
        user2 = self.create_user(email='baz@example.com', is_active=True)
        self.create_user(email='baz2@example.com', is_active=True)

        # user with inactive account
        self.create_user(email='bar@example.com', is_active=False)
        # user not in any groups
        self.create_user(email='bar2@example.com', is_active=True)

        organization = self.create_organization(owner=user)
        team = self.create_team(organization=organization)

        project = self.create_project(name='Test', team=team)
        OrganizationMemberTeam.objects.create(
            organizationmember=OrganizationMember.objects.get(
                user=user,
                organization=organization,
            ),
            team=team,
        )
        self.create_member(user=user2, organization=organization, teams=[team])

        # all members
        assert (sorted(set([user.pk, user2.pk])) ==
                sorted(self.plugin.get_sendable_users(project)))

        # disabled user2
        UserOption.objects.create(key='mail:alert', value=0,
                                  project=project, user=user2)
        assert user2.pk not in self.plugin.get_sendable_users(project)

        user4 = User.objects.create(username='baz4', email='bar@example.com',
                                    is_active=True)
        self.create_member(user=user4, organization=organization, teams=[team])
        assert user4.pk in self.plugin.get_sendable_users(project)

        # disabled by default user4
        uo1 = UserOption.objects.create(key='subscribe_by_default', value='0',
                                        project=project, user=user4)
        assert user4.pk not in self.plugin.get_sendable_users(project)

        uo1.delete()

        # the option value may also arrive as a unicode string
        UserOption.objects.create(key='subscribe_by_default', value=u'0',
                                  project=project, user=user4)
        assert user4.pk not in self.plugin.get_sendable_users(project)

    def test_notify_users_with_utf8_subject(self):
        group = self.create_group(message=u'רונית מגן')
        event = self.create_event(group=group, message='Hello world')

        notification = Notification(event=event)

        with self.settings(SENTRY_URL_PREFIX='http://example.com'):
            self.plugin.notify(notification)

        msg = mail.outbox[0]
        assert msg.subject == u'[Sentry] [foo Bar] ERROR: רונית מגן'

    @mock.patch.object(MessageBuilder, 'send', autospec=True)
    def test_notify_digest(self, send):
        project = self.event.project
        rule = project.rule_set.all()[0]
        digest = build_digest(
            project,
            (
                event_to_record(self.event, (rule,)),
            ),
        )
        self.plugin.notify_digest(project, digest)
        assert send.call_count == 1
| imankulov/sentry | tests/sentry/plugins/mail/tests.py | Python | bsd-3-clause | 8,500 |
"""
Utilities to help solve Python path/directory/file issues regarding files being
installed in either the dist-packages or the site-packages folder.
"""
from pathlib import Path
from hutts_verification.utils.hutts_logger import logger
__author__ = "Andreas Nel"
__copyright__ = "Copyright 2017, Java the Hutts"
__license__ = "BSD"
__maintainer__ = "Andreas Nel"
__email__ = "nel.andreas1@gmail.com"
__status__ = "Development"
def correct_path(path):
    """
    Check whether the given path (or its dist-packages/site-packages
    counterpart) exists and return the variant that does.

    :param path (str || Path): The path that has to be checked.

    Returns:
        (str): The existing path if one is found, else None (None is also
            returned on any unexpected error -- this is a best-effort
            helper).
    """
    search_path = Path(path)
    logger.debug("Looking for " + str(search_path))
    result = None
    try:
        if search_path.exists():
            result = search_path
        elif "dist-packages" in search_path.parts:
            result = _transform_path(search_path, "dist-packages", "site-packages")
        elif "site-packages" in search_path.parts:
            result = _transform_path(search_path, "site-packages", "dist-packages")
        if result is None:
            # No branch matched.  The original code reached this state with
            # result == '' and relied on ''.exists() raising AttributeError
            # into the except below; return the same outcome explicitly.
            return None
        logger.debug("Result is: " + str(result))
        logger.debug("Result exists: " + str(result.exists()))
        return str(result) if result.exists() else None
    except Exception:
        return None
def _transform_path(path, search_str, replace_str):
    """
    Replace a single directory component in the given path by the given
    string and return the new path.

    :param path (str || Path): The path that has to be transformed.
    :param search_str (str): The directory component that has to be changed.
    :param replace_str (str): The component that search_str is changed to.

    Returns:
        (Path): The new path if the replacement was successful, else the
            original path (always returned as a Path object).
    """
    result = Path(path)
    # Read the components from the normalised Path object.  The original
    # used ``path.parts`` directly, which raised AttributeError for str
    # input even though the docstring advertises str support.
    subdirectories = list(result.parts)
    if search_str in subdirectories:
        subdirectories[subdirectories.index(search_str)] = replace_str
        result = Path(*subdirectories)
    return result
| javaTheHutts/Java-the-Hutts | src/main/python/hutts_verification/utils/pypath.py | Python | bsd-3-clause | 2,135 |
# coding: utf-8
"""
update the host record in database.
"""
import UniDomain.udPolicy.udPolicy as udPolicy
import logging
class updateDbPolicy(udPolicy.udPolicy):
    """Policy step that writes the host's DNS record back to the database."""

    def __init__(self, engine, db, data, config):
        """Delegate construction entirely to the generic udPolicy base."""
        udPolicy.udPolicy.__init__(self, engine, db, data, config)

    def update(self):
        """Persist the DNS record, logging a warning when the DB refuses."""
        succeeded = self.db.update_dnsRecord()
        if not succeeded:
            logging.warning('updateDbPolicy: failed to update host data in database.')
| spahan/unixdmoain | lib/udPolicy/updateDbPolicy.py | Python | bsd-3-clause | 493 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art

# Generated benchmark case: build and forecast a 32-point daily artificial
# series with a constant trend, cycle length 5, AR order 12 and a
# Quantization transform; no noise (sigma=0) and no exogenous variables.
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 5, transform = "Quantization", sigma = 0.0, exog_count = 0, ar_order = 12);
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from sqlalchemy import Column, Integer, DateTime, UniqueConstraint
from sqlalchemy.sql import functions, expression
from sqlalchemy.exc import DataError, IntegrityError, ProgrammingError
from app.models.base import Model
from app.models.user import User
from app.models.content import Topic, Post
from app.libs.db import db_session
class Subscription(Model):
    """A user's subscription to a topic (unique per user/topic pair)."""
    __table_args__ = (
        UniqueConstraint('user_id', 'topic_id', name='s_usr_tpc'),
    )
    user_id = Column('user_id', Integer(), index=True, nullable=False)
    topic_id = Column('topic_id', Integer(), index=True, nullable=False)
    date = Column('date', DateTime(timezone=True), default=functions.now())

    @classmethod
    def list_by_topic(cls, topic_id):
        # All subscriptions attached to one topic.
        return cls.query.filter(cls.topic_id==topic_id).all()

    @classmethod
    def list_by_user(cls, username):
        # All subscriptions held by the named user.
        user = User.get_by_name(username)
        return cls.query.filter(cls.user_id==user.id).all()

    @classmethod
    def get_by_user_topic(cls, username, topic_id):
        # The single subscription for (user, topic), or None.
        user = User.get_by_name(username)
        r = cls.query.filter(expression.and_(cls.user_id==user.id,
                                             cls.topic_id==topic_id))
        return r.first()

    @classmethod
    def create(cls, username, topic_id):
        # Insert a new subscription; roll back and re-raise on DB errors.
        user = User.get_by_name(username)
        s = cls(user_id=user.id, topic_id=topic_id)
        try:
            db_session.add(s)
            db_session.commit()
        except (DataError, IntegrityError, ProgrammingError):
            db_session.rollback()
            raise
        return s

    def to_dict(self):
        # JSON-friendly representation; embeds the full topic dict.
        return {
            'id': self.id,
            'topic': Topic.get(self.topic_id).to_dict(),
            'date': self.date,
        }
class Favorite(Model):
    """A user's bookmark of a post (unique per user/post pair)."""
    __table_args__ = (
        UniqueConstraint('user_id', 'post_id', name='f_usr_pst'),
    )
    user_id = Column('user_id', Integer(), index=True, nullable=False)
    post_id = Column('post_id', Integer(), index=True, nullable=False)
    date = Column('date', DateTime(timezone=True), default=functions.now())

    @classmethod
    def count_by_post(cls, post_id):
        return cls.query.filter(cls.post_id==post_id).count()

    @classmethod
    def list_by_user(cls, username):
        user = User.get_by_name(username)
        return cls.query.filter(cls.user_id==user.id).all()

    @classmethod
    def list_by_post(cls, post_id):
        return cls.query.filter(cls.post_id==post_id).all()

    @classmethod
    def get_by_user_post(cls, username, post_id):
        # The single favorite for (user, post), or None.
        user = User.get_by_name(username)
        r = cls.query.filter(expression.and_(cls.user_id==user.id,
                                             cls.post_id==post_id))
        return r.first()

    @classmethod
    def create(cls, username, post_id):
        # Insert a new favorite; roll back and re-raise on DB errors.
        user = User.get_by_name(username)
        f = cls(user_id=user.id, post_id=post_id)
        try:
            db_session.add(f)
            db_session.commit()
        except (DataError, IntegrityError, ProgrammingError):
            db_session.rollback()
            raise
        return f

    def to_dict(self):
        return {
            'id': self.id,
            'post': self.post.to_dict(),
            'date': self.date,
        }

    @property
    def post(self):
        # Lazily resolved post object (not a SQLAlchemy relationship).
        return Post.get(self.post_id)
class TopicUpVote(Model):
    """An up-vote by a user on a topic (unique per user/topic pair)."""
    __table_args__ = (
        UniqueConstraint('user_id', 'topic_id', name='up_usr_tpc'),
    )
    topic_id = Column('topic_id', Integer(), index=True, nullable=False)
    user_id = Column('user_id', Integer(), index=True, nullable=False)
    date = Column('date', DateTime(timezone=True), default=functions.now())

    @classmethod
    def count_by_topic(cls, topic_id):
        return cls.query.filter(cls.topic_id==topic_id).count()

    @classmethod
    def list_by_topic(cls, topic_id):
        return cls.query.filter(cls.topic_id==topic_id).all()

    @classmethod
    def get_by_user_topic(cls, username, topic_id):
        # The single up-vote for (user, topic), or None.
        user = User.get_by_name(username)
        r = cls.query.filter(expression.and_(cls.topic_id==topic_id,
                                             cls.user_id==user.id))
        return r.first()

    @classmethod
    def create(cls, username, topic_id):
        # Insert a new up-vote; roll back and re-raise on DB errors.
        user = User.get_by_name(username)
        tu = cls(user_id=user.id, topic_id=topic_id)
        try:
            db_session.add(tu)
            db_session.commit()
        except (DataError, IntegrityError, ProgrammingError):
            db_session.rollback()
            raise
        return tu

    def to_dict(self):
        return {
            'id': self.id,
            'user': self.user.username,
            'date': self.date,
        }

    @property
    def user(self):
        # Lazily resolved voter (not a SQLAlchemy relationship).
        return User.get(self.user_id)
class TopicDownVote(Model):
    """A down-vote by a user on a topic (unique per user/topic pair).

    NOTE(review): unlike TopicUpVote, this class has no ``to_dict``/``user``
    helpers -- presumably down-votes are only counted, never serialized;
    verify against callers.
    """
    __table_args__ = (
        UniqueConstraint('user_id', 'topic_id', name='dn_usr_tpc'),
    )
    topic_id = Column('topic_id', Integer(), index=True, nullable=False)
    user_id = Column('user_id', Integer(), index=True, nullable=False)
    date = Column('date', DateTime(timezone=True), default=functions.now())

    @classmethod
    def count_by_topic(cls, topic_id):
        return cls.query.filter(cls.topic_id==topic_id).count()

    @classmethod
    def list_by_topic(cls, topic_id):
        return cls.query.filter(cls.topic_id==topic_id).all()

    @classmethod
    def get_by_user_topic(cls, username, topic_id):
        # The single down-vote for (user, topic), or None.
        user = User.get_by_name(username)
        r = cls.query.filter(expression.and_(cls.topic_id==topic_id,
                                             cls.user_id==user.id))
        return r.first()

    @classmethod
    def create(cls, username, topic_id):
        # Insert a new down-vote; roll back and re-raise on DB errors.
        user = User.get_by_name(username)
        td = cls(user_id=user.id, topic_id=topic_id)
        try:
            db_session.add(td)
            db_session.commit()
        except (DataError, IntegrityError, ProgrammingError):
            db_session.rollback()
            raise
        return td
class PostUpVote(Model):
    """An up-vote by a user on a post (unique per user/post pair)."""
    __table_args__ = (
        UniqueConstraint('user_id', 'post_id', name='up_usr_pst'),
    )
    post_id = Column('post_id', Integer(), index=True, nullable=False)
    user_id = Column('user_id', Integer(), index=True, nullable=False)
    date = Column('date', DateTime(timezone=True), default=functions.now())

    @classmethod
    def count_by_post(cls, post_id):
        return cls.query.filter(cls.post_id==post_id).count()

    @classmethod
    def list_by_post(cls, post_id):
        return cls.query.filter(cls.post_id==post_id).all()

    @classmethod
    def get_by_user_post(cls, username, post_id):
        # The single up-vote for (user, post), or None.
        user = User.get_by_name(username)
        r = cls.query.filter(expression.and_(cls.post_id==post_id,
                                             cls.user_id==user.id))
        return r.first()

    @classmethod
    def create(cls, username, post_id):
        # Insert a new up-vote; roll back and re-raise on DB errors.
        user = User.get_by_name(username)
        pu = cls(user_id=user.id, post_id=post_id)
        try:
            db_session.add(pu)
            db_session.commit()
        except (DataError, IntegrityError, ProgrammingError):
            db_session.rollback()
            raise
        return pu

    def to_dict(self):
        return {
            'id': self.id,
            'user': self.user.username,
            'date': self.date,
        }

    @property
    def user(self):
        # Lazily resolved voter (not a SQLAlchemy relationship).
        return User.get(self.user_id)
class PostDownVote(Model):
    """A down-vote by a user on a post (unique per user/post pair)."""
    __table_args__ = (
        UniqueConstraint('user_id', 'post_id', name='dn_usr_pst'),
    )
    post_id = Column('post_id', Integer(), index=True, nullable=False)
    user_id = Column('user_id', Integer(), index=True, nullable=False)
    date = Column('date', DateTime(timezone=True), default=functions.now())

    @classmethod
    def count_by_post(cls, post_id):
        return cls.query.filter(cls.post_id==post_id).count()

    @classmethod
    def list_by_post(cls, post_id):
        return cls.query.filter(cls.post_id==post_id).all()

    @classmethod
    def get_by_user_post(cls, username, post_id):
        # The single down-vote for (user, post), or None.
        user = User.get_by_name(username)
        r = cls.query.filter(expression.and_(cls.post_id==post_id,
                                             cls.user_id==user.id))
        return r.first()

    @classmethod
    def create(cls, username, post_id):
        # Insert a new down-vote; roll back and re-raise on DB errors.
        user = User.get_by_name(username)
        pd = cls(user_id=user.id, post_id=post_id)
        try:
            db_session.add(pd)
            db_session.commit()
        except (DataError, IntegrityError, ProgrammingError):
            db_session.rollback()
            raise
        return pd
class CommentUpVote(Model):
    """An up-vote by a user on a comment (unique per user/comment pair)."""
    __table_args__ = (
        UniqueConstraint('user_id', 'comment_id', name='up_usr_cmt'),
    )
    comment_id = Column('comment_id', Integer(), index=True, nullable=False)
    user_id = Column('user_id', Integer(), index=True, nullable=False)
    date = Column('date', DateTime(timezone=True), default=functions.now())

    @classmethod
    def count_by_comment(cls, comment_id):
        return cls.query.filter(cls.comment_id==comment_id).count()

    @classmethod
    def list_by_comment(cls, comment_id):
        return cls.query.filter(cls.comment_id==comment_id).all()

    @classmethod
    def get_by_user_comment(cls, username, comment_id):
        # The single up-vote for (user, comment), or None.
        user = User.get_by_name(username)
        r = cls.query.filter(expression.and_(cls.comment_id==comment_id,
                                             cls.user_id==user.id))
        return r.first()

    @classmethod
    def create(cls, username, comment_id):
        # Insert a new up-vote; roll back and re-raise on DB errors.
        user = User.get_by_name(username)
        cu = cls(user_id=user.id, comment_id=comment_id)
        try:
            db_session.add(cu)
            db_session.commit()
        except (DataError, IntegrityError, ProgrammingError):
            # BUG FIX: the original called cls.rollback(), a method that
            # does not exist on the model -- it raised AttributeError and
            # left the session dirty.  Roll back the session instead,
            # matching every sibling vote/favorite class in this module.
            db_session.rollback()
            raise
        return cu
class CommentDownVote(Model):
    """A down-vote by a user on a comment (unique per user/comment pair)."""
    __table_args__ = (
        UniqueConstraint('user_id', 'comment_id', name='dn_usr_cmt'),
    )
    comment_id = Column('comment_id', Integer(), index=True, nullable=False)
    user_id = Column('user_id', Integer(), index=True, nullable=False)
    date = Column('date', DateTime(timezone=True), default=functions.now())

    @classmethod
    def count_by_comment(cls, comment_id):
        return cls.query.filter(cls.comment_id==comment_id).count()

    @classmethod
    def list_by_comment(cls, comment_id):
        return cls.query.filter(cls.comment_id==comment_id).all()

    @classmethod
    def get_by_user_comment(cls, username, comment_id):
        # The single down-vote for (user, comment), or None.
        user = User.get_by_name(username)
        r = cls.query.filter(expression.and_(cls.comment_id==comment_id,
                                             cls.user_id==user.id))
        return r.first()

    @classmethod
    def create(cls, username, comment_id):
        # Insert a new down-vote; roll back and re-raise on DB errors.
        user = User.get_by_name(username)
        cd = cls(user_id=user.id, comment_id=comment_id)
        try:
            db_session.add(cd)
            db_session.commit()
        except (DataError, IntegrityError, ProgrammingError):
            db_session.rollback()
            raise
        return cd
| Damnever/2L | app/models/action.py | Python | bsd-3-clause | 11,235 |
# -*- coding: utf8 -*-
"""
.. module:: burpui.exceptions
:platform: Unix
:synopsis: Burp-UI exceptions module.
.. moduleauthor:: Ziirish <hi+burpui@ziirish.me>
"""
# Agent does not need "real" HTTP errors
try:
from werkzeug.exceptions import HTTPException
WERKZEUG = True
except ImportError:
HTTPException = Exception
WERKZEUG = False
class BUIserverException(HTTPException):
    """Raised in case of internal error."""
    # Mirrors an HTTP 500 when werkzeug is available.
    code = 500

    def __init__(self, message="Internal Error", response=None):
        if not WERKZEUG:
            # Plain-Exception fallback (agent mode): store the fields that
            # werkzeug's HTTPException would normally manage.
            self.description = message
            self.response = response
        else:
            HTTPException.__init__(self, message, response)

    def __str__(self):
        return self.description
class TooManyRecordsException(Exception):
    """Raised when there are too many records to treat."""
    pass  # Marker exception; no extra behavior.
| ziirish/burp-ui | burpui/exceptions.py | Python | bsd-3-clause | 871 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import value
from telemetry.web_perf.metrics import timeline_based_metric
_PROCESS_CREATION = 'Startup.BrowserProcessCreation'
_MAIN_ENTRY_POINT = 'Startup.BrowserMainEntryPoint'

# A dictionary that maps metric names to a value, which can be either of
# the two:
# 1. A tuple of one event name if the event itself contains reported duration
# 2. A tuple of two event names if the value to report is the time difference
#    between starting these events
_METRICS = {
    'messageloop_start_time':
        ('Startup.BrowserMessageLoopStartTimeFromMainEntry',),
    'window_display_time':
        ('Startup.BrowserWindowDisplay',),
    'open_tabs_time':
        ('Startup.BrowserOpenTabs',),
    'first_non_empty_paint_time':
        ('Startup.FirstWebContents.NonEmptyPaint2',),
    'first_main_frame_load_time':
        ('Startup.FirstWebContents.MainFrameLoad2',),
    'foreground_tab_load_complete':
        (_MAIN_ENTRY_POINT, 'loadEventEnd'),
    # TODO(gabadie): Implement foreground_tab_request_start between
    # _MAIN_ENTRY_POINT and 'requestStart' once crbug.com/552472 fixed.
}

# Every event name referenced by any metric tuple.  Each tuple holds one or
# two names, so flattening all tuple elements covers exactly the names the
# original explicit loop collected.
_TRACKED_EVENT_NAMES = set(
    name for names in _METRICS.values() for name in names)
class StartupTimelineMetric(timeline_based_metric.TimelineBasedMetric):
  """Reports summary stats from important startup events."""

  def __init__(self):
    super(StartupTimelineMetric, self).__init__()

  def AddResults(self, model, _renderer_thread, interactions, results):
    # Startup metrics are derived from the whole trace rather than from
    # individual interaction records, so this hook is intentionally a no-op.
    pass

  def AddWholeTraceResults(self, model, results):
    # Without a browser process there is nothing to measure.
    browser = model.browser_process
    if not browser:
      return

    # Produce a map of events to track.
    tracked_events = {}
    for event in browser.parent.IterAllEvents(
        event_predicate=lambda event: event.name in _TRACKED_EVENT_NAMES):
      # In case of a begin/end trace event, only track the begin that contain
      # the duration.
      if event.name in tracked_events:
        continue

      tracked_events[event.name] = event

    # Generate the metric values according to the tracked events.
    for display_name, event_names in _METRICS.iteritems():
      # Skip metrics whose (first) event never appeared in the trace.
      if event_names[0] not in tracked_events:
        continue

      duration = None
      if len(event_names) == 1:
        # The single event contains the duration to report.
        duration = tracked_events[event_names[0]].duration
      elif len(event_names) == 2:
        # The duration is defined as the difference between two event starts.
        if event_names[1] not in tracked_events:
          continue

        duration = (tracked_events[event_names[1]].start -
                    tracked_events[event_names[0]].start)

      results.AddValue(value.scalar.ScalarValue(
          page=results.current_page,
          name=display_name,
          units='ms',
          value=duration,
          improvement_direction=value.improvement_direction.DOWN))
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-08-15 17:00
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial migration for the "juego" app: creates the Jugada, Jugador and
    # Partida tables. Auto-generated by Django 1.9.6 — edit with care.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Jugada',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ronda', models.IntegerField()),
                ('apuesta', models.CharField(max_length=30)),
                ('bien', models.IntegerField()),
                ('regular', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='Jugador',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=15)),
                ('incognita', models.PositiveIntegerField()),
                ('activo', models.BooleanField()),
            ],
        ),
        migrations.CreateModel(
            name='Partida',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('estado', models.CharField(choices=[('Registrando', 'Registrando'), ('Jugando', 'Jugando'), ('Finalizado', 'Finalizado')], default='Registrando', max_length=25)),
                # M2M: every player registered in the game.
                ('participantes', models.ManyToManyField(related_name='partidas', to='juego.Jugador')),
                # Nullable FK: whose turn it is; unset while registering.
                ('turno_de', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='juego.Jugador')),
            ],
        ),
    ]
| tecnicatura-villa-el-libertador/mentemaestra | juego/migrations/0001_initial.py | Python | bsd-3-clause | 1,775 |
from datetime import (
datetime,
timedelta,
)
import re
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.errors import InvalidIndexError
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_integer
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
isna,
notna,
)
import pandas._testing as tm
import pandas.core.common as com
# We pass through a TypeError raised by numpy when slicing with a float;
# tests below match against numpy's own message text.
_slice_msg = "slice indices must be integers or None or have an __index__ method"
class TestDataFrameIndexing:
    def test_getitem(self, float_frame):
        """Row-slicing keeps length/index; unknown column lookup raises KeyError."""
        # Slicing
        sl = float_frame[:20]
        assert len(sl.index) == 20
        # Column access
        for _, series in sl.items():
            assert len(series.index) == 20
            assert tm.equalContents(series.index, sl.index)
        for key, _ in float_frame._series.items():
            assert float_frame[key] is not None
        assert "random" not in float_frame
        with pytest.raises(KeyError, match="random"):
            float_frame["random"]
    def test_getitem2(self, float_frame):
        """Columns whose names contain special characters are retrievable by exact name."""
        df = float_frame.copy()
        df["$10"] = np.random.randn(len(df))
        ad = np.random.randn(len(df))
        df["@awesome_domain"] = ad
        # a quoted expression is not a column name; KeyError shows the literal key
        with pytest.raises(KeyError, match=re.escape("'df[\"$10\"]'")):
            df.__getitem__('df["$10"]')
        res = df["@awesome_domain"]
        tm.assert_numpy_array_equal(ad, res.values)
    def test_setitem_list(self, float_frame):
        """Assigning a frame to a column list aligns positionally; shape mismatches raise."""
        float_frame["E"] = "foo"
        data = float_frame[["A", "B"]]
        # swapped column list: values land by position, so A/B exchange
        float_frame[["B", "A"]] = data
        tm.assert_series_equal(float_frame["B"], data["A"], check_names=False)
        tm.assert_series_equal(float_frame["A"], data["B"], check_names=False)
        msg = "Columns must be same length as key"
        with pytest.raises(ValueError, match=msg):
            data[["A"]] = float_frame[["A", "B"]]
        newcolumndata = range(len(data.index) - 1)
        msg = (
            rf"Length of values \({len(newcolumndata)}\) "
            rf"does not match length of index \({len(data)}\)"
        )
        with pytest.raises(ValueError, match=msg):
            data["A"] = newcolumndata
def test_setitem_list2(self):
df = DataFrame(0, index=range(3), columns=["tt1", "tt2"], dtype=np.int_)
df.loc[1, ["tt1", "tt2"]] = [1, 2]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series([1, 2], df.columns, dtype=np.int_, name=1)
tm.assert_series_equal(result, expected)
df["tt1"] = df["tt2"] = "0"
df.loc[df.index[1], ["tt1", "tt2"]] = ["1", "2"]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series(["1", "2"], df.columns, name=1)
tm.assert_series_equal(result, expected)
    def test_getitem_boolean(self, mixed_float_frame, mixed_int_frame, datetime_frame):
        """Boolean indexing: array, object-dtype array, Series (with realignment), df[df > 0]."""
        # boolean indexing
        d = datetime_frame.index[10]
        indexer = datetime_frame.index > d
        indexer_obj = indexer.astype(object)
        subindex = datetime_frame.index[indexer]
        subframe = datetime_frame[indexer]
        tm.assert_index_equal(subindex, subframe.index)
        with pytest.raises(ValueError, match="Item wrong length"):
            datetime_frame[indexer[:-1]]
        subframe_obj = datetime_frame[indexer_obj]
        tm.assert_frame_equal(subframe_obj, subframe)
        with pytest.raises(ValueError, match="Boolean array expected"):
            datetime_frame[datetime_frame]
        # test that Series work
        indexer_obj = Series(indexer_obj, datetime_frame.index)
        subframe_obj = datetime_frame[indexer_obj]
        tm.assert_frame_equal(subframe_obj, subframe)
        # test that Series indexers reindex
        # we are producing a warning that since the passed boolean
        # key is not the same as the given index, we will reindex
        # not sure this is really necessary
        with tm.assert_produces_warning(UserWarning):
            indexer_obj = indexer_obj.reindex(datetime_frame.index[::-1])
            subframe_obj = datetime_frame[indexer_obj]
            tm.assert_frame_equal(subframe_obj, subframe)
        # test df[df > 0]
        for df in [
            datetime_frame,
            mixed_float_frame,
            mixed_int_frame,
        ]:
            data = df._get_numeric_data()
            bif = df[df > 0]
            # expected: numeric entries masked below zero, built column-wise
            bifw = DataFrame(
                {c: np.where(data[c] > 0, data[c], np.nan) for c in data.columns},
                index=data.index,
                columns=data.columns,
            )
            # add back other columns to compare
            for c in df.columns:
                if c not in bifw:
                    bifw[c] = df[c]
            bifw = bifw.reindex(columns=df.columns)
            tm.assert_frame_equal(bif, bifw, check_dtype=False)
            # dtype may widen to float when NaNs were inserted; otherwise preserved
            for c in df.columns:
                if bif[c].dtype != bifw[c].dtype:
                    assert bif[c].dtype == df[c].dtype
    def test_getitem_boolean_casting(self, datetime_frame):
        """df[df > 0] upcasts only the int columns that actually gain NaNs."""
        # don't upcast if we don't need to
        df = datetime_frame.copy()
        df["E"] = 1
        df["E"] = df["E"].astype("int32")
        df["E1"] = df["E"].copy()
        df["F"] = 1
        df["F"] = df["F"].astype("int64")
        df["F1"] = df["F"].copy()
        casted = df[df > 0]
        result = casted.dtypes
        # all-positive int columns keep their dtype
        expected = Series(
            [np.dtype("float64")] * 4
            + [np.dtype("int32")] * 2
            + [np.dtype("int64")] * 2,
            index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
        )
        tm.assert_series_equal(result, expected)
        # int block splitting
        df.loc[df.index[1:3], ["E1", "F1"]] = 0
        casted = df[df > 0]
        result = casted.dtypes
        # E1/F1 now contain zeros -> masked to NaN -> upcast to float64
        expected = Series(
            [np.dtype("float64")] * 4
            + [np.dtype("int32")]
            + [np.dtype("float64")]
            + [np.dtype("int64")]
            + [np.dtype("float64")],
            index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
        )
        tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"lst", [[True, False, True], [True, True, True], [False, False, False]]
)
def test_getitem_boolean_list(self, lst):
df = DataFrame(np.arange(12).reshape(3, 4))
result = df[lst]
expected = df.loc[df.index[lst]]
tm.assert_frame_equal(result, expected)
def test_getitem_boolean_iadd(self):
arr = np.random.randn(5, 5)
df = DataFrame(arr.copy(), columns=["A", "B", "C", "D", "E"])
df[df < 0] += 1
arr[arr < 0] += 1
tm.assert_almost_equal(df.values, arr)
def test_boolean_index_empty_corner(self):
# #2096
blah = DataFrame(np.empty([0, 1]), columns=["A"], index=DatetimeIndex([]))
# both of these should succeed trivially
k = np.array([], bool)
blah[k]
blah[k] = 0
    def test_getitem_ix_mixed_integer(self):
        """On a mixed int/str index, iloc is positional and loc is label-based."""
        df = DataFrame(
            np.random.randn(4, 3), index=[1, 10, "C", "E"], columns=[1, 2, 3]
        )
        result = df.iloc[:-1]
        expected = df.loc[df.index[:-1]]
        tm.assert_frame_equal(result, expected)
        result = df.loc[[1, 10]]
        expected = df.loc[Index([1, 10])]
        tm.assert_frame_equal(result, expected)
def test_getitem_ix_mixed_integer2(self):
# 11320
df = DataFrame(
{
"rna": (1.5, 2.2, 3.2, 4.5),
-1000: [11, 21, 36, 40],
0: [10, 22, 43, 34],
1000: [0, 10, 20, 30],
},
columns=["rna", -1000, 0, 1000],
)
result = df[[1000]]
expected = df.iloc[:, [3]]
tm.assert_frame_equal(result, expected)
result = df[[-1000]]
expected = df.iloc[:, [1]]
tm.assert_frame_equal(result, expected)
    def test_getattr(self, float_frame):
        """Attribute access returns the column; unknown names raise AttributeError."""
        tm.assert_series_equal(float_frame.A, float_frame["A"])
        msg = "'DataFrame' object has no attribute 'NONEXISTENT_NAME'"
        with pytest.raises(AttributeError, match=msg):
            float_frame.NONEXISTENT_NAME
def test_setattr_column(self):
df = DataFrame({"foobar": 1}, index=range(10))
df.foobar = 5
assert (df.foobar == 5).all()
    def test_setitem(self, float_frame):
        """Column assignment: partial Series (aligned/NaN-filled), ndarray, scalars, view safety."""
        # not sure what else to do here
        series = float_frame["A"][::2]
        float_frame["col5"] = series
        assert "col5" in float_frame
        assert len(series) == 15
        assert len(float_frame) == 30
        # rows missing from the partial series are filled with NaN
        exp = np.ravel(np.column_stack((series.values, [np.nan] * 15)))
        exp = Series(exp, index=float_frame.index, name="col5")
        tm.assert_series_equal(float_frame["col5"], exp)
        series = float_frame["A"]
        float_frame["col6"] = series
        tm.assert_series_equal(series, float_frame["col6"], check_names=False)
        # set ndarray
        arr = np.random.randn(len(float_frame))
        float_frame["col9"] = arr
        assert (float_frame["col9"] == arr).all()
        float_frame["col7"] = 5
        assert (float_frame["col7"] == 5).all()
        float_frame["col0"] = 3.14
        assert (float_frame["col0"] == 3.14).all()
        float_frame["col8"] = "foo"
        assert (float_frame["col8"] == "foo").all()
        # this is partially a view (e.g. some blocks are view)
        # so raise/warn
        smaller = float_frame[:2]
        msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
        with pytest.raises(com.SettingWithCopyError, match=msg):
            smaller["col10"] = ["1", "2"]
        assert smaller["col10"].dtype == np.object_
        assert (smaller["col10"] == ["1", "2"]).all()
    def test_setitem2(self):
        """Assigning NaN to an int row upcasts the frame to float (GH#4204)."""
        # dtype changing GH4204
        df = DataFrame([[0, 0]])
        df.iloc[0] = np.nan
        expected = DataFrame([[np.nan, np.nan]])
        tm.assert_frame_equal(df, expected)
        # same upcast through .loc
        df = DataFrame([[0, 0]])
        df.loc[0] = np.nan
        tm.assert_frame_equal(df, expected)
    def test_setitem_boolean(self, float_frame):
        """Boolean-mask assignment mirrors ndarray masking, including alignment cases."""
        df = float_frame.copy()
        values = float_frame.values
        df[df["A"] > 0] = 4
        values[values[:, 0] > 0] = 4
        tm.assert_almost_equal(df.values, values)
        # test that column reindexing works
        series = df["A"] == 4
        series = series.reindex(df.index[::-1])
        df[series] = 1
        values[values[:, 0] == 4] = 1
        tm.assert_almost_equal(df.values, values)
        df[df > 0] = 5
        values[values > 0] = 5
        tm.assert_almost_equal(df.values, values)
        df[df == 5] = 0
        values[values == 5] = 0
        tm.assert_almost_equal(df.values, values)
        # a df that needs alignment first
        df[df[:-1] < 0] = 2
        np.putmask(values[:-1], values[:-1] < 0, 2)
        tm.assert_almost_equal(df.values, values)
        # indexed with same shape but rows-reversed df
        df[df[::-1] == 2] = 3
        values[values == 2] = 3
        tm.assert_almost_equal(df.values, values)
        msg = "Must pass DataFrame or 2-d ndarray with boolean values only"
        with pytest.raises(TypeError, match=msg):
            df[df * 0] = 2
        # index with DataFrame
        mask = df > np.abs(df)
        expected = df.copy()
        df[df > np.abs(df)] = np.nan
        expected.values[mask.values] = np.nan
        tm.assert_frame_equal(df, expected)
        # set from DataFrame
        expected = df.copy()
        df[df > np.abs(df)] = df * 2
        np.putmask(expected.values, mask.values, df.values * 2)
        tm.assert_frame_equal(df, expected)
    def test_setitem_cast(self, float_frame):
        """Whole-column assignment replaces the column with the value's dtype."""
        float_frame["D"] = float_frame["D"].astype("i8")
        assert float_frame["D"].dtype == np.int64
        # #669, should not cast?
        # this is now set to int64, which means a replacement of the column to
        # the value dtype (and nothing to do with the existing dtype)
        float_frame["B"] = 0
        assert float_frame["B"].dtype == np.int64
        # cast if pass array of course
        float_frame["B"] = np.arange(len(float_frame))
        assert issubclass(float_frame["B"].dtype.type, np.integer)
        float_frame["foo"] = "bar"
        float_frame["foo"] = 0
        assert float_frame["foo"].dtype == np.int64
        float_frame["foo"] = "bar"
        float_frame["foo"] = 2.5
        assert float_frame["foo"].dtype == np.float64
        float_frame["something"] = 0
        assert float_frame["something"].dtype == np.int64
        float_frame["something"] = 2
        assert float_frame["something"].dtype == np.int64
        float_frame["something"] = 2.5
        assert float_frame["something"].dtype == np.float64
    def test_setitem_corner(self, float_frame):
        """Corner cases: re-adding a dropped column, non-string keys, empty-frame upcasts."""
        # corner case
        df = DataFrame({"B": [1.0, 2.0, 3.0], "C": ["a", "b", "c"]}, index=np.arange(3))
        del df["B"]
        df["B"] = [1.0, 2.0, 3.0]
        assert "B" in df
        assert len(df.columns) == 2
        df["A"] = "beginning"
        df["E"] = "foo"
        df["D"] = "bar"
        # datetime objects are legal column keys; second write replaces a new key
        df[datetime.now()] = "date"
        df[datetime.now()] = 5.0
        # what to do when empty frame with index
        dm = DataFrame(index=float_frame.index)
        dm["A"] = "foo"
        dm["B"] = "bar"
        assert len(dm.columns) == 2
        assert dm.values.dtype == np.object_
        # upcast
        dm["C"] = 1
        assert dm["C"].dtype == np.int64
        dm["E"] = 1.0
        assert dm["E"].dtype == np.float64
        # set existing column
        dm["A"] = "bar"
        assert "bar" == dm["A"][0]
        dm = DataFrame(index=np.arange(3))
        dm["A"] = 1
        dm["foo"] = "bar"
        del dm["foo"]
        dm["foo"] = "bar"
        assert dm["foo"].dtype == np.object_
        # string digits are NOT coerced to numbers
        dm["coercible"] = ["1", "2", "3"]
        assert dm["coercible"].dtype == np.object_
def test_setitem_corner2(self):
data = {
"title": ["foobar", "bar", "foobar"] + ["foobar"] * 17,
"cruft": np.random.random(20),
}
df = DataFrame(data)
ix = df[df["title"] == "bar"].index
df.loc[ix, ["title"]] = "foobar"
df.loc[ix, ["cruft"]] = 0
assert df.loc[1, "title"] == "foobar"
assert df.loc[1, "cruft"] == 0
def test_setitem_ambig(self):
# Difficulties with mixed-type data
from decimal import Decimal
# Created as float type
dm = DataFrame(index=range(3), columns=range(3))
coercable_series = Series([Decimal(1) for _ in range(3)], index=range(3))
uncoercable_series = Series(["foo", "bzr", "baz"], index=range(3))
dm[0] = np.ones(3)
assert len(dm.columns) == 3
dm[1] = coercable_series
assert len(dm.columns) == 3
dm[2] = uncoercable_series
assert len(dm.columns) == 3
assert dm[2].dtype == np.object_
    def test_setitem_None(self, float_frame):
        """None is a legal column label for setitem/getitem/loc (GH#766)."""
        # GH #766
        float_frame[None] = float_frame["A"]
        tm.assert_series_equal(
            float_frame.iloc[:, -1], float_frame["A"], check_names=False
        )
        tm.assert_series_equal(
            float_frame.loc[:, None], float_frame["A"], check_names=False
        )
        tm.assert_series_equal(float_frame[None], float_frame["A"], check_names=False)
        # repr must not choke on the None label
        repr(float_frame)
def test_loc_setitem_boolean_mask_allfalse(self):
# GH 9596
df = DataFrame(
{"a": ["1", "2", "3"], "b": ["11", "22", "33"], "c": ["111", "222", "333"]}
)
result = df.copy()
result.loc[result.b.isna(), "a"] = result.a
tm.assert_frame_equal(result, df)
def test_getitem_fancy_slice_integers_step(self):
df = DataFrame(np.random.randn(10, 5))
# this is OK
result = df.iloc[:8:2] # noqa
df.iloc[:8:2] = np.nan
assert isna(df.iloc[:8:2]).values.all()
    def test_getitem_setitem_integer_slice_keyerrors(self):
        """Label slices on a monotonic int index work; non-monotonic raises KeyError."""
        df = DataFrame(np.random.randn(10, 5), index=range(0, 20, 2))
        # this is OK
        cp = df.copy()
        cp.iloc[4:10] = 0
        assert (cp.iloc[4:10] == 0).values.all()
        # so is this
        cp = df.copy()
        cp.iloc[3:11] = 0
        assert (cp.iloc[3:11] == 0).values.all()
        result = df.iloc[2:6]
        result2 = df.loc[3:11]
        expected = df.reindex([4, 6, 8, 10])
        tm.assert_frame_equal(result, expected)
        tm.assert_frame_equal(result2, expected)
        # non-monotonic, raise KeyError
        df2 = df.iloc[list(range(5)) + list(range(5, 10))[::-1]]
        with pytest.raises(KeyError, match=r"^3$"):
            df2.loc[3:11]
        with pytest.raises(KeyError, match=r"^3$"):
            df2.loc[3:11] = 0
    @td.skip_array_manager_invalid_test  # already covered in test_iloc_col_slice_view
    def test_fancy_getitem_slice_mixed(self, float_frame, float_string_frame):
        """Column slices of a single-block frame are views; writing to them raises."""
        sliced = float_string_frame.iloc[:, -3:]
        assert sliced["D"].dtype == np.float64
        # get view with single block
        # setting it triggers setting with copy
        sliced = float_frame.iloc[:, -3:]
        assert np.shares_memory(sliced["C"]._values, float_frame["C"]._values)
        msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
        with pytest.raises(com.SettingWithCopyError, match=msg):
            sliced.loc[:, "C"] = 4.0
        # the write went through to the parent despite the error
        assert (float_frame["C"] == 4).all()
    def test_getitem_setitem_non_ix_labels(self):
        """Datetime label slices via loc and [] are inclusive of the end label."""
        df = tm.makeTimeDataFrame()
        start, end = df.index[[5, 10]]
        result = df.loc[start:end]
        result2 = df[start:end]
        expected = df[5:11]
        tm.assert_frame_equal(result, expected)
        tm.assert_frame_equal(result2, expected)
        result = df.copy()
        result.loc[start:end] = 0
        result2 = df.copy()
        result2[start:end] = 0
        expected = df.copy()
        expected[5:11] = 0
        tm.assert_frame_equal(result, expected)
        tm.assert_frame_equal(result2, expected)
def test_ix_multi_take(self):
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index == 0, :]
xp = df.reindex([0])
tm.assert_frame_equal(rs, xp)
# GH#1321
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index == 0, df.columns == 1]
xp = df.reindex(index=[0], columns=[1])
tm.assert_frame_equal(rs, xp)
    def test_getitem_fancy_scalar(self, float_frame):
        """Scalar .loc[row, col] access agrees with column-Series lookup."""
        f = float_frame
        ix = f.loc
        # individual value
        for col in f.columns:
            ts = f[col]
            for idx in f.index[::5]:
                assert ix[idx, col] == ts[idx]
    @td.skip_array_manager_invalid_test  # TODO(ArrayManager) rewrite not using .values
    def test_setitem_fancy_scalar(self, float_frame):
        """Scalar .loc[row, col] assignment matches direct .values writes."""
        f = float_frame
        expected = float_frame.copy()
        ix = f.loc
        # individual value
        for j, col in enumerate(f.columns):
            ts = f[col]  # noqa
            for idx in f.index[::5]:
                i = f.index.get_loc(idx)
                val = np.random.randn()
                expected.values[i, j] = val
                ix[idx, col] = val
        tm.assert_frame_equal(f, expected)
    def test_getitem_fancy_boolean(self, float_frame):
        """Boolean masks in .loc select rows and/or columns like reindex."""
        f = float_frame
        ix = f.loc
        expected = f.reindex(columns=["B", "D"])
        result = ix[:, [False, True, False, True]]
        tm.assert_frame_equal(result, expected)
        expected = f.reindex(index=f.index[5:10], columns=["B", "D"])
        result = ix[f.index[5:10], [False, True, False, True]]
        tm.assert_frame_equal(result, expected)
        boolvec = f.index > f.index[7]
        expected = f.reindex(index=f.index[boolvec])
        result = ix[boolvec]
        tm.assert_frame_equal(result, expected)
        result = ix[boolvec, :]
        tm.assert_frame_equal(result, expected)
        result = ix[boolvec, f.columns[2:]]
        expected = f.reindex(index=f.index[boolvec], columns=["C", "D"])
        tm.assert_frame_equal(result, expected)
    @td.skip_array_manager_invalid_test  # TODO(ArrayManager) rewrite not using .values
    def test_setitem_fancy_boolean(self, float_frame):
        """Boolean-mask .loc assignment matches direct .values masking."""
        # from 2d, set with booleans
        frame = float_frame.copy()
        expected = float_frame.copy()
        mask = frame["A"] > 0
        frame.loc[mask] = 0.0
        expected.values[mask.values] = 0.0
        tm.assert_frame_equal(frame, expected)
        frame = float_frame.copy()
        expected = float_frame.copy()
        frame.loc[mask, ["A", "B"]] = 0.0
        expected.values[mask.values, :2] = 0.0
        tm.assert_frame_equal(frame, expected)
    def test_getitem_fancy_ints(self, float_frame):
        """Integer-list iloc selection matches label-based loc on the same positions."""
        result = float_frame.iloc[[1, 4, 7]]
        expected = float_frame.loc[float_frame.index[[1, 4, 7]]]
        tm.assert_frame_equal(result, expected)
        result = float_frame.iloc[:, [2, 0, 1]]
        expected = float_frame.loc[:, float_frame.columns[[2, 0, 1]]]
        tm.assert_frame_equal(result, expected)
    def test_getitem_setitem_boolean_misaligned(self, float_frame):
        """A misaligned boolean Series is realigned by label before masking."""
        # boolean index misaligned labels
        mask = float_frame["A"][::-1] > 1
        result = float_frame.loc[mask]
        expected = float_frame.loc[mask[::-1]]
        tm.assert_frame_equal(result, expected)
        cp = float_frame.copy()
        expected = float_frame.copy()
        cp.loc[mask] = 0
        expected.loc[mask] = 0
        tm.assert_frame_equal(cp, expected)
    def test_getitem_setitem_boolean_multi(self):
        """Simultaneous boolean masks on rows and columns get and set correctly."""
        df = DataFrame(np.random.randn(3, 2))
        # get
        k1 = np.array([True, False, True])
        k2 = np.array([False, True])
        result = df.loc[k1, k2]
        expected = df.loc[[0, 2], [1]]
        tm.assert_frame_equal(result, expected)
        expected = df.copy()
        df.loc[np.array([True, False, True]), np.array([False, True])] = 5
        expected.loc[[0, 2], [1]] = 5
        tm.assert_frame_equal(df, expected)
    def test_getitem_setitem_float_labels(self):
        """Float index: label slices are inclusive; positional float slicing raises."""
        index = Index([1.5, 2, 3, 4, 5])
        df = DataFrame(np.random.randn(5, 5), index=index)
        result = df.loc[1.5:4]
        expected = df.reindex([1.5, 2, 3, 4])
        tm.assert_frame_equal(result, expected)
        assert len(result) == 4
        result = df.loc[4:5]
        expected = df.reindex([4, 5])  # reindex with int
        tm.assert_frame_equal(result, expected, check_index_type=False)
        assert len(result) == 2
        result = df.loc[4:5]
        expected = df.reindex([4.0, 5.0])  # reindex with float
        tm.assert_frame_equal(result, expected)
        assert len(result) == 2
        # loc_float changes this to work properly
        result = df.loc[1:2]
        expected = df.iloc[0:2]
        tm.assert_frame_equal(result, expected)
        df.loc[1:2] = 0
        result = df[1:2]
        assert (result == 0).all().all()
        # #2727
        index = Index([1.0, 2.5, 3.5, 4.5, 5.0])
        df = DataFrame(np.random.randn(5, 5), index=index)
        # positional slicing only via iloc!
        msg = (
            "cannot do positional indexing on Float64Index with "
            r"these indexers \[1.0\] of type float"
        )
        with pytest.raises(TypeError, match=msg):
            df.iloc[1.0:5]
        result = df.iloc[4:5]
        expected = df.reindex([5.0])
        tm.assert_frame_equal(result, expected)
        assert len(result) == 1
        cp = df.copy()
        with pytest.raises(TypeError, match=_slice_msg):
            cp.iloc[1.0:5] = 0
        with pytest.raises(TypeError, match=msg):
            result = cp.iloc[1.0:5] == 0
        # NOTE(review): the assignment inside the raises-block never runs, so
        # `result` here is still the df.iloc[4:5] frame from above — confirm
        # this assert is intentional rather than a stale-variable leftover.
        assert result.values.all()
        assert (cp.iloc[0:1] == df.iloc[0:1]).values.all()
        cp = df.copy()
        cp.iloc[4:5] = 0
        assert (cp.iloc[4:5] == 0).values.all()
        assert (cp.iloc[0:4] == df.iloc[0:4]).values.all()
        # float slicing
        result = df.loc[1.0:5]
        expected = df
        tm.assert_frame_equal(result, expected)
        assert len(result) == 5
        result = df.loc[1.1:5]
        expected = df.reindex([2.5, 3.5, 4.5, 5.0])
        tm.assert_frame_equal(result, expected)
        assert len(result) == 4
        result = df.loc[4.51:5]
        expected = df.reindex([5.0])
        tm.assert_frame_equal(result, expected)
        assert len(result) == 1
        result = df.loc[1.0:5.0]
        expected = df.reindex([1.0, 2.5, 3.5, 4.5, 5.0])
        tm.assert_frame_equal(result, expected)
        assert len(result) == 5
        cp = df.copy()
        cp.loc[1.0:5.0] = 0
        result = cp.loc[1.0:5.0]
        assert (result == 0).values.all()
    def test_setitem_single_column_mixed_datetime(self):
        """Scalar writes into a datetime column: iNaT stays an int, NaN becomes NaT."""
        df = DataFrame(
            np.random.randn(5, 3),
            index=["a", "b", "c", "d", "e"],
            columns=["foo", "bar", "baz"],
        )
        df["timestamp"] = Timestamp("20010102")
        # check our dtypes
        result = df.dtypes
        expected = Series(
            [np.dtype("float64")] * 3 + [np.dtype("datetime64[ns]")],
            index=["foo", "bar", "baz", "timestamp"],
        )
        tm.assert_series_equal(result, expected)
        # GH#16674 iNaT is treated as an integer when given by the user
        df.loc["b", "timestamp"] = iNaT
        assert not isna(df.loc["b", "timestamp"])
        assert df["timestamp"].dtype == np.object_
        assert df.loc["b", "timestamp"] == iNaT
        # allow this syntax (as of GH#3216)
        df.loc["c", "timestamp"] = np.nan
        assert isna(df.loc["c", "timestamp"])
        # allow this syntax
        df.loc["d", :] = np.nan
        assert not isna(df.loc["c", :]).all()
    def test_setitem_mixed_datetime(self):
        """Mixed scalar/datetime/ndarray loc assignments build the expected object column (GH#9336)."""
        # GH 9336
        expected = DataFrame(
            {
                "a": [0, 0, 0, 0, 13, 14],
                "b": [
                    datetime(2012, 1, 1),
                    1,
                    "x",
                    "y",
                    datetime(2013, 1, 1),
                    datetime(2014, 1, 1),
                ],
            }
        )
        df = DataFrame(0, columns=list("ab"), index=range(6))
        df["b"] = pd.NaT
        df.loc[0, "b"] = datetime(2012, 1, 1)
        df.loc[1, "b"] = 1
        df.loc[[2, 3], "b"] = "x", "y"
        A = np.array(
            [
                [13, np.datetime64("2013-01-01T00:00:00")],
                [14, np.datetime64("2014-01-01T00:00:00")],
            ]
        )
        df.loc[[4, 5], ["a", "b"]] = A
        tm.assert_frame_equal(df, expected)
    def test_setitem_frame_float(self, float_frame):
        """Assigning raw ndarray values through .loc lands them positionally."""
        piece = float_frame.loc[float_frame.index[:2], ["A", "B"]]
        float_frame.loc[float_frame.index[-2] :, ["A", "B"]] = piece.values
        result = float_frame.loc[float_frame.index[-2:], ["A", "B"]].values
        expected = piece.values
        tm.assert_almost_equal(result, expected)
    def test_setitem_frame_mixed(self, float_string_frame):
        """Frame-to-frame .loc assignment with already-aligned labels (GH#3216)."""
        # GH 3216
        # already aligned
        f = float_string_frame.copy()
        piece = DataFrame(
            [[1.0, 2.0], [3.0, 4.0]], index=f.index[0:2], columns=["A", "B"]
        )
        key = (f.index[slice(None, 2)], ["A", "B"])
        f.loc[key] = piece
        tm.assert_almost_equal(f.loc[f.index[0:2], ["A", "B"]].values, piece.values)
    def test_setitem_frame_mixed_rows_unaligned(self, float_string_frame):
        """Rows of the value frame not present in the key are ignored (GH#3216)."""
        # GH#3216 rows unaligned
        f = float_string_frame.copy()
        piece = DataFrame(
            [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]],
            index=list(f.index[0:2]) + ["foo", "bar"],
            columns=["A", "B"],
        )
        key = (f.index[slice(None, 2)], ["A", "B"])
        f.loc[key] = piece
        # only the two aligned rows take effect
        tm.assert_almost_equal(
            f.loc[f.index[0:2:], ["A", "B"]].values, piece.values[0:2]
        )
    def test_setitem_frame_mixed_key_unaligned(self, float_string_frame):
        """Columns in the key missing from the value frame are filled with NaN (GH#3216)."""
        # GH#3216 key is unaligned with values
        f = float_string_frame.copy()
        piece = f.loc[f.index[:2], ["A"]]
        piece.index = f.index[-2:]
        key = (f.index[slice(-2, None)], ["A", "B"])
        f.loc[key] = piece
        piece["B"] = np.nan
        tm.assert_almost_equal(f.loc[f.index[-2:], ["A", "B"]].values, piece.values)
    def test_setitem_frame_mixed_ndarray(self, float_string_frame):
        """A plain ndarray value is assigned positionally, no label alignment (GH#3216)."""
        # GH#3216 ndarray
        f = float_string_frame.copy()
        piece = float_string_frame.loc[f.index[:2], ["A", "B"]]
        key = (f.index[slice(-2, None)], ["A", "B"])
        f.loc[key] = piece.values
        tm.assert_almost_equal(f.loc[f.index[-2:], ["A", "B"]].values, piece.values)
    def test_setitem_frame_upcast(self):
        """loc-assigning floats into int columns upcasts those columns to float."""
        # needs upcasting
        df = DataFrame([[1, 2, "foo"], [3, 4, "bar"]], columns=["A", "B", "C"])
        df2 = df.copy()
        df2.loc[:, ["A", "B"]] = df.loc[:, ["A", "B"]] + 0.5
        expected = df.reindex(columns=["A", "B"])
        expected += 0.5
        expected["C"] = df["C"]
        tm.assert_frame_equal(df2, expected)
    def test_setitem_frame_align(self, float_frame):
        """A DataFrame value is aligned on both axes before loc assignment."""
        piece = float_frame.loc[float_frame.index[:2], ["A", "B"]]
        piece.index = float_frame.index[-2:]
        piece.columns = ["A", "B"]
        float_frame.loc[float_frame.index[-2:], ["A", "B"]] = piece
        result = float_frame.loc[float_frame.index[-2:], ["A", "B"]].values
        expected = piece.values
        tm.assert_almost_equal(result, expected)
def test_getitem_setitem_ix_duplicates(self):
# #1201
df = DataFrame(np.random.randn(5, 3), index=["foo", "foo", "bar", "baz", "bar"])
result = df.loc["foo"]
expected = df[:2]
tm.assert_frame_equal(result, expected)
result = df.loc["bar"]
expected = df.iloc[[2, 4]]
tm.assert_frame_equal(result, expected)
result = df.loc["baz"]
expected = df.iloc[3]
tm.assert_series_equal(result, expected)
    def test_getitem_ix_boolean_duplicates_multiple(self):
        """List and boolean .loc selection behave on a duplicated index (GH#1201)."""
        # #1201
        df = DataFrame(np.random.randn(5, 3), index=["foo", "foo", "bar", "baz", "bar"])
        result = df.loc[["bar"]]
        exp = df.iloc[[2, 4]]
        tm.assert_frame_equal(result, exp)
        result = df.loc[df[1] > 0]
        exp = df[df[1] > 0]
        tm.assert_frame_equal(result, exp)
        result = df.loc[df[0] > 0]
        exp = df[df[0] > 0]
        tm.assert_frame_equal(result, exp)
    @pytest.mark.parametrize("bool_value", [True, False])
    def test_getitem_setitem_ix_bool_keyerror(self, bool_value):
        """Boolean labels on a non-boolean index raise KeyError for get and set (GH#2199)."""
        # #2199
        df = DataFrame({"a": [1, 2, 3]})
        message = f"{bool_value}: boolean label can not be used without a boolean index"
        with pytest.raises(KeyError, match=message):
            df.loc[bool_value]
        msg = "cannot use a single bool to index into setitem"
        with pytest.raises(KeyError, match=msg):
            df.loc[bool_value] = 0
    # TODO: rename? remove?
    def test_single_element_ix_dont_upcast(self, float_frame):
        """Scalar .loc access on an int column returns an int, not a float (GH#11617)."""
        float_frame["E"] = 1
        assert issubclass(float_frame["E"].dtype.type, (int, np.integer))
        result = float_frame.loc[float_frame.index[5], "E"]
        assert is_integer(result)
        # GH 11617
        df = DataFrame({"a": [1.23]})
        df["b"] = 666
        result = df.loc[0, "b"]
        assert is_integer(result)
        expected = Series([666], [0], name="b")
        result = df.loc[[0], "b"]
        tm.assert_series_equal(result, expected)
def test_iloc_row(self):
df = DataFrame(np.random.randn(10, 4), index=range(0, 20, 2))
result = df.iloc[1]
exp = df.loc[2]
tm.assert_series_equal(result, exp)
result = df.iloc[2]
exp = df.loc[4]
tm.assert_series_equal(result, exp)
# slice
result = df.iloc[slice(4, 8)]
expected = df.loc[8:14]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[1, 2, 4, 6]]
expected = df.reindex(df.index[[1, 2, 4, 6]])
tm.assert_frame_equal(result, expected)
    def test_iloc_row_slice_view(self, using_array_manager):
        """An iloc row-slice is a view; writing through it raises SettingWithCopy."""
        df = DataFrame(np.random.randn(10, 4), index=range(0, 20, 2))
        original = df.copy()
        # verify slice is view
        # setting it makes it raise/warn
        subset = df.iloc[slice(4, 8)]
        assert np.shares_memory(df[2], subset[2])
        msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
        with pytest.raises(com.SettingWithCopyError, match=msg):
            subset.loc[:, 2] = 0.0
        exp_col = original[2].copy()
        # TODO(ArrayManager) verify it is expected that the original didn't change
        if not using_array_manager:
            exp_col._values[4:8] = 0.0
        tm.assert_series_equal(df[2], exp_col)
    def test_iloc_col(self):
        """iloc column selection by position: scalar, slice, and list of ints."""
        df = DataFrame(np.random.randn(4, 10), columns=range(0, 20, 2))
        result = df.iloc[:, 1]
        exp = df.loc[:, 2]
        tm.assert_series_equal(result, exp)
        result = df.iloc[:, 2]
        exp = df.loc[:, 4]
        tm.assert_series_equal(result, exp)
        # slice
        result = df.iloc[:, slice(4, 8)]
        expected = df.loc[:, 8:14]
        tm.assert_frame_equal(result, expected)
        # list of integers
        result = df.iloc[:, [1, 2, 4, 6]]
        expected = df.reindex(columns=df.columns[[1, 2, 4, 6]])
        tm.assert_frame_equal(result, expected)
    def test_iloc_col_slice_view(self, using_array_manager):
        """Column-slice view semantics differ between BlockManager and ArrayManager."""
        df = DataFrame(np.random.randn(4, 10), columns=range(0, 20, 2))
        original = df.copy()
        subset = df.iloc[:, slice(4, 8)]
        if not using_array_manager:
            # verify slice is view
            assert np.shares_memory(df[8]._values, subset[8]._values)
            # and that we are setting a copy
            msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
            with pytest.raises(com.SettingWithCopyError, match=msg):
                subset.loc[:, 8] = 0.0
            assert (df[8] == 0).all()
        else:
            # TODO(ArrayManager) verify this is the desired behaviour
            subset[8] = 0.0
            # subset changed
            assert (subset[8] == 0).all()
            # but df itself did not change (setitem replaces full column)
            tm.assert_frame_equal(df, original)
def test_loc_duplicates(self):
# gh-17105
# insert a duplicate element to the index
trange = date_range(
start=Timestamp(year=2017, month=1, day=1),
end=Timestamp(year=2017, month=1, day=5),
)
trange = trange.insert(loc=5, item=Timestamp(year=2017, month=1, day=5))
df = DataFrame(0, index=trange, columns=["A", "B"])
bool_idx = np.array([False, False, False, False, False, True])
# assignment
df.loc[trange[bool_idx], "A"] = 6
expected = DataFrame(
{"A": [0, 0, 0, 0, 6, 6], "B": [0, 0, 0, 0, 0, 0]}, index=trange
)
tm.assert_frame_equal(df, expected)
# in-place
df = DataFrame(0, index=trange, columns=["A", "B"])
df.loc[trange[bool_idx], "A"] += 6
tm.assert_frame_equal(df, expected)
    def test_setitem_with_unaligned_tz_aware_datetime_column(self):
        """Reassigning a shuffled tz-aware column realigns and keeps the timezone (GH#12981)."""
        # GH 12981
        # Assignment of unaligned offset-aware datetime series.
        # Make sure timezone isn't lost
        column = Series(date_range("2015-01-01", periods=3, tz="utc"), name="dates")
        df = DataFrame({"dates": column})
        df["dates"] = column[[1, 0, 2]]
        tm.assert_series_equal(df["dates"], column)
        df = DataFrame({"dates": column})
        df.loc[[0, 1, 2], "dates"] = column[[1, 0, 2]]
        tm.assert_series_equal(df["dates"], column)
    def test_loc_setitem_datetimelike_with_inference(self):
        """New columns created via loc infer timedelta64/datetime64 dtypes (GH#7592)."""
        # GH 7592
        # assignment of timedeltas with NaT
        one_hour = timedelta(hours=1)
        df = DataFrame(index=date_range("20130101", periods=4))
        df["A"] = np.array([1 * one_hour] * 4, dtype="m8[ns]")
        df.loc[:, "B"] = np.array([2 * one_hour] * 4, dtype="m8[ns]")
        df.loc[df.index[:3], "C"] = np.array([3 * one_hour] * 3, dtype="m8[ns]")
        df.loc[:, "D"] = np.array([4 * one_hour] * 4, dtype="m8[ns]")
        df.loc[df.index[:3], "E"] = np.array([5 * one_hour] * 3, dtype="m8[ns]")
        df["F"] = np.timedelta64("NaT")
        df.loc[df.index[:-1], "F"] = np.array([6 * one_hour] * 3, dtype="m8[ns]")
        df.loc[df.index[-3] :, "G"] = date_range("20130101", periods=3)
        df["H"] = np.datetime64("NaT")
        result = df.dtypes
        expected = Series(
            [np.dtype("timedelta64[ns]")] * 6 + [np.dtype("datetime64[ns]")] * 2,
            index=list("ABCDEFGH"),
        )
        tm.assert_series_equal(result, expected)
    def test_getitem_boolean_indexing_mixed(self):
        """Mask assignment works on numeric frames but raises once a str column is present."""
        df = DataFrame(
            {
                0: {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},
                1: {
                    35: np.nan,
                    40: 0.32632316859446198,
                    43: np.nan,
                    49: 0.32632316859446198,
                    50: 0.39114724480578139,
                },
                2: {
                    35: np.nan,
                    40: np.nan,
                    43: 0.29012581014105987,
                    49: np.nan,
                    50: np.nan,
                },
                3: {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},
                4: {
                    35: 0.34215328467153283,
                    40: np.nan,
                    43: np.nan,
                    49: np.nan,
                    50: np.nan,
                },
                "y": {35: 0, 40: 0, 43: 0, 49: 0, 50: 1},
            }
        )
        # mixed int/float ok
        df2 = df.copy()
        df2[df2 > 0.3] = 1
        expected = df.copy()
        expected.loc[40, 1] = 1
        expected.loc[49, 1] = 1
        expected.loc[50, 1] = 1
        expected.loc[35, 4] = 1
        tm.assert_frame_equal(df2, expected)
        # once a string column exists, the > comparison itself raises
        df["foo"] = "test"
        msg = "not supported between instances|unorderable types"
        with pytest.raises(TypeError, match=msg):
            df[df > 0.3] = 1
def test_type_error_multiindex(self):
    # See gh-12218
    # A raw 2-tuple with a slice is rejected by __getitem__, while the
    # equivalent .loc selection and a plain column-tuple lookup both work.
    mi = MultiIndex.from_product([["x", "y"], [0, 1]], names=[None, "c"])
    dg = DataFrame(
        [[1, 1, 2, 2], [3, 3, 4, 4]], columns=mi, index=Index([0, 1], name="i")
    )
    with pytest.raises(InvalidIndexError, match="slice"):
        dg[:, 0]
    index = Index(range(2), name="i")
    columns = MultiIndex(
        levels=[["x", "y"], [0, 1]], codes=[[0, 1], [0, 0]], names=[None, "c"]
    )
    expected = DataFrame([[1, 2], [3, 4]], columns=columns, index=index)
    result = dg.loc[:, (slice(None), 0)]
    tm.assert_frame_equal(result, expected)
    name = ("x", 0)
    index = Index(range(2), name="i")
    expected = Series([1, 3], index=index, name=name)
    result = dg["x", 0]
    tm.assert_series_equal(result, expected)
def test_getitem_interval_index_partial_indexing(self):
    # GH#36490
    # A scalar that falls inside an interval selects that interval's
    # column, via both __getitem__ and .loc.
    df = DataFrame(
        np.ones((3, 4)), columns=pd.IntervalIndex.from_breaks(np.arange(5))
    )
    expected = df.iloc[:, 0]
    res = df[0.5]
    tm.assert_series_equal(res, expected)
    res = df.loc[:, 0.5]
    tm.assert_series_equal(res, expected)
def test_setitem_array_as_cell_value(self):
    # GH#43422
    # Assigning a dict of ndarrays via .loc stores each array as a single
    # cell value in the object-dtype columns, not as broadcast rows.
    df = DataFrame(columns=["a", "b"], dtype=object)
    df.loc[0] = {"a": np.zeros((2,)), "b": np.zeros((2, 2))}
    expected = DataFrame({"a": [np.zeros((2,))], "b": [np.zeros((2, 2))]})
    tm.assert_frame_equal(df, expected)
def test_iloc_setitem_nullable_2d_values(self):
    # Setting 2D values (reversed ndarray / PandasArray / frame slice)
    # into a single-column nullable frame aligns back to the original.
    df = DataFrame({"A": [1, 2, 3]}, dtype="Int64")
    orig = df.copy()
    df.loc[:] = df.values[:, ::-1]
    tm.assert_frame_equal(df, orig)
    # NOTE: pd.core.arrays.PandasArray is internal API.
    df.loc[:] = pd.core.arrays.PandasArray(df.values[:, ::-1])
    tm.assert_frame_equal(df, orig)
    df.iloc[:] = df.iloc[:, :]
    tm.assert_frame_equal(df, orig)
@pytest.mark.parametrize(
    "null", [pd.NaT, pd.NaT.to_numpy("M8[ns]"), pd.NaT.to_numpy("m8[ns]")]
)
def test_setting_mismatched_na_into_nullable_fails(
    self, null, any_numeric_ea_dtype
):
    # GH#44514 don't cast mismatched nulls to pd.NA
    # Datetime-like NaT must raise TypeError at every setitem level:
    # the EA itself, the Series, Series.iloc, df.iloc, and multi-block.
    df = DataFrame({"A": [1, 2, 3]}, dtype=any_numeric_ea_dtype)
    ser = df["A"]
    arr = ser._values
    # Acceptable messages vary by dtype and pandas version.
    msg = "|".join(
        [
            r"timedelta64\[ns\] cannot be converted to (Floating|Integer)Dtype",
            r"datetime64\[ns\] cannot be converted to (Floating|Integer)Dtype",
            "'values' contains non-numeric NA",
            r"Invalid value '.*' for dtype (U?Int|Float)\d{1,2}",
        ]
    )
    with pytest.raises(TypeError, match=msg):
        arr[0] = null
    with pytest.raises(TypeError, match=msg):
        arr[:2] = [null, null]
    with pytest.raises(TypeError, match=msg):
        ser[0] = null
    with pytest.raises(TypeError, match=msg):
        ser[:2] = [null, null]
    with pytest.raises(TypeError, match=msg):
        ser.iloc[0] = null
    with pytest.raises(TypeError, match=msg):
        ser.iloc[:2] = [null, null]
    with pytest.raises(TypeError, match=msg):
        df.iloc[0, 0] = null
    with pytest.raises(TypeError, match=msg):
        df.iloc[:2, 0] = [null, null]
    # Multi-Block
    df2 = df.copy()
    df2["B"] = ser.copy()
    with pytest.raises(TypeError, match=msg):
        df2.iloc[0, 0] = null
    with pytest.raises(TypeError, match=msg):
        df2.iloc[:2, 0] = [null, null]
def test_loc_expand_empty_frame_keep_index_name(self):
    # GH#45621
    # Enlarging an empty frame via .loc must preserve the index's name.
    df = DataFrame(columns=["b"], index=Index([], name="a"))
    df.loc[0] = 1
    expected = DataFrame({"b": [1]}, index=Index([0], name="a"))
    tm.assert_frame_equal(df, expected)
class TestDataFrameIndexingUInt64:
    """Indexing/setitem behavior specific to uint64 frames."""

    def test_setitem(self, uint64_frame):
        df = uint64_frame
        idx = df["A"].rename("foo")
        # setitem
        assert "C" not in df.columns
        df["C"] = idx
        tm.assert_series_equal(df["C"], Series(idx, name="C"))
        assert "D" not in df.columns
        df["D"] = "foo"
        df["D"] = idx
        tm.assert_series_equal(df["D"], Series(idx, name="D"))
        del df["D"]
        # With NaN: because uint64 has no NaN element,
        # the column should be cast to object.
        df2 = df.copy()
        df2.iloc[1, 1] = pd.NaT
        df2.iloc[1, 2] = pd.NaT
        result = df2["B"]
        tm.assert_series_equal(notna(result), Series([True, False, True], name="B"))
        tm.assert_series_equal(
            df2.dtypes,
            Series(
                [np.dtype("uint64"), np.dtype("O"), np.dtype("O")],
                index=["A", "B", "C"],
            ),
        )
def test_object_casting_indexing_wraps_datetimelike(using_array_manager):
    # GH#31649, check the indexing methods all the way down the stack
    # Row selection from a mixed frame must wrap datetime64/timedelta64
    # values as Timestamp/Timedelta, at every layer down to the Block.
    df = DataFrame(
        {
            "A": [1, 2],
            "B": date_range("2000", periods=2),
            "C": pd.timedelta_range("1 Day", periods=2),
        }
    )
    ser = df.loc[0]
    assert isinstance(ser.values[1], Timestamp)
    assert isinstance(ser.values[2], pd.Timedelta)
    ser = df.iloc[0]
    assert isinstance(ser.values[1], Timestamp)
    assert isinstance(ser.values[2], pd.Timedelta)
    ser = df.xs(0, axis=0)
    assert isinstance(ser.values[1], Timestamp)
    assert isinstance(ser.values[2], pd.Timedelta)
    if using_array_manager:
        # remainder of the test checking BlockManager internals
        return
    mgr = df._mgr
    mgr._rebuild_blknos_and_blklocs()
    arr = mgr.fast_xs(0)
    assert isinstance(arr[1], Timestamp)
    assert isinstance(arr[2], pd.Timedelta)
    blk = mgr.blocks[mgr.blknos[1]]
    assert blk.dtype == "M8[ns]"  # we got the right block
    val = blk.iget((0, 0))
    assert isinstance(val, Timestamp)
    blk = mgr.blocks[mgr.blknos[2]]
    assert blk.dtype == "m8[ns]"  # we got the right block
    val = blk.iget((0, 0))
    assert isinstance(val, pd.Timedelta)
# Error-message patterns for categorical setitem failures, used by the
# TestLocILocDataFrameCategorical tests: msg1 for a value outside the
# categories, msg2 for a Categorical with non-identical categories.
msg1 = r"Cannot setitem on a Categorical with a new category( \(.*\))?, set the"
msg2 = "Cannot set a Categorical with another, without identical categories"
class TestLocILocDataFrameCategorical:
    """Setitem into a categorical column via loc/iloc/at/iat.

    Fixtures build a frame with a 'cats' Categorical column (categories
    ["a", "b"]) plus the expected frames after various mutations.
    """

    @pytest.fixture
    def orig(self):
        cats = Categorical(["a", "a", "a", "a", "a", "a", "a"], categories=["a", "b"])
        idx = Index(["h", "i", "j", "k", "l", "m", "n"])
        values = [1, 1, 1, 1, 1, 1, 1]
        orig = DataFrame({"cats": cats, "values": values}, index=idx)
        return orig

    @pytest.fixture
    def exp_single_row(self):
        # The expected values if we change a single row
        cats1 = Categorical(["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
        idx1 = Index(["h", "i", "j", "k", "l", "m", "n"])
        values1 = [1, 1, 2, 1, 1, 1, 1]
        exp_single_row = DataFrame({"cats": cats1, "values": values1}, index=idx1)
        return exp_single_row

    @pytest.fixture
    def exp_multi_row(self):
        # assign multiple rows (mixed values) (-> array) -> exp_multi_row
        # changed multiple rows
        cats2 = Categorical(["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
        idx2 = Index(["h", "i", "j", "k", "l", "m", "n"])
        values2 = [1, 1, 2, 2, 1, 1, 1]
        exp_multi_row = DataFrame({"cats": cats2, "values": values2}, index=idx2)
        return exp_multi_row

    @pytest.fixture
    def exp_parts_cats_col(self):
        # changed part of the cats column
        cats3 = Categorical(["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
        idx3 = Index(["h", "i", "j", "k", "l", "m", "n"])
        values3 = [1, 1, 1, 1, 1, 1, 1]
        exp_parts_cats_col = DataFrame({"cats": cats3, "values": values3}, index=idx3)
        return exp_parts_cats_col

    @pytest.fixture
    def exp_single_cats_value(self):
        # changed single value in cats col
        cats4 = Categorical(["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
        idx4 = Index(["h", "i", "j", "k", "l", "m", "n"])
        values4 = [1, 1, 1, 1, 1, 1, 1]
        exp_single_cats_value = DataFrame(
            {"cats": cats4, "values": values4}, index=idx4
        )
        return exp_single_cats_value

    @pytest.mark.parametrize("indexer", [tm.loc, tm.iloc])
    def test_loc_iloc_setitem_list_of_lists(self, orig, exp_multi_row, indexer):
        # - assign multiple rows (mixed values) -> exp_multi_row
        df = orig.copy()
        key = slice(2, 4)
        if indexer is tm.loc:
            key = slice("j", "k")
        indexer(df)[key, :] = [["b", 2], ["b", 2]]
        tm.assert_frame_equal(df, exp_multi_row)
        df = orig.copy()
        # "c" is not among the categories, so assignment must raise.
        with pytest.raises(TypeError, match=msg1):
            indexer(df)[key, :] = [["c", 2], ["c", 2]]

    @pytest.mark.parametrize("indexer", [tm.loc, tm.iloc, tm.at, tm.iat])
    def test_loc_iloc_at_iat_setitem_single_value_in_categories(
        self, orig, exp_single_cats_value, indexer
    ):
        # - assign a single value -> exp_single_cats_value
        df = orig.copy()
        key = (2, 0)
        if indexer in [tm.loc, tm.at]:
            key = (df.index[2], df.columns[0])
        # "b" is among the categories for df["cat"]
        indexer(df)[key] = "b"
        tm.assert_frame_equal(df, exp_single_cats_value)
        # "c" is not among the categories for df["cat"]
        with pytest.raises(TypeError, match=msg1):
            indexer(df)[key] = "c"

    @pytest.mark.parametrize("indexer", [tm.loc, tm.iloc])
    def test_loc_iloc_setitem_mask_single_value_in_categories(
        self, orig, exp_single_cats_value, indexer
    ):
        # mask with single True
        df = orig.copy()
        mask = df.index == "j"
        key = 0
        if indexer is tm.loc:
            key = df.columns[key]
        indexer(df)[mask, key] = "b"
        tm.assert_frame_equal(df, exp_single_cats_value)

    @pytest.mark.parametrize("indexer", [tm.loc, tm.iloc])
    def test_loc_iloc_setitem_full_row_non_categorical_rhs(
        self, orig, exp_single_row, indexer
    ):
        # - assign a complete row (mixed values) -> exp_single_row
        df = orig.copy()
        key = 2
        if indexer is tm.loc:
            key = df.index[2]
        # not categorical dtype, but "b" _is_ among the categories for df["cat"]
        indexer(df)[key, :] = ["b", 2]
        tm.assert_frame_equal(df, exp_single_row)
        # "c" is not among the categories for df["cat"]
        with pytest.raises(TypeError, match=msg1):
            indexer(df)[key, :] = ["c", 2]

    @pytest.mark.parametrize("indexer", [tm.loc, tm.iloc])
    def test_loc_iloc_setitem_partial_col_categorical_rhs(
        self, orig, exp_parts_cats_col, indexer
    ):
        # assign a part of a column with dtype == categorical ->
        # exp_parts_cats_col
        df = orig.copy()
        key = (slice(2, 4), 0)
        if indexer is tm.loc:
            key = (slice("j", "k"), df.columns[0])
        # same categories as we currently have in df["cats"]
        compat = Categorical(["b", "b"], categories=["a", "b"])
        indexer(df)[key] = compat
        tm.assert_frame_equal(df, exp_parts_cats_col)
        # categories do not match df["cat"]'s, but "b" is among them
        semi_compat = Categorical(list("bb"), categories=list("abc"))
        with pytest.raises(TypeError, match=msg2):
            # different categories but holdable values
            # -> not sure if this should fail or pass
            indexer(df)[key] = semi_compat
        # categories do not match df["cat"]'s, and "c" is not among them
        incompat = Categorical(list("cc"), categories=list("abc"))
        with pytest.raises(TypeError, match=msg2):
            # different values
            indexer(df)[key] = incompat

    @pytest.mark.parametrize("indexer", [tm.loc, tm.iloc])
    def test_loc_iloc_setitem_non_categorical_rhs(
        self, orig, exp_parts_cats_col, indexer
    ):
        # assign a part of a column with dtype != categorical -> exp_parts_cats_col
        df = orig.copy()
        key = (slice(2, 4), 0)
        if indexer is tm.loc:
            key = (slice("j", "k"), df.columns[0])
        # "b" is among the categories for df["cat"]
        indexer(df)[key] = ["b", "b"]
        tm.assert_frame_equal(df, exp_parts_cats_col)
        # "c" not part of the categories
        with pytest.raises(TypeError, match=msg1):
            indexer(df)[key] = ["c", "c"]
@pytest.mark.parametrize("indexer", [tm.getitem, tm.loc, tm.iloc])
def test_getitem_preserve_object_index_with_dates(self, indexer):
    # https://github.com/pandas-dev/pandas/pull/42950 - when selecting a column
    # from dataframe, don't try to infer object dtype index on Series construction
    idx = date_range("2012", periods=3).astype(object)
    df = DataFrame({0: [1, 2, 3]}, index=idx)
    assert df.index.dtype == object
    if indexer is tm.getitem:
        ser = indexer(df)[0]
    else:
        # loc/iloc take a (rows, column) tuple; select all rows of column 0.
        ser = indexer(df)[:, 0]
    assert ser.index.dtype == object
def test_loc_on_multiindex_one_level(self):
    # GH#45779
    # Scalar .loc on a 1-level MultiIndex returns a frame that keeps the
    # MultiIndex (and its level name) rather than flattening it.
    df = DataFrame(
        data=[[0], [1]],
        index=MultiIndex.from_tuples([("a",), ("b",)], names=["first"]),
    )
    expected = DataFrame(
        data=[[0]], index=MultiIndex.from_tuples([("a",)], names=["first"])
    )
    result = df.loc["a"]
    tm.assert_frame_equal(result, expected)
class TestDepreactedIndexers:
    """Indexing with dict/set keys emits a FutureWarning (GH#42825).

    NOTE(review): class name has a typo ("Depreacted" -> "Deprecated");
    left unchanged so external test selection keeps working.
    """

    @pytest.mark.parametrize(
        "key", [{1}, {1: 1}, ({1}, "a"), ({1: 1}, "a"), (1, {"a"}), (1, {"a": "a"})]
    )
    def test_getitem_dict_and_set_deprecated(self, key):
        # GH#42825
        df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"])
        with tm.assert_produces_warning(FutureWarning):
            df.loc[key]

    @pytest.mark.parametrize(
        "key",
        [
            {1},
            {1: 1},
            (({1}, 2), "a"),
            (({1: 1}, 2), "a"),
            ((1, 2), {"a"}),
            ((1, 2), {"a": "a"}),
        ],
    )
    def test_getitem_dict_and_set_deprecated_multiindex(self, key):
        # GH#42825
        df = DataFrame(
            [[1, 2], [3, 4]],
            columns=["a", "b"],
            index=MultiIndex.from_tuples([(1, 2), (3, 4)]),
        )
        with tm.assert_produces_warning(FutureWarning):
            df.loc[key]

    @pytest.mark.parametrize(
        "key", [{1}, {1: 1}, ({1}, "a"), ({1: 1}, "a"), (1, {"a"}), (1, {"a": "a"})]
    )
    def test_setitem_dict_and_set_deprecated(self, key):
        # GH#42825
        df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"])
        with tm.assert_produces_warning(FutureWarning):
            df.loc[key] = 1

    @pytest.mark.parametrize(
        "key",
        [
            {1},
            {1: 1},
            (({1}, 2), "a"),
            (({1: 1}, 2), "a"),
            ((1, 2), {"a"}),
            ((1, 2), {"a": "a"}),
        ],
    )
    def test_setitem_dict_and_set_deprecated_multiindex(self, key):
        # GH#42825
        df = DataFrame(
            [[1, 2], [3, 4]],
            columns=["a", "b"],
            index=MultiIndex.from_tuples([(1, 2), (3, 4)]),
        )
        with tm.assert_produces_warning(FutureWarning):
            df.loc[key] = 1
| pandas-dev/pandas | pandas/tests/frame/indexing/test_indexing.py | Python | bsd-3-clause | 54,218 |
# Copyright (c) 2009-2010 Six Apart Ltd.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Six Apart Ltd. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
A Sphinx extension to configure the environment so Django objects can load and
be auto-documented.
"""
import os
def setup(app):
os.environ['DJANGO_SETTINGS_MODULE'] = 'motion.settings'
| eelias/typepadapp | doc/exts/set_up_django.py | Python | bsd-3-clause | 1,742 |
# coding: utf-8
"""
"""
import pytest
import numpy as np
from pysteps import motion, verification
from pysteps.tests.helpers import get_precipitation_fields
# Parameter names for the test_lk parametrization below; each tuple in
# lk_arg_values supplies one value per name, in order.
lk_arg_names = (
    "lk_kwargs",
    "fd_method",
    "dense",
    "nr_std_outlier",
    "k_outlier",
    "size_opening",
    "decl_scale",
    "verbose",
)
lk_arg_values = [
    ({}, "shitomasi", True, 3, 30, 3, 20, False),  # defaults
    ({}, "shitomasi", False, 3, 30, 3, 20, True),  # sparse output, verbose
    ({}, "shitomasi", False, 0, 30, 3, 20, False),  # sparse output, all outliers
    (
        {},
        "shitomasi",
        True,
        3,
        None,
        0,
        0,
        False,
    ),  # global outlier detection, no filtering, no declutering
    ({}, "shitomasi", True, 0, 30, 3, 20, False),  # all outliers
    ({}, "blob", True, 3, 30, 3, 20, False),  # blob detection
    ({}, "tstorm", True, 3, 30, 3, 20, False),  # tstorm detection
]
@pytest.mark.parametrize(lk_arg_names, lk_arg_values)
def test_lk(
    lk_kwargs,
    fd_method,
    dense,
    nr_std_outlier,
    k_outlier,
    size_opening,
    decl_scale,
    verbose,
):
    """Tests Lucas-Kanade optical flow."""
    # Optional dependencies: opencv always; scikit-image for blob/tstorm,
    # pandas additionally for tstorm.
    pytest.importorskip("cv2")
    if fd_method == "blob":
        pytest.importorskip("skimage")
    if fd_method == "tstorm":
        pytest.importorskip("skimage")
        pytest.importorskip("pandas")
    # inputs
    precip, metadata = get_precipitation_fields(
        num_prev_files=2,
        num_next_files=0,
        return_raw=False,
        metadata=True,
        upscale=2000,
    )
    precip = precip.filled()
    # Retrieve motion field
    oflow_method = motion.get_method("LK")
    output = oflow_method(
        precip,
        lk_kwargs=lk_kwargs,
        fd_method=fd_method,
        dense=dense,
        nr_std_outlier=nr_std_outlier,
        k_outlier=k_outlier,
        size_opening=size_opening,
        decl_scale=decl_scale,
        verbose=verbose,
    )
    # Check format of ouput:
    # dense -> (2, m, n) motion field; with nr_std_outlier == 0 every
    # vector is rejected, so the field is all zeros.
    if dense:
        assert isinstance(output, np.ndarray)
        assert output.ndim == 3
        assert output.shape[0] == 2
        assert output.shape[1:] == precip[0].shape
        if nr_std_outlier == 0:
            assert output.sum() == 0
    # sparse -> (points, vectors) pair of (k, 2) arrays; empty when all
    # vectors are treated as outliers.
    else:
        assert isinstance(output, tuple)
        assert len(output) == 2
        assert isinstance(output[0], np.ndarray)
        assert isinstance(output[1], np.ndarray)
        assert output[0].ndim == 2
        assert output[1].ndim == 2
        assert output[0].shape[1] == 2
        assert output[1].shape[1] == 2
        assert output[0].shape[0] == output[1].shape[0]
        if nr_std_outlier == 0:
            assert output[0].shape[0] == 0
            assert output[1].shape[0] == 0
| pySTEPS/pysteps | pysteps/tests/test_motion_lk.py | Python | bsd-3-clause | 2,738 |
import responses
from django.test import TestCase
from blinky.core.tests.utils import BlinkMixin, reload_record
from ..models import JunebugInstance
from ..tasks import update_workers_junebug_info
class JunebugTestCase(BlinkMixin, TestCase):
    """Tests for JunebugInstance channel listing and name formatting.

    setUp registers mocked HTTP responses for the Junebug channel-list
    endpoint and the two individual channel endpoints; tests decorated
    with @responses.activate consume them instead of doing real HTTP.
    """

    def setUp(self):
        self.junebug = JunebugInstance.objects.create(url='http://junebug')
        # Channel index: two channel ids.
        responses.add(
            responses.GET,
            'http://junebug/channels/',
            json={
                "status": 200,
                "code": "OK",
                "description": "channels listed",
                "result": [
                    "099059cb-3f71-488e-9b02-126de798e8e2",
                    "378740d2-b043-4c82-9316-b9586f8c57dc",
                ]
            })
        # First channel: has a label, no mo_url.
        responses.add(
            responses.GET,
            'http://junebug/channels/099059cb-3f71-488e-9b02-126de798e8e2',
            json={
                "status": 200,
                "code": "OK",
                "description": "channel found",
                "result": {
                    "label": "A channel with a label",
                    "amqp_queue": "amqp-queue-1",
                    "type": "vumigo",
                    "id": "099059cb-3f71-488e-9b02-126de798e8e2"
                }
            })
        # Second channel: has an mo_url, no label.
        responses.add(
            responses.GET,
            'http://junebug/channels/378740d2-b043-4c82-9316-b9586f8c57dc',
            json={
                "status": 200,
                "code": "OK",
                "description": "channel found",
                "result": {
                    "amqp_queue": "amqp-queue-1",
                    "mo_url": "http://example.com",
                    "type": "vumigo",
                    "id": "378740d2-b043-4c82-9316-b9586f8c57dc"
                }
            })

    def test_format_channel_name_with_id(self):
        # Without a label the formatted name starts with the channel id.
        jb = JunebugInstance()
        self.assertEqual(
            jb.format_channel_name({
                'id': 'foo',
                'amqp_queue': 'amqp_queue',
                'mo_url': 'mo_url',
                'type': 'channel_type'
            }),
            'foo of type channel_type on Queue amqp_queue / MO URL: mo_url')

    def test_format_channel_name_with_label(self):
        # With a label the name is "label (id) ...".
        jb = JunebugInstance()
        self.assertEqual(
            jb.format_channel_name({
                'id': 'foo',
                'label': 'bar',
                'amqp_queue': 'amqp_queue',
                'mo_url': 'mo_url',
                'type': 'channel_type',
            }),
            ('bar (foo) of type channel_type on Queue '
             'amqp_queue / MO URL: mo_url'))

    @responses.activate
    def test_get_channels(self):
        # One request for the index plus one per channel -> 3 calls.
        [channel1, channel2] = self.junebug.get_channels()
        self.assertEqual(
            channel1['id'], '099059cb-3f71-488e-9b02-126de798e8e2')
        self.assertEqual(
            channel2['id'], '378740d2-b043-4c82-9316-b9586f8c57dc')
        self.assertEqual(len(responses.calls), 3)

    @responses.activate
    def test_task(self):
        # The task fills in worker_friendly_name from the channel details.
        system = self.mk_system()
        worker = self.mk_worker_type(
            system,
            worker_name='099059cb-3f71-488e-9b02-126de798e8e2')
        self.assertEqual(reload_record(worker).worker_friendly_name, None)
        update_workers_junebug_info(workertype_pk=worker.pk)
        self.assertEqual(
            reload_record(worker).worker_friendly_name,
            ('A channel with a label (099059cb-3f71-488e-9b02-126de798e8e2) '
             'of type vumigo on Queue amqp-queue-1 / MO URL: None'))
| smn/blinky | blinky/junebug/tests/test_models.py | Python | bsd-3-clause | 3,540 |
import os
from rpy2 import robjects
from rpy2.robjects import pandas2ri
# Module init: load the R 'xcms' package and enable automatic
# R <-> pandas data-frame conversion for all rpy2 calls in this module.
library = robjects.r['library']
library('xcms')
pandas2ri.activate()
def get_xmcs_set(files, **kwargs):
    """Get an xmcs set for a group of files

    Parameters
    -----------------
    files : list of strings
        mzML files for extraction.  Only the first file's existence is
        checked here; R raises later if others are missing.
    **kwargs
        Keyword arguments to xcmsSet command, such as:
        method='centWave' ppm=10, peakwidth=(5,30), snthresh=6,
        mzdiff=0.01,prefilter=(3,100)

    Returns
    -----------
    out : xcmsSet
        R xcms set.

    Raises
    ------
    ValueError
        If `files` is empty or the first file does not exist.
    """
    if not files:
        raise ValueError('Please specify at least one file')
    if not os.path.exists(files[0]):
        raise ValueError('File does not exist, you may need to add full path: '
                         "%s" % files[0])
    # paste('', files, sep='') coerces the Python list into an R character
    # vector — presumably a no-op concatenation used only for conversion;
    # TODO confirm against rpy2 conversion rules.
    paste = robjects.r['paste']
    files = paste('', files, sep='')
    xmcs_set = robjects.r['xcmsSet']
    return xmcs_set(files, **kwargs)
def group(xcms_set, **kwargs):
    """Group an xmcs set using a given set of criteria.

    Thin wrapper around R's ``group`` from the xcms package.

    Parameters
    ----------------
    xcms_set : xcmsSet
        R xcms set.
    **kwargs
        Keyword arguments to the group command, such as:
        bw=5,mzwid=0.015,minfrac=0.5,minsamp=1

    Returns
    -----------
    out : xcmsSet
        R xcms set.
    """
    grp = robjects.r['group']
    return grp(xcms_set, **kwargs)
def fill_peaks(xmcs_set):
    """Fill in missing peak data for an xcmsSet via R's ``fillPeaks``."""
    return robjects.r['fillPeaks'](xmcs_set)
def ret_cor(xmcs_set, **kwargs):
    """Apply retention-time correction (R ``retcor``) to an xcmsSet.

    RETENTION CORRECTION CAN MAKE THINGS WORSE
    DONT USE IT UNLESS YOU NEED IT
    DONT USE IT UNLESS YOU KNOW WHAT YOU ARE DOING

    Example keyword parameters:
    family="symmetric", plottype="mdevden"
    """
    retcor = robjects.r['retcor']
    return retcor(xmcs_set, **kwargs)
def run_xcms(files):
    """Convenience function to run xcms using default settings

    Pipeline: extract (get_xmcs_set) -> group -> fill_peaks -> peak_table.
    Note: despite the docstring below, no **kwargs are accepted here;
    all steps run with their defaults.

    Parameters
    -----------------
    files : list of strings
        mzML files for extraction.
    **kwargs
        Keyword arguments to xcmsSet command, such as:
        method='centWave' ppm=10, peakwidth=(5,30), snthresh=6,
        mzdiff=0.01,prefilter=(3,100)

    Returns
    -----------
    out : dataFrame
        xcms peak dataFrame.
    """
    xmcs_set = get_xmcs_set(files)
    xmcs_set = group(xmcs_set)
    xmcs_set = fill_peaks(xmcs_set)
    return peak_table(xmcs_set)
def peak_table(xmcs_set, filebase='peakList'):
    """Export the global peak table

    Calls R's ``peakTable`` and converts the result to a pandas
    DataFrame, restoring the R column names.

    Parameters
    ----------------
    xcms_set : xcmsSet
        R xcms set.
    filebase : str
        Type of filebase to use.

    Returns
    -----------
    out : dataFrame
        xcms peak dataFrame.
    """
    peak = robjects.r['peakTable']
    tab = peak(xmcs_set, filebase)
    df = pandas2ri.ri2py_dataframe(tab)
    # ri2py_dataframe does not carry over column names; copy them manually.
    df.columns = tab.colnames
    return df
if __name__ == '__main__':
    # Smoke test: extract, group, and show the head of the peak table
    # for a local sample file (requires test_basic.mzML in the cwd).
    xset = get_xmcs_set(['test_basic.mzML'])
    xset = group(xset)
    df = peak_table(xset)
    print(df.head())
| aitatanit/metatlas | metatlas/xcms.py | Python | bsd-3-clause | 3,008 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
def _RunArgs(args, input_api):
p = input_api.subprocess.Popen(args, stdout=input_api.subprocess.PIPE,
stderr=input_api.subprocess.STDOUT)
out, _ = p.communicate()
return (out, p.returncode)
def _CheckRegisteredMetrics(input_api, output_api):
    """ Check that all tracing metrics are imported in all_metrics.html """
    results = []
    tracing_dir = input_api.PresubmitLocalPath()
    # Run bin/validate_all_metrics with the same interpreter as the presubmit.
    out, return_code = _RunArgs(
        [input_api.python_executable,
         input_api.os_path.join(tracing_dir, 'bin', 'validate_all_metrics')],
        input_api)
    # Non-zero exit -> surface the script's output as a presubmit error.
    if return_code:
        results.append(output_api.PresubmitError(
            'Failed validate_all_metrics: ', long_text=out))
    return results
def CheckChangeOnUpload(input_api, output_api):
    # Presubmit hook entry point (upload); delegates to the shared checks.
    return _CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
    # Presubmit hook entry point (commit); delegates to the shared checks.
    return _CheckChange(input_api, output_api)
def _CheckChange(input_api, output_api):
    """Shared presubmit checks: gypi consistency, pylint, metric registry."""
    results = []

    # Temporarily extend sys.path so tracing_build can be imported from
    # the presubmit directory; always restore it afterwards.
    original_sys_path = sys.path
    try:
        sys.path += [input_api.PresubmitLocalPath()]
        from tracing_build import check_gypi
        error = check_gypi.GypiCheck()
        if error:
            results.append(output_api.PresubmitError(error))
    finally:
        sys.path = original_sys_path

    results += input_api.RunTests(input_api.canned_checks.GetPylint(
        input_api, output_api, extra_paths_list=_GetPathsToPrepend(input_api),
        pylintrc='../pylintrc'))
    results += _CheckRegisteredMetrics(input_api, output_api)
    return results
def _GetPathsToPrepend(input_api):
project_dir = input_api.PresubmitLocalPath()
catapult_dir = input_api.os_path.join(project_dir, '..')
return [
project_dir,
input_api.os_path.join(catapult_dir, 'third_party', 'mock'),
]
| sahiljain/catapult | tracing/PRESUBMIT.py | Python | bsd-3-clause | 1,916 |
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from blog.views import indexView, detalhePost, indexView2, buscaPost, about, buscaPost2
from sitetools.views import depoimentoView
from blog import views
from django.views.generic import RedirectView
# URL routing table.
# NOTE(review): most patterns lack a leading '^' anchor (e.g. r'about/$'
# matches any URL *ending* in "about/"); presumably intentional here,
# but worth confirming.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'about/$', views.about, name='about'),
    url(r'servicos/$', views.servicos, name='servicos'),
    url(r'posts/$', indexView.as_view(), name='postagens'),
    url(r'depoimentos/$', depoimentoView.as_view(), name='depoimentos'),
    url(r'^$', indexView2.as_view(), name='post_list'),
    #url(r'^$', views.send_email, name='post_list'),
    #url(r'^$', views.form, name='post_list'),
    url(r'postBuscado/$', buscaPost.as_view(), name='postBuscado'),
    url(r'postData/$', buscaPost2.as_view(), name='postBuscado2'),
    # Post detail by slug; must come after the other posts/ routes.
    url(r'posts/(?P<slug>\S+)$', detalhePost.as_view()),
    url(r'^ckeditor/', include('ckeditor_uploader.urls')),
    #url(r'contato/$', views.contato, name='contato')
    url(r'', include('contato.urls')),
    #url(r'^contato/$', views.form , name='contato'),
]

# Serve static and media files directly only during development.
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| MatheusDaeuble/admsolucoesblog | mysite/urls.py | Python | bsd-3-clause | 2,029 |
import os

# Size of every image tile.
TILE_SIZE = 160, 240
RESIZE = True  # Resize files that don't match the above?

# Column width of image grid. Rows will be determined automatically.
COLS = 5

# Tile offset.
# 0 will start at the top right, 1 will leave one empty tile, etc.
TILE_OFFSET = 0

# Outside padding (in px)
PADDING = 5

# Gap size between images (in px)
GAP = 2

# Background color
BGCOLOR = '#fff'


# PEP 8 (E731): named functions instead of lambda assignments.
def subdir(*d):
    """Resolve *d* as a path below this settings file's directory."""
    return os.path.join(os.path.dirname(__file__), *d)


# Location of input files
INPUT_DIR = subdir('images')

# Output file settings
OUTPUT_FILE = subdir('output', 'collage.jpg')
OUTPUT_QUALITY = 95

# Image text settings
WRITE = True
FONT_FILE = subdir('fonts', 'Happy_Monkey', 'HappyMonkey-Regular.ttf')
FONT_SIZE = 20
FONT_COLOR = '#fff'
FONT_PADDING = 10  # Padding from bottom right tile corner, in px.


def write_text(no):
    """Return the text drawn on tile *no* (0-based). Default: enumerate images."""
    return str(no + 1)


def post_process(img):
    """Hook applied to the finished collage image. Default: do nothing."""
    return None
| fwenzel/collage | settings.py | Python | bsd-3-clause | 1,039 |
# __init__.py vi:ts=4:sw=4:expandtab:
#
# Copyright (c) 2006-2008 Three Rings Design, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright owner nor the names of contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os

# Public test submodules of this package.
__all__ = ['test_builder', 'test_config', 'test_runner', 'test_sysinstall', 'test_utils']

# Useful Constants
# Paths resolved relative to this package: its directory, the bundled
# test data directory, and the fake command stubs used by the tests.
INSTALL_DIR = os.path.dirname(__file__)
DATA_DIR = os.path.join(INSTALL_DIR, 'data')
CMD_DIR = os.path.join(DATA_DIR, 'fake_cmds')
# This method is used by a number of tests.
def rewrite_config(inpath, outpath, variables):
    """
    Create a new file from a template after replacing one or more strings.

    @param inpath: Path to input, or template file.
    @param outpath: Path to output file.
    @param variables: Dictionary containing variables (keys) and their
        replacement strings (values).
    @raise IOError: If either file cannot be opened.
    """
    # 'with' guarantees both handles are closed even if a read/write
    # fails part-way (the original leaked handles on error).
    # dict.items() (not the Python-2-only iteritems()) keeps this working
    # on both Python 2 and 3.
    with open(inpath, 'r') as template:
        with open(outpath, 'w') as output:
            for line in template:
                for key, value in variables.items():
                    line = line.replace(key, value)
                output.write(line)
| threerings/farbot | farb/test/__init__.py | Python | bsd-3-clause | 2,482 |
from django.db import models
from google.appengine.api import images
class Page(models.Model):
content = models.TextField(blank=True)
subcontent = models.TextField(blank=True)
title = models.CharField(max_length=200)
photo = models.ImageField(upload_to='about', blank=True)
def photo_url(self):
return images.get_serving_url(str(self.photo).split('/')[0])
class HomePic(models.Model):
position = models.CharField(max_length=5, choices=(('left', 'left'), ('right', 'right')))
photo = models.ImageField(upload_to='home/photo')
illustration = models.ImageField(upload_to='home/illustration')
def photo_url(self):
return images.get_serving_url(str(self.photo).split('/')[0])
def illustration_url(self):
return images.get_serving_url(str(self.illustration).split('/')[0])
class Supplier(models.Model):
name = models.CharField(max_length=200)
link = models.URLField()
class PortfolioPhoto(models.Model):
photo = models.ImageField(upload_to='portfolio')
title = models.CharField(max_length=200)
def photo_url(self):
return images.get_serving_url(str(self.photo).split('/')[0])
class Pricing(models.Model):
position = models.IntegerField()
description = models.CharField(max_length=200)
cost = models.CharField(max_length=200)
| adamjmcgrath/glancydesign | glancy/models.py | Python | bsd-3-clause | 1,280 |
import sys
import os
import os.path
import readInput
from os.path import join as pj
def write_tickle_script(conname, swd, hem, pval, thresh):
    """
    Writes out the m file with matlab calls to surfrend_canonical.m.

    Builds a tksurfer Tcl script that loads the overlay
    <swd>/<conname>-<hem>.w and saves lateral, ventral, and medial RGB
    snapshots named <conname>-<hem>-<pval>-<thresh>-{Lat,Ven,Med}.rgb,
    then writes the script to <swd>/<conname>-<hem>.tcl via
    write_file_with_list.

    @param conname: Contrast name used in file names.
    @param swd: Working directory for the .w overlay and outputs.
    @param hem: Hemisphere, 'lh' or 'rh' (the rh view is rotated 180).
    @param pval: Overlay threshold passed to tksurfer's fthresh.
    @param thresh: Only used in the output .rgb file names.
    """
    tcl_script = pj(swd, conname+"-"+hem+".tcl")
    commands = []
    # Load the .w overlay into field 0 and redraw.
    ## commands.append("set colscalebarvertflag 1")
    # commands.append("set fthresh %s" % pval)
    commands.append("set file '%s/%s-%s.w'" % (swd, conname, hem))
    commands.append("set val $file")
    commands.append("sclv_read_from_dotw 0")
    commands.append("redraw")
    commands.append("set outpth '%s'" %(swd))
    commands.append("set conname '%s'" %(conname))
    commands.append("set hem '%s'" %(hem))
    commands.append("\n")
    commands.append("rotate_brain_y 90")
    commands.append("sclv_set_current_field 0")
    commands.append("UpdateAndRedraw")
    commands.append("\n")
    # Lateral view snapshot.
    commands.append("make_lateral_view")
    commands.append("redraw")
    commands.append("set rgb %s-%s-%s-%s-Lat.rgb" % (conname, hem, pval, thresh))
    commands.append("save_rgb")
    commands.append("\n")
    # Ventral view snapshot; fthresh is applied here, and the right
    # hemisphere is flipped so both hems face the same way.
    commands.append("rotate_brain_x 90")
    commands.append("set fthresh %s" % pval)
    if hem == 'rh':
        commands.append("rotate_brain_z 180")
    commands.append("redraw")
    commands.append("set rgb %s-%s-%s-%s-Ven.rgb" % (conname, hem, pval, thresh))
    commands.append("save_rgb")
    commands.append("\n")
    # Medial view snapshot.
    commands.append("make_lateral_view")
    commands.append("rotate_brain_y 180")
    commands.append("redraw")
    commands.append("set rgb %s-%s-%s-%s-Med.rgb" % (conname,hem, pval, thresh))
    commands.append("save_rgb")
    commands.append("\n")
    ## if hem == 'rh':
    commands.append("exit")
    write_file_with_list(tcl_script, commands, conname, swd, hem)
def write_file_with_list(path, lines, conname, swd, hem, quiet=False):
    """Write *lines* to *path*, one per line, with a trailing newline.

    Parameters
    ----------
    path : str
        Destination file path.
    lines : sequence of str
        Lines joined with newlines and written out.
    conname, swd, hem : str
        Used only to build the progress message.
    quiet : bool
        When True, suppress the progress message.

    Raises
    ------
    IOError
        Propagated unchanged if the file cannot be opened or written.
    """
    # The previous version wrapped this in ``try: ... except IOError:
    # raise`` (a no-op) and carried dead commented-out code; errors
    # simply propagate on their own.
    with open(path, 'w') as f:
        f.write('\n'.join(lines) + '\n')
    if not quiet:
        print("Hi! Wrote %s/%s-%s.tcl" % (swd, conname, hem))
if __name__ == "__main__":
    # CLI: write_tickle_script.py <conname> <swd> <hem> <pval> <thresh>
    write_tickle_script(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
| KuperbergLab/MRI_scripts | write_tickle_script.py | Python | bsd-3-clause | 2,277 |
"""Get useful information from live Python objects.
This module encapsulates the interface provided by the internal special
attributes (co_*, im_*, tb_*, etc.) in a friendlier fashion.
It also provides some help for examining source code and class layout.
Here are some of the useful functions provided by this module:
ismodule(), isclass(), ismethod(), isfunction(), isgeneratorfunction(),
isgenerator(), istraceback(), isframe(), iscode(), isbuiltin(),
isroutine() - check object types
getmembers() - get members of an object that satisfy a given condition
getfile(), getsourcefile(), getsource() - find an object's source code
getdoc(), getcomments() - get documentation on an object
getmodule() - determine the module that an object came from
getclasstree() - arrange classes so as to represent their hierarchy
getargspec(), getargvalues(), getcallargs() - get info about function arguments
getfullargspec() - same, with support for Python-3000 features
formatargspec(), formatargvalues() - format an argument spec
getouterframes(), getinnerframes() - get info about frames
currentframe() - get the current stack frame
stack(), trace() - get info about frames on the stack or in a traceback
"""
# This module is in the public domain. No warranties.
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__date__ = '1 Jan 2001'
import sys
import os
import types
import itertools
import string
import re
import imp
import tokenize
import linecache
from operator import attrgetter
from collections import namedtuple
# Create constants for the compiler flags in Include/code.h
# We try to get them from dis to avoid duplication, but fall
# back to hardcoding so the dependency is optional
try:
    from dis import COMPILER_FLAG_NAMES as _flag_names
except ImportError:
    CO_OPTIMIZED, CO_NEWLOCALS = 0x1, 0x2
    CO_VARARGS, CO_VARKEYWORDS = 0x4, 0x8
    CO_NESTED, CO_GENERATOR, CO_NOFREE = 0x10, 0x20, 0x40
else:
    # Mirror dis's flag table into module globals as CO_<NAME> constants.
    mod_dict = globals()
    for k, v in _flag_names.items():
        mod_dict["CO_" + v] = k

# See Include/object.h
TPFLAGS_IS_ABSTRACT = 1 << 20
# ----------------------------------------------------------- type-checking
def ismodule(object):
    """Return true if the object is a module.

    Module objects provide these attributes:
        __cached__      pathname to byte compiled file
        __doc__         documentation string
        __file__        filename (missing for built-in modules)"""
    return isinstance(object, types.ModuleType)
def isclass(object):
    """Return true if the object is a class.

    Class objects provide these attributes:
        __doc__         documentation string
        __module__      name of module in which this class was defined"""
    # isinstance(object, type) also covers classes with custom metaclasses.
    return isinstance(object, type)
def ismethod(object):
    """Return true if the object is an instance method.

    Instance method objects provide these attributes:
        __doc__         documentation string
        __name__        name with which this method was defined
        __func__        function object containing implementation of method
        __self__        instance to which this method is bound"""
    return isinstance(object, types.MethodType)
def ismethoddescriptor(object):
    """Return true if the object is a method descriptor.

    But not if ismethod() or isclass() or isfunction() are true.

    This is new in Python 2.2, and, for example, is true of int.__add__.
    An object passing this test has a __get__ attribute but not a __set__
    attribute, but beyond that the set of attributes varies.  __name__ is
    usually sensible, and __doc__ often is.

    Methods implemented via descriptors that also pass one of the other
    tests return false from the ismethoddescriptor() test, simply because
    the other tests promise more -- you can, e.g., count on having the
    __func__ attribute (etc) when an object passes ismethod()."""
    if isclass(object) or ismethod(object) or isfunction(object):
        # mutual exclusion
        return False
    # Check the type, not the instance: the descriptor protocol is looked
    # up on the type.
    tp = type(object)
    return hasattr(tp, "__get__") and not hasattr(tp, "__set__")
def isdatadescriptor(object):
    """Return true if the object is a data descriptor.

    Data descriptors have both a __get__ and a __set__ attribute.  Examples are
    properties (defined in Python) and getsets and members (defined in C).
    Typically, data descriptors will also have __name__ and __doc__ attributes
    (properties, getsets, and members have both of these attributes), but this
    is not guaranteed."""
    if isclass(object) or ismethod(object) or isfunction(object):
        # mutual exclusion
        return False
    # As in ismethoddescriptor(): look on the type, not the instance.
    tp = type(object)
    return hasattr(tp, "__set__") and hasattr(tp, "__get__")
# MemberDescriptorType only exists on implementations that expose it
# (CPython); pick the working definition at import time.
if hasattr(types, 'MemberDescriptorType'):
    # CPython and equivalent
    def ismemberdescriptor(object):
        """Return true if the object is a member descriptor.

        Member descriptors are specialized descriptors defined in extension
        modules."""
        return isinstance(object, types.MemberDescriptorType)
else:
    # Other implementations
    def ismemberdescriptor(object):
        """Return true if the object is a member descriptor.

        Member descriptors are specialized descriptors defined in extension
        modules."""
        return False
# Same pattern as ismemberdescriptor(): choose the definition based on
# whether the implementation exposes GetSetDescriptorType.
if hasattr(types, 'GetSetDescriptorType'):
    # CPython and equivalent
    def isgetsetdescriptor(object):
        """Return true if the object is a getset descriptor.

        getset descriptors are specialized descriptors defined in extension
        modules."""
        return isinstance(object, types.GetSetDescriptorType)
else:
    # Other implementations
    def isgetsetdescriptor(object):
        """Return true if the object is a getset descriptor.

        getset descriptors are specialized descriptors defined in extension
        modules."""
        return False
def isfunction(object):
    """Return true if the object is a user-defined function.

    Function objects provide these attributes:
        __doc__         documentation string
        __name__        name with which this function was defined
        __code__        code object containing compiled function bytecode
        __defaults__    tuple of any default values for arguments
        __globals__     global namespace in which this function was defined
        __annotations__ dict of parameter annotations
        __kwdefaults__  dict of keyword only parameters with defaults"""
    return isinstance(object, types.FunctionType)
def isgeneratorfunction(object):
    """Return true if the object is a user-defined generator function.

    Generator function objects provides same attributes as functions.

    See help(isfunction) for attributes listing."""
    # CO_GENERATOR is defined at module level (from dis, or hardcoded).
    return bool((isfunction(object) or ismethod(object)) and
                object.__code__.co_flags & CO_GENERATOR)
def isgenerator(object):
    """Return true if the object is a generator.

    Generator objects provide these attributes:
        __iter__        defined to support iteration over container
        close           raises a new GeneratorExit exception inside the
                        generator to terminate the iteration
        gi_code         code object
        gi_frame        frame object or possibly None once the generator has
                        been exhausted
        gi_running      set to 1 when generator is executing, 0 otherwise
        next            return the next item from the container
        send            resumes the generator and "sends" a value that becomes
                        the result of the current yield-expression
        throw           used to raise an exception inside the generator"""
    return isinstance(object, types.GeneratorType)
def istraceback(object):
    """Return true if the object is a traceback.

    Traceback objects provide these attributes:
        tb_frame        frame object at this level
        tb_lasti        index of last attempted instruction in bytecode
        tb_lineno       current line number in Python source code
        tb_next         next inner traceback object (called by this level)"""
    return isinstance(object, types.TracebackType)
def isframe(object):
    """Return true if the object is a frame object.

    Frame objects provide these attributes:
        f_back          next outer frame object (this frame's caller)
        f_builtins      built-in namespace seen by this frame
        f_code          code object being executed in this frame
        f_globals       global namespace seen by this frame
        f_lasti         index of last attempted instruction in bytecode
        f_lineno        current line number in Python source code
        f_locals        local namespace seen by this frame
        f_trace         tracing function for this frame, or None"""
    return isinstance(object, types.FrameType)
def iscode(object):
    """Return true if the object is a code object.

    Code objects provide these attributes:
        co_argcount     number of arguments (not including * or ** args)
        co_code         string of raw compiled bytecode
        co_consts       tuple of constants used in the bytecode
        co_filename     name of file in which this code object was created
        co_firstlineno  number of first line in Python source code
        co_flags        bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
        co_lnotab       encoded mapping of line numbers to bytecode indices
        co_name         name with which this code object was defined
        co_names        tuple of names of local variables
        co_nlocals      number of local variables
        co_stacksize    virtual machine stack space required
        co_varnames     tuple of names of arguments and local variables"""
    return isinstance(object, types.CodeType)
def isbuiltin(object):
    """Return true if the object is a built-in function or method.

    Built-in functions and methods provide these attributes:
        __doc__         documentation string
        __name__        original name of this function or method
        __self__        instance to which a method is bound, or None"""
    return isinstance(object, types.BuiltinFunctionType)
def isroutine(object):
    """Return true if the object is any kind of function or method."""
    # Union of the four callable-introspection predicates defined above.
    return (isbuiltin(object)
            or isfunction(object)
            or ismethod(object)
            or ismethoddescriptor(object))
def isabstract(object):
    """Return true if the object is an abstract base class (ABC)."""
    # TPFLAGS_IS_ABSTRACT is defined above (see Include/object.h).
    return bool(isinstance(object, type) and object.__flags__ & TPFLAGS_IS_ABSTRACT)
def getmembers(object, predicate=None):
    """Return all members of an object as (name, value) pairs sorted by name.

    Optionally, only return members that satisfy a given predicate."""
    if isclass(object):
        mro = (object,) + getmro(object)
    else:
        mro = ()
    results = []
    for key in dir(object):
        # First try to get the value via __dict__. Some descriptors don't
        # like calling their __get__ (see bug #1785).
        for base in mro:
            if key in base.__dict__:
                value = base.__dict__[key]
                break
        else:
            try:
                value = getattr(object, key)
            except AttributeError:
                # Attribute reported by dir() but not actually retrievable;
                # silently omit it from the results.
                continue
        if not predicate or predicate(value):
            results.append((key, value))
    results.sort()
    return results
# Result record for classify_class_attrs().
Attribute = namedtuple('Attribute', 'name kind defining_class object')

def classify_class_attrs(cls):
    """Return list of attribute-descriptor tuples.

    For each name in dir(cls), the return list contains a 4-tuple
    with these elements:

        0. The name (a string).

        1. The kind of attribute this is, one of these strings:
               'class method'    created via classmethod()
               'static method'   created via staticmethod()
               'property'        created via property()
               'method'          any other flavor of method
               'data'            not a method

        2. The class which defined this attribute (a class).

        3. The object as obtained directly from the defining class's
           __dict__, not via getattr.  This is especially important for
           data attributes:  C.data is just a data object, but
           C.__dict__['data'] may be a data descriptor with additional
           info, like a __doc__ string.
    """
    mro = getmro(cls)
    names = dir(cls)
    result = []
    for name in names:
        # Get the object associated with the name, and where it was defined.
        # Getting an obj from the __dict__ sometimes reveals more than
        # using getattr. Static and class methods are dramatic examples.
        # Furthermore, some objects may raise an Exception when fetched with
        # getattr(). This is the case with some descriptors (bug #1785).
        # Thus, we only use getattr() as a last resort.
        homecls = None
        # NOTE(review): getmro(cls) already starts with cls, so cls is
        # visited twice here; harmless because the loop breaks on the
        # first class whose __dict__ holds the name.
        for base in (cls,) + mro:
            if name in base.__dict__:
                obj = base.__dict__[name]
                homecls = base
                break
        else:
            obj = getattr(cls, name)
            homecls = getattr(obj, "__objclass__", homecls)

        # Classify the object.
        if isinstance(obj, staticmethod):
            kind = "static method"
        elif isinstance(obj, classmethod):
            kind = "class method"
        elif isinstance(obj, property):
            kind = "property"
        elif ismethoddescriptor(obj):
            kind = "method"
        elif isdatadescriptor(obj):
            kind = "data"
        else:
            obj_via_getattr = getattr(cls, name)
            if (isfunction(obj_via_getattr) or
                ismethoddescriptor(obj_via_getattr)):
                kind = "method"
            else:
                kind = "data"
            obj = obj_via_getattr

        result.append(Attribute(name, kind, homecls, obj))

    return result
# ----------------------------------------------------------- class helpers
def getmro(cls):
    "Return tuple of base classes (including cls) in method resolution order."
    # The type machinery computes and caches this as __mro__.
    return cls.__mro__
# -------------------------------------------------- source code extraction
def indentsize(line):
    """Return the indent size, in spaces, at the start of a line of text."""
    # Expand tabs first so the count is in spaces regardless of tab use.
    expanded = line.expandtabs()
    stripped = expanded.lstrip()
    return len(expanded) - len(stripped)
def getdoc(object):
    """Get the documentation string for an object.

    All tabs are expanded to spaces.  To clean up docstrings that are
    indented to line up with blocks of code, any whitespace that can be
    uniformly removed from the second line onwards is removed."""
    try:
        doc = object.__doc__
    except AttributeError:
        return None
    # Non-string __doc__ (e.g. a property on the class) is ignored.
    if not isinstance(doc, str):
        return None
    return cleandoc(doc)
def cleandoc(doc):
    """Clean up indentation from docstrings.

    Any whitespace that can be uniformly removed from the second line
    onwards is removed."""
    try:
        lines = doc.expandtabs().split('\n')
    except UnicodeError:
        # Preserve historical behavior: undecodable input yields None.
        return None
    # Smallest indentation over all non-blank lines after the first.
    margin = sys.maxsize
    for text in lines[1:]:
        stripped = text.lstrip()
        if stripped:
            margin = min(margin, len(text) - len(stripped))
    # The first line keeps no indentation at all.
    if lines:
        lines[0] = lines[0].lstrip()
    if margin < sys.maxsize:
        lines[1:] = [text[margin:] for text in lines[1:]]
    # Trim blank lines from both ends.
    while lines and not lines[-1]:
        del lines[-1]
    while lines and not lines[0]:
        del lines[0]
    return '\n'.join(lines)
def getfile(object):
    """Work out which source or compiled file an object was defined in."""
    if ismodule(object):
        if hasattr(object, '__file__'):
            return object.__file__
        raise TypeError('{!r} is a built-in module'.format(object))
    if isclass(object):
        # Classes have no __file__; go through their defining module.
        object = sys.modules.get(object.__module__)
        if hasattr(object, '__file__'):
            return object.__file__
        raise TypeError('{!r} is a built-in class'.format(object))
    # Unwrap step by step: method -> function -> code, and
    # traceback -> frame -> code; each 'if' feeds the next.
    if ismethod(object):
        object = object.__func__
    if isfunction(object):
        object = object.__code__
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        return object.co_filename
    raise TypeError('{!r} is not a module, class, method, '
                    'function, traceback, frame, or code object'.format(object))
# Result record for getmoduleinfo().
ModuleInfo = namedtuple('ModuleInfo', 'name suffix mode module_type')

def getmoduleinfo(path):
    """Get the module name, suffix, mode, and module type for a given file."""
    filename = os.path.basename(path)
    suffixes = [(-len(suffix), suffix, mode, mtype)
                    for suffix, mode, mtype in imp.get_suffixes()]
    suffixes.sort() # try longest suffixes first, in case they overlap
    for neglen, suffix, mode, mtype in suffixes:
        if filename[neglen:] == suffix:
            return ModuleInfo(filename[:neglen], suffix, mode, mtype)
    # Falls through: returns None when no known suffix matches.
def getmodulename(path):
    """Return the module name for a given file, or None."""
    info = getmoduleinfo(path)
    # Implicitly returns None for unrecognized suffixes.
    if info: return info[0]
def getsourcefile(object):
    """Return the filename that can be used to locate an object's source.
    Return None if no way can be identified to get the source.
    """
    filename = getfile(object)
    if filename[-4:].lower() in ('.pyc', '.pyo'):
        # Map a compiled file back to its .py source.
        filename = filename[:-4] + '.py'
    for suffix, mode, kind in imp.get_suffixes():
        if 'b' in mode and filename[-len(suffix):].lower() == suffix:
            # Looks like a binary file.  We want to only return a text file.
            return None
    if os.path.exists(filename):
        return filename
    # only return a non-existent filename if the module has a PEP 302 loader
    if hasattr(getmodule(object, filename), '__loader__'):
        return filename
    # or it is in the linecache
    if filename in linecache.cache:
        return filename
def getabsfile(object, _filename=None):
    """Return an absolute path to the source or compiled file for an object.

    The idea is for each object to have a unique origin, so this routine
    normalizes the result as much as possible."""
    if _filename is None:
        # Prefer the source file; fall back to whatever getfile() knows.
        _filename = getsourcefile(object) or getfile(object)
    return os.path.normcase(os.path.abspath(_filename))
# Caches: absolute filename -> module name, and module name -> __file__
# as last seen, so repeated lookups avoid rescanning sys.modules.
modulesbyfile = {}
_filesbymodname = {}

def getmodule(object, _filename=None):
    """Return the module an object was defined in, or None if not found."""
    if ismodule(object):
        return object
    if hasattr(object, '__module__'):
        return sys.modules.get(object.__module__)
    # Try the filename to modulename cache
    if _filename is not None and _filename in modulesbyfile:
        return sys.modules.get(modulesbyfile[_filename])
    # Try the cache again with the absolute file name
    try:
        file = getabsfile(object, _filename)
    except TypeError:
        return None
    if file in modulesbyfile:
        return sys.modules.get(modulesbyfile[file])
    # Update the filename to module name cache and check yet again
    # Copy sys.modules in order to cope with changes while iterating
    for modname, module in list(sys.modules.items()):
        if ismodule(module) and hasattr(module, '__file__'):
            f = module.__file__
            if f == _filesbymodname.get(modname, None):
                # Have already mapped this module, so skip it
                continue
            _filesbymodname[modname] = f
            f = getabsfile(module)
            # Always map to the name the module knows itself by
            modulesbyfile[f] = modulesbyfile[
                os.path.realpath(f)] = module.__name__
    if file in modulesbyfile:
        return sys.modules.get(modulesbyfile[file])
    # Check the main module
    main = sys.modules['__main__']
    if not hasattr(object, '__name__'):
        return None
    if hasattr(main, object.__name__):
        mainobject = getattr(main, object.__name__)
        if mainobject is object:
            return main
    # Check builtins
    builtin = sys.modules['builtins']
    if hasattr(builtin, object.__name__):
        builtinobject = getattr(builtin, object.__name__)
        if builtinobject is object:
            return builtin
    # Implicitly returns None when no module can be determined.
def findsource(object):
    """Return the entire source file and starting line number for an object.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a list of all the lines
    in the file and the line number indexes a line in that list.  An IOError
    is raised if the source code cannot be retrieved."""
    file = getfile(object)
    sourcefile = getsourcefile(object)
    if not sourcefile and file[0] + file[-1] != '<>':
        # No source file and not a pseudo-filename like '<stdin>'.
        raise IOError('source code not available')
    file = sourcefile if sourcefile else file

    module = getmodule(object, file)
    if module:
        lines = linecache.getlines(file, module.__dict__)
    else:
        lines = linecache.getlines(file)
    if not lines:
        raise IOError('could not get source code')

    if ismodule(object):
        return lines, 0

    if isclass(object):
        name = object.__name__
        pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
        # make some effort to find the best matching class definition:
        # use the one with the least indentation, which is the one
        # that's most probably not inside a function definition.
        candidates = []
        for i in range(len(lines)):
            match = pat.match(lines[i])
            if match:
                # if it's at toplevel, it's already the best one
                if lines[i][0] == 'c':
                    return lines, i
                # else add whitespace to candidate list
                candidates.append((match.group(1), i))
        if candidates:
            # this will sort by whitespace, and by line number,
            # less whitespace first
            candidates.sort()
            return lines, candidates[0][1]
        else:
            raise IOError('could not find class definition')

    # Unwrap method/function/traceback/frame down to a code object.
    if ismethod(object):
        object = object.__func__
    if isfunction(object):
        object = object.__code__
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        if not hasattr(object, 'co_firstlineno'):
            raise IOError('could not find function definition')
        lnum = object.co_firstlineno - 1
        pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
        # Walk upward so decorators above the 'def' line are included.
        while lnum > 0:
            if pat.match(lines[lnum]): break
            lnum = lnum - 1
        return lines, lnum
    raise IOError('could not find code object')
def getcomments(object):
    """Get lines of comments immediately preceding an object's source code.

    Returns None when source can't be found.
    """
    try:
        lines, lnum = findsource(object)
    except (IOError, TypeError):
        return None

    if ismodule(object):
        # Look for a comment block at the top of the file.
        start = 0
        if lines and lines[0][:2] == '#!': start = 1
        while start < len(lines) and lines[start].strip() in ('', '#'):
            start = start + 1
        if start < len(lines) and lines[start][:1] == '#':
            comments = []
            end = start
            while end < len(lines) and lines[end][:1] == '#':
                comments.append(lines[end].expandtabs())
                end = end + 1
            return ''.join(comments)

    # Look for a preceding block of comments at the same indentation.
    elif lnum > 0:
        indent = indentsize(lines[lnum])
        end = lnum - 1
        if end >= 0 and lines[end].lstrip()[:1] == '#' and \
            indentsize(lines[end]) == indent:
            comments = [lines[end].expandtabs().lstrip()]
            if end > 0:
                # Collect contiguous comment lines walking upward.
                end = end - 1
                comment = lines[end].expandtabs().lstrip()
                while comment[:1] == '#' and indentsize(lines[end]) == indent:
                    comments[:0] = [comment]
                    end = end - 1
                    if end < 0: break
                    comment = lines[end].expandtabs().lstrip()
            # Strip bare '#' separator lines from either end of the block.
            while comments and comments[0].strip() == '#':
                comments[:1] = []
            while comments and comments[-1].strip() == '#':
                comments[-1:] = []
            return ''.join(comments)
# Internal control-flow exception raised by BlockFinder.tokeneater()
# to signal that the end of a code block has been reached.
class EndOfBlock(Exception): pass
class BlockFinder:
    """Provide a tokeneater() method to detect the end of a code block."""
    def __init__(self):
        self.indent = 0       # current nesting depth of INDENT/DEDENT pairs
        self.islambda = False # True once a 'lambda' token starts the block
        self.started = False  # True once 'def'/'class'/'lambda' is seen
        self.passline = False # skip remaining tokens on the current line
        self.last = 1         # last source row known to be in the block

    def tokeneater(self, type, token, srowcol, erowcol, line):
        if not self.started:
            # look for the first "def", "class" or "lambda"
            if token in ("def", "class", "lambda"):
                if token == "lambda":
                    self.islambda = True
                self.started = True
            self.passline = True    # skip to the end of the line
        elif type == tokenize.NEWLINE:
            self.passline = False   # stop skipping when a NEWLINE is seen
            self.last = srowcol[0]
            if self.islambda:       # lambdas always end at the first NEWLINE
                raise EndOfBlock
        elif self.passline:
            pass
        elif type == tokenize.INDENT:
            self.indent = self.indent + 1
            self.passline = True
        elif type == tokenize.DEDENT:
            self.indent = self.indent - 1
            # the end of matching indent/dedent pairs end a block
            # (note that this only works for "def"/"class" blocks,
            #  not e.g. for "if: else:" or "try: finally:" blocks)
            if self.indent <= 0:
                raise EndOfBlock
        elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
            # any other token on the same indentation level end the previous
            # block as well, except the pseudo-tokens COMMENT and NL.
            raise EndOfBlock
def getblock(lines):
    """Extract the block of code at the top of the given list of lines."""
    blockfinder = BlockFinder()
    try:
        tokens = tokenize.generate_tokens(iter(lines).__next__)
        for _token in tokens:
            blockfinder.tokeneater(*_token)
    except (EndOfBlock, IndentationError):
        # EndOfBlock is the normal termination signal; IndentationError
        # is presumably tolerated so partial/odd sources still return
        # whatever was scanned so far.
        pass
    return lines[:blockfinder.last]
def getsourcelines(object):
    """Return a list of source lines and starting line number for an object.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a list of the lines
    corresponding to the object and the line number indicates where in the
    original source file the first line of code was found.  An IOError is
    raised if the source code cannot be retrieved."""
    lines, lnum = findsource(object)

    # Modules return the whole file; everything else returns just its block.
    if ismodule(object): return lines, 0
    else: return getblock(lines[lnum:]), lnum + 1
def getsource(object):
    """Return the text of the source code for an object.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a single string.  An
    IOError is raised if the source code cannot be retrieved."""
    lines, lnum = getsourcelines(object)
    return ''.join(lines)
# --------------------------------------------------- class tree extraction
def walktree(classes, children, parent):
    """Recursive helper function for getclasstree()."""
    # Sort in place, as before: module name first, then class name.
    classes.sort(key=attrgetter('__module__', '__name__'))
    results = []
    for klass in classes:
        results.append((klass, klass.__bases__))
        subclasses = children.get(klass)
        if subclasses is not None:
            # Nest the subtree list right after its parent's entry.
            results.append(walktree(subclasses, children, klass))
    return results
def getclasstree(classes, unique=False):
    """Arrange the given list of classes into a hierarchy of nested lists.

    Where a nested list appears, it contains classes derived from the class
    whose entry immediately precedes the list.  Each entry is a 2-tuple
    containing a class and a tuple of its base classes.  If the 'unique'
    argument is true, exactly one entry appears in the returned structure
    for each class in the given list.  Otherwise, classes using multiple
    inheritance and their descendants will appear multiple times."""
    children = {}
    roots = []
    for c in classes:
        if c.__bases__:
            for parent in c.__bases__:
                if not parent in children:
                    children[parent] = []
                children[parent].append(c)
                # With unique=True, record the class under only one parent
                # that is itself in the input list.
                if unique and parent in classes: break
        elif c not in roots:
            roots.append(c)
    for parent in children:
        if parent not in classes:
            # Parents outside the given list become additional roots.
            roots.append(parent)
    return walktree(roots, children, None)
# ------------------------------------------------ argument list extraction
# Result record for getargs().
Arguments = namedtuple('Arguments', 'args, varargs, varkw')

def getargs(co):
    """Get information about the arguments accepted by a code object.

    Three things are returned: (args, varargs, varkw), where
    'args' is the list of argument names. Keyword-only arguments are
    appended. 'varargs' and 'varkw' are the names of the * and **
    arguments or None."""
    args, varargs, kwonlyargs, varkw = _getfullargs(co)
    return Arguments(args + kwonlyargs, varargs, varkw)
def _getfullargs(co):
    """Get information about the arguments accepted by a code object.

    Four things are returned: (args, varargs, kwonlyargs, varkw), where
    'args' and 'kwonlyargs' are lists of argument names, and 'varargs'
    and 'varkw' are the names of the * and ** arguments or None.

    Raises TypeError if *co* is neither a code object nor (on PyPy) a
    built-in function's __code__-alike."""
    if not iscode(co):
        if hasattr(len, '__code__') and type(co) is type(len.__code__):
            # PyPy extension: built-in function objects have a __code__
            # too.  There is no co_code on it, but co_argcount and
            # co_varnames and co_flags are present.
            pass
        else:
            raise TypeError('{!r} is not a code object'.format(co))

    # co_varnames lists positional args first, then keyword-only args,
    # then *args/**kwargs (if present), then plain locals.
    nargs = co.co_argcount
    names = co.co_varnames
    nkwargs = co.co_kwonlyargcount
    args = list(names[:nargs])
    kwonlyargs = list(names[nargs:nargs+nkwargs])
    # (removed an unused local ``step = 0`` that the old version carried)

    nargs += nkwargs
    varargs = None
    if co.co_flags & CO_VARARGS:
        varargs = co.co_varnames[nargs]
        nargs = nargs + 1
    varkw = None
    if co.co_flags & CO_VARKEYWORDS:
        varkw = co.co_varnames[nargs]
    return args, varargs, kwonlyargs, varkw
# Result record for getargspec().
ArgSpec = namedtuple('ArgSpec', 'args varargs keywords defaults')

def getargspec(func):
    """Get the names and default values of a function's arguments.

    A tuple of four things is returned: (args, varargs, varkw, defaults).
    'args' is a list of the argument names.
    'args' will include keyword-only argument names.
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    'defaults' is an n-tuple of the default values of the last n arguments.

    Use the getfullargspec() API for Python-3000 code, as annotations
    and keyword arguments are supported. getargspec() will raise ValueError
    if the func has either annotations or keyword arguments.
    """
    args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = \
        getfullargspec(func)
    if kwonlyargs or ann:
        # The legacy 4-tuple cannot represent these; force callers to the
        # richer API instead of silently dropping information.
        raise ValueError("Function has keyword-only arguments or annotations"
                         ", use getfullargspec() API which can support them")
    return ArgSpec(args, varargs, varkw, defaults)
# Result record for getfullargspec().
FullArgSpec = namedtuple('FullArgSpec',
    'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations')

def getfullargspec(func):
    """Get the names and default values of a function's arguments.

    A tuple of seven things is returned:
    (args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations).
    'args' is a list of the argument names.
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    'defaults' is an n-tuple of the default values of the last n arguments.
    'kwonlyargs' is a list of keyword-only argument names.
    'kwonlydefaults' is a dictionary mapping names from kwonlyargs to defaults.
    'annotations' is a dictionary mapping argument names to annotations.

    The first four items in the tuple correspond to getargspec().
    """
    if ismethod(func):
        # Unwrap bound methods down to the underlying function.
        func = func.__func__
    if not isfunction(func):
        raise TypeError('{!r} is not a Python function'.format(func))
    args, varargs, kwonlyargs, varkw = _getfullargs(func.__code__)
    return FullArgSpec(args, varargs, varkw, func.__defaults__,
                       kwonlyargs, func.__kwdefaults__, func.__annotations__)
# Result record for getargvalues().
ArgInfo = namedtuple('ArgInfo', 'args varargs keywords locals')

def getargvalues(frame):
    """Get information about arguments passed into a particular frame.

    A tuple of four things is returned: (args, varargs, varkw, locals).
    'args' is a list of the argument names.
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    'locals' is the locals dictionary of the given frame."""
    args, varargs, varkw = getargs(frame.f_code)
    return ArgInfo(args, varargs, varkw, frame.f_locals)
def formatannotation(annotation, base_module=None):
    """Format an annotation for display.

    Classes are rendered by name, qualified with their module unless they
    come from builtins or from *base_module*; any other annotation is
    rendered with repr()."""
    if not isinstance(annotation, type):
        return repr(annotation)
    module = annotation.__module__
    if module in ('builtins', base_module):
        return annotation.__name__
    return '{}.{}'.format(module, annotation.__name__)
def formatannotationrelativeto(object):
    """Return a one-argument annotation formatter that omits *object*'s
    own module name when rendering class annotations."""
    module = getattr(object, '__module__', None)
    def _formatannotation(annotation):
        return formatannotation(annotation, module)
    return _formatannotation
def formatargspec(args, varargs=None, varkw=None, defaults=None,
                  kwonlyargs=(), kwonlydefaults={}, annotations={},
                  formatarg=str,
                  formatvarargs=lambda name: '*' + name,
                  formatvarkw=lambda name: '**' + name,
                  formatvalue=lambda value: '=' + repr(value),
                  formatreturns=lambda text: ' -> ' + text,
                  formatannotation=formatannotation):
    """Format an argument spec from the values returned by getargspec
    or getfullargspec.

    The first seven arguments are (args, varargs, varkw, defaults,
    kwonlyargs, kwonlydefaults, annotations).  The other five arguments
    are the corresponding optional formatting functions that are called to
    turn names and values into strings.  The last argument is an optional
    function to format the sequence of arguments."""
    # NOTE(review): the mutable defaults ({} above) are never modified in
    # this function, so the usual shared-default pitfall does not apply.
    def formatargandannotation(arg):
        result = formatarg(arg)
        if arg in annotations:
            result += ': ' + formatannotation(annotations[arg])
        return result
    specs = []
    if defaults:
        firstdefault = len(args) - len(defaults)
    for i, arg in enumerate(args):
        spec = formatargandannotation(arg)
        if defaults and i >= firstdefault:
            spec = spec + formatvalue(defaults[i - firstdefault])
        specs.append(spec)
    if varargs is not None:
        specs.append(formatvarargs(formatargandannotation(varargs)))
    else:
        if kwonlyargs:
            # A bare '*' marks the start of keyword-only arguments when
            # there is no *args parameter.
            specs.append('*')
    if kwonlyargs:
        for kwonlyarg in kwonlyargs:
            spec = formatargandannotation(kwonlyarg)
            if kwonlydefaults and kwonlyarg in kwonlydefaults:
                spec += formatvalue(kwonlydefaults[kwonlyarg])
            specs.append(spec)
    if varkw is not None:
        specs.append(formatvarkw(formatargandannotation(varkw)))
    result = '(' + ', '.join(specs) + ')'
    if 'return' in annotations:
        result += formatreturns(formatannotation(annotations['return']))
    return result
def formatargvalues(args, varargs, varkw, locals,
                    formatarg=str,
                    formatvarargs=lambda name: '*' + name,
                    formatvarkw=lambda name: '**' + name,
                    formatvalue=lambda value: '=' + repr(value)):
    """Format an argument spec from the 4 values returned by getargvalues.

    The first four arguments are (args, varargs, varkw, locals).  The
    next four arguments are the corresponding optional formatting functions
    that are called to turn names and values into strings."""
    def render(name):
        # name=value, using the caller-supplied formatters.
        return formatarg(name) + formatvalue(locals[name])

    pieces = [render(name) for name in args]
    if varargs:
        pieces.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
    if varkw:
        pieces.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
    return '(' + ', '.join(pieces) + ')'
def getcallargs(func, *positional, **named):
    """Get the mapping of arguments to values.
    A dict is returned, with keys the function argument names (including the
    names of the * and ** arguments, if any), and values the respective bound
    values from 'positional' and 'named'."""
    spec = getfullargspec(func)
    args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = spec
    f_name = func.__name__
    arg2value = {}
    if ismethod(func) and func.__self__ is not None:
        # implicit 'self' (or 'cls' for classmethods) argument
        positional = (func.__self__,) + positional
    num_pos = len(positional)
    num_total = num_pos + len(named)
    num_args = len(args)
    num_defaults = len(defaults) if defaults else 0
    # Bind as many positionals as there are named parameters; zip stops at
    # the shorter sequence, so surplus positionals are handled below.
    for arg, value in zip(args, positional):
        arg2value[arg] = value
    if varargs:
        # Surplus positional arguments are collected into the *varargs tuple.
        if num_pos > num_args:
            arg2value[varargs] = positional[-(num_pos-num_args):]
        else:
            arg2value[varargs] = ()
    elif 0 < num_args < num_pos:
        # Too many positional arguments and no *varargs to absorb them.
        raise TypeError('%s() takes %s %d positional %s (%d given)' % (
            f_name, 'at most' if defaults else 'exactly', num_args,
            'arguments' if num_args > 1 else 'argument', num_total))
    elif num_args == 0 and num_total:
        if varkw or kwonlyargs:
            if num_pos:
                # XXX: We should use num_pos, but Python also uses num_total:
                raise TypeError('%s() takes exactly 0 positional arguments '
                                '(%d given)' % (f_name, num_total))
        else:
            raise TypeError('%s() takes no arguments (%d given)' %
                            (f_name, num_total))
    # Merge keyword arguments, rejecting names already bound positionally.
    for arg in itertools.chain(args, kwonlyargs):
        if arg in named:
            if arg in arg2value:
                raise TypeError("%s() got multiple values for keyword "
                                "argument '%s'" % (f_name, arg))
            else:
                arg2value[arg] = named.pop(arg)
    # Every keyword-only parameter must be supplied or have a default.
    for kwonlyarg in kwonlyargs:
        if kwonlyarg not in arg2value:
            try:
                arg2value[kwonlyarg] = kwonlydefaults[kwonlyarg]
            except KeyError:
                raise TypeError("%s() needs keyword-only argument %s" %
                                (f_name, kwonlyarg))
    if defaults: # fill in any missing values with the defaults
        for arg, value in zip(args[-num_defaults:], defaults):
            if arg not in arg2value:
                arg2value[arg] = value
    if varkw:
        # Whatever keywords remain go into the **varkw dict.
        arg2value[varkw] = named
    elif named:
        unexpected = next(iter(named))
        raise TypeError("%s() got an unexpected keyword argument '%s'" %
                        (f_name, unexpected))
    # Any named parameter still unbound here means too few arguments given.
    unassigned = num_args - len([arg for arg in args if arg in arg2value])
    if unassigned:
        num_required = num_args - num_defaults
        raise TypeError('%s() takes %s %d %s (%d given)' % (
            f_name, 'at least' if defaults else 'exactly', num_required,
            'arguments' if num_required > 1 else 'argument', num_total))
    return arg2value
# -------------------------------------------------- stack frame extraction
# Record returned by getframeinfo()/getouterframes()/getinnerframes():
# source location plus optional context lines and the current line's index.
Traceback = namedtuple('Traceback', 'filename lineno function code_context index')
def getframeinfo(frame, context=1):
    """Get information about a frame or traceback object.
    A tuple of five things is returned: the filename, the line number of
    the current line, the function name, a list of lines of context from
    the source code, and the index of the current line within that list.
    The optional second argument specifies the number of lines of context
    to return, which are centered around the current line."""
    if istraceback(frame):
        # A traceback is accepted too: take its line number, unwrap its frame.
        lineno = frame.tb_lineno
        frame = frame.tb_frame
    else:
        lineno = frame.f_lineno
    if not isframe(frame):
        raise TypeError('{!r} is not a frame or traceback object'.format(frame))
    filename = getsourcefile(frame) or getfile(frame)
    if context > 0:
        # Center the context window on the current line.
        start = lineno - 1 - context//2
        try:
            lines, lnum = findsource(frame)
        except IOError:
            # Source is unavailable (compiled or interactive code).
            lines = index = None
        else:
            start = max(start, 1)
            # Clamp the window so it stays inside the file.
            start = max(0, min(start, len(lines) - context))
            lines = lines[start:start+context]
            index = lineno - 1 - start
    else:
        lines = index = None
    return Traceback(filename, lineno, frame.f_code.co_name, lines, index)
def getlineno(frame):
    """Get the line number from a frame object, allowing for optimization."""
    # f_lineno is a descriptor computed on access (it grovels co_lnotab),
    # so reading the attribute always yields the currently executing line.
    return frame.f_lineno
def getouterframes(frame, context=1):
    """Get a list of records for a frame and all higher (calling) frames.

    Each record contains a frame object, filename, line number, function
    name, a list of lines of context, and index within the context."""
    records = []
    current = frame
    # Walk the f_back chain up to the outermost caller.
    while current is not None:
        records.append((current,) + getframeinfo(current, context))
        current = current.f_back
    return records
def getinnerframes(tb, context=1):
    """Get a list of records for a traceback's frame and all lower frames.

    Each record contains a frame object, filename, line number, function
    name, a list of lines of context, and index within the context."""
    records = []
    current = tb
    # Follow the tb_next chain down to the innermost frame.
    while current is not None:
        records.append((current.tb_frame,) + getframeinfo(current, context))
        current = current.tb_next
    return records
def currentframe():
    """Return the frame of the caller or None if this is not possible."""
    # sys._getframe is CPython-specific; other implementations may lack it.
    if hasattr(sys, "_getframe"):
        return sys._getframe(1)
    return None
def stack(context=1):
    """Return a list of records for the stack above the caller's frame."""
    caller = sys._getframe(1)
    return getouterframes(caller, context)
def trace(context=1):
    """Return a list of records for the stack below the current exception."""
    current_tb = sys.exc_info()[2]
    return getinnerframes(current_tb, context)
# ------------------------------------------------ static version of getattr
_sentinel = object()
def _static_getmro(klass):
return type.__dict__['__mro__'].__get__(klass)
def _check_instance(obj, attr):
instance_dict = {}
try:
instance_dict = object.__getattribute__(obj, "__dict__")
except AttributeError:
pass
return dict.get(instance_dict, attr, _sentinel)
def _check_class(klass, attr):
    # Statically walk the MRO, reading each class __dict__ directly so no
    # descriptor protocol or __getattr__ hooks are triggered.
    for entry in _static_getmro(klass):
        # Only trust entry.__dict__ when it has not been shadowed.
        if _shadowed_dict(type(entry)) is _sentinel:
            try:
                return entry.__dict__[attr]
            except KeyError:
                pass
    # Not found anywhere in the MRO.
    return _sentinel
def _is_type(obj):
    """Return True if obj is a type, judged by whether its MRO can be read
    statically (type.__mro__ only applies to classes)."""
    try:
        _static_getmro(obj)
        return True
    except TypeError:
        return False
# Raw descriptor for reading class __dict__ objects statically.
_dict_attr = type.__dict__["__dict__"]
if hasattr(_dict_attr, "__objclass__"):
    # CPython: confirm the descriptor actually belongs to the class itself.
    _objclass_check = lambda d, entry: d.__objclass__ is entry
else:
    # PyPy __dict__ descriptors are 'generic' and lack __objclass__
    _objclass_check = lambda d, entry: not hasattr(d, "__objclass__")
def _shadowed_dict(klass):
    # Detect whether any class in klass's MRO defines a '__dict__' entry
    # that is NOT the ordinary getset descriptor -- i.e. instance __dict__
    # access is shadowed and cannot be trusted for static lookup.
    for entry in _static_getmro(klass):
        try:
            class_dict = _dict_attr.__get__(entry)["__dict__"]
        except KeyError:
            pass
        else:
            if not (type(class_dict) is types.GetSetDescriptorType and
                    class_dict.__name__ == "__dict__" and
                    _objclass_check(class_dict, entry)):
                # Found a shadowing '__dict__' attribute; hand it back.
                return class_dict
    # No shadowing anywhere in the MRO.
    return _sentinel
def getattr_static(obj, attr, default=_sentinel):
    """Retrieve attributes without triggering dynamic lookup via the
    descriptor protocol, __getattr__ or __getattribute__.
    Note: this function may not be able to retrieve all attributes
    that getattr can fetch (like dynamically created attributes)
    and may find attributes that getattr can't (like descriptors
    that raise AttributeError). It can also return descriptor objects
    instead of instance members in some cases. See the
    documentation for details.
    """
    instance_result = _sentinel
    if not _is_type(obj):
        klass = type(obj)
        # Only consult the instance __dict__ when '__dict__' itself is not
        # shadowed (a plain member/slot descriptor is still acceptable).
        dict_attr = _shadowed_dict(klass)
        if (dict_attr is _sentinel or
            type(dict_attr) is types.MemberDescriptorType):
            instance_result = _check_instance(obj, attr)
    else:
        # obj is itself a class; look it up on the class directly.
        klass = obj
    klass_result = _check_class(klass, attr)
    if instance_result is not _sentinel and klass_result is not _sentinel:
        # A data descriptor (defines both __get__ and __set__) on the class
        # takes precedence over the instance attribute.
        if (_check_class(type(klass_result), '__get__') is not _sentinel and
            _check_class(type(klass_result), '__set__') is not _sentinel):
            return klass_result
    if instance_result is not _sentinel:
        return instance_result
    if klass_result is not _sentinel:
        return klass_result
    if obj is klass:
        # for types we check the metaclass too
        for entry in _static_getmro(type(klass)):
            if _shadowed_dict(type(entry)) is _sentinel:
                try:
                    return entry.__dict__[attr]
                except KeyError:
                    pass
    if default is not _sentinel:
        return default
    raise AttributeError(attr)
GEN_CREATED = 'GEN_CREATED'
GEN_RUNNING = 'GEN_RUNNING'
GEN_SUSPENDED = 'GEN_SUSPENDED'
GEN_CLOSED = 'GEN_CLOSED'

def getgeneratorstate(generator):
    """Get current state of a generator-iterator.

    Possible states are:
      GEN_CREATED: Waiting to start execution.
      GEN_RUNNING: Currently being executed by the interpreter.
      GEN_SUSPENDED: Currently suspended at a yield expression.
      GEN_CLOSED: Execution has completed.
    """
    if generator.gi_running:
        return GEN_RUNNING
    frame = generator.gi_frame
    if frame is None:
        # The frame is discarded once the generator finishes or is closed.
        return GEN_CLOSED
    # f_lasti is -1 only before the first instruction has executed.
    return GEN_CREATED if frame.f_lasti == -1 else GEN_SUSPENDED
| wdv4758h/ZipPy | lib-python/3/inspect.py | Python | bsd-3-clause | 47,368 |
"""Module calculates interactions between two molecules (proein-protein, protein-ligand, small-small).
Currently following interacions are implemented:
* hydrogen bonds
* halogen bonds
* pi stacking (parallel and perpendicular)
* salt bridges
* hydrophobic contacts
* pi-cation
* metal coordination
* pi-metal
"""
import numpy as np
from oddt.spatial import dihedral, angle, angle_2v, distance
__all__ = ['close_contacts',
'hbond_acceptor_donor',
'hbond',
'halogenbond_acceptor_halogen',
'halogenbond',
'pi_stacking',
'salt_bridge_plus_minus',
'salt_bridges',
'hydrophobic_contacts',
'pi_cation',
'acceptor_metal',
'pi_metal']
def close_contacts(x, y, cutoff, x_column = 'coords', y_column = 'coords'):
    """ Returns pairs of atoms which are within close contact distance cutoff.

    Parameters
    ----------
    x, y : atom_dict-type numpy array
        Atom dictionaries generated by oddt.toolkit.Molecule objects.
    cutoff : float
        Cutoff distance for close contacts
    x_column, y_column : string, (default='coords')
        Column containing coordinates of atoms (or pseudo-atoms, i.e. ring centroids)

    Returns
    -------
    x_, y_ : atom_dict-type numpy array
        Aligned pairs of atoms in close contact for further processing.
    """
    #-- BUG FIX: the second emptiness check previously re-tested x[x_column],
    #-- so an empty `y` array slipped through to distance().
    if len(x[x_column]) > 0 and len(y[y_column]) > 0:
        d = distance(x[x_column], y[y_column])
        # d > 0 discards self-pairs when x and y come from the same molecule.
        index = np.argwhere((d > 0) & (d <= cutoff))
        return x[index[:,0]], y[index[:,1]]
    else:
        # Empty slices of the inputs preserve the structured dtype.
        return x[[]], y[[]]
def hbond_acceptor_donor(mol1, mol2, cutoff = 3.5, base_angle = 120, tolerance = 30):
    """Returns pairs of acceptor-donor atoms, which meet H-bond criteria
    Parameters
    ----------
    mol1, mol2 : oddt.toolkit.Molecule object
        Molecules to compute H-bond acceptor and H-bond donor pairs
    cutoff : float, (default=3.5)
        Distance cutoff for A-D pairs
    base_angle : int, (default=120)
        Base angle determining allowed direction of hydrogen bond formation, which is devided by the number of neighbors of acceptor atom to establish final directional angle
    tolerance : int, (default=30)
        Range (+/- tolerance) from perfect direction (base_angle/n_neighbors) in which H-bonds are considered as strict.
    Returns
    -------
    a, d : atom_dict-type numpy array
        Aligned arrays of atoms forming H-bond, firstly acceptors, secondly donors.
    strict : numpy array, dtype=bool
        Boolean array align with atom pairs, informing whether atoms form 'strict' H-bond (pass all angular cutoffs). If false, only distance cutoff is met, therefore the bond is 'crude'.
    """
    a, d = close_contacts(mol1.atom_dict[mol1.atom_dict['isacceptor']], mol2.atom_dict[mol2.atom_dict['isdonor']], cutoff)
    #skip empty values
    if len(a) > 0 and len(d) > 0:
        # Angle at the acceptor: donor -> acceptor -> each acceptor neighbor.
        angle1 = angle(d['coords'][:,np.newaxis,:],a['coords'][:,np.newaxis,:],a['neighbors'])
        # Angle at the donor: acceptor -> donor -> each donor neighbor.
        angle2 = angle(a['coords'][:,np.newaxis,:],d['coords'][:,np.newaxis,:],d['neighbors'])
        # Count real (non-NaN) neighbors per atom; scales the angular cutoff.
        a_neighbors_num = np.sum(~np.isnan(a['neighbors'][:,:,0]), axis=-1)[:,np.newaxis]
        d_neighbors_num = np.sum(~np.isnan(d['neighbors'][:,:,0]), axis=-1)[:,np.newaxis]
        # Strict when every neighbor angle clears base_angle/n - tolerance;
        # NaN angles (padding for missing neighbors) are treated as passing.
        strict = (((angle1>(base_angle/a_neighbors_num-tolerance)) | np.isnan(angle1)) & ((angle2>(base_angle/d_neighbors_num-tolerance)) | np.isnan(angle2))).all(axis=-1)
        return a, d, strict
    else:
        return a, d, np.array([], dtype=bool)
def hbond(mol1, mol2, *args, **kwargs):
    """Calculates H-bonds between molecules, in both directions.

    Extra positional/keyword arguments (cutoff, base_angle, tolerance) are
    forwarded to hbond_acceptor_donor.

    Parameters
    ----------
    mol1, mol2 : oddt.toolkit.Molecule object
        Molecules to compute H-bond acceptor and H-bond donor pairs

    Returns
    -------
    mol1_atoms, mol2_atoms : atom_dict-type numpy array
        Aligned arrays of atoms forming H-bond
    strict : numpy array, dtype=bool
        True where the pair passes all angular cutoffs ('strict' H-bond);
        False means only the distance cutoff was met ('crude').
    """
    acc1, don1, strict1 = hbond_acceptor_donor(mol1, mol2, *args, **kwargs)
    acc2, don2, strict2 = hbond_acceptor_donor(mol2, mol1, *args, **kwargs)
    mol1_atoms = np.concatenate((acc1, don2))
    mol2_atoms = np.concatenate((don1, acc2))
    return mol1_atoms, mol2_atoms, np.concatenate((strict1, strict2))
def halogenbond_acceptor_halogen(mol1, mol2, base_angle_acceptor = 120, base_angle_halogen = 180, tolerance = 30, cutoff = 4):
    """Returns pairs of acceptor-halogen atoms, which meet halogen bond criteria
    Parameters
    ----------
    mol1, mol2 : oddt.toolkit.Molecule object
        Molecules to compute halogen bond acceptor and halogen pairs
    cutoff : float, (default=4)
        Distance cutoff for A-H pairs
    base_angle_acceptor : int, (default=120)
        Base angle determining allowed direction of halogen bond formation, which is devided by the number of neighbors of acceptor atom to establish final directional angle
    base_angle_halogen : int (default=180)
        Ideal base angle between halogen bond and halogen-neighbor bond
    tolerance : int, (default=30)
        Range (+/- tolerance) from perfect direction (base_angle/n_neighbors) in which halogen bonds are considered as strict.
    Returns
    -------
    a, h : atom_dict-type numpy array
        Aligned arrays of atoms forming halogen bond, firstly acceptors, secondly halogens
    strict : numpy array, dtype=bool
        Boolean array align with atom pairs, informing whether atoms form 'strict' halogen bond (pass all angular cutoffs). If false, only distance cutoff is met, therefore the bond is 'crude'.
    """
    a, h = close_contacts(mol1.atom_dict[mol1.atom_dict['isacceptor']], mol2.atom_dict[mol2.atom_dict['ishalogen']], cutoff)
    #skip empty values
    if len(a) > 0 and len(h) > 0:
        # Angle at the acceptor: halogen -> acceptor -> each acceptor neighbor.
        angle1 = angle(h['coords'][:,np.newaxis,:],a['coords'][:,np.newaxis,:],a['neighbors'])
        # Angle at the halogen: acceptor -> halogen -> each halogen neighbor.
        angle2 = angle(a['coords'][:,np.newaxis,:],h['coords'][:,np.newaxis,:],h['neighbors'])
        a_neighbors_num = np.sum(~np.isnan(a['neighbors'][:,:,0]), axis=-1)[:,np.newaxis]
        h_neighbors_num = np.sum(~np.isnan(h['neighbors'][:,:,0]), axis=-1)[:,np.newaxis]
        # NOTE(review): base_angle_halogen is also divided by the halogen's
        # neighbor count; halogens are singly bonded so this appears to be a
        # no-op in practice -- confirm it is intended.
        strict = (((angle1>(base_angle_acceptor/a_neighbors_num-tolerance)) | np.isnan(angle1)) & ((angle2>(base_angle_halogen/h_neighbors_num-tolerance)) | np.isnan(angle2))).all(axis=-1)
        return a, h, strict
    else:
        return a, h, np.array([], dtype=bool)
def halogenbond(mol1, mol2, **kwargs):
    """Calculates halogen bonds between molecules, in both directions.

    Keyword arguments (cutoff, base_angle_acceptor, base_angle_halogen,
    tolerance) are forwarded to halogenbond_acceptor_halogen.

    Parameters
    ----------
    mol1, mol2 : oddt.toolkit.Molecule object
        Molecules to compute halogen bond acceptor and halogen pairs

    Returns
    -------
    mol1_atoms, mol2_atoms : atom_dict-type numpy array
        Aligned arrays of atoms forming halogen bond
    strict : numpy array, dtype=bool
        True where the pair passes all angular cutoffs ('strict' halogen
        bond); False means only the distance cutoff was met ('crude').
    """
    acc1, hal1, strict1 = halogenbond_acceptor_halogen(mol1, mol2, **kwargs)
    acc2, hal2, strict2 = halogenbond_acceptor_halogen(mol2, mol1, **kwargs)
    mol1_atoms = np.concatenate((acc1, hal2))
    mol2_atoms = np.concatenate((hal1, acc2))
    return mol1_atoms, mol2_atoms, np.concatenate((strict1, strict2))
def pi_stacking(mol1, mol2, cutoff = 5, tolerance = 30):
    """Returns pairs of rings, which meet pi stacking criteria
    Parameters
    ----------
    mol1, mol2 : oddt.toolkit.Molecule object
        Molecules to compute ring pairs
    cutoff : float, (default=5)
        Distance cutoff for Pi-stacking pairs
    tolerance : int, (default=30)
        Range (+/- tolerance) from perfect direction (parallel or perpendicular) in which pi-stackings are considered as strict.
    Returns
    -------
    r1, r2 : ring_dict-type numpy array
        Aligned arrays of rings forming pi-stacking
    strict_parallel : numpy array, dtype=bool
        Boolean array align with ring pairs, informing whether rings form 'strict' parallel pi-stacking. If false, only distance cutoff is met, therefore the stacking is 'crude'.
    strict_perpendicular : numpy array, dtype=bool
        Boolean array align with ring pairs, informing whether rings form 'strict' perpendicular pi-stacking (T-shaped, T-face, etc.). If false, only distance cutoff is met, therefore the stacking is 'crude'.
    """
    r1, r2 = close_contacts(mol1.ring_dict, mol2.ring_dict, cutoff, x_column = 'centroid', y_column = 'centroid')
    if len(r1) > 0 and len(r2) > 0:
        # Angle between the two ring-plane normal vectors.
        angle1 = angle_2v(r1['vector'],r2['vector'])
        # Angle between ring 1's normal and the centroid-to-centroid direction.
        angle2 = angle(r1['vector'] + r1['centroid'],r1['centroid'], r2['centroid'])
        # Parallel stacking: normals (anti)aligned AND rings face-to-face.
        strict_parallel = ((angle1 > 180 - tolerance) | (angle1 < tolerance)) & ((angle2 > 180 - tolerance) | (angle2 < tolerance))
        # Perpendicular (T-shaped): normals ~90 degrees apart, still stacked.
        strict_perpendicular = ((angle1 > 90 - tolerance) & (angle1 < 90 + tolerance)) & ((angle2 > 180 - tolerance) | (angle2 < tolerance))
        return r1, r2, strict_parallel, strict_perpendicular
    else:
        return r1, r2, np.array([], dtype=bool), np.array([], dtype=bool)
def salt_bridge_plus_minus(mol1, mol2, cutoff = 4):
    """Returns pairs of plus-minus atoms, which meet salt bridge criteria.

    Parameters
    ----------
    mol1, mol2 : oddt.toolkit.Molecule object
        Molecules to compute plus and minus pairs
    cutoff : float, (default=4)
        Distance cutoff for plus-minus pairs

    Returns
    -------
    plus, minus : atom_dict-type numpy array
        Aligned arrays of atoms forming salt bridge, firstly plus, secondly minus
    """
    cations = mol1.atom_dict[mol1.atom_dict['isplus']]
    anions = mol2.atom_dict[mol2.atom_dict['isminus']]
    return close_contacts(cations, anions, cutoff)
def salt_bridges(mol1, mol2, *args, **kwargs):
    """Calculates salt bridges between molecules, in both directions.

    Extra arguments (cutoff) are forwarded to salt_bridge_plus_minus.

    Parameters
    ----------
    mol1, mol2 : oddt.toolkit.Molecule object
        Molecules to compute plus and minus pairs

    Returns
    -------
    mol1_atoms, mol2_atoms : atom_dict-type numpy array
        Aligned arrays of atoms forming salt bridges
    """
    plus1, minus2 = salt_bridge_plus_minus(mol1, mol2, *args, **kwargs)
    plus2, minus1 = salt_bridge_plus_minus(mol2, mol1, *args, **kwargs)
    return (np.concatenate((plus1, minus1)),
            np.concatenate((minus2, plus2)))
def hydrophobic_contacts(mol1, mol2, cutoff = 4):
    """Calculates hydrophobic contacts between molecules.

    Parameters
    ----------
    mol1, mol2 : oddt.toolkit.Molecule object
        Molecules to compute hydrophobe pairs
    cutoff : float, (default=4)
        Distance cutoff for hydrophobe pairs

    Returns
    -------
    mol1_atoms, mol2_atoms : atom_dict-type numpy array
        Aligned arrays of atoms forming hydrophobic contacts
    """
    hydrophobes1 = mol1.atom_dict[mol1.atom_dict['ishydrophobe']]
    hydrophobes2 = mol2.atom_dict[mol2.atom_dict['ishydrophobe']]
    return close_contacts(hydrophobes1, hydrophobes2, cutoff)
def pi_cation(mol1, mol2, cutoff = 5, tolerance = 30):
    """Returns pairs of ring-cation atoms, which meet pi-cation criteria
    Parameters
    ----------
    mol1, mol2 : oddt.toolkit.Molecule object
        Molecules to compute ring-cation pairs
    cutoff : float, (default=5)
        Distance cutoff for Pi-cation pairs
    tolerance : int, (default=30)
        Range (+/- tolerance) from perfect direction (perpendicular) in which pi-cation are considered as strict.
    Returns
    -------
    r1 : ring_dict-type numpy array
        Aligned rings forming pi-stacking
    plus2 : atom_dict-type numpy array
        Aligned cations forming pi-cation
    strict_parallel : numpy array, dtype=bool
        Boolean array align with ring-cation pairs, informing whether they form 'strict' pi-cation. If false, only distance cutoff is met, therefore the interaction is 'crude'.
    """
    r1, plus2 = close_contacts(mol1.ring_dict, mol2.atom_dict[mol2.atom_dict['isplus']], cutoff, x_column='centroid')
    if len(r1) > 0 and len(plus2) > 0:
        # Angle between the ring normal and the centroid->cation direction;
        # near 0 or 180 degrees means the cation sits above/below the ring.
        angle1 = angle_2v(r1['vector'], plus2['coords'] - r1['centroid'])
        strict = (angle1 > 180 - tolerance) | (angle1 < tolerance)
        return r1, plus2, strict
    else:
        return r1, plus2, np.array([], dtype=bool)
def acceptor_metal(mol1, mol2, base_angle = 120, tolerance = 30, cutoff = 4):
    """Returns pairs of acceptor-metal atoms, which meet metal coordination criteria
    Note: This function is directional (mol1 holds acceptors, mol2 holds metals)
    Parameters
    ----------
    mol1, mol2 : oddt.toolkit.Molecule object
        Molecules to compute acceptor and metal pairs
    cutoff : float, (default=4)
        Distance cutoff for A-M pairs
    base_angle : int, (default=120)
        Base angle determining allowed direction of metal coordination, which is devided by the number of neighbors of acceptor atom to establish final directional angle
    tolerance : int, (default=30)
        Range (+/- tolerance) from perfect direction (base_angle/n_neighbors) in metal coordination are considered as strict.
    Returns
    -------
    a, d : atom_dict-type numpy array
        Aligned arrays of atoms forming metal coordination, firstly acceptors, secondly metals.
    strict : numpy array, dtype=bool
        Boolean array align with atom pairs, informing whether atoms form 'strict' metal coordination (pass all angular cutoffs). If false, only distance cutoff is met, therefore the interaction is 'crude'.
    """
    a, m = close_contacts(mol1.atom_dict[mol1.atom_dict['isacceptor']], mol2.atom_dict[mol2.atom_dict['ismetal']], cutoff)
    #skip empty values
    if len(a) > 0 and len(m) > 0:
        # Angle at the acceptor: metal -> acceptor -> each acceptor neighbor.
        angle1 = angle(m['coords'][:,np.newaxis,:],a['coords'][:,np.newaxis,:],a['neighbors'])
        # Count real (non-NaN) neighbors per acceptor; scales the cutoff.
        a_neighbors_num = np.sum(~np.isnan(a['neighbors'][:,:,0]), axis=-1)[:,np.newaxis]
        # Strict when every neighbor angle clears base_angle/n - tolerance;
        # NaN angles (padding for missing neighbors) are treated as passing.
        strict = ((angle1>(base_angle/a_neighbors_num-tolerance)) | np.isnan(angle1)).all(axis=-1)
        return a, m, strict
    else:
        return a, m, np.array([], dtype=bool)
def pi_metal(mol1, mol2, cutoff = 5, tolerance = 30):
    """Returns pairs of ring-metal atoms, which meet pi-metal criteria.

    Parameters
    ----------
    mol1, mol2 : oddt.toolkit.Molecule object
        Molecules to compute ring-metal pairs
    cutoff : float, (default=5)
        Distance cutoff for Pi-metal pairs
    tolerance : int, (default=30)
        Range (+/- tolerance) from perfect direction (perpendicular) in which pi-metal are considered as strict.

    Returns
    -------
    r1 : ring_dict-type numpy array
        Aligned rings forming pi-metal
    m : atom_dict-type numpy array
        Aligned metals forming pi-metal
    strict_parallel : numpy array, dtype=bool
        True where the pair passes the angular cutoff ('strict' pi-metal);
        False means only the distance cutoff was met ('crude').
    """
    metals = mol2.atom_dict[mol2.atom_dict['ismetal']]
    rings, m = close_contacts(mol1.ring_dict, metals, cutoff, x_column='centroid')
    if len(rings) == 0 or len(m) == 0:
        return rings, m, np.array([], dtype=bool)
    # Angle between the ring normal and the centroid->metal direction.
    normal_angle = angle_2v(rings['vector'], m['coords'] - rings['centroid'])
    strict = (normal_angle > 180 - tolerance) | (normal_angle < tolerance)
    return rings, m, strict
| mwojcikowski/opendrugdiscovery | oddt/interactions.py | Python | bsd-3-clause | 17,473 |
"""Perform regular monitoring of the COS FUV and NUV dark rates
"""
from __future__ import print_function, absolute_import, division
import os
import datetime
import numpy as np
import shutil
import glob
import math
import logging
logger = logging.getLogger(__name__)
from astropy.io import fits
from astropy.time import Time
from calcos import orbit
from calcos.timeline import gmst, ASECtoRAD, DEGtoRAD, eqSun, DIST_SUN, RADIUS_EARTH, computeAlt, computeZD, rectToSph
from .solar import get_solar_data
from .plotting import plot_histogram, plot_time, plot_orbital_rate
from .interactive_plots import plot_time as interactive_plot_time
from ..utils import corrtag_image
from ..database.models import get_settings, get_database
from ..database.models import Darks
from copy import deepcopy
#-------------------------------------------------------------------------------
def get_sun_loc(mjd, full_path):
    """
    Get the location of the sun from SPT files.

    Parameters
    ----------
    mjd : float or iterable of floats
        The MJD(s) for an exposure
    full_path : str
        String of path and filename

    Yields
    ------
    long_sun : float
        Longitude of the sun (degrees)
    lat_sun : float
        Latitude of the sun (degrees)

    Raises
    ------
    IOError
        If the matching _spt.fits (or .gz) support file cannot be found.
    """
    rootname = fits.getval(full_path, 'ROOTNAME')
    path, _ = os.path.split(full_path)
    sptfile = os.path.join(path, rootname + '_spt.fits')
    if not os.path.exists(sptfile):
        sptfile += '.gz'
    if not os.path.exists(sptfile):
        raise IOError("Cannot find sptfile {}".format(sptfile))

    orb = orbit.HSTOrbit(sptfile)

    #-- BUG FIX: a scalar was previously wrapped with list(mjd), which
    #-- raises TypeError on a float; wrap it in a one-element list instead.
    if isinstance(mjd, (int, float)):
        mjd = [mjd]

    for m in mjd:
        #-- HST position is computed for parity with the calcos timeline
        #-- code, but only the Sun coordinates are yielded.
        (rect_hst, vel_hst) = orb.getPos(m)
        (r, ra_hst, dec_hst) = rectToSph(rect_hst)
        #-- Assume that we want geocentric latitude.  The difference from
        #-- astronomical latitude can be up to about 8.6 arcmin.
        lat_hst = dec_hst
        #-- Subtract the sidereal time at Greenwich to convert to longitude.
        long_hst = ra_hst - 2. * math.pi * gmst(m)
        if long_hst < 0.:
            long_hst += (2. * math.pi)
        long_col = long_hst / DEGtoRAD
        lat_col = lat_hst / DEGtoRAD

        #-- equatorial coords of the Sun
        rect_sun = eqSun(m)
        (r, ra_sun, dec_sun) = rectToSph(rect_sun)
        lat_sun = dec_sun
        long_sun = ra_sun - 2. * math.pi * gmst(m)
        if long_sun < 0.:
            long_sun += (2. * math.pi)

        long_sun /= DEGtoRAD
        lat_sun /= DEGtoRAD

        yield long_sun, lat_sun
#-------------------------------------------------------------------------------
def get_temp(filename):
    """Get detector temperature during observation from spt filename.

    The telemetry keyword depends on detector/segment; the value is read
    from the second extension of the matching _spt.fits support file (a
    .gz fallback is tried when the plain file is absent).

    Parameters
    ----------
    filename : str
        FITS file for which the temperature is to be found

    Returns
    -------
    temperature : float
        Detector temperature at the time of the observation
    """
    with fits.open(filename) as hdu:
        primary = hdu[0].header
        detector = primary['DETECTOR']
        segment = primary['SEGMENT']
        rootname = primary['ROOTNAME']

    #-- Map (detector, segment) onto the telemetry keyword holding the temp.
    keyword_by_segment = {('FUV', 'FUVA'): 'LDCAMPAT',
                          ('FUV', 'FUVB'): 'LDCAMPBT'}
    if detector == 'NUV':
        temp_keyword = 'LMMCETMP'
    elif (detector, segment) in keyword_by_segment:
        temp_keyword = keyword_by_segment[(detector, segment)]
    else:
        raise ValueError('WHAT DETECTOR AND SEGMENTS ARE THESE?! {} {}'.format(detector, segment))

    path, _ = os.path.split(filename)
    spt_file = os.path.join(path, rootname + '_spt.fits')

    try:
        temperature = fits.getval(spt_file, temp_keyword, ext=2)
    except IOError:
        temperature = fits.getval(spt_file + '.gz', temp_keyword, ext=2)

    return temperature
#-------------------------------------------------------------------------------
def mjd_to_decyear(time_array):
    """ Changes the date in MJD units to decimal years.

    Parameters
    ----------
    time_array : array like
        A list of times measured in MJD

    Returns
    -------
    out_times : np.array
        A numpy array of MJD to decimal year conversions.
    """
    times = Time(time_array, scale='tt', format='mjd')

    converted = []
    for value in times:
        dt = value.datetime
        year_start = datetime.datetime(dt.year, 1, 1)
        next_year_start = datetime.datetime(dt.year + 1, 1, 1)
        # Fraction of the (possibly leap) year elapsed at this instant.
        elapsed = (dt - year_start).total_seconds()
        year_length = (next_year_start - year_start).total_seconds()
        converted.append(dt.year + float(elapsed) / year_length)

    return np.array(converted)
#-------------------------------------------------------------------------------
def pull_orbital_info(data_object, step=25):
    """ Pull second by second orbital information.

    Walks the corrtag file's TIMELINE extension in `step`-second bins and
    yields one row-dictionary per bin with dark rate, TA dark rate,
    spacecraft latitude/longitude and the sub-solar point.

    Parameters
    ----------
    data_object : peewee query result
        Contains the path and filename information needed to process data.
    step : int
        Time step in seconds

    Yields
    ------
    info : dictionary
        A dictionary of meta data that will be stored in a row of the DB
        table.  On failure (missing TIMELINE extension, empty time array) a
        partially-filled dictionary is yielded before the generator stops.
    """
    full_path = os.path.join(data_object.path, data_object.filename)

    SECOND_PER_MJD = 1.15741e-5

    info = {}
    info['filename'] = os.path.split(full_path)[-1]

    #-- NOTE(review): the HDUList is intentionally not closed early; the
    #-- event arrays are used throughout the yield loop below and may be
    #-- memory-mapped, so closing is left to garbage collection.
    hdu = fits.open(full_path)

    try:
        timeline = hdu['timeline'].data
        segment = hdu[0].header['segment']
    except KeyError:
        logger.debug("NO TIMELINE EXTENSION FOUND FOR: {}".format(full_path))
        yield info
        #-- BUG FIX: was `raise StopIteration`, which PEP 479 (Python 3.7+)
        #-- turns into a RuntimeError inside a generator; a plain return
        #-- ends the generator correctly on all versions.
        return

    #-- Active-area limits and PHA screening window per detector segment.
    if segment == 'N/A':
        segment = 'NUV'
        xlim = (0, 1024)
        ylim = (0, 1204)
        pha = (-1, 1)
    elif segment == 'FUVA':
        xlim = (1200, 15099)
        ylim = (380, 680)
        pha = (2, 23)
    elif segment == 'FUVB':
        xlim = (950, 15049)
        ylim = (440, 720)
        pha = (2, 23)
    else:
        raise ValueError('WHAT SEGMENT IS THIS?! {}'.format(segment))

    info['rootname'] = hdu[0].header['rootname']
    info['detector'] = segment
    info['temp'] = get_temp(full_path)

    times = timeline['time'][::step].copy()
    lat = timeline['latitude'][:-1][::step].copy().astype(np.float64)
    lon = timeline['longitude'][:-1][::step].copy().astype(np.float64)

    #-- TIMELINE times are seconds from EXPSTART; convert each bin to MJD.
    mjd = hdu[1].header['EXPSTART'] + \
        times.copy().astype(np.float64) * \
        SECOND_PER_MJD

    sun_lat = []
    sun_lon = []
    for item in get_sun_loc(mjd, full_path):
        sun_lon.append(item[0])
        sun_lat.append(item[1])

    mjd = mjd[:-1]
    decyear = mjd_to_decyear(mjd)

    if not len(times):
        logger.debug("TIME ARRAY EMPTY FOR: {}".format(full_path))
        yield info
        #-- BUG FIX: see above -- return, not raise StopIteration.
        return

    events = hdu['events'].data

    #-- Events inside both the PHA window and the active area (dark rate).
    filtered_index = np.where((events['PHA'] > pha[0]) &
                              (events['PHA'] < pha[1]) &
                              (events['XCORR'] > xlim[0]) &
                              (events['XCORR'] < xlim[1]) &
                              (events['YCORR'] > ylim[0]) &
                              (events['YCORR'] < ylim[1]))

    #-- Target-acquisition dark rate skips the PHA screening.
    ta_index = np.where((events['XCORR'] > xlim[0]) &
                        (events['XCORR'] < xlim[1]) &
                        (events['YCORR'] > ylim[0]) &
                        (events['YCORR'] < ylim[1]))

    counts = np.histogram(events[filtered_index]['time'], bins=times)[0]
    ta_counts = np.histogram(events[ta_index]['time'], bins=times)[0]

    #-- Normalize to counts / pixel / second.
    npix = float((xlim[1] - xlim[0]) * (ylim[1] - ylim[0]))
    counts = counts / npix / step
    ta_counts = ta_counts / npix / step

    #-- np.histogram yields one fewer bin than edges; trim the position
    #-- arrays to match when necessary.
    if not len(lat) == len(counts):
        lat = lat[:-1]
        lon = lon[:-1]
        sun_lat = sun_lat[:-1]
        sun_lon = sun_lon[:-1]

    assert len(lat) == len(counts), \
        'Arrays are not equal in length {}:{}'.format(len(lat), len(counts))

    if not len(counts):
        logger.debug("ZERO-LENGTH ARRAY FOUND FOR: {}".format(full_path))
        yield info
    else:
        for i in range(len(counts)):
            #-- better solution than round?
            info['date'] = round(decyear[i], 3)
            info['dark'] = round(counts[i], 7)
            info['ta_dark'] = round(ta_counts[i], 7)
            info['latitude'] = round(lat[i], 7)
            info['longitude'] = round(lon[i], 7)
            info['sun_lat'] = round(sun_lat[i], 7)
            info['sun_lon'] = round(sun_lon[i], 7)

            yield deepcopy(info)
#-------------------------------------------------------------------------------
def compile_phd():
    #-- THIS STILL USES SQLALCHEMY FROM cos_monitoring.
    #-- MAY DELETE IN THE FUTURE.
    #-- NOTE(review): everything below the raise is unreachable dead code,
    #-- kept for reference; it references names (c, db, table, available)
    #-- that are not defined anywhere in this module.
    raise NotImplementedError("Nope, seriously can't do any of this.")

    #-- populate PHD table
    columns = ', '.join(['bin{} real'.format(pha) for pha in range(0,31)])
    c.execute("""CREATE TABLE {} ( obsname text, {})""".format(table, columns ))
    c.execute( """SELECT obsname FROM %s """ %(table))
    already_done = set( [str(item[0]) for item in c] )
    for filename in available:
        obsname = os.path.split(filename)[-1]
        if obsname in already_done:
            print(filename, 'done')
        else:
            print(filename, 'running')
            counts = pha_hist(filename)
            table_values = (obsname, ) + tuple(list(counts) )
            c.execute( """INSERT INTO %s VALUES (?{})""" % (table, ',?'*31 ),
                       table_values)
    db.commit()
#-------------------------------------------------------------------------------
def pha_hist(filename):
    """Compute the 31-bin pulse-height (PHA) histogram of an events file.

    Parameters
    ----------
    filename : str
        FITS file containing a 'PHA' column in extension 1.

    Returns
    -------
    counts : np.ndarray
        Event counts in each of the 31 PHA bins spanning [0, 31).
    """
    #-- BUG FIX: the HDUList was previously never closed (resource leak);
    #-- the histogram is computed inside the context so memory-mapped data
    #-- stays valid while in use.
    with fits.open(filename) as hdu:
        pha_list_all = hdu[1].data['PHA']
        counts = np.histogram(pha_list_all, bins=31, range=(0, 31))[0]
    return counts
#-------------------------------------------------------------------------------
def make_plots(detector, base_dir, TA=False):
    """ Create static monitoring plots for FUV/NUV dark rates.

    For each segment of the detector this queries the Darks table and
    writes: dark rate vs. time (interactive html + static png), dark rate
    vs. orbital position, and per-year / trailing-6-month / all-time
    histograms.

    Parameters
    ----------
    detector : str
        The COS mode trends you are interested in plotting ('FUV' or 'NUV').
    base_dir : str
        Directory you are interested in writing to.
    TA : bool
        Flag to monitor target acq dark rate.

    Returns
    -------
    None
    """
    if detector == 'FUV':
        search_strings = ['_corrtag_a.fits', '_corrtag_b.fits']
        segments = ['FUVA', 'FUVB']
    elif detector == 'NUV':
        search_strings = ['_corrtag.fits']
        segments = ['NUV']
    else:
        raise ValueError('Only FUV or NUV allowed. NOT:{}'.format(detector) )

    #-- Solar flux is overplotted on the time series; fall back to dummy
    #-- arrays when the solar flux file cannot be parsed.
    try:
        solar_data = np.genfromtxt(os.path.join(base_dir, 'solar_flux.txt'), dtype=None)
        solar_date = np.array( mjd_to_decyear([line[0] for line in solar_data]) )
        solar_flux = np.array([line[1] for line in solar_data])
    except TypeError:
        logger.warning("COULDN'T READ SOLAR DATA. PUTTING IN ZEROS.")
        solar_date = np.ones(1000)
        solar_flux = np.ones(1000)

    #-- Key selects which Darks column is plotted: regular or targacq dark.
    dark_key = 'dark'
    if TA:
        dark_key = 'ta_dark'

    #-- Open settings and get database
    settings = get_settings()
    database = get_database()

    for key, segment in zip(search_strings, segments):
        logger.debug('CREATING TIME PLOT FOR {}:{}'.format(segment, key))

        #-- Query for data here!
        data = Darks.select().where(Darks.detector == segment)
        data = [row for row in data]

        mjd = np.array([item.date for item in data])

        #-- Parse whether you want to plot dark monitoring or targacq dark.
        if TA:
            dark_key = 'ta_dark'
            dark = np.array([item.ta_dark for item in data])
        else:
            dark = np.array([item.dark for item in data])

        temp = np.array([item.temp for item in data])
        latitude = np.array([item.latitude for item in data])
        longitude = np.array([item.longitude for item in data])

        #-- Sort everything by observation date.
        index = np.argsort(mjd)
        mjd = mjd[index]
        dark = dark[index]
        temp = temp[index]
        latitude = latitude[index]
        longitude = longitude[index]

        #-- Keep only points outside longitude >= 250 AND latitude <= 10.
        #-- NOTE(review): latitude/longitude themselves are not re-subset
        #-- with index_keep, but they are not used again below this point.
        index_keep = np.where((longitude < 250) | (latitude > 10))[0]

        mjd = mjd[index_keep]
        dark = dark[index_keep]
        temp = temp[index_keep]

        logger.debug('CREATING INTERACTIVE PLOT FOR {}:{}'.format(segment, key))

        #-- Interactive plots
        outname = os.path.join(base_dir, detector, '{}_vs_time_{}.html'.format(dark_key, segment))
        interactive_plot_time(detector, dark, mjd, temp, solar_flux, solar_date, outname)

        outname = os.path.join(base_dir, detector, '{}_vs_time_{}.png'.format(dark_key, segment))
        if not os.path.exists(os.path.split(outname)[0]):
            os.makedirs(os.path.split(outname)[0])
        plot_time(detector, dark, mjd, temp, solar_flux, solar_date, outname)

        #-- Plot vs orbit
        logger.debug('CREATING ORBIT PLOT FOR {}:{}'.format(segment, key))

        #-- Re-query: the orbital plot uses the unfiltered dataset.
        data = Darks.select().where(Darks.detector==segment)
        data = [row for row in data]

        if TA:
            dark_key = 'ta_dark'
            dark = np.array([item.ta_dark for item in data])
        else:
            dark = np.array([item.dark for item in data])

        latitude = np.array([item.latitude for item in data])
        longitude = np.array([item.longitude for item in data])
        sun_lat = np.array([item.sun_lat for item in data])
        sun_lon = np.array([item.sun_lon for item in data])
        date = np.array([item.date for item in data])

        index = np.argsort(date)
        dark = dark[index]
        latitude = latitude[index]
        longitude = longitude[index]
        sun_lat = sun_lat[index]
        sun_lon = sun_lon[index]

        outname = os.path.join(base_dir, detector, '{}_vs_orbit_{}.png'.format(dark_key, segment))
        plot_orbital_rate(longitude, latitude, dark, sun_lon, sun_lat, outname)

        #-- Plot histogram of darkrates
        logger.debug('CREATING HISTOGRAM PLOT FOR {}:{}'.format(segment, key))

        data = Darks.select().where(Darks.detector==segment)
        data = [item for item in data]

        if TA:
            dark_key = 'ta_dark'
            dark = np.array([item.ta_dark for item in data])
        else:
            dark = np.array([item.dark for item in data])

        date = np.array([item.date for item in data])

        index = np.argsort(date)
        date = date[index]
        dark = dark[index]

        #-- One histogram per calendar year present in the data; dates are
        #-- decimal years, so int() truncation gives the year.
        for year in set(map(int, date)):
            if year == 0:
                continue
            else:
                index = np.where( (date >= year) &
                                  (date < year + 1))
                outname = os.path.join(base_dir, detector, '{}_hist_{}_{}.png'.format(dark_key, year, segment))
                plot_histogram(dark[index], outname)

        #-- Trailing six months (.5 of a decimal year).
        #-- NOTE(review): date.max() raises on an empty query result --
        #-- confirm the table is never empty for a valid segment.
        index = np.where(date >= date.max() - .5)
        outname = os.path.join(base_dir, detector, '{}_hist_-6mo_{}.png'.format(dark_key, segment))
        plot_histogram(dark[index], outname )

        #-- All-time histogram.
        outname = os.path.join(base_dir, detector, '{}_hist_{}.png'.format(dark_key, segment))
        plot_histogram(dark, outname)
#-------------------------------------------------------------------------------
def move_products(base_dir, web_dir):
    '''Move monitoring figures to webpage directory.

    Copies every product matching ``*.p??`` from ``base_dir/<detector>``
    into the per-detector webpage directory, skipping editor backup files.

    Parameters
    ----------
    base_dir : str
        Directory where figures are located
    web_dir : str
        COS monitoring webpage directory.

    Returns
    -------
    None
    '''
    for detector in ['FUV', 'NUV']:
        #-- Where would you like to write the plots to?
        write_dir = os.path.join(web_dir, detector.lower() + '_darks/')

        #-- If the path doesnt exist, make your own...
        if not os.path.exists(write_dir):
            os.makedirs(write_dir)

        #-- Combine the base monitoring dir with the detector specific dir.
        detector_dir = os.path.join(base_dir, detector)

        #-- Grab all of the files you wish to move....
        move_list = glob.glob(detector_dir + '/*.p??')

        #-- BUGFIX: the original removed entries from move_list while
        #-- iterating it, which silently skips the element following each
        #-- removal.  Nothing reads move_list afterwards, so just iterate.
        for item in move_list:
            #-- Don't want any python scripts moving.
            if item.endswith('.py~'):
                logger.debug("REMOVING {}".format(item))
                continue

            logger.debug("MOVING {}".format(item))
            try:
                #-- Split the file and paths.
                path, file_to_move = os.path.split(item)
                destination = os.path.join(write_dir, file_to_move)

                #-- Update the permissions
                os.chmod(item, 0o766)

                #-- BUGFIX: only remove a pre-existing copy.  The original
                #-- removed unconditionally, so on a first run the OSError
                #-- from the missing file aborted the copy entirely.
                if os.path.exists(destination):
                    os.remove(destination)

                #-- Copy the file over.
                shutil.copy(item, destination)
            except OSError:
                logger.warning("HIT AN OS ERROR FOR {}, LEAVING IT THERE".format(item))

        #-- Change all of the file permissions.
        os.system('chmod 777 ' + write_dir + '/*.png')
#-------------------------------------------------------------------------------
def monitor():
    """Main monitoring pipeline.

    Creates the dark-rate output directory if needed, refreshes the solar
    flux data, generates all FUV/NUV plots (plus the FUV target-acq set),
    and moves the products to the webpage directory.

    Returns
    -------
    None
    """
    logger.info("STARTING MONITOR")

    settings = get_settings()

    #-- Resolve the output and webpage locations from the settings file.
    out_dir = os.path.join(settings['monitor_location'], 'Darks')
    web_dir = settings['webpage_location']

    if not os.path.exists(out_dir):
        logger.warning("CREATING OUTPUT DIRECTORY: {}".format(out_dir))
        os.makedirs(out_dir)

    #-- Solar flux must be current before any time-series plots are made.
    get_solar_data(out_dir)

    for detector in ('FUV', 'NUV'):
        logger.info("MAKING PLOTS FOR {}".format(detector))
        make_plots(detector, out_dir)

        #-- Target-acquisition darks exist only for the FUV channel.
        if detector == 'FUV':
            make_plots(detector, out_dir, TA=True)

    logger.info("MOVING PRODUCTS TO WEB DIRECTORY")
    move_products(out_dir, web_dir)
#-------------------------------------------------------------------------------
| mfixstsci/peewee4cosmo | cosmo_peewee/dark/monitor.py | Python | bsd-3-clause | 18,295 |
from __future__ import absolute_import
from sentry.auth.system import is_system_auth, SystemToken
from sentry.testutils import TestCase
class TestSystemAuth(TestCase):
    def test_is_system_auth(self):
        """SystemToken instances count as system auth; other objects do not."""
        assert is_system_auth(SystemToken())
        assert not is_system_auth({})
| beeftornado/sentry | tests/sentry/auth/test_system.py | Python | bsd-3-clause | 311 |
#!/usr/bin/env python
from django.core.management import execute_manager

#-- Import the project settings module; a clear error message is printed
#-- and the process exits with status 1 when it cannot be found.
try:
    import settings # Assumed to be in the same directory.
except ImportError as e:
    #-- BUGFIX: 'except ImportError, e' is Python-2-only syntax; the 'as'
    #-- form works on Python 2.6+ and Python 3.
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.stderr.write( str(e) + "\n" )
    sys.exit(1)

if __name__ == "__main__":
    execute_manager(settings)
| ubgarbage/gae-blog | manage.py | Python | bsd-3-clause | 586 |
import director.applogic as app
from director import lcmUtils
from director import transformUtils
from director import visualization as vis
from director import filterUtils
from director import drcargs
from director.shallowCopy import shallowCopy
from director.timercallback import TimerCallback
from director import vtkNumpy
from director import objectmodel as om
import director.vtkAll as vtk
from director.debugVis import DebugData
import PythonQt
from PythonQt import QtCore, QtGui
import numpy as np
from director.simpletimer import SimpleTimer
from director import ioUtils
import sys
def clipRange(dataObj, arrayName, thresholdRange):
    """Clip dataObj to points whose arrayName values lie within thresholdRange.

    Two chained vtkClipPolyData filters keep values >= thresholdRange[0]
    and <= thresholdRange[1].  Returns a shallow copy of the clipped output.
    """
    scalars = dataObj.GetPointData().GetArray(arrayName)
    if not scalars:
        raise Exception('clipRange: could not locate array: %s' % arrayName)

    dataObj.GetPointData().SetScalars(scalars)

    #-- First clip: keep everything at or above the lower threshold.
    clipLow = vtk.vtkClipPolyData()
    clipLow.SetInputData(dataObj)
    clipLow.SetValue(thresholdRange[0])
    clipLow.SetInputArrayToProcess(0, 0, 0, vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS, arrayName)

    #-- Second clip (inside-out): keep everything at or below the upper one.
    clipHigh = vtk.vtkClipPolyData()
    clipHigh.AddInputConnection(clipLow.GetOutputPort())
    clipHigh.SetValue(thresholdRange[1])
    clipHigh.InsideOutOn()
    clipHigh.SetInputArrayToProcess(0, 0, 0, vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS, arrayName)
    clipHigh.Update()

    return shallowCopy(clipHigh.GetOutput())
def makeSphere(radius, resolution):
    """Build sphere polydata with the given radius and theta/phi resolution.

    The sphere is left open near the bottom pole (EndPhi=85 degrees).
    Returns a shallow copy of the generated vtkPolyData.
    """
    source = vtk.vtkSphereSource()
    source.SetRadius(radius)
    source.SetThetaResolution(resolution)
    source.SetPhiResolution(resolution)
    source.SetEndPhi(85)
    source.Update()
    return shallowCopy(source.GetOutput())
def colorizePoints(polyData, cameraName='MULTISENSE_CAMERA_LEFT'):
    # Colorize polyData in place from the latest image of cameraName.
    # NOTE(review): relies on a module-level `imageManager` being created
    # elsewhere before this is called -- it is not defined in this module.
    imageManager.queue.colorizePoints(cameraName, polyData)
def sendFOVRequest(channel, imagePoints):
    """Publish a SUBIMAGE_REQUEST covering the bounding box of imagePoints.

    channel selects which camera's sub-image is requested; imagePoints is a
    sequence of (x, y) pixel coordinates.
    """
    import maps as lcmmaps

    channelToImageType = {
        'MULTISENSE_CAMERA_LEFT' : lcmmaps.data_request_t.CAMERA_IMAGE_HEAD_LEFT,
        'CAMERACHEST_LEFT' : lcmmaps.data_request_t.CAMERA_IMAGE_LCHEST,
        'CAMERACHEST_RIGHT' : lcmmaps.data_request_t.CAMERA_IMAGE_RCHEST,
    }

    dataRequest = lcmmaps.data_request_t()
    dataRequest.type = channelToImageType[channel]

    message = lcmmaps.subimage_request_t()
    message.data_request = dataRequest

    #-- Axis-aligned bounding box of the selected pixels.
    xs = np.array([pt[0] for pt in imagePoints])
    ys = np.array([pt[1] for pt in imagePoints])
    message.x = xs.min()
    message.y = ys.min()
    message.w = xs.max() - xs.min()
    message.h = ys.max() - ys.min()

    lcmUtils.publish('SUBIMAGE_REQUEST', message)
def testColorize():
    """Debug helper: build a sphere, colorize it from the default camera
    image, and show it colored by the sampled values."""
    radius = 10
    resolution = 400

    s = makeSphere(radius, resolution)
    #-- BUGFIX: the original referenced undefined names -- `cameraView.queue`
    #-- (no such global) and bare `showPolyData(p, ...)` with `p` never
    #-- assigned.  Use this module's colorizePoints helper on the sphere we
    #-- just made and display it through the vis module.
    colorizePoints(s)
    vis.showPolyData(s, 'sphere', colorByName='rgb')
def rayDebug(position, ray):
    """Draw a 5-unit green line from position along ray in the 'DRC View'."""
    debug = DebugData()
    debug.addLine(position, position + ray * 5.0)

    drcView = app.getViewManager().findView('DRC View')
    obj = vis.updatePolyData(debug.getPolyData(), 'camera ray', view=drcView, color=[0, 1, 0])
    obj.actor.GetProperty().SetLineWidth(2)
class ImageManager(object):
    """Owns the LCM image queue plus per-camera image/texture state.

    Each registered camera name gets a vtkImageData (updated in place from
    the queue), a vtkTexture wrapping that image, the utime of the last
    update, and an optional 180-degree rotation flag.
    """

    def __init__(self):
        self.images = {}
        self.imageUtimes = {}
        self.textures = {}
        self.imageRotations180 = {}

        #-- The queue listens on the global LCM thread; camera definitions
        #-- come from the director config file.
        self.queue = PythonQt.dd.ddBotImageQueue(lcmUtils.getGlobalLCMThread())
        self.queue.init(lcmUtils.getGlobalLCMThread(), drcargs.args().config_file)

    def addImage(self, name):
        """Register a camera stream; no-op if it is already registered."""
        if name in self.images:
            return

        image = vtk.vtkImageData()
        tex = vtk.vtkTexture()
        tex.SetInputData(image)
        tex.EdgeClampOn()
        tex.RepeatOff()

        self.imageUtimes[name] = 0
        self.images[name] = image
        self.textures[name] = tex
        self.imageRotations180[name] = False

    def writeImage(self, imageName, outFile):
        """Write the current image for imageName to a PNG file at outFile."""
        writer = vtk.vtkPNGWriter()
        writer.SetInputData(self.images[imageName])
        writer.SetFileName(outFile)
        writer.Write()

    def updateImage(self, imageName):
        """Pull the newest frame from the queue if one arrived.

        Returns the current image utime (0 until a first frame exists).
        """
        imageUtime = self.queue.getCurrentImageTime(imageName)
        if imageUtime != self.imageUtimes[imageName]:
            image = self.images[imageName]
            self.imageUtimes[imageName] = self.queue.getImage(imageName, image)

            #-- Apply the optional 180-degree rotation in place.
            if self.imageRotations180[imageName]:
                self.images[imageName].ShallowCopy(filterUtils.rotateImage180(image))

        return imageUtime

    def updateImages(self):
        """Refresh every registered camera stream."""
        for imageName in self.images.keys():
            self.updateImage(imageName)

    def setImageRotation180(self, imageName):
        # Flag this stream so all future updates are rotated 180 degrees
        # (used when the physical camera is mounted upside down).
        assert imageName in self.images
        self.imageRotations180[imageName] = True

    def hasImage(self, imageName):
        return imageName in self.images

    def getImage(self, imageName):
        return self.images[imageName]

    def getUtime(self, imageName):
        return self.imageUtimes[imageName]

    def getTexture(self, imageName):
        return self.textures[imageName]
def disableCameraTexture(obj):
    """Strip any camera texture from obj, restoring lighting and its color."""
    actor = obj.actor
    actor.SetTexture(None)
    actor.GetProperty().LightingOn()
    actor.GetProperty().SetColor(obj.getProperty('Color'))
def applyCameraTexture(obj, imageManager, imageName='MULTISENSE_CAMERA_LEFT'):
    """Texture-map the latest camera image onto obj's polydata in place.

    No-op if no image has been received yet.  Texture coordinates are
    computed in the camera frame at the utime of the current image.
    """
    imageUtime = imageManager.getUtime(imageName)
    if not imageUtime:
        return

    cameraToLocal = vtk.vtkTransform()
    imageManager.queue.getTransform(imageName, 'local', imageUtime, cameraToLocal)

    #-- Express the geometry in the camera frame before computing tcoords.
    pd = filterUtils.transformPolyData(obj.polyData, obj.actor.GetUserTransform())
    pd = filterUtils.transformPolyData(pd, cameraToLocal.GetLinearInverse())

    imageManager.queue.computeTextureCoords(imageName, pd)

    tcoordsArrayName = 'tcoords_%s' % imageName
    tcoords = pd.GetPointData().GetArray(tcoordsArrayName)
    assert tcoords

    #-- Clear first, then set the new array (presumably to force VTK to
    #-- register the change -- verify).
    obj.polyData.GetPointData().SetTCoords(None)
    obj.polyData.GetPointData().SetTCoords(tcoords)
    obj._updateColorByProperty()

    #-- Lighting off + white color so the texture is rendered unshaded.
    obj.actor.SetTexture(imageManager.getTexture(imageName))
    obj.actor.GetProperty().LightingOff()
    obj.actor.GetProperty().SetColor([1,1,1])
class CameraView(object):
    """Spherical multi-camera view.

    Projects up to three camera streams onto textured sphere sections
    rendered in layered renderers, refreshed by a 60 fps timer while new
    images arrive.
    """

    def __init__(self, imageManager, view=None):
        self.imageManager = imageManager
        self.updateUtimes = {}
        self.robotModel = None
        self.sphereObjects = {}
        self.sphereImages = [
            'MULTISENSE_CAMERA_LEFT',
            'CAMERACHEST_RIGHT',
            'CAMERACHEST_LEFT']

        for name in self.sphereImages:
            imageManager.addImage(name)
            self.updateUtimes[name] = 0

        self.initView(view)
        self.initEventFilter()
        self.rayCallback = rayDebug

        #-- Re-render at up to 60 fps while images are updating.
        self.timerCallback = TimerCallback()
        self.timerCallback.targetFps = 60
        self.timerCallback.callback = self.updateView
        self.timerCallback.start()

    def onViewDoubleClicked(self, displayPoint):
        """Cast a world ray through the double-clicked point and pass it to
        self.rayCallback (rayDebug by default)."""
        obj, pickedPoint = vis.findPickedObject(displayPoint, self.view)

        if pickedPoint is None or not obj:
            return

        imageName = obj.getProperty('Name')
        imageUtime = self.imageManager.getUtime(imageName)

        cameraToLocal = vtk.vtkTransform()
        self.imageManager.queue.getTransform(imageName, 'local', imageUtime, cameraToLocal)

        utorsoToLocal = vtk.vtkTransform()
        self.imageManager.queue.getTransform('utorso', 'local', imageUtime, utorsoToLocal)

        #-- NOTE(review): range(3) as a mutable output buffer is
        #-- Python-2-only; Python 3 range objects are immutable.
        p = range(3)
        utorsoToLocal.TransformPoint(pickedPoint, p)

        ray = np.array(p) - np.array(cameraToLocal.GetPosition())
        ray /= np.linalg.norm(ray)

        if self.rayCallback:
            self.rayCallback(np.array(cameraToLocal.GetPosition()), ray)

    def filterEvent(self, obj, event):
        """Qt event filter: handle double-click picking and 'p'/'r' keys."""
        if event.type() == QtCore.QEvent.MouseButtonDblClick:
            self.eventFilter.setEventHandlerResult(True)
            self.onViewDoubleClicked(vis.mapMousePosition(obj, event))
        elif event.type() == QtCore.QEvent.KeyPress:
            if str(event.text()).lower() == 'p':
                self.eventFilter.setEventHandlerResult(True)
            elif str(event.text()).lower() == 'r':
                #-- 'r' resets the camera to its default pose.
                self.eventFilter.setEventHandlerResult(True)
                self.resetCamera()

    def initEventFilter(self):
        """Install the Qt event filter on the view's vtk widget."""
        self.eventFilter = PythonQt.dd.ddPythonEventFilter()
        qvtkwidget = self.view.vtkWidget()
        qvtkwidget.installEventFilter(self.eventFilter)
        self.eventFilter.addFilteredEventType(QtCore.QEvent.MouseButtonDblClick)
        self.eventFilter.addFilteredEventType(QtCore.QEvent.KeyPress)
        self.eventFilter.connect('handleEvent(QObject*, QEvent*)', self.filterEvent)

    def initImageRotations(self, robotModel):
        self.robotModel = robotModel
        # Rotate Multisense image/MULTISENSE_CAMERA_LEFT if the camera frame is rotated (e.g. for Valkyrie)
        if robotModel.getHeadLink():
            tf = robotModel.getLinkFrame(robotModel.getHeadLink())
            roll = transformUtils.rollPitchYawFromTransform(tf)[0]
            if np.isclose(np.abs(roll), np.pi, atol=1e-1):
                self.imageManager.setImageRotation180('MULTISENSE_CAMERA_LEFT')

    def initView(self, view):
        """Create (or adopt) the view and set up three render layers, one
        per camera sphere."""
        self.view = view or app.getViewManager().createView('Camera View', 'VTK View')
        self.renderers = [self.view.renderer()]
        renWin = self.view.renderWindow()
        renWin.SetNumberOfLayers(3)
        for i in [1, 2]:
            ren = vtk.vtkRenderer()
            ren.SetLayer(2)
            ren.SetActiveCamera(self.view.camera())
            renWin.AddRenderer(ren)
            self.renderers.append(ren)

        def applyCustomBounds():
            self.view.addCustomBounds([-100, 100, -100, 100, -100, 100])
        self.view.connect('computeBoundsRequest(ddQVTKWidgetView*)', applyCustomBounds)

        app.setCameraTerrainModeEnabled(self.view, True)
        self.resetCamera()

    def resetCamera(self):
        """Return the camera to the default wide-angle pose."""
        self.view.camera().SetViewAngle(90)
        self.view.camera().SetPosition(-7.5, 0.0, 5.0)
        self.view.camera().SetFocalPoint(0.0, 0.0, 0.0)
        self.view.camera().SetViewUp(0.0, 0.0, 1.0)
        self.view.render()

    def getSphereGeometry(self, imageName):
        """Lazily build the textured sphere object for imageName.

        Returns None until the stream has delivered its first image.
        """
        sphereObj = self.sphereObjects.get(imageName)
        if sphereObj:
            return sphereObj

        if not self.imageManager.getImage(imageName).GetDimensions()[0]:
            return None

        sphereResolution = 50
        sphereRadii = {
            'MULTISENSE_CAMERA_LEFT' : 20,
            'CAMERACHEST_LEFT' : 20,
            'CAMERACHEST_RIGHT' : 20
        }

        geometry = makeSphere(sphereRadii[imageName], sphereResolution)
        self.imageManager.queue.computeTextureCoords(imageName, geometry)

        #-- Clip away the parts of the sphere outside the image's texture
        #-- coordinate range [0, 1] on both axes.
        tcoordsArrayName = 'tcoords_%s' % imageName
        vtkNumpy.addNumpyToVtk(geometry, vtkNumpy.getNumpyFromVtk(geometry, tcoordsArrayName)[:,0].copy(), 'tcoords_U')
        vtkNumpy.addNumpyToVtk(geometry, vtkNumpy.getNumpyFromVtk(geometry, tcoordsArrayName)[:,1].copy(), 'tcoords_V')
        geometry = clipRange(geometry, 'tcoords_U', [0.0, 1.0])
        geometry = clipRange(geometry, 'tcoords_V', [0.0, 1.0])
        geometry.GetPointData().SetTCoords(geometry.GetPointData().GetArray(tcoordsArrayName))

        sphereObj = vis.showPolyData(geometry, imageName, view=self.view, parent='cameras')
        sphereObj.actor.SetTexture(self.imageManager.getTexture(imageName))
        sphereObj.actor.GetProperty().LightingOff()

        #-- Move the actor into its dedicated render layer.
        self.view.renderer().RemoveActor(sphereObj.actor)
        rendererId = 2 - self.sphereImages.index(imageName)
        self.renderers[rendererId].AddActor(sphereObj.actor)

        self.sphereObjects[imageName] = sphereObj
        return sphereObj

    def updateSphereGeometry(self):
        """Reposition each sphere with the current body-to-camera transform."""
        for imageName in self.sphereImages:
            sphereObj = self.getSphereGeometry(imageName)
            if not sphereObj:
                continue

            transform = vtk.vtkTransform()
            self.imageManager.queue.getBodyToCameraTransform(imageName, transform)
            sphereObj.actor.SetUserTransform(transform.GetLinearInverse())

    def updateImages(self):
        """Refresh all streams; return True if any image changed.

        NOTE(review): dict.iteritems() is Python-2 only.
        """
        updated = False
        for imageName, lastUtime in self.updateUtimes.iteritems():
            currentUtime = self.imageManager.updateImage(imageName)
            if currentUtime != lastUtime:
                self.updateUtimes[imageName] = currentUtime
                updated = True
        return updated

    def updateView(self):
        """Timer callback: re-render only when visible and images changed."""
        if not self.view.isVisible():
            return

        if not self.updateImages():
            return

        self.updateSphereGeometry()
        self.view.render()
class ImageWidget(object):
    """Picture-in-picture overlay showing a camera image in the corner of an
    existing 3D view, implemented with a vtkLogoWidget."""

    def __init__(self, imageManager, imageName, view, visible=True):
        self.view = view
        self.imageManager = imageManager
        self.imageName = imageName
        self.visible = visible

        self.updateUtime = 0
        self.initialized = False

        self.imageWidget = vtk.vtkLogoWidget()
        imageRep = self.imageWidget.GetRepresentation()
        self.imageWidget.ResizableOff()
        self.imageWidget.SelectableOn()
        imageRep.GetImageProperty().SetOpacity(1.0)
        self.imageWidget.SetInteractor(self.view.renderWindow().GetInteractor())

        #-- Flip vertically for display.
        self.flip = vtk.vtkImageFlip()
        self.flip.SetFilteredAxis(1)
        self.flip.SetInputData(imageManager.getImage(imageName))
        imageRep.SetImage(self.flip.GetOutput())

        #-- Keep the widget anchored/sized when the view is resized.
        self.eventFilter = PythonQt.dd.ddPythonEventFilter()
        self.view.installEventFilter(self.eventFilter)
        self.eventFilter.addFilteredEventType(QtCore.QEvent.Resize)
        self.eventFilter.connect('handleEvent(QObject*, QEvent*)', self.onResizeEvent)

        self.timerCallback = TimerCallback()
        self.timerCallback.targetFps = 60
        self.timerCallback.callback = self.updateView
        self.timerCallback.start()

    def setWidgetSize(self, desiredWidth=400):
        """Anchor the widget in the view's top-left corner at desiredWidth
        pixels wide, preserving the image aspect ratio."""
        image = self.imageManager.getImage(self.imageName)
        dims = image.GetDimensions()
        if 0.0 in dims:
            return

        aspectRatio = float(dims[0])/dims[1]
        imageWidth, imageHeight = desiredWidth, desiredWidth/aspectRatio
        viewWidth, viewHeight = self.view.width, self.view.height

        rep = self.imageWidget.GetBorderRepresentation()
        rep.SetShowBorderToOff()
        coord = rep.GetPositionCoordinate()
        coord2 = rep.GetPosition2Coordinate()
        coord.SetCoordinateSystemToDisplay()
        coord2.SetCoordinateSystemToDisplay()
        coord.SetValue(0, viewHeight-imageHeight)
        coord2.SetValue(imageWidth, imageHeight)

        self.view.render()

    def onResizeEvent(self):
        self.setWidgetSize(400)

    def setImageName(self, imageName):
        """Switch the displayed stream.

        NOTE(review): reads the module-level `imageManager` global rather
        than self.imageManager -- confirm this is intentional.
        """
        self.imageName = imageName
        self.flip.SetInputData(imageManager.getImage(imageName))

    def setOpacity(self, opacity=1.0):
        self.imageWidget.GetRepresentation().GetImageProperty().SetOpacity(opacity)

    def hide(self):
        self.visible = False
        self.imageWidget.Off()
        self.view.render()

    def show(self):
        self.visible = True
        if self.haveImage():
            self.imageWidget.On()
            self.view.render()

    def haveImage(self):
        """True once the stream has delivered at least one frame."""
        image = self.imageManager.getImage(self.imageName)
        dims = image.GetDimensions()
        return 0.0 not in dims

    def updateView(self):
        """Timer callback: refresh the flipped image when a new frame lands."""
        if not self.visible or not self.view.isVisible():
            return

        currentUtime = self.imageManager.updateImage(self.imageName)
        if currentUtime != self.updateUtime:
            self.updateUtime = currentUtime
            self.flip.Update()
            self.view.render()

            #-- First frame: turn the widget on and give it a size.
            if not self.initialized and self.visible and self.haveImage():
                self.show()
                self.setWidgetSize(400)
                self.initialized = True
class CameraImageView(object):
    """2D view rendering a single camera stream via a vtkImageActor.

    Supports display-to-pixel picking, pixel-to-world ray queries,
    rubber-band sub-image requests, and an optional lookup-table color map.
    """

    def __init__(self, imageManager, imageName, viewName=None, view=None):
        imageManager.addImage(imageName)

        self.cameraRoll = None
        self.imageManager = imageManager
        self.viewName = viewName or imageName
        self.imageName = imageName
        self.imageInitialized = False
        self.updateUtime = 0
        self.useImageColorMap = False
        self.imageMapToColors = None
        self.initView(view)
        self.initEventFilter()

    def getImagePixel(self, displayPoint, restrictToImageDimensions=True):
        """Map a display (screen) point to [x, y, 0] pixel coordinates.

        Returns None when the point falls outside the image and
        restrictToImageDimensions is True.
        """
        worldPoint = [0.0, 0.0, 0.0, 0.0]
        vtk.vtkInteractorObserver.ComputeDisplayToWorld(self.view.renderer(), displayPoint[0], displayPoint[1], 0, worldPoint)
        imageDimensions = self.getImage().GetDimensions()
        if 0.0 <= worldPoint[0] <= imageDimensions[0] and 0.0 <= worldPoint[1] <= imageDimensions[1] or not restrictToImageDimensions:
            return [worldPoint[0], worldPoint[1], 0.0]
        else:
            return None

    def getWorldPositionAndRay(self, imagePixel, imageUtime=None):
        '''
        Given an XY image pixel, computes an equivalent ray in the world
        coordinate system using the camera to local transform at the given
        imageUtime.  If imageUtime is None, then the utime of the most recent
        image is used.

        Returns the camera xyz position in world, and a ray unit vector.
        '''
        if imageUtime is None:
            imageUtime = self.imageManager.getUtime(self.imageName)

        # input is pixel u,v, output is unit x,y,z in camera coordinates
        cameraPoint = self.imageManager.queue.unprojectPixel(self.imageName, imagePixel[0], imagePixel[1])

        cameraToLocal = vtk.vtkTransform()
        self.imageManager.queue.getTransform(self.imageName, 'local', imageUtime, cameraToLocal)

        p = np.array(cameraToLocal.TransformPoint(cameraPoint))
        cameraPosition = np.array(cameraToLocal.GetPosition())
        ray = p - cameraPosition
        ray /= np.linalg.norm(ray)

        return cameraPosition, ray

    def filterEvent(self, obj, event):
        """Qt event filter: swallow double-clicks (when enabled) and handle
        the 'p' and 'r' keys ('r' resets the camera)."""
        if self.eventFilterEnabled and event.type() == QtCore.QEvent.MouseButtonDblClick:
            self.eventFilter.setEventHandlerResult(True)
        elif event.type() == QtCore.QEvent.KeyPress:
            if str(event.text()).lower() == 'p':
                self.eventFilter.setEventHandlerResult(True)
            elif str(event.text()).lower() == 'r':
                self.eventFilter.setEventHandlerResult(True)
                self.resetCamera()

    def onRubberBandPick(self, obj, event):
        """Request a camera sub-image covering the rubber-band selection."""
        displayPoints = self.interactorStyle.GetStartPosition(), self.interactorStyle.GetEndPosition()
        imagePoints = [vis.pickImage(point, self.view)[1] for point in displayPoints]
        sendFOVRequest(self.imageName, imagePoints)

    def getImage(self):
        return self.imageManager.getImage(self.imageName)

    def initView(self, view):
        """Create (or adopt) the view, hook up picking, and add the actor."""
        self.view = view or app.getViewManager().createView(self.viewName, 'VTK View')
        self.view.installImageInteractor()
        self.interactorStyle = self.view.renderWindow().GetInteractor().GetInteractorStyle()
        self.interactorStyle.AddObserver('SelectionChangedEvent', self.onRubberBandPick)

        self.imageActor = vtk.vtkImageActor()
        self.imageActor.SetInputData(self.getImage())
        #-- Hidden until the first frame arrives (see updateView).
        self.imageActor.SetVisibility(False)
        self.view.renderer().AddActor(self.imageActor)

        self.view.orientationMarkerWidget().Off()
        self.view.backgroundRenderer().SetBackground(0,0,0)
        self.view.backgroundRenderer().SetBackground2(0,0,0)

        self.timerCallback = TimerCallback()
        self.timerCallback.targetFps = 60
        self.timerCallback.callback = self.updateView
        self.timerCallback.start()

    def initEventFilter(self):
        self.eventFilter = PythonQt.dd.ddPythonEventFilter()
        qvtkwidget = self.view.vtkWidget()
        qvtkwidget.installEventFilter(self.eventFilter)
        self.eventFilter.addFilteredEventType(QtCore.QEvent.MouseButtonDblClick)
        self.eventFilter.addFilteredEventType(QtCore.QEvent.KeyPress)
        self.eventFilter.connect('handleEvent(QObject*, QEvent*)', self.filterEvent)
        self.eventFilterEnabled = True

    def setCameraRoll(self, roll):
        self.cameraRoll = roll
        self.resetCamera()

    def resetCamera(self):
        """Restore the head-on orthographic camera and re-fit the image."""
        camera = self.view.camera()
        camera.ParallelProjectionOn()
        camera.SetFocalPoint(0,0,0)
        camera.SetPosition(0,0,-1)
        camera.SetViewUp(0,-1, 0)

        if self.cameraRoll is not None:
            camera.SetRoll(self.cameraRoll)

        self.view.resetCamera()
        self.fitImageToView()
        self.view.render()

    def fitImageToView(self):
        """Pick a parallel scale so the full image fits the viewport."""
        camera = self.view.camera()
        image = self.getImage()
        imageWidth, imageHeight, _ = image.GetDimensions()

        viewWidth, viewHeight = self.view.renderWindow().GetSize()
        aspectRatio = float(viewWidth)/viewHeight
        parallelScale = max(imageWidth/aspectRatio, imageHeight) / 2.0
        camera.SetParallelScale(parallelScale)

    def setImageName(self, imageName):
        """Switch this view to a different, already-registered stream."""
        if imageName == self.imageName:
            return
        assert self.imageManager.hasImage(imageName)

        self.imageName = imageName
        self.imageInitialized = False
        self.updateUtime = 0
        self.imageActor.SetInputData(self.imageManager.getImage(self.imageName))
        self.imageActor.SetVisibility(False)
        self.view.render()

    def initImageColorMap(self):
        """Build a lookup table over the image's current scalar range and
        route the image through vtkImageMapToColors."""
        self.depthImageColorByRange = self.getImage().GetScalarRange()

        lut = vtk.vtkLookupTable()
        lut.SetNumberOfColors(256)
        lut.SetHueRange(0, 0.667) # red to blue
        lut.SetRange(self.depthImageColorByRange) # map red (near) to blue (far)
        lut.SetRampToLinear()
        lut.Build()

        im = vtk.vtkImageMapToColors()
        im.SetLookupTable(lut)
        im.SetInputData(self.getImage())
        im.Update()
        self.depthImageLookupTable = lut
        self.imageMapToColors = im
        self.imageActor.SetInputData(im.GetOutput())

    def updateView(self):
        """Timer callback: re-render when a new frame arrives; on the first
        frame, optionally build the color map and show the actor."""
        if not self.view.isVisible():
            return

        if self.useImageColorMap and self.imageMapToColors:
            self.imageMapToColors.Update()

        currentUtime = self.imageManager.updateImage(self.imageName)
        if currentUtime != self.updateUtime:
            self.updateUtime = currentUtime
            self.view.render()

            if not self.imageInitialized and self.getImage().GetDimensions()[0]:

                if self.useImageColorMap:
                    self.initImageColorMap()

                self.imageActor.SetVisibility(True)
                self.resetCamera()
                self.imageInitialized = True
class CameraFrustumVisualizer(object):
    """Draws the camera frustum edges as debug geometry parented to a robot
    model, refreshed whenever the model changes."""

    def __init__(self, robotModel, imageManager, cameraName):
        self.robotModel = robotModel
        self.cameraName = cameraName
        self.imageManager = imageManager
        self.rayLength = 2.0
        robotModel.connectModelChanged(self.update)
        self.update(robotModel)

    @staticmethod
    def isCompatibleWithConfig():
        # Frustum drawing requires a head link in the director config.
        return 'headLink' in drcargs.getDirectorConfig()

    def getCameraToLocal(self):
        '''
        Returns cameraToLocal.  cameraToHead is pulled from bot frames while
        headToLocal is pulled from the robot model forward kinematics.
        '''
        headToLocal = self.robotModel.getLinkFrame( self.robotModel.getHeadLink() )
        cameraToHead = vtk.vtkTransform()
        self.imageManager.queue.getTransform(self.cameraName, self.robotModel.getHeadLink(), 0, cameraToHead)
        return transformUtils.concatenateTransforms([cameraToHead, headToLocal])

    def getCameraFrustumRays(self):
        '''
        Returns (cameraPositions, rays)
        cameraPosition is in world frame.
        rays are four unit length vectors in world frame that point in the
        direction of the camera frustum edges
        '''
        cameraToLocal = self.getCameraToLocal()
        cameraPos = np.array(cameraToLocal.GetPosition())

        camRays = []
        #-- Frustum bounds arrive packed as four xyz triples in the camera
        #-- frame; rotate each into world and normalize.
        #-- NOTE(review): xrange is Python-2 only.
        rays = np.array(self.imageManager.queue.getCameraFrustumBounds(self.cameraName))
        for i in xrange(4):
            ray = np.array(cameraToLocal.TransformVector(rays[i*3:i*3+3]))
            ray /= np.linalg.norm(ray)
            camRays.append(ray)

        return cameraPos, camRays

    def getCameraFrustumGeometry(self, rayLength):
        """Polydata of the four frustum edge lines plus the connecting
        rectangle at distance rayLength."""
        camPos, rays = self.getCameraFrustumRays()

        rays = [rayLength*r for r in rays]

        d = DebugData()
        d.addLine(camPos, camPos+rays[0])
        d.addLine(camPos, camPos+rays[1])
        d.addLine(camPos, camPos+rays[2])
        d.addLine(camPos, camPos+rays[3])
        d.addLine(camPos+rays[0], camPos+rays[1])
        d.addLine(camPos+rays[1], camPos+rays[2])
        d.addLine(camPos+rays[2], camPos+rays[3])
        d.addLine(camPos+rays[3], camPos+rays[0])
        return d.getPolyData()

    def update(self, robotModel):
        """Model-changed callback: redraw the frustum unless it is hidden."""
        name = 'camera frustum %s' % self.robotModel.getProperty('Name')
        obj = om.findObjectByName(name)

        if obj and not obj.getProperty('Visible'):
            return

        vis.updatePolyData(self.getCameraFrustumGeometry(self.rayLength), name, parent=self.robotModel, visible=False)
views = {}
def addCameraView(channel, viewName=None, cameraName=None, imageType=-1):
    """Create a CameraImageView for an LCM image channel and register it in
    the module-level `views` dict keyed by channel.

    NOTE(review): when cameraName is missing from the bot config this only
    warns and then proceeds to add the stream anyway.
    """
    cameraName = cameraName or channel
    if cameraName not in imageManager.queue.getCameraNames():
        import warnings
        warnings.warn(cameraName + " is not defined in the bot config")
    imageManager.queue.addCameraStream(channel, cameraName, imageType)

    #-- For known multi-image channels also subscribe to the combined
    #-- images_t stream carrying the left image.
    if cameraName == "MULTISENSE_CAMERA_LEFT":
        import bot_core as lcmbotcore
        imageManager.queue.addCameraStream(
            "MULTISENSE_CAMERA", "MULTISENSE_CAMERA_LEFT", lcmbotcore.images_t.LEFT)
    if cameraName == "OPENNI_FRAME_LEFT":
        import bot_core as lcmbotcore
        imageManager.queue.addCameraStream(
            "OPENNI_FRAME", "OPENNI_FRAME_LEFT", lcmbotcore.images_t.LEFT)

    imageManager.addImage(cameraName)
    view = CameraImageView(imageManager, cameraName, viewName)
    global views
    views[channel] = view
    return view
def getStereoPointCloud(decimation=4, imagesChannel='MULTISENSE_CAMERA', cameraName='MULTISENSE_CAMERA_LEFT', removeSize=0, rangeThreshold = -1):
    """Grab the latest stereo point cloud expressed in the local frame.

    Returns None when no image has arrived yet; otherwise a vtkPolyData
    (possibly empty) transformed by the camera-to-local pose at the
    image's utime.
    """
    queue = imageManager.queue

    utime = queue.getCurrentImageTime(cameraName)
    if utime == 0:
        return None

    cloud = vtk.vtkPolyData()
    queue.getPointCloudFromImages(imagesChannel, cloud, decimation, removeSize, rangeThreshold)

    if cloud.GetNumberOfPoints() > 0:
        cameraToLocal = vtk.vtkTransform()
        queue.getTransform(cameraName, 'local', utime, cameraToLocal)
        cloud = filterUtils.transformPolyData(cloud, cameraToLocal)

    return cloud
class KintinuousMapping(object):
    def __init__(self):
        # Incremental fused-mapping state: utime of the last accepted frame,
        # the raw camera pose at that time, and the accumulated clouds with
        # their fused/raw poses.
        self.lastUtime = 0
        self.lastCameraToLocal = vtk.vtkTransform()
        self.cameraToLocalFusedTransforms = []
        self.cameraToLocalTransforms = []
        self.pointClouds = []
    def getStereoPointCloudElapsed(self,decimation=4, imagesChannel='MULTISENSE_CAMERA', cameraName='MULTISENSE_CAMERA_LEFT', removeSize=0):
        """Return (cloud, cameraToLocalFused, cameraToLocal) for the newest
        stereo frame, or (None, None, None) when no frame is available,
        less than one second has elapsed, or the camera moved < 0.2 m.
        """
        q = imageManager.queue

        utime = q.getCurrentImageTime(cameraName)
        if utime == 0:
            return None, None, None
        #-- Throttle: require at least 1 second (1e6 microseconds) of new data.
        if (utime - self.lastUtime < 1E6):
            return None, None, None

        p = vtk.vtkPolyData()
        cameraToLocalFused = vtk.vtkTransform()
        q.getTransform('MULTISENSE_CAMERA_LEFT_ALT', 'local', utime, cameraToLocalFused)
        cameraToLocal = vtk.vtkTransform()
        q.getTransform('MULTISENSE_CAMERA_LEFT', 'local', utime, cameraToLocal)

        #-- Distance travelled since the last accepted frame.
        prevToCurrentCameraTransform = vtk.vtkTransform()
        prevToCurrentCameraTransform.PostMultiply()
        prevToCurrentCameraTransform.Concatenate( cameraToLocal )
        prevToCurrentCameraTransform.Concatenate( self.lastCameraToLocal.GetLinearInverse() )
        distTravelled = np.linalg.norm( prevToCurrentCameraTransform.GetPosition() )

        # 0.2 heavy overlap
        # 0.5 quite a bit of overlap
        # 1.0 is good
        if (distTravelled < 0.2 ):
            return None, None, None

        #-- NOTE(review): the keyword here is `removeThreshold`, while
        #-- getStereoPointCloud passes the same positional slot as
        #-- `rangeThreshold` -- confirm against ddBotImageQueue's signature.
        q.getPointCloudFromImages(imagesChannel, p, decimation, removeSize, removeThreshold = -1)

        self.lastCameraToLocal = cameraToLocal
        self.lastUtime = utime

        return p, cameraToLocalFused, cameraToLocal
def showFusedMaps(self):
om.removeFromObjectModel(om.findObjectByName('stereo'))
om.getOrCreateContainer('stereo')
q = imageManager.queue
cameraToLocalNow = vtk.vtkTransform()
utime = q.getCurrentImageTime('CAMERA_TSDF')
q.getTransform('MULTISENSE_CAMERA_LEFT','local', utime,cameraToLocalNow)
cameraToLocalFusedNow = vtk.vtkTransform()
q.getTransform('MULTISENSE_CAMERA_LEFT_ALT','local', utime,cameraToLocalFusedNow)
for i in range(len(self.pointClouds)):
fusedNowToLocalNow = vtk.vtkTransform()
fusedNowToLocalNow.PreMultiply()
fusedNowToLocalNow.Concatenate( cameraToLocalNow)
fusedNowToLocalNow.Concatenate( cameraToLocalFusedNow.GetLinearInverse() )
fusedTransform = vtk.vtkTransform()
fusedTransform.PreMultiply()
fusedTransform.Concatenate( fusedNowToLocalNow)
fusedTransform.Concatenate( self.cameraToLocalFusedTransforms[i] )
pd = filterUtils.transformPolyData(self.pointClouds[i], fusedTransform)
vis.showFrame(fusedTransform, ('cloud frame ' + str(i)), visible=True, scale=0.2, parent='stereo')
vis.showPolyData(pd, ('stereo ' + str(i)), parent='stereo', colorByName='rgb_colors')
# Without compensation for fusion motion estimation:
#pd = filterUtils.transformPolyData(self.pointClouds[i], self.cameraToLocalTransforms[i])
#vis.showFrame(self.cameraToLocalTransforms[i], ('cloud frame ' + str(i)), visible=True, scale=0.2)
#vis.showPolyData(pd, ('stereo ' + str(i)) )
# in fusion coordinate frame:
#pd = filterUtils.transformPolyData(self.pointClouds[i], self.cameraToLocalFusedTransforms[i])
#vis.showFrame(self.cameraToLocalFusedTransforms[i], ('cloud frame ' + str(i)), visible=True, scale=0.2)
#vis.showPolyData(pd, ('stereo ' + str(i)) )
def cameraFusedCallback(self):
#pd = cameraview.getStereoPointCloud(2,"CAMERA_FUSED")
pd, cameraToLocalFused, cameraToLocal = self.getStereoPointCloudElapsed(2,"CAMERA_FUSED")
#vis.updateFrame(cameraToLocal, 'cloud frame now', visible=True, scale=0.2)
if (pd is None):
return
self.pointClouds.append(pd)
self.cameraToLocalFusedTransforms.append( cameraToLocalFused )
self.cameraToLocalTransforms.append( cameraToLocal )
#pdCopy = vtk.vtkPolyData()
#pdCopy.DeepCopy(pd)
#cameraToLocalCopy = transformUtils.copyFrame(cameraToLocalFused)
#pdCopy = filterUtils.transformPolyData(pdCopy, cameraToLocalCopy)
#vis.showFrame(cameraToLocalCopy, 'cloud frame', visible=True, scale=0.2)
#vis.showPolyData(pdCopy,'stereo')
self.showFusedMaps()
def init():
    """Create the global ImageManager/CameraView and register a camera
    view for every camera the robot configuration provides."""
    global imageManager
    imageManager = ImageManager()

    global cameraView
    cameraView = CameraView(imageManager)

    # Bug fix: _modelName was only bound when the config contained
    # 'modelName', but it is read unconditionally below -- default to an
    # empty string so the robot-specific checks cannot raise NameError.
    config = drcargs.getDirectorConfig()
    _modelName = config['modelName'] if "modelName" in config else ""

    cameraNames = imageManager.queue.getCameraNames()
    if "MULTISENSE_CAMERA_LEFT" in cameraNames:
        addCameraView('MULTISENSE_CAMERA_LEFT', 'Head camera')
    if "OPENNI_FRAME_LEFT" in cameraNames:
        addCameraView('OPENNI_FRAME_LEFT', 'OpenNI')

    #import bot_core as lcmbotcore
    #addCameraView('MULTISENSE_CAMERA', 'Head camera right', 'MULTISENSE_CAMERA_RIGHT', lcmbotcore.images_t.RIGHT)
    #addCameraView('MULTISENSE_CAMERA', 'Head camera depth', 'MULTISENSE_CAMERA_DISPARITY', lcmbotcore.images_t.DISPARITY_ZIPPED)

    if "atlas" in _modelName or "valkyrie" in _modelName:
        addCameraView('CAMERACHEST_LEFT', 'Chest left')
        addCameraView('CAMERACHEST_RIGHT', 'Chest right')
    # Reuse _modelName instead of re-reading the config a second time.
    if "atlas" in _modelName:
        addCameraView('CAMERALHAND', 'Hand left')
        addCameraView('CAMERARHAND', 'Hand right')
    if "KINECT_RGB" in cameraNames:
        addCameraView('KINECT_RGB', 'Kinect RGB')
| patmarion/director | src/python/director/cameraview.py | Python | bsd-3-clause | 32,796 |
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
str = type('')
import sys
import pytest
from mock import patch
from collections import namedtuple
try:
from math import isclose
except ImportError:
from gpiozero.compat import isclose
from gpiozero.pins.mock import MockSPIDevice, MockPin
from gpiozero import *
def teardown_function(function):
    # pytest hook: runs after every test in this module, restoring the
    # mock pin factory so state cannot leak between tests.
    Device.pin_factory.reset()
def clamp(v, min_value, max_value):
    """Return *v* limited to the closed interval [min_value, max_value]."""
    if v < min_value:
        return min_value
    if v > max_value:
        return max_value
    return v
def scale(v, ref, bits):
    """Map a voltage *v* relative to *ref* onto the signed integer range
    of a *bits*-wide ADC word: -ref -> -(2**bits), +ref -> 2**bits - 1."""
    vmin = -(2 ** bits)
    vmax = 2 ** bits - 1
    span = vmax - vmin
    normalized = v / ref  # -1.0 .. +1.0 for in-range voltages
    return int((normalized + 1) / 2.0 * span + vmin)
class MockMCP3xxx(MockSPIDevice):
    """Generic mock of an MCP3xxx-family SPI analog-to-digital converter.

    Decodes the request bit-by-bit with a small state machine
    (idle -> mode -> single/diff -> result) and queues the conversion
    result for transmission back to the driver under test.
    """
    def __init__(
            self, clock_pin, mosi_pin, miso_pin, select_pin=None,
            channels=8, bits=10):
        super(MockMCP3xxx, self).__init__(
            clock_pin, mosi_pin, miso_pin, select_pin)
        # Reference voltage; simulated channel values are clamped to it.
        self.vref = 3.3
        # Simulated analog voltage present on each input channel.
        self.channels = [0.0] * channels
        # Number of request bits used to address a channel.
        self.channel_bits = 3
        # ADC resolution in bits.
        self.bits = bits
        # Current protocol decoding state.
        self.state = 'idle'

    def on_start(self):
        super(MockMCP3xxx, self).on_start()
        # Chip select asserted: restart protocol decoding.
        self.state = 'idle'

    def on_bit(self):
        # Advance the protocol state machine by one clocked-in bit.
        if self.state == 'idle':
            # Wait for the start bit (first 1 received).
            if self.rx_buf[-1]:
                self.state = 'mode'
                self.rx_buf = []
        elif self.state == 'mode':
            # 1 selects single-ended mode, 0 selects differential mode.
            if self.rx_buf[-1]:
                self.state = 'single'
            else:
                self.state = 'diff'
            self.rx_buf = []
        elif self.state in ('single', 'diff'):
            # Accumulate channel address bits, then compute the result.
            if len(self.rx_buf) == self.channel_bits:
                self.on_result(self.state == 'diff', self.rx_word())
                self.state = 'result'
        elif self.state == 'result':
            # Remain here until the queued reply has been clocked out.
            if not self.tx_buf:
                self.state = 'idle'
                self.rx_buf = []
        else:
            assert False

    def on_result(self, differential, channel):
        # Compute the conversion for the addressed channel and queue it
        # (reply width is bits + 2, covering start/null framing bits).
        if differential:
            pos_channel = channel
            # Differential pairs differ only in the lowest address bit.
            neg_channel = pos_channel ^ 1
            result = self.channels[pos_channel] - self.channels[neg_channel]
            # MCP3xxx differential mode does not go below zero.
            result = clamp(result, 0, self.vref)
        else:
            result = clamp(self.channels[channel], 0, self.vref)
        result = scale(result, self.vref, self.bits)
        self.tx_word(result, self.bits + 2)
class MockMCP3xx1(MockMCP3xxx):
    """Mock base for the single-pair MCP3x01 chips, which transmit their
    differential result immediately when selected rather than decoding a
    clocked request."""
    def __init__(self, clock_pin, mosi_pin, miso_pin, select_pin=None, bits=10):
        super(MockMCP3xx1, self).__init__(
            clock_pin, mosi_pin, miso_pin, select_pin, channels=2, bits=bits)

    def on_start(self):
        super(MockMCP3xx1, self).on_start()
        diff = clamp(self.channels[0] - self.channels[1], 0, self.vref)
        word = scale(diff, self.vref, self.bits)
        self.tx_word(word, self.bits + 3)

    def on_bit(self):
        # No request protocol: clocked-in bits are ignored.
        pass
class MockMCP3xx2(MockMCP3xxx):
    """Mock base for the two-channel MCP3x02 chips."""
    def __init__(self, clock_pin, mosi_pin, miso_pin, select_pin=None, bits=10):
        super(MockMCP3xx2, self).__init__(
            clock_pin, mosi_pin, miso_pin, select_pin, channels=2, bits=bits)
        # Only one address bit is needed to select between two channels.
        self.channel_bits = 1
class MockMCP33xx(MockMCP3xxx):
    """Mock base for the 13-bit signed MCP33xx chips: differential mode
    may produce negative readings, encoded as two's complement."""
    def __init__(
            self, clock_pin, mosi_pin, miso_pin, select_pin=None,
            channels=8):
        super(MockMCP33xx, self).__init__(
            clock_pin, mosi_pin, miso_pin, select_pin, channels, 12)

    def on_result(self, differential, channel):
        if differential:
            neg_channel = channel ^ 1
            value = clamp(self.channels[channel] - self.channels[neg_channel],
                          -self.vref, self.vref)
        else:
            value = clamp(self.channels[channel], 0, self.vref)
        word = scale(value, self.vref, self.bits)
        if word < 0:
            # Two's complement over the 13-bit reply word.
            word += 8192
        self.tx_word(word, self.bits + 3)
class MockMCP3001(MockMCP3xx1):
    """Mock MCP3001: one fixed differential pair, 10-bit."""
    def __init__(self, clock_pin, mosi_pin, miso_pin, select_pin=None):
        super(MockMCP3001, self).__init__(clock_pin, mosi_pin, miso_pin,
                                          select_pin, bits=10)
class MockMCP3002(MockMCP3xx2):
    """Mock MCP3002: two channels, 10-bit."""
    def __init__(self, clock_pin, mosi_pin, miso_pin, select_pin=None):
        super(MockMCP3002, self).__init__(clock_pin, mosi_pin, miso_pin,
                                          select_pin, bits=10)
class MockMCP3004(MockMCP3xxx):
    """Mock MCP3004: four channels, 10-bit."""
    def __init__(self, clock_pin, mosi_pin, miso_pin, select_pin=None):
        super(MockMCP3004, self).__init__(clock_pin, mosi_pin, miso_pin,
                                          select_pin, channels=4, bits=10)
class MockMCP3008(MockMCP3xxx):
    """Mock MCP3008: eight channels, 10-bit."""
    def __init__(self, clock_pin, mosi_pin, miso_pin, select_pin=None):
        super(MockMCP3008, self).__init__(clock_pin, mosi_pin, miso_pin,
                                          select_pin, channels=8, bits=10)
class MockMCP3201(MockMCP3xx1):
    """Mock MCP3201: one fixed differential pair, 12-bit."""
    def __init__(self, clock_pin, mosi_pin, miso_pin, select_pin=None):
        super(MockMCP3201, self).__init__(clock_pin, mosi_pin, miso_pin,
                                          select_pin, bits=12)
class MockMCP3202(MockMCP3xx2):
    """Mock MCP3202: two channels, 12-bit."""
    def __init__(self, clock_pin, mosi_pin, miso_pin, select_pin=None):
        super(MockMCP3202, self).__init__(clock_pin, mosi_pin, miso_pin,
                                          select_pin, bits=12)
class MockMCP3204(MockMCP3xxx):
    """Mock MCP3204: four channels, 12-bit."""
    def __init__(self, clock_pin, mosi_pin, miso_pin, select_pin=None):
        super(MockMCP3204, self).__init__(clock_pin, mosi_pin, miso_pin,
                                          select_pin, channels=4, bits=12)
class MockMCP3208(MockMCP3xxx):
    """Mock MCP3208: eight channels, 12-bit."""
    def __init__(self, clock_pin, mosi_pin, miso_pin, select_pin=None):
        super(MockMCP3208, self).__init__(clock_pin, mosi_pin, miso_pin,
                                          select_pin, channels=8, bits=12)
class MockMCP3301(MockMCP3xxx):
    """Mock MCP3301: one fixed differential pair with a signed 13-bit
    result, streamed immediately when the chip is selected."""
    def __init__(self, clock_pin, mosi_pin, miso_pin, select_pin=None):
        super(MockMCP3301, self).__init__(
            clock_pin, mosi_pin, miso_pin, select_pin, channels=2, bits=12)

    def on_start(self):
        super(MockMCP3301, self).on_start()
        diff = clamp(self.channels[0] - self.channels[1],
                     -self.vref, self.vref)
        word = scale(diff, self.vref, self.bits)
        if word < 0:
            # Two's complement encoding of negative readings.
            word += 8192
        self.tx_word(word, self.bits + 4)
class MockMCP3302(MockMCP33xx):
    """Mock MCP3302: four channels, signed 13-bit differential."""
    def __init__(self, clock_pin, mosi_pin, miso_pin, select_pin=None):
        super(MockMCP3302, self).__init__(clock_pin, mosi_pin, miso_pin,
                                          select_pin, channels=4)
class MockMCP3304(MockMCP33xx):
    """Mock MCP3304: eight channels, signed 13-bit differential."""
    def __init__(self, clock_pin, mosi_pin, miso_pin, select_pin=None):
        super(MockMCP3304, self).__init__(clock_pin, mosi_pin, miso_pin,
                                          select_pin, channels=8)
def single_mcp_test(mock, pot, channel, bits):
    """Drive *channel* of the mock ADC through zero, half and full scale
    and check the device under test reports matching readings."""
    steps = 2 ** bits
    value_tol = 1 / steps
    volt_tol = pot.max_voltage / steps
    cases = [
        (0.0,           0,               0.0, 0.0),
        (mock.vref / 2, (steps / 2) - 1, 0.5, pot.max_voltage / 2),
        (mock.vref,     steps - 1,       1.0, pot.max_voltage),
    ]
    for volts, raw, value, voltage in cases:
        mock.channels[channel] = volts
        assert pot.raw_value == raw
        assert isclose(pot.value, value, abs_tol=value_tol)
        assert isclose(pot.voltage, voltage, abs_tol=volt_tol)
def differential_mcp_test(mock, pot, pos_channel, neg_channel, bits, full=False):
    """Drive a differential pair through a fixed voltage sequence and
    check the device under test.  With full=False negative differences
    clamp to zero; with full=True they read as negative values."""
    steps = 2 ** bits
    value_tol = 1 / steps
    volt_tol = pot.max_voltage / steps

    def check(raw, value, voltage):
        assert pot.raw_value == raw
        assert isclose(pot.value, value, abs_tol=value_tol)
        assert isclose(pot.voltage, voltage, abs_tol=volt_tol)

    mock.channels[pos_channel] = 0.0
    mock.channels[neg_channel] = 0.0
    check(0, 0.0, 0.0)
    mock.channels[pos_channel] = mock.vref / 2
    check((steps / 2) - 1, 0.5, pot.max_voltage / 2)
    mock.channels[pos_channel] = mock.vref
    check(steps - 1, 1.0, pot.max_voltage)
    mock.channels[neg_channel] = mock.vref / 2
    check((steps / 2) - 1, 0.5, pot.max_voltage / 2)
    mock.channels[pos_channel] = mock.vref / 2
    check(0, 0.0, 0.0)
    # Negative difference: behaviour depends on whether the chip supports
    # full (signed) differential readings.
    mock.channels[pos_channel] = 0.0
    mock.channels[neg_channel] = mock.vref
    if full:
        check(-steps, -1.0, -pot.max_voltage)
    else:
        check(0, 0.0, 0.0)
def test_MCP3001():
    """MCP3001: fixed differential pair at 10 bits."""
    with patch('gpiozero.pins.local.SpiDev', None):
        chip = MockMCP3001(11, 10, 9, 8)
        with MCP3001() as adc:
            differential_mcp_test(chip, adc, 0, 1, 10)
        with MCP3001(max_voltage=5.0) as adc:
            differential_mcp_test(chip, adc, 0, 1, 10)
def test_MCP3002():
    """MCP3002: two channels at 10 bits; bad channel numbers rejected."""
    with patch('gpiozero.pins.local.SpiDev', None):
        chip = MockMCP3002(11, 10, 9, 8)
        with pytest.raises(ValueError):
            MCP3002(channel=5)
        with MCP3002(channel=1) as adc:
            single_mcp_test(chip, adc, 1, 10)
        with MCP3002(channel=1, max_voltage=5.0) as adc:
            single_mcp_test(chip, adc, 1, 10)
        with MCP3002(channel=1, differential=True) as adc:
            differential_mcp_test(chip, adc, 1, 0, 10)
def test_MCP3004():
    """MCP3004: four channels at 10 bits; bad channel numbers rejected."""
    with patch('gpiozero.pins.local.SpiDev', None):
        chip = MockMCP3004(11, 10, 9, 8)
        with pytest.raises(ValueError):
            MCP3004(channel=5)
        with MCP3004(channel=3) as adc:
            single_mcp_test(chip, adc, 3, 10)
        with MCP3004(channel=3, max_voltage=5.0) as adc:
            single_mcp_test(chip, adc, 3, 10)
        with MCP3004(channel=3, differential=True) as adc:
            differential_mcp_test(chip, adc, 3, 2, 10)
def test_MCP3008():
    """MCP3008: eight channels at 10 bits; bad channel numbers rejected."""
    with patch('gpiozero.pins.local.SpiDev', None):
        chip = MockMCP3008(11, 10, 9, 8)
        with pytest.raises(ValueError):
            MCP3008(channel=9)
        with MCP3008(channel=0) as adc:
            single_mcp_test(chip, adc, 0, 10)
        with MCP3008(channel=1, max_voltage=5.0) as adc:
            single_mcp_test(chip, adc, 1, 10)
        with MCP3008(channel=0, differential=True) as adc:
            differential_mcp_test(chip, adc, 0, 1, 10)
def test_MCP3201():
    """MCP3201: fixed differential pair at 12 bits."""
    with patch('gpiozero.pins.local.SpiDev', None):
        chip = MockMCP3201(11, 10, 9, 8)
        with MCP3201() as adc:
            differential_mcp_test(chip, adc, 0, 1, 12)
        with MCP3201(max_voltage=5.0) as adc:
            differential_mcp_test(chip, adc, 0, 1, 12)
def test_MCP3202():
    """MCP3202: two channels at 12 bits; bad channel numbers rejected."""
    with patch('gpiozero.pins.local.SpiDev', None):
        chip = MockMCP3202(11, 10, 9, 8)
        with pytest.raises(ValueError):
            MCP3202(channel=5)
        with MCP3202(channel=1) as adc:
            single_mcp_test(chip, adc, 1, 12)
        with MCP3202(channel=1, max_voltage=5.0) as adc:
            single_mcp_test(chip, adc, 1, 12)
        with MCP3202(channel=1, differential=True) as adc:
            differential_mcp_test(chip, adc, 1, 0, 12)
def test_MCP3204():
    """MCP3204: four channels at 12 bits; bad channel numbers rejected."""
    with patch('gpiozero.pins.local.SpiDev', None):
        chip = MockMCP3204(11, 10, 9, 8)
        with pytest.raises(ValueError):
            MCP3204(channel=5)
        with MCP3204(channel=1) as adc:
            single_mcp_test(chip, adc, 1, 12)
        with MCP3204(channel=1, max_voltage=5.0) as adc:
            single_mcp_test(chip, adc, 1, 12)
        with MCP3204(channel=1, differential=True) as adc:
            differential_mcp_test(chip, adc, 1, 0, 12)
def test_MCP3208():
    """MCP3208: eight channels at 12 bits; bad channel numbers rejected."""
    with patch('gpiozero.pins.local.SpiDev', None):
        chip = MockMCP3208(11, 10, 9, 8)
        with pytest.raises(ValueError):
            MCP3208(channel=9)
        with MCP3208(channel=7) as adc:
            single_mcp_test(chip, adc, 7, 12)
        with MCP3208(channel=7, max_voltage=5.0) as adc:
            single_mcp_test(chip, adc, 7, 12)
        with MCP3208(channel=7, differential=True) as adc:
            differential_mcp_test(chip, adc, 7, 6, 12)
def test_MCP3301():
    """MCP3301: fixed differential pair, full signed range."""
    with patch('gpiozero.pins.local.SpiDev', None):
        chip = MockMCP3301(11, 10, 9, 8)
        with MCP3301() as adc:
            differential_mcp_test(chip, adc, 0, 1, 12, full=True)
        with MCP3301(max_voltage=5.0) as adc:
            differential_mcp_test(chip, adc, 0, 1, 12, full=True)
def test_MCP3302():
    """MCP3302: four channels, full signed differential range."""
    with patch('gpiozero.pins.local.SpiDev', None):
        chip = MockMCP3302(11, 10, 9, 8)
        with pytest.raises(ValueError):
            MCP3302(channel=4)
        with MCP3302(channel=0) as adc:
            single_mcp_test(chip, adc, 0, 12)
        with MCP3302(channel=0, max_voltage=5.0) as adc:
            single_mcp_test(chip, adc, 0, 12)
        with MCP3302(channel=0, differential=True) as adc:
            differential_mcp_test(chip, adc, 0, 1, 12, full=True)
def test_MCP3304():
    """MCP3304: eight channels, full signed differential range."""
    with patch('gpiozero.pins.local.SpiDev', None):
        chip = MockMCP3304(11, 10, 9, 8)
        with pytest.raises(ValueError):
            MCP3304(channel=9)
        with MCP3304(channel=5) as adc:
            single_mcp_test(chip, adc, 5, 12)
        with MCP3304(channel=5, max_voltage=5.0) as adc:
            single_mcp_test(chip, adc, 5, 12)
        with MCP3304(channel=5, differential=True) as adc:
            differential_mcp_test(chip, adc, 5, 4, 12, full=True)
| MrHarcombe/python-gpiozero | tests/test_spi_devices.py | Python | bsd-3-clause | 13,991 |
"""
wiki-fetcher.py
Script to download MediaWiki pages and all embedded
documents, like images, PDFs, etc.
(c) 2015 by Manuel Peuster
manuel (dot) peuster (at) upb (dot) de
"""
"""
TODO:
* add recursive mode "-r": Download all content linked
from a given page (maybe with max_depth parameter)
"""
import argparse
import os
import re
import subprocess
from pprint import pprint as pp
import mwclient # pip install mwclient
# global pointer to our active wiki
SITE = None
def setup_connection(host, user=None, password=None):
    """
    Open the mwclient connection to the wiki at *host* and store it in
    the module-global SITE.  Logs in when a username is supplied.
    """
    global SITE
    SITE = mwclient.Site(host, path='/')
    if user is None:
        return
    SITE.login(user, password)
def no_archived_elements(img_list):
    """
    Return only the current revisions from *img_list*.

    Old revisions are identified by having 'archive' as part of their
    URL.  Not pretty, but the mwclient API offers no built-in revision
    filtering.
    """
    current = []
    for image in img_list:
        if 'archive' in image.imageinfo['url']:
            continue
        current.append(image)
    return current
def fetch_wiki_page(site, page, out=None, embedded_elements=True):
"""
Arguments:
- site : mwclient site object
- page : either mwclient page object or pagename as string
"""
if isinstance(page, basestring) or isinstance(page, str):
# if we get a pagename: fetch the page object
page = site.Pages[page]
if not page.exists:
raise Exception("Page not found: %s" % page.name)
out = "out" if out is None else out
ensure_dir(out)
print "Fetching page: %s" % page.name
# fetch page content as markdown
pagefile = re.sub(' ', '_', page.name)
with open("%s%s.md" % (out, pagefile), 'w') as f:
f.write(page.text().encode('utf8'))
print "Stored page content in %s.md" % page.name
# fetch all images used in page
# TODO: Filter? This will download all linked files (e.g. PDFs)
if embedded_elements:
print "Fetching page's embedded elements"
download_ps = []
for img in no_archived_elements(page.images()):
p = subprocess.Popen(
["wget", "-xNq",
"-O%s%s" % (out, img.name.replace("File:", "")),
img.imageinfo['url']
])
download_ps.append(p)
print "Downloading: %s" % img.name
print "Waiting for all downloads to finish..."
ecodes = [p.wait() for p in download_ps]
if 1 in ecodes:
print "*** WARNING: File download failed. ***"
def fetch_wiki_category(site, catname, out=None, embedded_elements=True):
"""
Fetches all pages contained in the given
category.
"""
print "Fetching category: %s" % catname
# if output folder not given, use catname
out = ("%s/" % catname) if out is None else out
# fetch all pages found in category
for page in site.Categories[catname]:
fetch_wiki_page(site, page, out, embedded_elements=embedded_elements)
def ensure_dir(directory):
    """
    Create *directory* (including parents) if it does not exist yet.

    Uses try/except instead of check-then-create so that a directory
    appearing between the existence test and makedirs (e.g. created by
    one of the parallel wget downloads) does not crash the script.
    """
    try:
        os.makedirs(directory)
    except OSError:
        # Only swallow the error if the directory actually exists now.
        if not os.path.isdir(directory):
            raise
def download(target,
             output="out/", category=None, embedded_elements=True, **kwargs):
    """
    Entry point: fetch *target* (a page name, or a category name when
    *category* is set) into the *output* directory.  Extra CLI keyword
    arguments are accepted and ignored.
    """
    global SITE
    if SITE is None:
        raise Exception("Wiki connection was not initialized.")
    if output[-1] != '/':
        output += '/'
    fetcher = fetch_wiki_category if category else fetch_wiki_page
    fetcher(SITE, target, output, embedded_elements=embedded_elements)
def upload_document(doc, excp):
"""upload both build progress information
as well as a potneitally generated PDF """
global SITE
# deal with any possible exceptions
if SITE is None:
raise Exception("Wiki connection was not initialized.")
# deal with the PDF file:
texdir = os.path.join(doc, 'tex')
pdffile = os.path.join(texdir,
'main.pdf')
if os.path.isfile(pdffile):
uploadedName = doc + ".pdf"
print "pdf exists, uploding ", pdffile, " as ", uploadedName
res = SITE.upload(open(pdffile),
uploadedName,
"Generated file for document " + doc,
ignore=True)
pp(res)
else:
print "no pdf to upload"
# any tar file to upload?
tarfile = os.path.join(doc, doc+'-latex.tgz')
if os.path.isfile(tarfile):
uploadName = doc+'-latex.tgz'
print "tar file exsists, upload: ", tarfile, uploadName
res = SITE.upload(open(tarfile),
uploadName,
"Generated intermediate files (figures, uml, latex) for " + doc,
ignore=True)
pp(res)
else:
print "no tar file to upload"
# prepare the build report page
page = SITE.Pages[doc + 'BuildReport']
text = page.text()
text = "= Build report for {} =\n".format(doc)
text += "\n== PDF file ==\n"
text += "\n[[File:" + doc + ".pdf]]\n"
# deal with the exceptions:
if excp:
text += "== Return code ==\n"
try:
#
text += str(excp.returncode)
except:
#
text += "(no returncode found)"
text += "\n== Output ==\n"
text += "\n<nowiki>\n"
try:
#
text += excp.output
except:
#
text += "(no error output exists)"
text += "\n</nowiki>\n"
else:
text += "\n== No errors reported! ==\n"
# done
text += "\n[[Category:BuildReport]]\n"
page.save(text)
def setup_cli_parser():
    """Build and return the command-line argument parser."""
    cli = argparse.ArgumentParser(
        description="Download MediaWiki pages/ categories and all linked content.")
    # connection options
    cli.add_argument("--host", dest="host", default="wiki.sonata-nfv.eu",
                     help="Host of Wiki to fetch from")
    cli.add_argument("--user", dest="user", default=None,
                     help="Username for Wiki")
    cli.add_argument("--pass", dest="password", default=None,
                     help="Password for Wiki")
    # fetch options
    cli.add_argument("-c", dest="category", action='store_true',
                     help="Fetch entire category instead of single page")
    cli.add_argument("--out", dest="output", default="out/",
                     help="Output directory (default is 'out' or name of category)")
    cli.add_argument("target",
                     help="Page name or category name to fetch")
    return cli
if __name__ == '__main__':
    # Command-line entry point: parse the arguments, open the wiki
    # connection, then download the requested page or category.
    parser = setup_cli_parser()
    args = parser.parse_args()
    setup_connection(host=args.host, user=args.user, password=args.password)
    download(**vars(args))
| hkarl/mw2pdf | wikiconnector.py | Python | bsd-3-clause | 6,941 |
from AuthHandler import AuthHandler
import json
class BaseHandler(AuthHandler):
    """Common base for request handlers; adds JSON response support."""

    def serve_json(self, dict):
        # Serialize the given object as JSON into the response body.
        # NOTE(review): the parameter name shadows the builtin `dict`;
        # renaming it would break any keyword callers, so it is only
        # flagged here rather than changed.
        self.response.write(json.dumps(dict))
| Trupal00p/polymer-template | app/BaseHandler.py | Python | bsd-3-clause | 162 |
#!/usr/bin/python
# $Id:$
import ctypes
import pyglet
# Handle to the Wintab driver DLL (Windows only).
lib = ctypes.windll.wintab32

# ctypes aliases matching the Win32/Wintab type names used by wintab.h.
LONG = ctypes.c_long
BOOL = ctypes.c_int
UINT = ctypes.c_uint
WORD = ctypes.c_uint16
DWORD = ctypes.c_uint32
WCHAR = ctypes.c_wchar
FIX32 = DWORD   # fixed-point value
WTPKT = DWORD   # packet mask
LCNAMELEN = 40  # length of the lcName field of LOGCONTEXT
class AXIS(ctypes.Structure):
    """Wintab AXIS structure: range and resolution of one input axis."""
    _fields_ = (
        ('axMin', LONG),
        ('axMax', LONG),
        ('axUnits', UINT),
        ('axResolution', FIX32)
    )

    def get_scale(self):
        # Factor that normalises the axis span to 1.0.
        span = float(self.axMax - self.axMin)
        return 1 / span

    def get_bias(self):
        # Offset that shifts axMin to zero.
        return -self.axMin
class ORIENTATION(ctypes.Structure):
    """Wintab ORIENTATION structure: pen attitude (azimuth/altitude/twist)."""
    _fields_ = (
        ('orAzimuth', ctypes.c_int),
        ('orAltitude', ctypes.c_int),
        ('orTwist', ctypes.c_int)
    )
class ROTATION(ctypes.Structure):
    """Wintab ROTATION structure (spec 1.1): pitch/roll/yaw rotation."""
    _fields_ = (
        ('roPitch', ctypes.c_int),
        ('roRoll', ctypes.c_int),
        ('roYaw', ctypes.c_int),
    )
class LOGCONTEXT(ctypes.Structure):
    """Wintab LOGCONTEXT structure: full description of a tablet context
    (packet format/rate, input/output extents, sensitivities).  The
    field layout must match wintab.h exactly."""
    _fields_ = (
        ('lcName', WCHAR * LCNAMELEN),
        ('lcOptions', UINT),
        ('lcStatus', UINT),
        ('lcLocks', UINT),
        ('lcMsgBase', UINT),
        ('lcDevice', UINT),
        ('lcPktRate', UINT),
        ('lcPktData', WTPKT),
        ('lcPktMode', WTPKT),
        ('lcMoveMask', WTPKT),
        ('lcBtnDnMask', DWORD),
        ('lcBtnUpMask', DWORD),
        ('lcInOrgX', LONG),
        ('lcInOrgY', LONG),
        ('lcInOrgZ', LONG),
        ('lcInExtX', LONG),
        ('lcInExtY', LONG),
        ('lcInExtZ', LONG),
        ('lcOutOrgX', LONG),
        ('lcOutOrgY', LONG),
        ('lcOutOrgZ', LONG),
        ('lcOutExtX', LONG),
        ('lcOutExtY', LONG),
        ('lcOutExtZ', LONG),
        ('lcSensX', FIX32),
        ('lcSensY', FIX32),
        ('lcSensZ', FIX32),
        ('lcSysMode', BOOL),
        ('lcSysOrgX', ctypes.c_int),
        ('lcSysOrgY', ctypes.c_int),
        ('lcSysExtX', ctypes.c_int),
        ('lcSysExtY', ctypes.c_int),
        ('lcSysSensX', FIX32),
        ('lcSysSensY', FIX32),
    )
# Custom packet format with fields
# PK_CHANGED
# PK_CURSOR
# PK_BUTTONS
# PK_X
# PK_Y
# PK_Z
# PK_NORMAL_PRESSURE
# PK_TANGENT_PRESSURE
# PK_ORIENTATION (check for tilt extension instead)?
class PACKET(ctypes.Structure):
    """Decoded tablet event packet.  The field set and order must match
    the lcPktData mask configured in DeviceInstance.__init__ (see the
    comment there)."""
    _fields_ = (
        ('pkChanged', WTPKT),
        ('pkCursor', UINT),
        ('pkButtons', DWORD),
        ('pkX', LONG),
        ('pkY', LONG),
        ('pkZ', LONG),
        ('pkNormalPressure', UINT),
        ('pkTangentPressure', UINT),
        ('pkOrientation', ORIENTATION),
    )
PK_CONTEXT = 0x0001 # reporting context
PK_STATUS = 0x0002 # status bits
PK_TIME = 0x0004 # time stamp
PK_CHANGED = 0x0008 # change bit vector
PK_SERIAL_NUMBER = 0x0010 # packet serial number
PK_CURSOR = 0x0020 # reporting cursor
PK_BUTTONS = 0x0040 # button information
PK_X = 0x0080 # x axis
PK_Y = 0x0100 # y axis
PK_Z = 0x0200 # z axis
PK_NORMAL_PRESSURE = 0x0400 # normal or tip pressure
PK_TANGENT_PRESSURE = 0x0800 # tangential or barrel pressure
PK_ORIENTATION = 0x1000 # orientation info: tilts
PK_ROTATION = 0x2000 # rotation info; 1.1
TU_NONE = 0
TU_INCHES = 1
TU_CENTIMETERS = 2
TU_CIRCLE = 3
# messages
WT_DEFBASE = 0x7ff0
WT_MAXOFFSET = 0xf
WT_PACKET = 0 # remember to add base
WT_CTXOPEN = 1
WT_CTXCLOSE = 2
WT_CTXUPDATE = 3
WT_CTXOVERLAP = 4
WT_PROXIMITY = 5
WT_INFOCHANGE = 6
WT_CSRCHANGE = 7
# system button assignment values
SBN_NONE = 0x00
SBN_LCLICK = 0x01
SBN_LDBLCLICK = 0x02
SBN_LDRAG = 0x03
SBN_RCLICK = 0x04
SBN_RDBLCLICK = 0x05
SBN_RDRAG = 0x06
SBN_MCLICK = 0x07
SBN_MDBLCLICK = 0x08
SBN_MDRAG = 0x09
# for Pen Windows
SBN_PTCLICK = 0x10
SBN_PTDBLCLICK = 0x20
SBN_PTDRAG = 0x30
SBN_PNCLICK = 0x40
SBN_PNDBLCLICK = 0x50
SBN_PNDRAG = 0x60
SBN_P1CLICK = 0x70
SBN_P1DBLCLICK = 0x80
SBN_P1DRAG = 0x90
SBN_P2CLICK = 0xA0
SBN_P2DBLCLICK = 0xB0
SBN_P2DRAG = 0xC0
SBN_P3CLICK = 0xD0
SBN_P3DBLCLICK = 0xE0
SBN_P3DRAG = 0xF0
HWC_INTEGRATED = 0x0001
HWC_TOUCH = 0x0002
HWC_HARDPROX = 0x0004
HWC_PHYSID_CURSORS = 0x0008 # 1.1
CRC_MULTIMODE = 0x0001 # 1.1
CRC_AGGREGATE = 0x0002 # 1.1
CRC_INVERT = 0x0004 # 1.1
WTI_INTERFACE = 1
IFC_WINTABID = 1
IFC_SPECVERSION = 2
IFC_IMPLVERSION = 3
IFC_NDEVICES = 4
IFC_NCURSORS = 5
IFC_NCONTEXTS = 6
IFC_CTXOPTIONS = 7
IFC_CTXSAVESIZE = 8
IFC_NEXTENSIONS = 9
IFC_NMANAGERS = 10
IFC_MAX = 10
WTI_STATUS = 2
STA_CONTEXTS = 1
STA_SYSCTXS = 2
STA_PKTRATE = 3
STA_PKTDATA = 4
STA_MANAGERS = 5
STA_SYSTEM = 6
STA_BUTTONUSE = 7
STA_SYSBTNUSE = 8
STA_MAX = 8
WTI_DEFCONTEXT = 3
WTI_DEFSYSCTX = 4
WTI_DDCTXS = 400 # 1.1
WTI_DSCTXS = 500 # 1.1
CTX_NAME = 1
CTX_OPTIONS = 2
CTX_STATUS = 3
CTX_LOCKS = 4
CTX_MSGBASE = 5
CTX_DEVICE = 6
CTX_PKTRATE = 7
CTX_PKTDATA = 8
CTX_PKTMODE = 9
CTX_MOVEMASK = 10
CTX_BTNDNMASK = 11
CTX_BTNUPMASK = 12
CTX_INORGX = 13
CTX_INORGY = 14
CTX_INORGZ = 15
CTX_INEXTX = 16
CTX_INEXTY = 17
CTX_INEXTZ = 18
CTX_OUTORGX = 19
CTX_OUTORGY = 20
CTX_OUTORGZ = 21
CTX_OUTEXTX = 22
CTX_OUTEXTY = 23
CTX_OUTEXTZ = 24
CTX_SENSX = 25
CTX_SENSY = 26
CTX_SENSZ = 27
CTX_SYSMODE = 28
CTX_SYSORGX = 29
CTX_SYSORGY = 30
CTX_SYSEXTX = 31
CTX_SYSEXTY = 32
CTX_SYSSENSX = 33
CTX_SYSSENSY = 34
CTX_MAX = 34
WTI_DEVICES = 100
DVC_NAME = 1
DVC_HARDWARE = 2
DVC_NCSRTYPES = 3
DVC_FIRSTCSR = 4
DVC_PKTRATE = 5
DVC_PKTDATA = 6
DVC_PKTMODE = 7
DVC_CSRDATA = 8
DVC_XMARGIN = 9
DVC_YMARGIN = 10
DVC_ZMARGIN = 11
DVC_X = 12
DVC_Y = 13
DVC_Z = 14
DVC_NPRESSURE = 15
DVC_TPRESSURE = 16
DVC_ORIENTATION = 17
DVC_ROTATION = 18 # 1.1
DVC_PNPID = 19 # 1.1
DVC_MAX = 19
WTI_CURSORS = 200
CSR_NAME = 1
CSR_ACTIVE = 2
CSR_PKTDATA = 3
CSR_BUTTONS = 4
CSR_BUTTONBITS = 5
CSR_BTNNAMES = 6
CSR_BUTTONMAP = 7
CSR_SYSBTNMAP = 8
CSR_NPBUTTON = 9
CSR_NPBTNMARKS = 10
CSR_NPRESPONSE = 11
CSR_TPBUTTON = 12
CSR_TPBTNMARKS = 13
CSR_TPRESPONSE = 14
CSR_PHYSID = 15 # 1.1
CSR_MODE = 16 # 1.1
CSR_MINPKTDATA = 17 # 1.1
CSR_MINBUTTONS = 18 # 1.1
CSR_CAPABILITIES = 19 # 1.1
CSR_TYPE = 20 # 1.2
CSR_MAX = 20
WTI_EXTENSIONS = 300
EXT_NAME = 1
EXT_TAG = 2
EXT_MASK = 3
EXT_SIZE = 4
EXT_AXES = 5
EXT_DEFAULT = 6
EXT_DEFCONTEXT = 7
EXT_DEFSYSCTX = 8
EXT_CURSORS = 9
EXT_MAX = 109 # Allow 100 cursors
CXO_SYSTEM = 0x0001
CXO_PEN = 0x0002
CXO_MESSAGES = 0x0004
CXO_MARGIN = 0x8000
CXO_MGNINSIDE = 0x4000
CXO_CSRMESSAGES = 0x0008 # 1.1
# context status values
CXS_DISABLED = 0x0001
CXS_OBSCURED = 0x0002
CXS_ONTOP = 0x0004
# context lock values
CXL_INSIZE = 0x0001
CXL_INASPECT = 0x0002
CXL_SENSITIVITY = 0x0004
CXL_MARGIN = 0x0008
CXL_SYSOUT = 0x0010
# packet status values
TPS_PROXIMITY = 0x0001
TPS_QUEUE_ERR = 0x0002
TPS_MARGIN = 0x0004
TPS_GRAB = 0x0008
TPS_INVERT = 0x0010 # 1.1
TBN_NONE = 0
TBN_UP = 1
TBN_DOWN = 2
PKEXT_ABSOLUTE = 1
PKEXT_RELATIVE = 2
# Extension tags.
WTX_OBT = 0 # Out of bounds tracking
WTX_FKEYS = 1 # Function keys
WTX_TILT = 2 # Raw Cartesian tilt; 1.1
WTX_CSRMASK = 3 # select input by cursor type; 1.1
WTX_XBTNMASK = 4 # Extended button mask; 1.1
WTX_EXPKEYS = 5 # ExpressKeys; 1.3
def wtinfo(category, index, buffer):
    """Fill *buffer* with the WTInfo data for (category, index), after
    asserting that the buffer is large enough, and return it."""
    required = lib.WTInfoW(category, index, None)
    assert required <= ctypes.sizeof(buffer)
    lib.WTInfoW(category, index, ctypes.byref(buffer))
    return buffer
def wtinfo_string(category, index):
    """Return the WTInfo value for (category, index) as a unicode string."""
    # First call reports the required buffer size in bytes.
    size = lib.WTInfoW(category, index, None)
    result = ctypes.create_unicode_buffer(size)
    lib.WTInfoW(category, index, result)
    return result.value
def wtinfo_uint(category, index):
    """Return the WTInfo value for (category, index) as an unsigned int."""
    value = UINT()
    lib.WTInfoW(category, index, ctypes.byref(value))
    return value.value
def wtinfo_word(category, index):
    """Return the WTInfo value for (category, index) as a 16-bit word."""
    value = WORD()
    lib.WTInfoW(category, index, ctypes.byref(value))
    return value.value
def wtinfo_dword(category, index):
    """Return the WTInfo value for (category, index) as a 32-bit dword."""
    value = DWORD()
    lib.WTInfoW(category, index, ctypes.byref(value))
    return value.value
def wtinfo_wtpkt(category, index):
    """Return the WTInfo value for (category, index) as a WTPKT mask."""
    value = WTPKT()
    lib.WTInfoW(category, index, ctypes.byref(value))
    return value.value
def wtinfo_bool(category, index):
    """Return the WTInfo value for (category, index) as a Python bool."""
    value = BOOL()
    lib.WTInfoW(category, index, ctypes.byref(value))
    return bool(value.value)
class Device:
    """One tablet device reported by the Wintab driver, together with
    its valid cursors and the pressure axis description."""
    def __init__(self, index):
        self._device = WTI_DEVICES + index
        self.name = wtinfo_string(self._device, DVC_NAME).strip()
        self.id = wtinfo_string(self._device, DVC_PNPID)

        hardware = wtinfo_uint(self._device, DVC_HARDWARE)
        # NOTE(review): computed but not used below -- presumably kept
        # for future physical-cursor-id support.
        phys_cursors = hardware & HWC_PHYSID_CURSORS

        n_cursors = wtinfo_uint(self._device, DVC_NCSRTYPES)
        first_cursor = wtinfo_uint(self._device, DVC_FIRSTCSR)

        self.pressure_axis = wtinfo(self._device, DVC_NPRESSURE, AXIS())

        # Valid cursors for this device, plus a map from Wintab cursor
        # index to cursor object used when dispatching packets.
        self.cursors = list()
        self._cursor_map = dict()
        for i in range(n_cursors):
            cursor = WintabCursor(self, i + first_cursor)
            if not cursor.bogus:
                self.cursors.append(cursor)
                self._cursor_map[i + first_cursor] = cursor

    def open(self, window):
        # Open a Wintab context for this device attached to the window.
        return DeviceInstance(self, window)
class DeviceInstance(pyglet.event.EventDispatcher):
    """An opened Wintab context bound to a window.

    Dispatches 'on_cursor_enter', 'on_cursor_leave' and 'on_motion'
    events (registered below) as tablet packets arrive via the window's
    message loop.
    """
    def __init__(self, device, window, msg_base=WT_DEFBASE):
        # Just use system context, for similarity w/ os x and xinput.
        # WTI_DEFCONTEXT detaches mouse from tablet, which is nice, but not
        # possible on os x afiak.
        self.device = device
        self.window = window

        self.context_info = context_info = LOGCONTEXT()
        wtinfo(WTI_DEFSYSCTX, 0, context_info)
        context_info.lcMsgBase = msg_base
        context_info.lcOptions |= CXO_MESSAGES

        # If you change this, change definition of PACKET also.
        context_info.lcPktData = (
            PK_CHANGED | PK_CURSOR | PK_BUTTONS | PK_X | PK_Y | PK_Z |
            PK_NORMAL_PRESSURE | PK_TANGENT_PRESSURE | PK_ORIENTATION)
        context_info.lcPktMode = 0  # All absolute

        self._context = lib.WTOpenW(window._hwnd,
                                    ctypes.byref(context_info), True)
        if not self._context:
            raise Exception("Couldn't open context")

        # Route the context's window messages to our handlers.
        window._event_handlers[msg_base + WT_PACKET] = self._event_wt_packet
        window._event_handlers[msg_base + WT_PROXIMITY] = \
            self._event_wt_proximity

        self._current_cursor = None
        # Precompute pressure normalisation from the device's axis range.
        self._pressure_scale = device.pressure_axis.get_scale()
        self._pressure_bias = device.pressure_axis.get_bias()

    def close(self):
        # Release the Wintab context; subsequent packets are ignored.
        lib.WTClose(self._context)
        self._context = None

    def _set_current_cursor(self, cursor_type):
        # Switch the active cursor, emitting leave/enter events.
        if self._current_cursor:
            self.dispatch_event('on_cursor_leave', self._current_cursor)
        self._current_cursor = self.device._cursor_map.get(cursor_type, None)
        if self._current_cursor:
            self.dispatch_event('on_cursor_enter', self._current_cursor)

    @pyglet.window.win32.Win32EventHandler(0)
    def _event_wt_packet(self, msg, wParam, lParam):
        """Handle a WT_PACKET message: decode the packet and dispatch
        an on_motion event with window-relative coordinates and
        normalised pressure."""
        if lParam != self._context:
            return
        packet = PACKET()
        if lib.WTPacket(self._context, wParam, ctypes.byref(packet)) == 0:
            return
        if not packet.pkChanged:
            return
        window_x, window_y = self.window.get_location()  # TODO cache on window
        # Convert to a bottom-left origin for the y coordinate.
        window_y = self.window.screen.height - window_y - self.window.height
        x = packet.pkX - window_x
        y = packet.pkY - window_y
        # Normalise raw pressure into 0..1 using the device axis range.
        pressure = (packet.pkNormalPressure + self._pressure_bias) * \
            self._pressure_scale
        if self._current_cursor is None:
            self._set_current_cursor(packet.pkCursor)
        self.dispatch_event('on_motion', self._current_cursor,
                            x, y, pressure)

    @pyglet.window.win32.Win32EventHandler(0)
    def _event_wt_proximity(self, msg, wParam, lParam):
        """Handle a WT_PROXIMITY message: emit on_cursor_leave when the
        cursor physically leaves the tablet surface."""
        if wParam != self._context:
            return
        if not lParam & 0xffff0000:
            # Not a hardware proximity event
            return
        if not lParam & 0xffff:
            # Going out
            self.dispatch_event('on_cursor_leave', self._current_cursor)
        # If going in, proximity event will be generated by next event, which
        # can actually grab a cursor id.
        self._current_cursor = None
# Event types dispatched by DeviceInstance to interested listeners.
DeviceInstance.register_event_type('on_cursor_enter')
DeviceInstance.register_event_type('on_cursor_leave')
DeviceInstance.register_event_type('on_motion')
class WintabCursor:
    """One cursor (pen tip, eraser, puck, ...) belonging to a Device."""
    def __init__(self, device, index):
        self.device = device
        self._cursor = WTI_CURSORS + index

        self.name = wtinfo_string(self._cursor, CSR_NAME).strip()
        self.active = wtinfo_bool(self._cursor, CSR_ACTIVE)
        pktdata = wtinfo_wtpkt(self._cursor, CSR_PKTDATA)

        # A whole bunch of cursors are reported by the driver, but most of
        # them are hogwash.  Make sure a cursor has at least X and Y data
        # before adding it to the device.
        self.bogus = not (pktdata & PK_X and pktdata & PK_Y)
        if self.bogus:
            return

        # Unique id: hardware cursor type in the high dword, physical id
        # in the low dword.
        self.id = (wtinfo_dword(self._cursor, CSR_TYPE) << 32) | \
            wtinfo_dword(self._cursor, CSR_PHYSID)

    def __repr__(self):
        return 'WintabCursor(%r)' % self.name
def check_version():
    """Print the WinTab driver identity and require spec version 1.1+."""
    name = wtinfo_string(WTI_INTERFACE, IFC_WINTABID)
    spec = wtinfo_word(WTI_INTERFACE, IFC_SPECVERSION)
    impl = wtinfo_word(WTI_INTERFACE, IFC_IMPLVERSION)
    # Versions are packed as major in the high byte, minor in the low byte.
    version_parts = (name, impl >> 8, impl & 0xff, spec >> 8, spec & 0xff)
    print('%s %d.%d (Spec %d.%d)' % version_parts)
    if spec < 0x101:
        raise ImportError('Require WinTab specification 1.1 or later')
def get_devices():
    """Return a Device object for every tablet the driver reports."""
    device_count = wtinfo_uint(WTI_INTERFACE, IFC_NDEVICES)
    return [Device(index) for index in range(device_count)]
| bitcraft/pyglet | contrib/experimental/input/wintab.py | Python | bsd-3-clause | 13,714 |
# coding: utf-8
"""
Main script for dbpedia quepy.
"""
import quepy
# Load the quepy application package named "dbpedia".
dbpedia = quepy.install("dbpedia")
# get_query returns (target variable, SPARQL query string, metadata).
target, query, metadata = dbpedia.get_query("what is a blowtorch?")
print query | apostolosSotiropoulos/interQuepy | dbpedia/main.py | Python | bsd-3-clause | 184 |
from getpass import getpass
from util_config import config
from flask import Flask, render_template, request, g
from werkzeug.security import generate_password_hash
app = Flask(__name__)
# DB support
import MySQLdb
# returns a database connection for MySQL
# returns a database connection for MySQL
def connect_to_database_mysql(database=None):
    """Open a MySQL connection using credentials from config.

    database : name of the schema to select, or None to connect to the
               server without selecting a schema (used before CREATE/DROP).
    """
    if database:
        # Fix: the original ignored the argument and always used
        # config['SQLDB']; honour the requested schema instead.  All
        # existing callers pass config["SQLDB"], so behaviour is unchanged
        # for them.
        return MySQLdb.connect(host=config['HOST'], port=config['PORT'],\
            user=config['USER'], passwd=config['PASSWD'], db=database)
    else:
        return MySQLdb.connect(host=config['HOST'], port=config['PORT'],\
            user=config['USER'], passwd=config['PASSWD'])
# set this line to define database connection
DBFUNC = connect_to_database_mysql
tbl_user = "tbl_user"
tbl_product = "tbl_product"
tbl_basketlines = "tbl_basketlines"
tbl_order = "tbl_order"
tbl_orderlines = "tbl_orderlines"
tbl_order_status = "tbl_order_status"
tbl_category = "tbl_category"
tbl_stock = "tbl_stock"
tbl_rating = "tbl_rating"
tbl_review = "tbl_review"
tbl_admin = "tbl_admin"
def main():
    """Drop and rebuild the e-commerce schema, then create the root user."""
    print "E-dot commerce database script starting..."
    remove_db() # Removing existing database if it already exists
    create_db() # Create database to work on
    # Set up all tables
    create_user_tbl()
    create_category_tbl()
    create_product_tbl()
    create_order_status_tbl()
    create_order_tbl()
    create_orderlines_tbl()
    create_basketlines_tbl()
    create_stock_tbl()
    create_rating_tbl()
    create_review_tbl()
    create_admin_tbl()
    #get user info for first user
    email = raw_input("Root user e-mail:")
    pwd = getpass()
    setup_root_user(email, pwd)
    print "Completed sucessfully"
def setup_root_user(email, passwd):
db = DBFUNC(config["SQLDB"])
print 'Setting root user "'+email+'" up...'
with db as cursor:
query = "insert into "+tbl_user+" (id, email, password, name) values (1, %s, %s, 'root');"
cursor.execute(query, (email, generate_password_hash(passwd)))
query = "insert into "+tbl_admin+" (user_id, level) values((select id from tbl_user where id=1), 0);"
cursor.execute(query)
db.commit()
def create_admin_tbl():
db = DBFUNC(config["SQLDB"])
print "Creating table", tbl_admin
with db as cursor:
query = "create table "+tbl_admin+" (user_id int(11) unsigned not null primary key, level int(4) unsigned);"
cursor.execute(query)
query = "alter table "+tbl_admin+" add constraint fk_admin_user foreign key (user_id) references "+tbl_user+"(id);"
cursor.execute(query)
db.commit()
def create_review_tbl():
db = DBFUNC(config["SQLDB"])
print "Creating table", tbl_review
with db as cursor:
query = "create table "+tbl_review+" (user_id INT(11) UNSIGNED NOT NULL, prod_id INT(11) UNSIGNED NOT NULL, commentdate datetime not null, comment VARCHAR(256), PRIMARY KEY (user_id, prod_id));"
cursor.execute(query)
query = "alter table "+tbl_review+" add constraint fk_review_user foreign key (user_id) references "+tbl_user+"(id);"
cursor.execute(query)
query = "alter table "+tbl_review+" add constraint fk_review_product foreign key (prod_id) references " \
""+tbl_product+"(id) ON DELETE CASCADE;"
cursor.execute(query)
db.commit()
def create_rating_tbl():
db = DBFUNC(config["SQLDB"])
print "Creating table", tbl_rating
with db as cursor:
query = "create table "+tbl_rating+" (user_id INT(11) UNSIGNED NOT NULL, prod_id INT(11) UNSIGNED NOT NULL, score INT(2), PRIMARY KEY (user_id, prod_id));"
cursor.execute(query)
query = "alter table "+tbl_rating+" add constraint fk_rating_user foreign key (user_id) references "+tbl_user+"(id);"
cursor.execute(query)
query = "alter table "+tbl_rating+" add constraint fk_rating_product foreign key (prod_id) references " \
""+tbl_product+"(id) ON DELETE CASCADE;"
cursor.execute(query)
db.commit()
def create_stock_tbl():
db = DBFUNC(config["SQLDB"])
cursor = db.cursor()
print "Creating table", tbl_stock
query = "create table " + tbl_stock + "(product_id INT(11) UNSIGNED PRIMARY KEY, amount INT(11) UNSIGNED);"
cursor.execute(query)
query = "alter table " + tbl_stock+" add CONSTRAINT fk_prod_stock FOREIGN KEY (product_id) REFERENCES " \
""+tbl_product+"(id) ON DELETE CASCADE;"
cursor.execute(query)
db.commit()
db.close()
def create_category_tbl():
db = DBFUNC(config["SQLDB"])
cursor = db.cursor()
print "Creating table", tbl_category
query = "create table " + tbl_category + "(id INT(11) UNSIGNED AUTO_INCREMENT PRIMARY KEY, name VARCHAR(32));"
cursor.execute(query)
db.commit()
db.close()
def create_product_tbl():
db = DBFUNC(config["SQLDB"])
cursor = db.cursor()
print "Creating table", tbl_product
query = "create table " + tbl_product + " (id INT(11) UNSIGNED AUTO_INCREMENT PRIMARY KEY, name VARCHAR(45), " \
"description VARCHAR(512), image_url VARCHAR(128), price DECIMAL(11,2), " \
"cat_id INT(11) UNSIGNED);"
cursor.execute(query)
query = "alter table " + tbl_product+" add CONSTRAINT fk_cat FOREIGN KEY (cat_id) REFERENCES "+tbl_category+"(id) ON DELETE CASCADE;"
cursor.execute(query)
db.commit()
db.close()
def create_orderlines_tbl():
    """Create tbl_orderlines: line items (product, amount, price) per order."""
    db = DBFUNC(config["SQLDB"])
    cursor = db.cursor()
    print "Creating table", tbl_orderlines
    query = "create table "+ tbl_orderlines +" (prod_id INT(11) UNSIGNED, order_id INT(11) UNSIGNED, amount INT(11) " \
            "UNSIGNED, price DECIMAL(11,2));"
    cursor.execute(query)
    # The product FK was deliberately left out so order history survives
    # product deletion (prod_id is kept as a plain column).
    # query = "alter table "+tbl_orderlines+" add CONSTRAINT fk_prod FOREIGN KEY (prod_id) REFERENCES "+tbl_product+"(" \
    #         "id) ON DELETE SET NULL;"
    # cursor.execute(query)
    query = "alter table "+tbl_orderlines +" add CONSTRAINT fk_order FOREIGN KEY (order_id) REFERENCES "+tbl_order+"(id);"
    cursor.execute(query)
    db.commit()
    db.close()
def create_basketlines_tbl():
db = DBFUNC(config["SQLDB"])
cursor = db.cursor()
print "Creating table", tbl_basketlines
query = "create table "+ tbl_basketlines +" (user_id INT(11) UNSIGNED, prod_id INT(11) UNSIGNED, amount INT(11) UNSIGNED, PRIMARY KEY(user_id, prod_id));"
cursor.execute(query)
query = "alter table "+tbl_basketlines + " add CONSTRAINT fk_basket_prod FOREIGN KEY (prod_id) REFERENCES " \
""+tbl_product+"(id) ON DELETE CASCADE;"
cursor.execute(query)
query = "alter table "+tbl_basketlines +" add CONSTRAINT fk_basket_user FOREIGN KEY (user_id) REFERENCES "+tbl_user+"(id);"
cursor.execute(query)
db.commit()
db.close()
def create_order_tbl():
    """Create tbl_order: one row per customer order, with status FK."""
    db = DBFUNC(config["SQLDB"])
    cursor = db.cursor()
    print "Creating table", tbl_order
    query = "create table "+ tbl_order+" (id INT(11) UNSIGNED AUTO_INCREMENT PRIMARY KEY, customer_id INT(11) UNSIGNED NOT " \
            "NULL, date DATE, order_status VARCHAR(32) DEFAULT 'Verified');"
    cursor.execute(query)
    query = "alter table "+ tbl_order+" add CONSTRAINT fk_customer_id FOREIGN KEY (customer_id) REFERENCES "+tbl_user+"(id);"
    cursor.execute(query)
    # order_status references the lookup table seeded in
    # create_order_status_tbl(), which must run first.
    query = "alter table " + tbl_order +" add CONSTRAINT fk_status FOREIGN KEY (order_status) REFERENCES " \
            ""+tbl_order_status+"(status);"
    cursor.execute(query)
    db.commit()
    db.close()
def create_order_status_tbl():
db = DBFUNC(config["SQLDB"])
cursor = db.cursor()
print "Creating table", tbl_order_status
query = "create table " + tbl_order_status + "(status VARCHAR(32) PRIMARY KEY NOT NULL);"
cursor.execute(query)
status = ["Verified","Sent", "Debased", "In Progress"]
for s in status:
query = "insert into " + tbl_order_status + " (status) values (%s);"
cursor.execute(query, (s,))
db.commit()
db.close()
def create_user_tbl():
    """Create tbl_user: customer accounts, hashed password, address fields."""
    db = DBFUNC(config["SQLDB"])
    cursor = db.cursor()
    print "Creating table", tbl_user
    # Note: the backslash continues the string literal, so the indent of
    # the second line becomes whitespace inside the SQL (harmless).
    query = "create table "+ tbl_user+" (id INT(11) UNSIGNED AUTO_INCREMENT PRIMARY KEY, email VARCHAR(32) NOT NULL UNIQUE,\
    password VARCHAR(128),name VARCHAR(64), address VARCHAR(64), postcode VARCHAR(9), city VARCHAR(32), country VARCHAR(32));"
    cursor.execute(query)
    db.commit();
    db.close()
def create_db():
db = DBFUNC()
cursor = db.cursor()
print "Creating database", config["SQLDB"]
query = "create database " + config["SQLDB"]+ ";"
cursor.execute(query)
db.commit();
db.close()
def remove_db():
db = DBFUNC()
cursor = db.cursor()
cursor.execute("show databases;")
numrows = int(cursor.rowcount)
for x in range(0, numrows):
row = cursor.fetchone()
if row[0] == config["SQLDB"]:
print "Removing database", config["SQLDB"]
query = "drop database " + config["SQLDB"]+ ";"
cursor.execute(query)
break
db.commit();
db.close()
# Runs immediately on import/execution (the script has no __main__ guard).
main()
| ax-rwnd/E-dot | utils/database_setup.py | Python | bsd-3-clause | 8,436 |
import sys
import re
from optparse import OptionParser
from nomination.models import Value, ValueSet, Valueset_Values
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.db import IntegrityError
def addToValset(inputfile, valset):
    r"""Add a batch of values to a value set in the database.

    Each line of *inputfile* must be tab-separated:
    [key]\t[value]\t[value_order].  Lines that do not match this format
    are skipped (the original crashed with AttributeError on them).
    """
    reg = re.compile("(.*)\t(.*)\t(.*)")
    # rff format should be [key]\t[value]\t[value_order]
    # Context manager guarantees the file is closed even if an
    # unexpected exception escapes the loop.
    with open(inputfile, 'r') as rff:
        if valset != "":
            for line in rff:
                res = reg.match(line)
                if res is None:
                    continue  # skip malformed/blank lines
                try:
                    # Skip keys already linked to this value set.
                    Valueset_Values.objects.get(valueset__name=valset, value__key=res.group(1))
                except ObjectDoesNotExist:
                    try:
                        valueobj, vcreated = Value.objects.get_or_create(key=res.group(1),
                                                                         value=res.group(2))
                        valuesetobj, screated = ValueSet.objects.get_or_create(name=valset)
                        Valueset_Values.objects.create(valueset=valuesetobj, value=valueobj,
                                                       value_order=res.group(3))
                    except IntegrityError:
                        print("Failed to add value '%s' to value set '%s'" % (res.group(1), valset))
                    except MultipleObjectsReturned:
                        print("Failed to add value '%s' to value set '%s'" % (res.group(1), valset))
        else:
            print("specify a valueset if you want to do something here")
if __name__ == "__main__":
    # Command-line entry point: parse --valset and a single input file.
    usage = "usage: %prog [options] <input file>"
    parser = OptionParser(usage)
    parser.add_option("--valset", action="store", type="string",
                      dest="valset", default="",
                      help="The value set to which values should be added.")
    (options, args) = parser.parse_args()
    if len(args) != 1:
        # Exactly one positional argument (the input file) is required.
        parser.print_help()
        sys.exit(1)
    addToValset(args[0], options.valset)
| unt-libraries/django-nomination | user_scripts/batch_valuesets.py | Python | bsd-3-clause | 1,956 |
#!/usr/bin/env python
"""This script shows another example of using the PyWavefront module."""
# This example was created by intrepid94
import ctypes
import sys
sys.path.append('..')
import pyglet
from pyglet.gl import *
from pywavefront import visualization
from pywavefront import Wavefront
# Current rotation angle of the globe in degrees; advanced by update().
rotation = 0.0
# Load the textured globe geometry from a Wavefront OBJ file.
meshes = Wavefront('data/earth.obj')
window = pyglet.window.Window(1024, 720, caption='Demo', resizable=True)
# Helper type for passing 4-float arrays (light parameters) to OpenGL.
lightfv = ctypes.c_float * 4
label = pyglet.text.Label(
    'Hello, world',
    font_name='Times New Roman',
    font_size=12,
    x=800, y=700,
    anchor_x='center', anchor_y='center')
@window.event
def on_resize(width, height):
    """Rebuild the perspective projection whenever the window is resized."""
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    # 40-degree vertical FOV, near/far clip planes at 1 and 100.
    gluPerspective(40.0, float(width) / height, 1.0, 100.0)
    glEnable(GL_DEPTH_TEST)
    glMatrixMode(GL_MODELVIEW)
    # Returning True stops pyglet's default resize handler from running.
    return True
@window.event
def on_draw():
    """Clear the window, set up lighting, and draw the rotated globe."""
    window.clear()
    glLoadIdentity()
    glLightfv(GL_LIGHT0, GL_POSITION, lightfv(-40.0, 200.0, 100.0, 0.0))
    glLightfv(GL_LIGHT0, GL_AMBIENT, lightfv(0.2, 0.2, 0.2, 1.0))
    glLightfv(GL_LIGHT0, GL_DIFFUSE, lightfv(0.5, 0.5, 0.5, 1.0))
    glEnable(GL_LIGHT0)
    glEnable(GL_LIGHTING)
    glEnable(GL_COLOR_MATERIAL)
    glEnable(GL_DEPTH_TEST)
    glShadeModel(GL_SMOOTH)
    glMatrixMode(GL_MODELVIEW)
    # glTranslated(0, 4, -8)
    # glRotatef(90, 0, 1, 0)
    # glRotatef(-60, 0, 0, 1)
    # Rotations for sphere on axis - useful
    glTranslated(0, .8, -20)
    glRotatef(-66.5, 0, 0, 1)
    glRotatef(rotation, 1, 0, 0)
    glRotatef(90, 0, 0, 1)
    glRotatef(0, 0, 1, 0)
    visualization.draw(meshes)
def update(dt):
    """Advance the globe rotation at 45 degrees per second.

    The angle is reset to zero once it exceeds 720 degrees, exactly as
    the original animation loop did.
    """
    global rotation
    next_angle = rotation + dt * 45
    rotation = 0.0 if next_angle > 720.0 else next_angle
# Call update() every frame, then enter the pyglet event loop.
pyglet.clock.schedule(update)
pyglet.app.run()
| greenmoss/PyWavefront | examples/globe_complex.py | Python | bsd-3-clause | 1,763 |
# Author: Darius Sullivan <darius.sullivan@thermo.com>
# Thermo Fisher Scientific (Cambridge, UK).
# $Rev: 151 $
# $Date: 2013-12-17 18:29:25 +0000 (Tue, 17 Dec 2013) $
from __future__ import print_function
import random
import re
import numpy as np
from itertools import count
from collections import namedtuple
from functools import partial
# Registry mapping Zemax surface-type strings (e.g. "STANDARD") to classes.
surface_types = dict()
# Registry mapping each surface class to its {attribute-name: Parameter class}.
surface_params = dict()
class surface_type(type):
    """A metaclass mapping Zemax surface type strings to Surface classes.

    Every class created with this metaclass that defines a non-empty
    ``surface_type`` attribute is registered in the module-level
    ``surface_types`` dict, and its Property descriptors are recorded in
    ``surface_params``.
    """
    def __init__(cls, name, bases, dict):
        super(surface_type, cls).__init__(name, bases, dict)
        _type = getattr(cls, "surface_type")
        if _type:
            surface_types[_type] = cls
        # register the parameters.
        # TODO: create an attribute on the surface class containing a
        # mapping between column and parameter.
        params = {}
        surface_params[cls] = params
        for key, val in dict.items():
            # Only Property descriptors declared directly on this class
            # are recorded (inherited ones are not in the class dict).
            if isinstance(val, Property):
                params[key] = val.param
class SurfaceSequence:
    """Sequence-style access to the surfaces of the lens in the editor.

    Surfaces are identified by a random label written via SetLabel so
    that objects stay valid when surface numbers shift after
    insertions/deletions.
    """
    # Labels are random positive 32-bit signed ints.
    max_surf_id = 2**31-1
    def __init__(self, conn, empty=False, copy_from_editor=False):
        self.conn = conn
        if empty:
            # self.conn.NewLens()
            # NOTE(review): deleting surface i while DeleteSurface
            # renumbers the remaining surfaces looks like it skips every
            # other surface; deleting index 1 repeatedly may have been
            # intended — confirm against Zemax behaviour.
            for i in range(1, self.__len__()-1):
                del self[i]
        if copy_from_editor:
            self.conn.GetRefresh()
        self._enforce_id_uniqueness()
    def __len__(self):
        # GetSystem's first field is the number of the last surface.
        response = self.conn.GetSystem()
        return response[0]+1
    def _translate_id(self, id):
        """Translate python-style sequence index into a Zemax surface number"""
        if id < 0:
            id += self.__len__()
        if id < 0:
            id = 0
        return id
    def __getitem__(self, surfno):
        """Return a surface object for the given index, labelling it if new."""
        surfno = self._translate_id(surfno)
        id = self.conn.GetLabel(surfno)
        if not id:
            # Unlabelled surface: assign a fresh random label.
            id = random.randint(1, self.max_surf_id)
            self.conn.SetLabel(surfno, id)
        _type = self.conn.GetSurfaceData(surfno, 0)
        # Unrecognised surface types fall back to UnknownSurface.
        surface_factory = surface_types.get(_type, UnknownSurface)
        surf = surface_factory(self.conn, id)
        return surf
    def __delitem__(self, surfno):
        surfno = self._translate_id(surfno)
        if surfno < 1:
            raise IndexError("Cannot delete this surface")
        self.conn.DeleteSurface(surfno)
    def __iter__(self):
        for i in range(len(self)):
            yield self[i]
    def insert_new(self, surfno, factory, *args, **kwargs):
        """Create and insert a new surface before the specified numbered surface.
        surfno = 0 : Not allowed
        surfno = 1 : Insert after the first surface(OBJ)
        surfno = -1 : Insert before the last surface (IMA)
        """
        surfno = self._translate_id(surfno)
        if surfno < 1:
            raise IndexError("Cannot insert before first surface")
        self.conn.InsertSurface(surfno)
        id = random.randint(1, self.max_surf_id)
        self.conn.SetLabel(surfno, id)
        return factory.create(self.conn, id, *args, **kwargs)
    def append_new(self, factory, *args, **kwargs):
        """Insert a new surface just before the image surface."""
        return self.insert_new(-1, factory, *args, **kwargs)
    def _enforce_id_uniqueness(self):
        # File "ZEMAX\Samples\Short course\sc_cooke1.zmx" has
        # duplicate ids.
        ids = set()
        for i in range(len(self)):
            id = self.conn.GetLabel(i)
            if id in ids:
                # Relabel duplicates with a fresh random id.
                id = random.randint(1, self.max_surf_id)
                self.conn.SetLabel(i, id)
            ids.add(id)
class NamedElements(object):
    """Expose tagged surfaces of a model as named attributes.

    On construction every surface in the sequence whose comment carries a
    tag becomes an attribute named after that tag.  Assigning a surface
    to an attribute also writes the attribute name back into the
    surface's tag.
    """
    def __init__(self, seq):
        """Collect all tagged surfaces from *seq* (a SurfaceSequence)."""
        for element in seq:
            label = element.comment.tag
            if label is None:
                continue
            setattr(self, label, element)
    def __setattr__(self, tag, surf):
        # Store the attribute normally, then record the name as the
        # surface's tag so the mapping survives a model round-trip.
        object.__setattr__(self, tag, surf)
        surf.comment.tag = tag
class SystemConfig(object):
    """Descriptor-based access to the Zemax system settings.

    Each attribute is a SystemParameter descriptor that reads one field
    of conn.GetSystemRaw() and (where supported) writes the whole
    settings tuple back via conn.SetSystemRaw().
    """
    # This can now use SetSystemParameter.
    # That would be less awkward and could access more attributes.
    unit_types = {
        0: "mm",
        1: "cm",
        2: "in",
        3: "m"
    }
    rayaiming_types = {
        0: None,
        1: "paraxial",
        2: "real"
    }
    # get <- (int(numsurfs), int(unitcode), int(stopsurf),
    #   int(nonaxialflag), int(rayaimingtype), int(adjust_index),
    #   float(temp), float(pressure), int(globalrefsurf))
    # set -> (unitcode, stopsurf, rayaimingtype, adjust_index, temp,
    #   pressure, globalrefsurf)
    class SystemParameter(object):
        """Data descriptor for one field of the system settings tuple."""
        # define the positions of the input parameters in the output format
        setget_map = [1, 2, 4, 5, 6, 7, 8]
        def __init__(self, get_id, _type=bool):
            self.get_id = get_id
            self._type = _type
        def __get__(self, system, owner):
            vals = system.conn.GetSystemRaw()
            val = vals[self.get_id]
            if self._type is bool:
                # Raw value may be a float string; go via int first.
                val = int(val)
            return self._type(val)
        def __set__(self, system, value):
            # We have to set all parameters at once
            try:
                set_id = self.setget_map.index(self.get_id)
            except ValueError:
                # Fix: only catch the expected .index() lookup failure,
                # not every exception (a bare Exception hid real errors).
                raise NotImplementedError("This parameter cannot be set")
            if self._type is bool:
                value = int(value)
            # retrieve current values
            orig = system.conn.GetSystemRaw()
            new = [orig[i] for i in self.setget_map]
            # update with new value
            new[set_id] = str(value)
            system.conn.SetSystemRaw(*new)
    numsurfs = SystemParameter(0, int)
    unitcode = SystemParameter(1, int)
    stopsurf = SystemParameter(2, int)
    nonaxialflag = SystemParameter(3, bool)
    rayaimingtype = SystemParameter(4, int)
    adjustindex = SystemParameter(5, bool)
    temperature = SystemParameter(6, float)
    pressure = SystemParameter(7, float)
    globalrefsurf = SystemParameter(8, int)
    def __init__(self, conn):
        self.conn = conn
class ModelConfigs(object):
    """Wrapper around the Zemax multi-configuration editor."""
    def __init__(self, conn):
        self.conn = conn
    def length(self):
        """Number of configurations defined in the model."""
        return self.conn.GetConfig()[1]
    def get_current(self):
        """Index of the currently active configuration."""
        return self.conn.GetConfig()[0]
    def set_current(self, config):
        """Make *config* the active configuration."""
        return self.conn.SetConfig(config)
    def get_num_operands(self):
        """Number of multi-configuration operands defined."""
        return self.conn.GetConfig()[2]
    def delete_operand(self, n):
        """Remove multi-configuration operand number *n* (1-based)."""
        return self.conn.DeleteMCO(n)
    def delete_config(self, n):
        """Remove configuration number *n* (1-based)."""
        return self.conn.DeleteConfig(n)
    def clear(self):
        """Delete every configuration, then every operand."""
        for index in range(self.length()):
            self.delete_config(index + 1)
        for index in range(self.get_num_operands()):
            self.delete_operand(index + 1)
class PickupFormat(object):
    """Describes how a pickup solve is encoded for one kind of parameter."""
    def __init__(self, solve_code, has_scale, has_offset, has_col_ref=False):
        """Defines how to set pickup solves on a parameter.
        solve_code :
          Integer code used to specify a pickup solve on the
          parameter with Set/GetSolve. Zemax manual (section:
          Solves >> Introduction) contains table of values (see
          codes "P").
        has_scale :
          Flag indicating whether referenced value can be scaled
        has_offset :
          Flag indicating whether referenced value can be offset
        has_col_ref :
          Flag indicating whether the pickup solve can reference
          other columns
        """
        self.solve_code = solve_code
        self.has_scale = has_scale
        self.has_offset = has_offset
        self.has_col_ref = has_col_ref
    def set_pickup(self, surfp, pickup_expr):
        """Apply *pickup_expr* (a PickupExpression) as a pickup solve on
        surface parameter *surfp*, raising TypeError when the expression
        uses a scale/offset/column reference this format cannot encode."""
        modifiers = []
        if self.has_scale:
            modifiers.append(pickup_expr.scale)
        elif not pickup_expr.scale == 1:
            raise TypeError(
                "Multiplication not applicable for this pickup solve type")
        if self.has_offset:
            modifiers.append(pickup_expr.offset)
        elif not pickup_expr.offset == 0:
            raise TypeError(
                "Addition not applicable for this pickup solve type")
        # Solves on "parameters" seem to require the scale and offset reversed,
        # contrary to the Zemax manual (see Solves>>Introduction).
        if surfp.reverse_pickup_terms:
            modifiers.reverse()
        col = pickup_expr.src_param.column
        if self.has_col_ref:
            modifiers.append(pickup_expr.src_param.solve_code + 1)
        elif not surfp.column == col:
            raise TypeError("Pickup solves on this parameter cannot "
                            "dereference other columns")
        surfp.surface.conn.SetSolve(surfp.surface.get_surf_num(),
                                    surfp.solve_code, self.solve_code,
                                    pickup_expr.surface.get_surf_num(),
                                    *modifiers)
class Parameter(object):
    """One editable cell of a surface row in the lens data editor.

    Values are read/written through Set/GetSurfaceData; solves (fixed,
    variable, pickup) are configured through Set/GetSolve.
    """
    # Whether scale/offset must be swapped in pickup solves (see
    # PickupFormat.set_pickup); overridden by AuxParameter.
    reverse_pickup_terms = False
    def __init__(self, surface, column, _type=float, solve_code=None,
                 pickup_conf=None, fix_code=None, can_optimise=False):
        """Define the behaviour of a parameter on a surface.
        column:
          Integer code for parameter when defining/querying a value
          with Set/GetSurfaceData. Zemax manual (section: Zemax
          Extensions >> GetSurfaceData) contains table of values.
        type :
          type for data element
        solve_code :
          Integer code for parameter when defining/querying a Solve
          with Set/GetSolve. Zemax manual (section: Zemax
          Extensions >> GetSolve) contains table of values.
        pickup_conf :
          PickupFormat instance defining how to set pickup solves
        fix_code :
          Integer code used to specify a fixed solve on the
          parameter with Set/GetSolve. Zemax manual (section:
          Solves >> Introduction) contains table of values.
        """
        self.surface = surface
        self.column = column
        self._type = _type
        self.solve_code = solve_code
        self.pickup_conf = pickup_conf
        self.fix_code = fix_code
        self.can_optimise = can_optimise
    def _client_get_value(self):
        # Raw read; subclasses override for other DDE items.
        s = self.surface
        return s.conn.GetSurfaceData(s.get_surf_num(), self.column)
    def _client_set_value(self, value):
        # Raw write; subclasses override for other DDE items.
        s = self.surface
        s.conn.SetSurfaceData(s.get_surf_num(), self.column, value)
    def set_value(self, value):
        """Assign a concrete value, or install a pickup solve when given
        a PickupExpression."""
        if isinstance(value, PickupExpression):
            self.link_value_to(value)
            return
        _ = self.surface.get_surf_num()
        # set parameter solve to fixed (spreadsheet-like behaviour)
        if self.fix_code is not None:
            self.fix()
        if self._type is bool:
            value = int(value)
        self._client_set_value(value)
    def get_value(self):
        """Read the current value, coerced to this parameter's type."""
        _ = self.surface.get_surf_num()
        value = self._client_get_value()
        if self._type in (bool, int):
            # value can be (eg.) "0.0000E+000"
            value = float(value)
        return self._type(value)
    def __repr__(self):
        return repr(self.get_value())
    def __str__(self):
        return str(self.get_value())
    def linked(self):
        """Return a PickupExpression referencing this parameter."""
        return PickupExpression(self.surface, self)
    def link_value_to(self, pickup_expr):
        """Install a pickup solve following *pickup_expr*."""
        self.pickup_conf.set_pickup(self, pickup_expr)
    def fix(self):
        """Set a fixed solve (value no longer driven by other cells)."""
        n = self.surface.get_surf_num()
        self.surface.conn.SetSolve(n, self.solve_code, self.fix_code)
    def vary(self):
        """Mark the parameter as variable for optimisation."""
        if self.can_optimise:
            n = self.surface.get_surf_num()
            self.surface.conn.SetSolve(n, self.solve_code, 1)
        else:
            raise NotImplementedError("Cannot optimise this parameter")
    value = property(get_value, set_value)
class Property(property):
    """Class-level descriptor producing a Parameter bound to the instance.

    Reading the attribute builds ``param(surface, *args, **kwargs)``;
    assigning goes through that Parameter's ``value`` setter.  The
    surface_type metaclass also uses these markers to build
    ``surface_params``.
    """
    # Marks surface parameters in the class dictionary
    def __init__(self, param, *args, **kwargs):
        def get(surface):
            return param(surface, *args, **kwargs)
        def set(surface, value):
            p = get(surface)
            p.value = value
        property.__init__(self, get, set)
        self.param = param
class AuxParameter(Parameter):
    """A "parameter" column (Set/GetSurfaceParameter) such as the
    coordinate-break offsets/rotations; supports full pickup solves."""
    # Parameter pickups need scale/offset in reversed order (see
    # PickupFormat.set_pickup).
    reverse_pickup_terms = True
    def __init__(self, surface, column, _type=float):
        # solve_code = column + 4 per the Zemax extensions numbering.
        Parameter.__init__(self, surface, column, _type, column + 4,
                           PickupFormat(2, True, True, True), 0, True)
    def _client_get_value(self):
        s = self.surface
        return s.conn.GetSurfaceParameter(s.get_surf_num(), self.column)
    def _client_set_value(self, value):
        s = self.surface
        s.conn.SetSurfaceParameter(s.get_surf_num(), self.column, value)
    def align_to_chief_ray(self, field_id=1, wavelength_id=0):
        """Install a chief-ray solve (solve type 3) on this parameter."""
        n = self.surface.get_surf_num()
        self.surface.conn.SetSolve(n, self.solve_code, 3, field_id,
                                   wavelength_id)
class ExtraParameter(Parameter):
    """An "extra data" column accessed via Set/GetExtra."""
    def __init__(self, surface, column, _type=float):
        # Extra-data solve codes start at 1000 + column.
        Parameter.__init__(self, surface, column, _type, column + 1000,
                           PickupFormat(2, True, False, False), 0, True)
    def _client_get_value(self):
        s = self.surface
        return s.conn.GetExtra(s.get_surf_num(), self.column)
    def _client_set_value(self, value):
        s = self.surface
        s.conn.SetExtra(s.get_surf_num(), self.column, value)
class CurvatureParameter(Parameter):
    """The surface curvature cell (GetSurfaceData code 2, solve code 0)."""
    def __init__(self, surface):
        Parameter.__init__(self, surface, 2, float, 0,
                           PickupFormat(4, True, False), 0, True)
    def set_fnumber(self, fnumber):
        """Constrain the f/# using a solve."""
        n = self.surface.get_surf_num()
        self.surface.conn.SetSolve(n, self.solve_code, 11, fnumber)
class ThicknessParameter(Parameter):
    """The surface thickness cell (GetSurfaceData code 3, solve code 1)."""
    def __init__(self, surface):
        Parameter.__init__(self, surface, 3, float, 1,
                           PickupFormat(5, True, True), 0, True)
    def focus_on_next(self, pupil_zone=0.2, target_height=0.0):
        """Constrains thickness so that next surface lies on the focal plane.
        Uses a solve for marginal ray height on the following surface.
        pupil_zone :
          0 : set to paraxial focus
          other : normalised entrace pupil y-coordinate for marginal
                  ray solve
        target_height : height of marginal ray for which thickness
          will be constrained (default is zero for focus)
        """
        n = self.surface.get_surf_num()
        self.surface.conn.SetSolve(n, self.solve_code, 2,
                                   target_height, pupil_zone)
class SemiDiameterParameter(Parameter):
    """The semi-diameter cell (GetSurfaceData code 5, solve code 3)."""
    def __init__(self, surface):
        Parameter.__init__(self, surface, 5, float, 3,
                           PickupFormat(2, True, False), 1)
    def maximise(self, fix=False):
        """Set to maximum of all configurations.
        fix - If true, update then fix the value."""
        n = self.surface.get_surf_num()
        self.surface.conn.SetSolve(n, self.solve_code, 3)
        if fix:
            # Force a recompute so the maximum is realised before fixing.
            self.surface.conn.GetUpdate()
            self.fix()
class CommentParameter(Parameter):
    """Embeds an optional tag inside the comment.
    The tag is hidden when the "value" is accessed,
    but can be accessed via the property "tag".

    Stored form is ``comment #tag#``; the regex strips the tag (and the
    whitespace before it) when reading.
    """
    tag_format = "%s #%s#"
    # Group 1: comment text; group 2 (optional): tag between '#' marks.
    tag_patt = re.compile(r"(.*?)\s*(?:#([^#]+)#)?$")
    # Longest comment that survives a .ZMX round-trip.
    max_len = 32
    def __init__(self, surface):
        Parameter.__init__(self, surface, 1, str)
    def get_comment_and_tag(self):
        """Split the stored cell into (comment, tag-or-None)."""
        value = Parameter._client_get_value(self)
        comment, tag = self.tag_patt.match(value).groups()
        return comment, tag
    def set_comment_and_tag(self, comment, tag):
        """Re-encode and store the comment together with its tag."""
        if tag:
            value = self.tag_format % (comment.rstrip(), tag)
        else:
            value = comment
        n = len(value)
        if n > self.max_len:
            # This string can probably be assigned to the model, but
            # it won't be saved correctly in a .ZMX file
            raise ValueError(("Comment field cannot be saved", n, value))
        Parameter._client_set_value(self, value)
    def set_value(self, comment):
        # Preserve the existing tag when only the comment changes.
        old_comment, tag = self.get_comment_and_tag()
        self.set_comment_and_tag(comment, tag)
    def get_value(self):
        comment, tag = self.get_comment_and_tag()
        return comment
    def get_tag(self):
        comment, tag = self.get_comment_and_tag()
        return tag
    def set_tag(self, tag):
        # Preserve the existing comment when only the tag changes.
        comment, old_tag = self.get_comment_and_tag()
        self.set_comment_and_tag(comment, tag)
    value = property(get_value, set_value)
    tag = property(get_tag, set_tag)
class BaseSurface(object):
    """Abstract base for all surface wrappers.

    Instances are identified by a label (``id``) so they stay valid when
    surface numbers change in the editor.
    """
    # NOTE: __metaclass__ only takes effect under Python 2; under
    # Python 3 the surface_type registry would not be populated.
    __metaclass__ = surface_type
    # Subclasses set this to their Zemax type string (e.g. "STANDARD").
    surface_type = None
    type = Property(Parameter, 0, str)
    def __init__(self, conn, id):
        self.conn = conn
        self.id = id
    @classmethod
    def create(cls, conn, id, comment=None, **kwargs):
        """Build a surface of this type and assign initial parameter
        values passed as keyword arguments."""
        # a surface type must be defined (don't call on the abstract class)
        assert(cls.surface_type is not None)
        # initialise surface
        surf = cls(conn, id)
        surf.type = surf.surface_type
        # assign arguments to surface parameters
        if comment is not None:
            kwargs["comment"] = comment
        for key, value in kwargs.items():
            try:
                p = getattr(surf, key)
            except Exception:
                raise KeyError(key)
            else:
                # Use method, rather than attribute, to ensure this is
                # Parameter instance.
                p.set_value(value)
        return surf
    def get_surf_num(self):
        """Look up the current surface number from this surface's label."""
        return self.conn.FindLabel(self.id)
    def remove(self):
        """Remove the surface from the model"""
        n = self.get_surf_num()
        if n < 1:
            raise IndexError("Cannot delete this surface")
        self.conn.DeleteSurface(n)
# Result record for a single-surface ray trace (see get_ray_intersect).
RayNode = namedtuple("RayNode", ["status", "vigcode", "intersect",
                                 "exit_cosines", "normal", "intensity"])
class UnknownSurface(BaseSurface):
    """Fallback wrapper used for any surface type without a dedicated class."""
    comment = Property(CommentParameter)
    thickness = Property(ThicknessParameter) # NSC doesn't have this
    ignored = Property(Parameter, 20, bool)
    def get_ray_intersect(self, h=(0.0, 0.0), p=(0.0, 0.0),
                          wavelength_num=0, _global=False):
        """Get the coordinates of a ray intersecting the surface.
        h:
          Normalised field coordinate. Default: (0.0, 0.0)
        p:
          Normalised pupil coordinate. Default: (0.0, 0.0) [i.e. chief ray]
        wavelength_num:
          Wavelength number
        _global :
          Return vectors in global coordinates?
        """
        # GetTrace(self, wave, mode, surf, h, p)
        # return status, int(vigcode), intersect, cosines, normal,
        # float(intensity)
        n = self.get_surf_num()
        result = self.conn.GetTrace(wavelength_num, 0, n, h, p)
        # status, vigcode, intersect, exit_cosines, normal, intensity = result
        ray = RayNode(*result)
        if ray.status:
            # Non-zero status means the trace failed (e.g. ray missed).
            raise Exception("GetTrace failed:", ray.status, ray)
        if _global:
            # convert vectors to global reference frame
            rotation, offset = self.conn.GetGlobalMatrix(n)
            ray = ray._replace(
                intersect=np.array(
                    ray.intersect*rotation.T + offset).flatten(),
                exit_cosines=np.array(ray.exit_cosines*rotation.T).flatten(),
                normal=np.array(ray.normal*rotation.T).flatten()
            )
        return ray
    def trace_from_surface(self, surf, origin, cosines):
        # Not implemented; placeholder for tracing from another surface.
        pass
    def fix_variables(self):
        """Fix all variables and parameters that were adjustable under
        optimisation."""
        n = self.get_surf_num()
        # Scan the surface parameters checking for adjustable variables
        fixed = []
        for i in range(17):
            # codes are 0-16
            vals = self.conn.GetSolve(n, i)
            # check if adjustable
            if int(vals[0]) == 1:
                # fix parameter
                self.conn.SetSolve(n, i, 0)
                fixed.append(i)
        return fixed
    def get_global_ref_status(self):
        """True when this surface is the global coordinate reference."""
        return (int(float(self.conn.GetSystemProperty(21))) ==
                self.get_surf_num())
    def make_global_reference(self):
        """Make this surface the global coordinate reference surface."""
        return bool(self.conn.SetSystemProperty(21, self.get_surf_num()))
    is_global_reference = property(get_global_ref_status)
class FiniteSurface(UnknownSurface):
    """A surface with a finite clear aperture (exposes semi-diameter)."""
    semidia = Property(SemiDiameterParameter)
    def set_rectangular_aperture(self, size, offset=(0, 0)):
        """Apply an aperture of type code 4 with the given size and
        decenter via SetAperture."""
        n = self.get_surf_num()
        self.conn.SetAperture(n, 4, size[0], size[1], offset[0], offset[1])
class Standard(FiniteSurface):
    """The Zemax STANDARD surface (sphere/conic with glass and coating)."""
    surface_type = "STANDARD"
    curvature = Property(CurvatureParameter)
    glass = Property(Parameter, 4, str, 2, PickupFormat(2, False, False), 0)
    conic = Property(Parameter, 6, float, 4,
                     PickupFormat(2, True, False), 0, True)
    coating = Property(Parameter, 7, str)
    # NOTE(review): the positional True lands in Parameter's solve_code
    # slot; it looks like can_optimise=True was intended — confirm.
    thermal_expansivity = Property(Parameter, 8, float, True)
class CoordinateBreak(UnknownSurface):
    """The Zemax COORDBRK surface: decenter/tilt transformations."""
    surface_type = "COORDBRK"
    offset_x = Property(AuxParameter, 1)
    offset_y = Property(AuxParameter, 2)
    rotate_x = Property(AuxParameter, 3)
    rotate_y = Property(AuxParameter, 4)
    rotate_z = Property(AuxParameter, 5)
    rotate_before_offset = Property(AuxParameter, 6, bool)
    # Maps (offset_xy, offset_z) flags to the SetSurfaceData return code.
    return_codes = {
        (False, False): 1,
        (True, False): 2,
        (True, True): 3
    }
    def return_to(self, surf, offset_xy=True, offset_z=True):
        """Set a coordinate return.
        Set surf to None to remove a coordinate return."""
        if surf is None:
            # do not return to a surface
            self.conn.SetSurfaceData(self.get_surf_num(), 80, 0)
        else:
            code = self.return_codes[(offset_xy, offset_z)]
            # Data item 81 holds the target surface, 80 the return mode.
            self.conn.SetSurfaceData(self.get_surf_num(), 81,
                                     surf.get_surf_num())
            self.conn.SetSurfaceData(self.get_surf_num(), 80, code)
def return_to_coordinate_frame(seq, first_return_surf,
                               last_return_surf, insert_point=None,
                               include_null_transforms=True,
                               factory=None):
    """Insert coordinate breaks that undo, in reverse order, every
    transformation applied by surfaces first_return_surf..last_return_surf.

    Undo surfaces pick up their values (negated) from the originals, so
    they track later edits.  Returns the surface number of the last
    surface inserted.
    NOTE(review): if include_null_transforms is False and no surface has
    a transformation, 'inserted' is never bound and the final return
    raises NameError — confirm intended usage always inserts something.
    """
    assert (first_return_surf < last_return_surf)
    nsteps = last_return_surf - first_return_surf + 1
    # Walk the range backwards so transformations unwind in reverse.
    surfaces_to_undo = range(last_return_surf, last_return_surf-nsteps, -1)
    if not factory:
        if insert_point is None:
            insert_point = last_return_surf
        # All undo surfaces are inserted at the same index; each new one
        # pushes the previous ones later, preserving reverse order.
        insertion_point_sequence = count(insert_point+1)
        factory = partial(seq.insert_new,
                          next(insertion_point_sequence), CoordinateBreak)
    for sn1 in surfaces_to_undo:
        to_undo = seq[sn1]
        if isinstance(to_undo, CoordinateBreak):
            # undo thickness first
            if not to_undo.thickness.value == 0 or include_null_transforms:
                inserted = factory()
                inserted.thickness.value = -to_undo.thickness.linked()
                inserted.comment.value = "UNDO thickness " + (
                    to_undo.comment.value or str(to_undo.get_surf_num()))
            transformations = [
                to_undo.offset_x.value,
                to_undo.offset_y.value,
                to_undo.rotate_x.value,
                to_undo.rotate_y.value,
                to_undo.rotate_z.value
            ]
            if any(transformations) or include_null_transforms:
                inserted = factory()
                inserted.offset_x.value = -to_undo.offset_x.linked()
                inserted.offset_y.value = -to_undo.offset_y.linked()
                inserted.rotate_x.value = -to_undo.rotate_x.linked()
                inserted.rotate_y.value = -to_undo.rotate_y.linked()
                inserted.rotate_z.value = -to_undo.rotate_z.linked()
                # Reverse the order of rotation vs offset to invert.
                inserted.rotate_before_offset.value = (
                    not to_undo.rotate_before_offset.value)
                inserted.comment.value = "UNDO " + (
                    to_undo.comment.value or str(to_undo.get_surf_num()))
        elif not to_undo.thickness.value == 0 or include_null_transforms:
            # simple surface, only requires undo of thickness
            inserted = factory()
            inserted.rotate_before_offset.value = False
            inserted.thickness.value = -to_undo.thickness.linked()
            inserted.comment.value = "UNDO " + (
                to_undo.comment.value or str(to_undo.get_surf_num()))
    return inserted.get_surf_num()
class PickupExpression:
    """Affine pickup expression ``scale * <source parameter> + offset``.

    Instances are combined with ordinary arithmetic (``+``, ``-``, ``*``,
    ``/``, unary ``-``/``+``); every operation returns a new expression, so
    operands are never mutated.

    Zemax extensions are restricted in setting pickup solves from
    different (columns). Currently, we can only pick up from
    different columns on "parameter" solves (ie. AuxParameter
    instances). This restriction does not exist in the GUI and ZPL
    scripts.
    """
    def __init__(self, surface, parameter, offset=0, scale=1):
        self.surface = surface        # surface the pickup refers to
        self.src_param = parameter    # source parameter being picked up
        self.offset = offset          # additive constant
        self.scale = scale            # multiplicative factor
    def _copy(self):
        # Fresh instance so the arithmetic operators stay side-effect free.
        return PickupExpression(self.surface, self.src_param,
                                self.offset, self.scale)
    def __add__(self, other):
        x = self._copy()
        x.offset += other
        return x
    def __radd__(self, other):
        return self.__add__(other)
    def __sub__(self, other):
        x = self._copy()
        x.offset -= other
        return x
    def __rsub__(self, other):
        # other - (scale*p + offset) == (-scale)*p + (other - offset)
        x = self._copy()
        x.offset = other - x.offset
        x.scale = -x.scale
        return x
    def __mul__(self, other):
        x = self._copy()
        x.offset *= other
        x.scale *= other
        return x
    def __rmul__(self, other):
        return self.__mul__(other)
    def __truediv__(self, other):
        # Modernization: Python 3 dispatches ``/`` to __truediv__, so the
        # real implementation lives here instead of the Python 2-only
        # __div__ hook (previously primary, with __truediv__ delegating).
        x = self._copy()
        x.offset /= float(other)
        x.scale /= float(other)
        return x
    # Backward-compatible alias for code that calls __div__ explicitly.
    __div__ = __truediv__
    def __neg__(self):
        x = self._copy()
        x.offset = -x.offset
        x.scale = -x.scale
        return x
    def __pos__(self):
        # Unary plus is the identity; no copy required.
        return self
def make_singlet(z):
    """Build a simple BK7 singlet in the frontend attached to ``z``."""
    seq = SurfaceSequence(z, empty=True)
    # Object plane sits 100 units in front of the first lens surface.
    seq[0].thickness = 100
    # Aperture definition: entrance pupil diameter of 10.0.
    z.SetSystemAper(0, 1, 10.0)
    # Front surface: BK7 glass, 1.0 thick.
    front_surface = seq.append_new(Standard)
    front_surface.glass = "BK7"
    front_surface.thickness = 1.0
    # Back surface: f/10 solve on the radius, thickness focused via a
    # marginal ray height solve on the following surface.
    back_surface = seq.append_new(Standard)
    back_surface.curvature.set_fnumber(10)
    back_surface.thickness.focus_on_next()
    # Transfer the completed prescription to the frontend.
    z.PushLens()
| dariussullivan/libzmx | libzmx.py | Python | bsd-3-clause | 27,411 |
import numpy as np
# Path to the weight dump produced by the Keras CNN example.
# Renamed from `file` to avoid shadowing the Python builtin.
npy_path = '../../example/keres_cnn/keras/output/convolution2d_2_W_z.npy'
data = np.load(npy_path)
print('weight data')
print(data.shape)
print(" f = data[0,i,:,:]")
# Iterate over every filter along axis 1 instead of a hard-coded 32,
# so the script works for any number of convolution filters.
for i in range(data.shape[1]):
    f = data[0, i, :, :]
print(f) | natsutan/NPU | tools/nnn_numpy_dump/nnn_numpy_dump.py | Python | bsd-3-clause | 234 |
#!/usr/bin/env python
from setuptools import setup, Extension
# The long description is taken verbatim from the README shipped with
# the source distribution.
with open("README.rst", "r") as f:
    LONG_DESCRIPTION = f.read().strip()
# Single-source version string, read from the VERSION file.
with open("VERSION", "r") as f:
    VERSION = f.read().strip()
# C sources compiled into the single `jellyfish` extension module.
SOURCES = ['jellyfishmodule.c', 'jaro.c', 'hamming.c', 'levenshtein.c',
           'nysiis.c', 'damerau_levenshtein.c', 'mra.c', 'soundex.c',
           'metaphone.c', 'porter.c']
# NOTE(review): "-pg", "-fprofile-arcs" and "-ftest-coverage" (paired with
# the "-lgcov" link flag below) enable gprof/gcov instrumentation; they
# look like leftover profiling/coverage settings rather than release
# flags -- confirm before publishing builds.
COMPILE_ARGS = ["-O3", "-std=c11", "-pg", "-fprofile-arcs", "-ftest-coverage"]
setup(name="jellyfish",
      version=VERSION,
      platforms=["any"],
      description=("a library for doing approximate and "
                   "phonetic matching of strings."),
      url="http://github.com/achernet/jellyfish",
      long_description=LONG_DESCRIPTION,
      classifiers=["Development Status :: 4 - Beta",
                   "Intended Audience :: Developers",
                   "License :: OSI Approved :: BSD License",
                   "Natural Language :: English",
                   "Operating System :: OS Independent",
                   "Programming Language :: Python",
                   "Topic :: Text Processing :: Linguistic"],
      ext_modules=[Extension(name="jellyfish",
                             sources=SOURCES,
                             extra_compile_args=COMPILE_ARGS,
                             extra_link_args=["-lgcov"])])
| achernet/jellyfish | setup.py | Python | bsd-3-clause | 1,343 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
fixedwidth.py:
Read or write a table with fixed width columns.
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft (aldcroft@head.cfa.harvard.edu)
"""
from . import core
from .core import InconsistentTableError, DefaultSplitter
from . import basic
class FixedWidthSplitter(core.BaseSplitter):
    """
    Split a line into column values using the fixed ``start`` and ``end``
    positions recorded on each ``col`` in ``self.cols``.

    The Header class is responsible for defining ``col.start`` and
    ``col.end`` for every column; the base Reader.read() function places
    the reference to ``header.cols`` on this splitter just in time for
    splitting data lines.

    Positions follow Python slice semantics, i.e. ``line[start:end]`` is
    the desired substring for a column.  There is no ``process_lines``
    hook here since that is generally not useful for fixed-width input.
    """
    delimiter_pad = ''
    bookend = False
    delimiter = '|'
    def __call__(self, lines):
        for line in lines:
            raw_vals = [line[col.start:col.end] for col in self.cols]
            if self.process_val:
                yield [self.process_val(val) for val in raw_vals]
            else:
                yield raw_vals
    def join(self, vals, widths):
        pad = self.delimiter_pad or ''
        delim = self.delimiter or ''
        sep = pad + delim + pad
        # Bookends wrap the whole row in delimiters, e.g. "| a | b |".
        if self.bookend:
            left_edge = delim + pad
            right_edge = pad + delim
        else:
            left_edge = right_edge = ''
        # Right-justify each value within its column width.
        cells = [val.rjust(width) for val, width in zip(vals, widths)]
        return left_edge + sep.join(cells) + right_edge
class FixedWidthHeaderSplitter(DefaultSplitter):
    """Splitter that breaks header lines on the ``|`` character."""
    delimiter = '|'
class FixedWidthHeader(basic.BasicHeader):
    """
    Fixed width table header reader.

    Determines column names and the fixed start/end position of every
    column, either from explicit user input (``col_starts``/``col_ends``),
    from one or two header lines, or from the first data line.
    """
    splitter_class = FixedWidthHeaderSplitter
    """ Splitter class for splitting data lines into columns """
    position_line = None # secondary header line position
    """ row index of line that specifies position (default = 1) """
    # Characters allowed (besides the delimiter and blanks) in a position
    # line such as "----- ------- -----".
    set_of_position_line_characters = set(r'`~!#$%^&*-_+=\|":' + "'")
    def get_line(self, lines, index):
        """Return processed line number ``index`` from ``lines``.

        Raises InconsistentTableError when no such line exists.
        """
        for i, line in enumerate(self.process_lines(lines)):
            if i == index:
                break
        else: # No header line matching
            raise InconsistentTableError('No header line found in table')
        return line
    def get_cols(self, lines):
        """
        Initialize the header Column objects from the table ``lines``.

        Based on the previously set Header attributes find or create the
        column names.  Sets ``self.cols`` with the list of Columns.

        Parameters
        ----------
        lines : list
            List of table lines
        """
        # See "else" clause below for explanation of start_line and position_line
        start_line = core._get_line_index(self.start_line, self.process_lines(lines))
        position_line = core._get_line_index(self.position_line, self.process_lines(lines))
        # If start_line is none then there is no header line.  Column positions are
        # determined from first data line and column names are either supplied by user
        # or auto-generated.
        if start_line is None:
            if position_line is not None:
                raise ValueError("Cannot set position_line without also setting header_start")
            data_lines = self.data.process_lines(lines)
            if not data_lines:
                raise InconsistentTableError(
                    'No data lines found so cannot autogenerate column names')
            vals, starts, ends = self.get_fixedwidth_params(data_lines[0])
            # Auto-generate names like "col1", "col2", ... via auto_format.
            self.names = [self.auto_format.format(i)
                          for i in range(1, len(vals) + 1)]
        else:
            # This bit of code handles two cases:
            # - start_line = <index> and position_line = None
            #   Single header line where that line is used to determine both the
            #   column positions and names.
            # - start_line = <index> and position_line = <index2>
            #   Two header lines where the first line defines the column names and
            #   the second line defines the column positions
            if position_line is not None:
                # Define self.col_starts and self.col_ends so that the call to
                # get_fixedwidth_params below will use those to find the header
                # column names.  Note that get_fixedwidth_params returns Python
                # slice col_ends but expects inclusive col_ends on input (for
                # more intuitive user interface).
                line = self.get_line(lines, position_line)
                if len(set(line) - set([self.splitter.delimiter, ' '])) != 1:
                    raise InconsistentTableError('Position line should only contain delimiters and one other character, e.g. "--- ------- ---".')
                # The line above lies. It accepts white space as well.
                # We don't want to encourage using three different
                # characters, because that can cause ambiguities, but white
                # spaces are so common everywhere that practicality beats
                # purity here.
                charset = self.set_of_position_line_characters.union(set([self.splitter.delimiter, ' ']))
                if not set(line).issubset(charset):
                    raise InconsistentTableError(f'Characters in position line must be part of {charset}')
                # Convert returned slice-style (exclusive) ends to the
                # inclusive convention expected as *input* below.
                vals, self.col_starts, col_ends = self.get_fixedwidth_params(line)
                self.col_ends = [x - 1 if x is not None else None for x in col_ends]
            # Get the header column names and column positions
            line = self.get_line(lines, start_line)
            vals, starts, ends = self.get_fixedwidth_params(line)
            self.names = vals
        self._set_cols_from_names()
        # Set column start and end positions.
        for i, col in enumerate(self.cols):
            col.start = starts[i]
            col.end = ends[i]
    def get_fixedwidth_params(self, line):
        """
        Split ``line`` on the delimiter and determine column values and
        column start and end positions.  This might include null columns
        with zero length (e.g. for ``header row = "| col1 || col2 | col3 |"``
        or ``header2_row = "----- ------- -----"``).  The null columns are
        stripped out.  Returns the values between delimiters and the
        corresponding start and end positions.

        Parameters
        ----------
        line : str
            Input line

        Returns
        -------
        vals : list
            List of values.
        starts : list
            List of starting indices.
        ends : list
            List of ending indices (exclusive, Python slice style).
        """
        # If column positions are already specified then just use those.
        # If neither column starts or ends are given, figure out positions
        # between delimiters.  Otherwise, either the starts or the ends have
        # been given, so figure out whichever wasn't given.
        if self.col_starts is not None and self.col_ends is not None:
            starts = list(self.col_starts) # could be any iterable, e.g. np.array
            ends = [x + 1 if x is not None else None for x in self.col_ends] # user supplies inclusive endpoint
            if len(starts) != len(ends):
                raise ValueError('Fixed width col_starts and col_ends must have the same length')
            vals = [line[start:end].strip() for start, end in zip(starts, ends)]
        elif self.col_starts is None and self.col_ends is None:
            # There might be a cleaner way to do this but it works...
            vals = line.split(self.splitter.delimiter)
            starts = [0]
            ends = []
            for val in vals:
                if val:
                    ends.append(starts[-1] + len(val))
                    starts.append(ends[-1] + 1)
                else:
                    # Null (zero-length) column: drop it, but advance the
                    # running start past the delimiter position.
                    starts[-1] += 1
            starts = starts[:-1]
            vals = [x.strip() for x in vals if x]
            if len(vals) != len(starts) or len(vals) != len(ends):
                raise InconsistentTableError('Error parsing fixed width header')
        else:
            # exactly one of col_starts or col_ends is given...
            if self.col_starts is not None:
                starts = list(self.col_starts)
                ends = starts[1:] + [None] # Assume each col ends where the next starts
            else: # self.col_ends is not None
                ends = [x + 1 for x in self.col_ends]
                starts = [0] + ends[:-1] # Assume each col starts where the last ended
            vals = [line[start:end].strip() for start, end in zip(starts, ends)]
        return vals, starts, ends
    def write(self, lines):
        # Header line not written until data are formatted.  Until then it is
        # not known how wide each column will be for fixed width.
        pass
class FixedWidthData(basic.BasicData):
    """
    Base reader/writer for the data lines of fixed width tables.
    """
    splitter_class = FixedWidthSplitter
    """ Splitter class for splitting data lines into columns """
    def write(self, lines):
        # One tuple of formatted string values per table row.
        rows = list(zip(*self.str_vals()))
        have_header = self.header.start_line is not None
        # Each column is as wide as its widest cell (including the column
        # name when a header line will be written).
        for idx, col in enumerate(self.cols):
            col.width = max(len(row[idx]) for row in rows)
            if have_header:
                col.width = max(col.width, len(col.info.name))
        widths = [col.width for col in self.cols]
        if have_header:
            names = [col.info.name for col in self.cols]
            lines.append(self.splitter.join(names, widths))
        if self.header.position_line is not None:
            # Secondary header line drawn with position_char, e.g. "--- ----".
            char = self.header.position_char
            if len(char) != 1:
                raise ValueError('Position_char="{}" must be a single '
                                 'character'.format(char))
            lines.append(self.splitter.join([char * w for w in widths], widths))
        for row in rows:
            lines.append(self.splitter.join(row, widths))
        return lines
class FixedWidth(basic.Basic):
    """Fixed width table with a single header line defining column names
    and positions.

    Examples::

      # Bar delimiter in header and data
      | Col1 | Col2 | Col3 |
      | 1.2 | hello there | 3 |
      | 2.4 | many words | 7 |

      # Bar delimiter in header only
      Col1 | Col2 | Col3
      1.2 hello there 3
      2.4 many words 7

      # No delimiter with column positions specified as input
      Col1 Col2Col3
      1.2hello there 3
      2.4many words 7

    See the :ref:`fixed_width_gallery` for specific usage examples.
    """
    _format_name = 'fixed_width'
    _description = 'Fixed width'
    header_class = FixedWidthHeader
    data_class = FixedWidthData
    def __init__(self, col_starts=None, col_ends=None, delimiter_pad=' ', bookend=True):
        super().__init__()
        # Column positions are consumed by the header when reading...
        self.header.col_starts = col_starts
        self.header.col_ends = col_ends
        # ...while padding/bookending control how rows are written out.
        self.data.splitter.delimiter_pad = delimiter_pad
        self.data.splitter.bookend = bookend
class FixedWidthNoHeaderHeader(FixedWidthHeader):
    """Header reader for fixed width tables with no header line."""
    start_line = None
class FixedWidthNoHeaderData(FixedWidthData):
    """Data reader for a fixed width table without a header line."""
    start_line = 0
class FixedWidthNoHeader(FixedWidth):
    """Fixed width table which has no header line.

    When reading, column names are either input (``names`` keyword) or
    auto-generated.  Column positions are determined either by input
    (``col_starts`` and ``col_stops`` keywords) or by splitting the first
    data line.  In the latter case a ``delimiter`` is required to split the
    data line.

    Examples::

      # Bar delimiter in header and data
      | 1.2 | hello there | 3 |
      | 2.4 | many words | 7 |

      # Compact table having no delimiter and column positions specified as input
      1.2hello there3
      2.4many words 7

    This class is just a convenience wrapper around the ``FixedWidth``
    reader but with ``header.start_line = None`` and ``data.start_line = 0``.

    See the :ref:`fixed_width_gallery` for specific usage examples.
    """
    _format_name = 'fixed_width_no_header'
    _description = 'Fixed width with no header'
    header_class = FixedWidthNoHeaderHeader
    data_class = FixedWidthNoHeaderData
    def __init__(self, col_starts=None, col_ends=None, delimiter_pad=' ', bookend=True):
        # Everything is delegated to FixedWidth; only the start lines (set
        # on the subclassed header/data classes above) differ.
        super().__init__(col_starts, col_ends, delimiter_pad=delimiter_pad,
                         bookend=bookend)
class FixedWidthTwoLineHeader(FixedWidthHeader):
    """Header reader for fixed width tables splitting on whitespace.

    Fixed width tables with several header lines typically include a
    white-space delimited format line, so the header is split on white
    space rather than on a fixed delimiter character.
    """
    splitter_class = DefaultSplitter
class FixedWidthTwoLineDataSplitter(FixedWidthSplitter):
    """Splitter for fixed width tables that joins columns with ``' '``."""
    delimiter = ' '
class FixedWidthTwoLineData(FixedWidthData):
    """Data reader for fixed width tables with two header lines."""
    splitter_class = FixedWidthTwoLineDataSplitter
class FixedWidthTwoLine(FixedWidth):
    """Fixed width table which has two header lines.

    The first header line defines the column names and the second
    implicitly defines the column positions.

    Examples::

      # Typical case with column extent defined by ---- under column names.
      col1 col2 <== header_start = 0
      ----- ------------ <== position_line = 1, position_char = "-"
      1 bee flies <== data_start = 2
      2 fish swims

      # Pretty-printed table
      +------+------------+
      | Col1 | Col2 |
      +------+------------+
      | 1.2 | "hello" |
      | 2.4 | there world|
      +------+------------+

    See the :ref:`fixed_width_gallery` for specific usage examples.
    """
    _format_name = 'fixed_width_two_line'
    _description = 'Fixed width with second header line'
    data_class = FixedWidthTwoLineData
    header_class = FixedWidthTwoLineHeader
    def __init__(self, position_line=1, position_char='-', delimiter_pad=None, bookend=False):
        super().__init__(delimiter_pad=delimiter_pad, bookend=bookend)
        # The position line sits between the names line and the data; it is
        # drawn with `position_char` when writing.
        self.header.position_line = position_line
        self.header.position_char = position_char
        self.data.start_line = position_line + 1
| bsipocz/astropy | astropy/io/ascii/fixedwidth.py | Python | bsd-3-clause | 15,255 |
"""Defines the unit tests for the :mod:`colour.characterisation.aces_it` module."""
from __future__ import annotations
import numpy as np
import os
import unittest
from colour.characterisation import (
MSDS_ACES_RICD,
MSDS_CAMERA_SENSITIVITIES,
SDS_COLOURCHECKERS,
sd_to_aces_relative_exposure_values,
read_training_data_rawtoaces_v1,
generate_illuminants_rawtoaces_v1,
white_balance_multipliers,
best_illuminant,
normalise_illuminant,
training_data_sds_to_RGB,
training_data_sds_to_XYZ,
optimisation_factory_rawtoaces_v1,
optimisation_factory_Jzazbz,
matrix_idt,
camera_RGB_to_ACES2065_1,
)
from colour.characterisation.aces_it import RESOURCES_DIRECTORY_RAWTOACES
from colour.colorimetry import (
MSDS_CMFS,
MultiSpectralDistributions,
SDS_ILLUMINANTS,
SpectralDistribution,
SpectralShape,
reshape_msds,
sds_and_msds_to_msds,
sd_constant,
sd_ones,
)
from colour.io import read_sds_from_csv_file
from colour.utilities import domain_range_scale
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"MSDS_CANON_EOS_5DMARK_II",
"SD_AMPAS_ISO7589_STUDIO_TUNGSTEN",
"TestSpectralToAcesRelativeExposureValues",
"TestReadTrainingDataRawtoacesV1",
"TestGenerateIlluminantsRawtoacesV1",
"TestWhiteBalanceMultipliers",
"TestBestIlluminant",
"TestNormaliseIlluminant",
"TestTrainingDataSdsToRGB",
"TestTrainingDataSdsToXYZ",
"TestOptimizationFactoryRawtoacesV1",
"TestOptimizationFactoryJzazbz",
"TestMatrixIdt",
"TestCamera_RGB_to_ACES2065_1",
]
# Camera fixture: Canon EOS 5D Mark II RGB spectral sensitivities bundled
# with the RAW to ACES v1 resources, merged into one multi-spectral
# distribution for use throughout these tests.
MSDS_CANON_EOS_5DMARK_II: MultiSpectralDistributions = sds_and_msds_to_msds(
    list(
        read_sds_from_csv_file(
            os.path.join(
                RESOURCES_DIRECTORY_RAWTOACES,
                "CANON_EOS_5DMark_II_RGB_Sensitivities.csv",
            )
        ).values()
    )
)
# Illuminant fixture: ISO 7589 studio tungsten distribution from the same
# resources; the CSV keys it as "iso7589".
SD_AMPAS_ISO7589_STUDIO_TUNGSTEN: SpectralDistribution = (
    read_sds_from_csv_file(
        os.path.join(
            RESOURCES_DIRECTORY_RAWTOACES, "AMPAS_ISO_7589_Tungsten.csv"
        )
    )["iso7589"]
)
class TestSpectralToAcesRelativeExposureValues(unittest.TestCase):
    """
    Define :func:`colour.characterisation.aces_it.\
sd_to_aces_relative_exposure_values` definition unit tests methods.
    """
    def test_spectral_to_aces_relative_exposure_values(self):
        """
        Test :func:`colour.characterisation.aces_it.\
sd_to_aces_relative_exposure_values` definition.
        """
        sd_shape = MSDS_ACES_RICD.shape
        # An 18% grey reflector maps onto itself.
        grey = sd_constant(0.18, sd_shape)
        np.testing.assert_almost_equal(
            sd_to_aces_relative_exposure_values(grey),
            np.array([0.18, 0.18, 0.18]),
            decimal=7,
        )
        # A perfect (unit) reflector.
        white = sd_ones(sd_shape)
        np.testing.assert_almost_equal(
            sd_to_aces_relative_exposure_values(white),
            np.array([0.97783784, 0.97783784, 0.97783784]),
            decimal=7,
        )
        # "Dark skin" patch: default illuminant, illuminant "A", then with
        # chromatic adaptation (default and Bradford transforms).
        dark_skin_sd = SDS_COLOURCHECKERS["ColorChecker N Ohta"]["dark skin"]
        np.testing.assert_almost_equal(
            sd_to_aces_relative_exposure_values(dark_skin_sd),
            np.array([0.11718149, 0.08663609, 0.05897268]),
            decimal=7,
        )
        np.testing.assert_almost_equal(
            sd_to_aces_relative_exposure_values(
                dark_skin_sd, SDS_ILLUMINANTS["A"]
            ),
            np.array([0.13583991, 0.09431845, 0.05928214]),
            decimal=7,
        )
        np.testing.assert_almost_equal(
            sd_to_aces_relative_exposure_values(
                dark_skin_sd, apply_chromatic_adaptation=True
            ),
            np.array([0.11807796, 0.08690312, 0.05891252]),
            decimal=7,
        )
        np.testing.assert_almost_equal(
            sd_to_aces_relative_exposure_values(
                dark_skin_sd,
                apply_chromatic_adaptation=True,
                chromatic_adaptation_transform="Bradford",
            ),
            np.array([0.11805993, 0.08689013, 0.05900396]),
            decimal=7,
        )
    def test_domain_range_scale_spectral_to_aces_relative_exposure_values(
        self,
    ):
        """
        Test :func:`colour.characterisation.aces_it.\
sd_to_aces_relative_exposure_values` definition domain and range scale
        support.
        """
        grey = sd_constant(0.18, MSDS_ACES_RICD.shape)
        reference_RGB = sd_to_aces_relative_exposure_values(grey)
        # The result must track the active domain-range scale.
        for scale_name, factor in (("reference", 1), ("1", 1), ("100", 100)):
            with domain_range_scale(scale_name):
                np.testing.assert_almost_equal(
                    sd_to_aces_relative_exposure_values(grey),
                    reference_RGB * factor,
                    decimal=7,
                )
class TestReadTrainingDataRawtoacesV1(unittest.TestCase):
    """
    Define :func:`colour.characterisation.aces_it.\
read_training_data_rawtoaces_v1` definition unit tests methods.
    """
    def test_read_training_data_rawtoaces_v1(self):
        """
        Test :func:`colour.characterisation.aces_it.\
read_training_data_rawtoaces_v1` definition.
        """
        # The RAW to ACES v1 training set contains 190 reflectances.
        training_data = read_training_data_rawtoaces_v1()
        self.assertEqual(len(training_data.labels), 190)
class TestGenerateIlluminantsRawtoacesV1(unittest.TestCase):
    """
    Define :func:`colour.characterisation.aces_it.\
generate_illuminants_rawtoaces_v1` definition unit tests methods.
    """
    def test_generate_illuminants_rawtoaces_v1(self):
        """
        Test :func:`colour.characterisation.aces_it.\
generate_illuminants_rawtoaces_v1` definition.
        """
        # Blackbodies from 1000K to 3500K, the D-series from D40 to D250
        # and the ISO 7589 studio tungsten illuminant are expected.
        generated_names = sorted(generate_illuminants_rawtoaces_v1())
        self.assertListEqual(
            generated_names,
            [
                "1000K Blackbody",
                "1500K Blackbody",
                "2000K Blackbody",
                "2500K Blackbody",
                "3000K Blackbody",
                "3500K Blackbody",
                "D100",
                "D105",
                "D110",
                "D115",
                "D120",
                "D125",
                "D130",
                "D135",
                "D140",
                "D145",
                "D150",
                "D155",
                "D160",
                "D165",
                "D170",
                "D175",
                "D180",
                "D185",
                "D190",
                "D195",
                "D200",
                "D205",
                "D210",
                "D215",
                "D220",
                "D225",
                "D230",
                "D235",
                "D240",
                "D245",
                "D250",
                "D40",
                "D45",
                "D50",
                "D55",
                "D60",
                "D65",
                "D70",
                "D75",
                "D80",
                "D85",
                "D90",
                "D95",
                "iso7589",
            ],
        )
class TestWhiteBalanceMultipliers(unittest.TestCase):
    """
    Define :func:`colour.characterisation.aces_it.white_balance_multipliers`
    definition unit tests methods.
    """
    def test_white_balance_multipliers(self):
        """
        Test :func:`colour.characterisation.aces_it.white_balance_multipliers`
        definition.
        """
        # Expected balances for the Canon EOS 5D Mark II fixture under two
        # illuminants.
        for illuminant_name, expected in (
            ("D55", np.array([2.34141541, 1.00000000, 1.51633759])),
            (
                "ISO 7589 Studio Tungsten",
                np.array([1.57095278, 1.00000000, 2.43560477]),
            ),
        ):
            np.testing.assert_almost_equal(
                white_balance_multipliers(
                    MSDS_CANON_EOS_5DMARK_II,
                    SDS_ILLUMINANTS[illuminant_name],
                ),
                expected,
                decimal=7,
            )
class TestBestIlluminant(unittest.TestCase):
    """
    Define :func:`colour.characterisation.aces_it.best_illuminant` definition
    unit tests methods.
    """
    def test_best_illuminant(self):
        """
        Test :func:`colour.characterisation.aces_it.best_illuminant`
        definition.
        """
        # White balancing for "FL2" should resolve to "D40" and for "A"
        # to the "3000K Blackbody" illuminant.
        for source_name, expected_name in (
            ("FL2", "D40"),
            ("A", "3000K Blackbody"),
        ):
            RGB_w = white_balance_multipliers(
                MSDS_CANON_EOS_5DMARK_II, SDS_ILLUMINANTS[source_name]
            )
            illuminant = best_illuminant(
                RGB_w,
                MSDS_CANON_EOS_5DMARK_II,
                generate_illuminants_rawtoaces_v1(),
            )
            self.assertEqual(illuminant.name, expected_name)
class TestNormaliseIlluminant(unittest.TestCase):
    """
    Define :func:`colour.characterisation.aces_it.normalise_illuminant`
    definition unit tests methods.
    """
    def test_normalise_illuminant(self):
        """
        Test :func:`colour.characterisation.aces_it.normalise_illuminant`
        definition.
        """
        normalised = normalise_illuminant(
            SDS_ILLUMINANTS["D55"], MSDS_CANON_EOS_5DMARK_II
        )
        # Checksum of the normalised distribution values.
        self.assertAlmostEqual(
            np.sum(normalised.values),
            3.439037388220850,
            places=7,
        )
class TestTrainingDataSdsToRGB(unittest.TestCase):
    """
    Define :func:`colour.characterisation.aces_it.training_data_sds_to_RGB`
    definition unit tests methods.

    The expected values below are byte-exact reference outputs; do not
    reformat or round them.
    """
    def test_training_data_sds_to_RGB(self):
        """
        Test :func:`colour.characterisation.aces_it.training_data_sds_to_RGB`
        definition.
        """
        # Case 1: the full RAW to ACES v1 training data set (190 samples)
        # with the Canon EOS 5D Mark II fixture under illuminant "D55".
        RGB, RGB_w = training_data_sds_to_RGB(
            read_training_data_rawtoaces_v1(),
            MSDS_CANON_EOS_5DMARK_II,
            SDS_ILLUMINANTS["D55"],
        )
        np.testing.assert_almost_equal(
            RGB,
            np.array(
                [
                    [42.00296381, 39.83290349, 43.28842394],
                    [181.25453293, 180.47486885, 180.30657630],
                    [1580.35041765, 1578.67251435, 1571.05703787],
                    [403.67553672, 403.67553672, 403.67553672],
                    [1193.51958332, 1194.63985124, 1183.92806238],
                    [862.07824054, 863.30644583, 858.29863779],
                    [605.42274304, 602.94953701, 596.61414309],
                    [395.70687930, 394.67167942, 392.97719777],
                    [227.27502116, 228.33554705, 227.96959477],
                    [130.97735082, 132.12395139, 131.97239271],
                    [61.79308820, 61.85572037, 62.40560537],
                    [592.29430914, 383.93309398, 282.70032306],
                    [504.67305022, 294.69245978, 193.90976423],
                    [640.93167741, 494.91914821, 421.68337308],
                    [356.53952646, 239.77610719, 181.18147755],
                    [179.58569818, 130.00540238, 109.23999883],
                    [1061.07297514, 818.29727750, 730.13362169],
                    [765.75936417, 522.06805938, 456.59355601],
                    [104.70554060, 80.35106922, 65.75667232],
                    [694.19925422, 161.06849749, 214.20170991],
                    [1054.83161580, 709.41713619, 668.10329523],
                    [697.35479081, 276.20032105, 275.86226833],
                    [183.26315174, 65.93801513, 74.60775905],
                    [359.74416854, 59.73576149, 89.81296522],
                    [1043.53760601, 405.48081521, 376.37298474],
                    [344.35374209, 111.26727966, 109.10587712],
                    [215.18064862, 87.41152853, 85.18152727],
                    [555.37005673, 134.76016985, 111.54658160],
                    [931.71846961, 210.02605133, 150.65312210],
                    [211.01186324, 50.73939233, 54.55750662],
                    [654.45781665, 132.73694874, 107.20009737],
                    [1193.89772859, 625.60766645, 521.51066476],
                    [802.65730883, 228.94887565, 178.30864097],
                    [149.82853589, 44.31839648, 55.29195048],
                    [80.88083928, 33.78936351, 41.73438243],
                    [579.50157840, 240.80755019, 188.50864121],
                    [537.09280420, 80.41714202, 48.28907694],
                    [777.62363031, 205.11587061, 122.43126732],
                    [292.65436510, 59.53457252, 44.27126512],
                    [511.68625012, 134.76897130, 85.73242441],
                    [903.64947615, 462.49015529, 350.74183199],
                    [852.95457070, 291.64071698, 151.51871958],
                    [1427.59841722, 907.54863477, 724.29520203],
                    [527.68979414, 169.76114596, 89.48561902],
                    [496.62188809, 317.11827387, 243.77642038],
                    [554.39017413, 284.77453644, 181.92376325],
                    [310.50669032, 96.25812545, 41.22765558],
                    [1246.49891599, 522.05121993, 238.28646123],
                    [240.19646249, 118.57745244, 82.68426681],
                    [1005.98836135, 355.93514762, 118.60457241],
                    [792.31376787, 369.56509398, 143.27388201],
                    [459.04590557, 315.46594358, 215.53901098],
                    [806.50918893, 352.20277469, 97.69239677],
                    [1574.11778922, 1078.61331515, 697.02647383],
                    [1015.45155837, 598.98507153, 301.94169280],
                    [479.68722930, 242.23619637, 72.60351059],
                    [1131.70538515, 628.32510627, 213.67910327],
                    [185.86573238, 162.55033903, 137.59385867],
                    [1131.77074807, 603.89218698, 153.83160203],
                    [638.14148862, 527.18090248, 410.12394346],
                    [884.58039320, 655.09236879, 329.23967927],
                    [1172.73094356, 840.43080883, 380.90114088],
                    [1490.24223350, 1111.18491878, 482.33357611],
                    [1054.70234779, 513.29967197, 91.55980977],
                    [1532.99674295, 1035.15868150, 253.21942988],
                    [662.35328287, 528.52354760, 326.56458987],
                    [1769.55456145, 1557.58571488, 1155.79098414],
                    [1196.62083017, 1079.28012658, 888.47017893],
                    [1578.73591185, 1089.40083172, 314.45691871],
                    [252.98204345, 206.56788008, 153.62801631],
                    [973.59975800, 714.51185344, 251.12884859],
                    [1661.01720988, 1340.46809762, 619.61710815],
                    [656.66179353, 566.61547800, 322.22788098],
                    [676.69663303, 571.86743785, 249.62031449],
                    [1229.28626315, 1020.14702709, 353.11090960],
                    [390.76190378, 324.36051944, 119.31108035],
                    [1524.10495708, 1366.72397704, 633.03830849],
                    [1264.54750712, 1149.12002542, 335.25348483],
                    [265.96753330, 260.89397210, 130.78590008],
                    [90.15969432, 90.72350914, 55.12008388],
                    [298.22463247, 300.48700028, 101.95760063],
                    [813.34391710, 820.12623357, 313.17818415],
                    [186.96402165, 190.38042094, 104.27515726],
                    [230.34939258, 235.91900919, 120.77815429],
                    [469.57926615, 472.51064145, 256.40912347],
                    [117.81249486, 129.17019984, 69.78861213],
                    [133.39581196, 151.50390168, 77.66255652],
                    [164.19259747, 172.13159331, 80.92295294],
                    [146.12230124, 149.32536508, 87.48300520],
                    [201.93215173, 208.89885695, 111.84447436],
                    [248.41427850, 282.34047722, 122.55482010],
                    [304.35509339, 377.38986207, 118.66130122],
                    [381.85533606, 530.40398972, 150.83506876],
                    [967.19810669, 1161.33086750, 663.54746741],
                    [613.98437237, 865.41677370, 362.92357557],
                    [410.21304405, 611.89683658, 284.09389273],
                    [279.50447144, 416.01646348, 213.18049093],
                    [334.48807624, 487.46571814, 235.49134434],
                    [664.04349337, 867.87454943, 549.71146455],
                    [311.66934673, 431.38058636, 256.13307806],
                    [110.04404638, 203.88196409, 104.63331585],
                    [153.35857585, 312.67834716, 149.90942505],
                    [273.46344514, 462.41992197, 292.50571823],
                    [184.77058437, 267.46361125, 193.71894670],
                    [75.79805899, 163.84071881, 95.67465270],
                    [461.73803707, 668.68797906, 484.77687282],
                    [523.01992144, 790.69326153, 598.73122243],
                    [105.89414085, 124.92341127, 113.03925656],
                    [279.33299507, 446.45128537, 344.73426977],
                    [340.57250119, 381.28610429, 353.83182947],
                    [141.00956904, 329.50139051, 228.90179483],
                    [117.29728945, 156.88993944, 139.49878229],
                    [565.12438106, 696.52297174, 615.88218349],
                    [1046.73447319, 1446.22424473, 1277.47338963],
                    [133.87404291, 253.25944193, 224.75872956],
                    [586.52626500, 1015.43013448, 885.49907251],
                    [927.08412116, 1197.93784752, 1140.76612264],
                    [81.29463446, 202.46201173, 186.35209411],
                    [350.90699453, 788.82959642, 669.10307704],
                    [278.88231719, 581.42068355, 526.82554470],
                    [642.66176703, 990.64038619, 907.64284280],
                    [689.10344984, 942.49383066, 900.33073076],
                    [190.62073977, 540.21088595, 523.62573562],
                    [322.35685764, 676.02683754, 692.94583013],
                    [896.29532467, 1289.90474463, 1311.34615018],
                    [204.06785020, 321.83261403, 337.01923114],
                    [237.10512554, 549.97044011, 646.06486244],
                    [907.26703197, 1252.44260107, 1309.50173432],
                    [504.74103065, 728.27088424, 782.27808125],
                    [470.91049729, 912.49116456, 1059.41083523],
                    [231.75497961, 539.14727494, 732.41647792],
                    [624.91135978, 943.51709467, 1086.48492282],
                    [104.84186738, 398.05825469, 663.96030581],
                    [100.47632953, 226.41423139, 323.51675153],
                    [998.19560093, 1168.81108673, 1283.07267859],
                    [350.74519746, 457.74100518, 552.52270183],
                    [223.19531677, 560.14850077, 855.05346039],
                    [66.92044931, 128.18947830, 205.30719728],
                    [280.63458798, 518.51069955, 784.38948897],
                    [1071.24122457, 1267.16339790, 1467.81704311],
                    [271.47257445, 553.57609491, 914.33723598],
                    [211.86582477, 295.18643027, 418.51776463],
                    [153.86457460, 342.06625645, 649.82579665],
                    [179.59188635, 265.25370235, 413.68135787],
                    [529.77485058, 737.79030218, 1046.29865466],
                    [208.71936449, 421.30392624, 796.71281168],
                    [685.50294808, 879.76243717, 1195.00892794],
                    [85.02189613, 113.33360860, 171.03209018],
                    [72.06980264, 139.42600347, 315.97906141],
                    [349.57868286, 426.82308690, 556.49647978],
                    [726.50329821, 882.48411184, 1163.20130103],
                    [102.62158777, 177.73895468, 467.26740089],
                    [208.63097281, 322.84137064, 639.30554347],
                    [377.19498209, 456.13180268, 706.44272480],
                    [149.91131672, 218.16462984, 455.15510078],
                    [556.80606655, 673.96774240, 1020.98785748],
                    [172.19546054, 181.38617476, 478.69666973],
                    [494.98572332, 534.88874559, 773.75255591],
                    [1166.31475206, 1207.81829513, 1411.04368728],
                    [324.81131421, 298.91188334, 521.96994638],
                    [731.58631467, 725.95113189, 1192.71141630],
                    [376.70584074, 352.06184423, 572.37854429],
                    [421.32413767, 465.07677606, 910.85999527],
                    [155.65680826, 145.82096629, 282.56390371],
                    [982.43736509, 991.65710582, 1312.39630323],
                    [41.37244888, 33.41882583, 59.48460827],
                    [282.61535563, 188.37255834, 441.62967707],
                    [182.28936533, 136.29152918, 248.30801310],
                    [398.28853814, 281.28601665, 641.78038278],
                    [494.34030557, 393.91395210, 664.96627121],
                    [579.86630787, 449.57878986, 836.64303806],
                    [281.30892711, 142.60663373, 309.93723963],
                    [439.97606151, 345.13329865, 425.68615785],
                    [887.17712876, 583.53811414, 886.88440975],
                    [841.97939219, 617.28846790, 810.67002861],
                    [1280.60242984, 1139.62066080, 1255.46929276],
                    [336.77846782, 246.82877629, 324.48823631],
                    [1070.92080733, 527.41599474, 913.93600561],
                    [676.57753460, 329.48235976, 509.56020035],
                    [1353.12934453, 1048.28092139, 1227.42851889],
                    [248.56120754, 78.30056642, 137.39216268],
                    [675.76876164, 381.60749713, 545.08703142],
                    [1008.57884369, 704.64042514, 836.94311729],
                    [1207.19931876, 527.74482440, 737.30284625],
                    [1157.60714894, 736.24734736, 846.01278626],
                    [861.62204402, 714.70913295, 747.29294390],
                    [255.83324360, 94.08214754, 147.60127564],
                    [1522.93390177, 1017.14491217, 1073.23488749],
                    [460.59077351, 93.73852735, 210.75844436],
                    [909.87331348, 498.83253656, 750.09672276],
                ]
            ),
            decimal=7,
        )
        np.testing.assert_almost_equal(
            RGB_w, np.array([2.34141541, 1.00000000, 1.51633759]), decimal=7
        )
        # Case 2: the "BabelColor Average" ColourChecker reflectances as
        # training data with the same camera and illuminant.
        training_data = sds_and_msds_to_msds(
            SDS_COLOURCHECKERS["BabelColor Average"].values()
        )
        RGB, RGB_w = training_data_sds_to_RGB(
            training_data, MSDS_CANON_EOS_5DMARK_II, SDS_ILLUMINANTS["D55"]
        )
        np.testing.assert_almost_equal(
            RGB,
            np.array(
                [
                    [263.80361607, 170.29412869, 132.71463416],
                    [884.07936328, 628.44083126, 520.43504675],
                    [324.17856150, 443.95092266, 606.43750890],
                    [243.82059773, 253.22111395, 144.98600653],
                    [481.54199203, 527.96925768, 764.50624747],
                    [628.07015143, 979.73104655, 896.85237907],
                    [927.63600544, 391.11468312, 150.73047156],
                    [203.13259862, 317.65395368, 639.54581080],
                    [686.28955864, 260.78688114, 254.89963998],
                    [174.05857536, 132.16684952, 230.54054095],
                    [806.50094411, 817.35481419, 312.91902292],
                    [1111.20280010, 608.82554576, 194.31984092],
                    [94.99792206, 185.04148229, 456.53592437],
                    [340.60457483, 498.62910631, 254.08356415],
                    [531.53679194, 136.11844274, 109.19876416],
                    [1387.37113491, 952.84382040, 286.23152122],
                    [681.97933172, 326.66634506, 526.23078660],
                    [244.90739217, 554.88866566, 741.21522946],
                    [1841.80020583, 1834.49277300, 1784.07500285],
                    [1179.76201558, 1189.84138939, 1182.25520674],
                    [720.27089899, 726.91855632, 724.84766858],
                    [382.16849234, 387.41521539, 386.87510339],
                    [178.43859184, 181.76108810, 182.71062184],
                    [64.77754952, 64.80020759, 65.45515287],
                ]
            ),
            decimal=7,
        )
        np.testing.assert_almost_equal(
            RGB_w, np.array([2.34141541, 1.00000000, 1.51633759]), decimal=7
        )
class TestTrainingDataSdsToXYZ(unittest.TestCase):
    """
    Define :func:`colour.characterisation.aces_it.training_data_sds_to_XYZ`
    definition unit tests methods.
    """

    def test_training_data_sds_to_XYZ(self):
        """
        Test :func:`colour.characterisation.aces_it.training_data_sds_to_XYZ`
        definition.
        """

        # Integrate the *RAW to ACES* v1 training spectral distributions
        # against the *CIE 1931 2 Degree Standard Observer* under the *D55*
        # illuminant; the fixture below is one XYZ row per training sample.
        np.testing.assert_almost_equal(
            training_data_sds_to_XYZ(
                read_training_data_rawtoaces_v1(),
                MSDS_CMFS["CIE 1931 2 Degree Standard Observer"],
                SDS_ILLUMINANTS["D55"],
            ),
            np.array(
                [
                    [0.01743541, 0.01795040, 0.01961110],
                    [0.08556071, 0.08957352, 0.09017032],
                    [0.74558770, 0.78175495, 0.78343383],
                    [0.19005289, 0.19950000, 0.20126062],
                    [0.56263167, 0.59145443, 0.58944868],
                    [0.40708229, 0.42774653, 0.42813199],
                    [0.28533739, 0.29945717, 0.29732644],
                    [0.18670375, 0.19575576, 0.19612855],
                    [0.10734487, 0.11290543, 0.11381239],
                    [0.06188310, 0.06524694, 0.06594260],
                    [0.02905436, 0.03045954, 0.03111642],
                    [0.25031624, 0.22471846, 0.12599982],
                    [0.20848487, 0.18072652, 0.08216289],
                    [0.28173081, 0.26937432, 0.19943363],
                    [0.15129458, 0.13765872, 0.08086671],
                    [0.07854243, 0.07274480, 0.05123870],
                    [0.46574583, 0.43948749, 0.34501135],
                    [0.33111608, 0.29368033, 0.21379720],
                    [0.04596029, 0.04443836, 0.03115443],
                    [0.28422646, 0.15495892, 0.11586479],
                    [0.47490187, 0.41497780, 0.33505853],
                    [0.29452546, 0.20003225, 0.13705453],
                    [0.06905269, 0.04421818, 0.03449201],
                    [0.13040440, 0.06239791, 0.04175606],
                    [0.43838730, 0.29962261, 0.18439668],
                    [0.13390118, 0.08356608, 0.04956679],
                    [0.08356733, 0.05794634, 0.03910007],
                    [0.21637988, 0.12469189, 0.04842559],
                    [0.37899204, 0.22130821, 0.07365608],
                    [0.07733610, 0.04256869, 0.02300063],
                    [0.25696432, 0.14119282, 0.04740500],
                    [0.51960474, 0.41409496, 0.25643556],
                    [0.32241564, 0.19954021, 0.08051276],
                    [0.05811798, 0.03389661, 0.02553745],
                    [0.03192572, 0.02139972, 0.01894908],
                    [0.24605476, 0.17854962, 0.09147038],
                    [0.20624731, 0.10555152, 0.01675508],
                    [0.31255107, 0.19334840, 0.05143990],
                    [0.11006219, 0.06057155, 0.01700794],
                    [0.20509764, 0.12555310, 0.03594860],
                    [0.38058683, 0.30396093, 0.16256996],
                    [0.34354473, 0.23964048, 0.06111316],
                    [0.62251344, 0.54770879, 0.34634977],
                    [0.21294652, 0.14470338, 0.03492000],
                    [0.22064317, 0.19656587, 0.11907643],
                    [0.23955073, 0.19768225, 0.08595970],
                    [0.12377361, 0.08353105, 0.01434151],
                    [0.52378659, 0.40757502, 0.10242337],
                    [0.09732322, 0.07735501, 0.03254246],
                    [0.41081884, 0.30127969, 0.04240016],
                    [0.32946008, 0.27129095, 0.05232655],
                    [0.19870991, 0.18701769, 0.09764509],
                    [0.31867743, 0.25717029, 0.02158054],
                    [0.67745549, 0.64283785, 0.31268426],
                    [0.43182429, 0.39425828, 0.13198410],
                    [0.19075096, 0.16573196, 0.01845293],
                    [0.47578930, 0.43714747, 0.07974541],
                    [0.08420865, 0.08615579, 0.06605406],
                    [0.47306132, 0.43488423, 0.05262924],
                    [0.28242654, 0.28638349, 0.19186089],
                    [0.37367384, 0.38524079, 0.13498637],
                    [0.49536547, 0.51027091, 0.15645211],
                    [0.63680942, 0.67272132, 0.19642820],
                    [0.43790684, 0.39093965, 0.02518505],
                    [0.63216527, 0.66425603, 0.07124985],
                    [0.28682848, 0.29807036, 0.14308787],
                    [0.78666095, 0.83181391, 0.53110094],
                    [0.54475049, 0.57280425, 0.43240766],
                    [0.65555915, 0.68992930, 0.10030198],
                    [0.10560623, 0.10992647, 0.06863885],
                    [0.40588908, 0.43345904, 0.08589490],
                    [0.69824760, 0.76446843, 0.23843395],
                    [0.27951451, 0.30869595, 0.13310650],
                    [0.28351930, 0.32278417, 0.09130925],
                    [0.51144946, 0.58985649, 0.11409286],
                    [0.16769668, 0.19357639, 0.04824163],
                    [0.64027510, 0.74864980, 0.24145602],
                    [0.51533750, 0.64418491, 0.09390029],
                    [0.10903312, 0.13420204, 0.04403235],
                    [0.03916991, 0.04755109, 0.02410291],
                    [0.12726285, 0.16825903, 0.03705646],
                    [0.34079923, 0.44119883, 0.10621489],
                    [0.08299513, 0.10226271, 0.04607974],
                    [0.10117617, 0.12690940, 0.05211600],
                    [0.20673305, 0.25456362, 0.11244267],
                    [0.05040081, 0.06702198, 0.02944861],
                    [0.05809758, 0.07896803, 0.03312583],
                    [0.07202711, 0.09383365, 0.03453490],
                    [0.06392748, 0.07896740, 0.03860393],
                    [0.08851258, 0.11174080, 0.04873213],
                    [0.09821259, 0.13743849, 0.03901353],
                    [0.12253000, 0.18989034, 0.03327101],
                    [0.15082798, 0.25948217, 0.03805919],
                    [0.41476613, 0.56455709, 0.26988900],
                    [0.25043710, 0.40869656, 0.12211755],
                    [0.17536685, 0.28765326, 0.10166502],
                    [0.12038544, 0.19242328, 0.07754636],
                    [0.14661345, 0.23524743, 0.09334793],
                    [0.29469553, 0.41056592, 0.23093160],
                    [0.13015693, 0.19492122, 0.09333495],
                    [0.04081181, 0.08280292, 0.03122401],
                    [0.06569736, 0.13553353, 0.05266408],
                    [0.12177383, 0.20160583, 0.11621774],
                    [0.08354206, 0.11970984, 0.08207175],
                    [0.02834645, 0.06259404, 0.03135058],
                    [0.20884161, 0.29927365, 0.20553553],
                    [0.23180119, 0.33870071, 0.24267407],
                    [0.04413521, 0.05398934, 0.04862030],
                    [0.13068910, 0.19470885, 0.15073584],
                    [0.16108644, 0.18484544, 0.17474649],
                    [0.06206737, 0.12873462, 0.09368693],
                    [0.05126858, 0.06722639, 0.05961970],
                    [0.25534374, 0.31335090, 0.27780291],
                    [0.48369629, 0.63319069, 0.57347864],
                    [0.06066266, 0.09712274, 0.09253437],
                    [0.27940216, 0.41909220, 0.39351159],
                    [0.44664100, 0.54665344, 0.55342931],
                    [0.03590889, 0.06959304, 0.07535965],
                    [0.16621092, 0.30339106, 0.29722885],
                    [0.12909138, 0.22008859, 0.22690521],
                    [0.31015553, 0.42498221, 0.42044232],
                    [0.33970423, 0.42779997, 0.43883150],
                    [0.10000582, 0.19440825, 0.23393750],
                    [0.16694758, 0.26056864, 0.32541934],
                    [0.43598087, 0.55484571, 0.63089871],
                    [0.10305166, 0.13633951, 0.16650820],
                    [0.12725465, 0.19404057, 0.30068226],
                    [0.44450660, 0.54666776, 0.64220554],
                    [0.25312549, 0.31346831, 0.38485942],
                    [0.24557618, 0.34698805, 0.51328941],
                    [0.13585660, 0.18761687, 0.36302217],
                    [0.32288492, 0.39652004, 0.54579104],
                    [0.08400465, 0.11889755, 0.34519851],
                    [0.06038029, 0.07936884, 0.16393180],
                    [0.47840043, 0.53070661, 0.64043584],
                    [0.16727376, 0.19048161, 0.27055547],
                    [0.14740952, 0.19227205, 0.44545300],
                    [0.03953792, 0.04540593, 0.10766386],
                    [0.16200092, 0.18995251, 0.41003367],
                    [0.53147895, 0.57554326, 0.74787983],
                    [0.17107460, 0.19285623, 0.48157477],
                    [0.11394187, 0.12139868, 0.21928748],
                    [0.10838799, 0.11193347, 0.34884682],
                    [0.10390937, 0.10854555, 0.22459293],
                    [0.28493924, 0.30349174, 0.54832107],
                    [0.13572090, 0.13988801, 0.43412229],
                    [0.36141619, 0.37929776, 0.62919317],
                    [0.04527113, 0.04612919, 0.09028801],
                    [0.05164102, 0.04505136, 0.17732932],
                    [0.18148861, 0.19085005, 0.29528314],
                    [0.37792382, 0.39238764, 0.61357669],
                    [0.08148672, 0.06054619, 0.27321036],
                    [0.13431208, 0.12118937, 0.35762939],
                    [0.19932157, 0.19328547, 0.37878896],
                    [0.09456787, 0.08094285, 0.25785832],
                    [0.29868476, 0.28967149, 0.54786550],
                    [0.09582629, 0.06156148, 0.27163852],
                    [0.25053785, 0.23630807, 0.40751054],
                    [0.56821117, 0.57452018, 0.72419232],
                    [0.16116009, 0.13379410, 0.28760107],
                    [0.37816205, 0.32564214, 0.64945876],
                    [0.19440630, 0.16599850, 0.31684298],
                    [0.24229817, 0.19698372, 0.51538353],
                    [0.08104904, 0.06295569, 0.15738669],
                    [0.48808364, 0.46372832, 0.69336648],
                    [0.01983575, 0.01538929, 0.03252398],
                    [0.13468770, 0.08473328, 0.25136965],
                    [0.08762890, 0.06560340, 0.13804375],
                    [0.20192043, 0.12939477, 0.36343630],
                    [0.24231283, 0.19018859, 0.36604686],
                    [0.28784724, 0.21105155, 0.46114703],
                    [0.12549222, 0.07471177, 0.17126268],
                    [0.20910983, 0.18235419, 0.22475458],
                    [0.43032307, 0.32727171, 0.49574549],
                    [0.39105442, 0.32475758, 0.42885925],
                    [0.60567491, 0.57928897, 0.64030251],
                    [0.15645417, 0.12986348, 0.17171885],
                    [0.50025055, 0.32646202, 0.51899239],
                    [0.29822363, 0.19839451, 0.27397060],
                    [0.63136923, 0.55375993, 0.63816664],
                    [0.10261977, 0.05754107, 0.07473368],
                    [0.30325538, 0.21976283, 0.29171854],
                    [0.46794841, 0.39368920, 0.44286306],
                    [0.54326558, 0.36319029, 0.41127862],
                    [0.52355493, 0.42261205, 0.43529051],
                    [0.39852212, 0.37568122, 0.37825751],
                    [0.10892106, 0.06698290, 0.07939788],
                    [0.68780223, 0.58022018, 0.54422258],
                    [0.18984448, 0.09051898, 0.12104133],
                    [0.41991006, 0.29457037, 0.40780639],
                ]
            ),
            decimal=7,
        )

        # Repeat with the 24-patch *BabelColor Average* colour checker used
        # as training data; expected values use the default chromatic
        # adaptation behaviour of :func:`training_data_sds_to_XYZ`.
        training_data = sds_and_msds_to_msds(
            SDS_COLOURCHECKERS["BabelColor Average"].values()
        )
        np.testing.assert_almost_equal(
            training_data_sds_to_XYZ(
                training_data,
                MSDS_CMFS["CIE 1931 2 Degree Standard Observer"],
                SDS_ILLUMINANTS["D55"],
            ),
            np.array(
                [
                    [0.11386016, 0.10184316, 0.06318332],
                    [0.38043230, 0.34842093, 0.23582246],
                    [0.17359019, 0.18707491, 0.31848244],
                    [0.10647823, 0.13300376, 0.06486355],
                    [0.24658643, 0.23417740, 0.40546447],
                    [0.30550003, 0.42171110, 0.41928361],
                    [0.38409200, 0.30325611, 0.05955461],
                    [0.13149767, 0.11720378, 0.35673016],
                    [0.28717811, 0.19215580, 0.12514286],
                    [0.08401031, 0.06423349, 0.12782115],
                    [0.33990604, 0.44124555, 0.10834694],
                    [0.46443889, 0.42686462, 0.07340585],
                    [0.07650085, 0.06051409, 0.26167301],
                    [0.14598990, 0.23185071, 0.09380297],
                    [0.20642710, 0.12162691, 0.04673088],
                    [0.57371755, 0.59896814, 0.08930486],
                    [0.30208819, 0.19714705, 0.28492050],
                    [0.14184323, 0.19554336, 0.36653731],
                    [0.86547610, 0.91241348, 0.88583082],
                    [0.55802432, 0.58852191, 0.59042758],
                    [0.34102067, 0.35951875, 0.36251375],
                    [0.18104441, 0.19123509, 0.19353380],
                    [0.08461047, 0.08944605, 0.09150081],
                    [0.03058273, 0.03200953, 0.03277947],
                ]
            ),
            decimal=7,
        )

        # Same training data, but with an explicit *Bradford* chromatic
        # adaptation transform: values shift slightly from the previous
        # fixture.
        np.testing.assert_almost_equal(
            training_data_sds_to_XYZ(
                training_data,
                MSDS_CMFS["CIE 1931 2 Degree Standard Observer"],
                SDS_ILLUMINANTS["D55"],
                chromatic_adaptation_transform="Bradford",
            ),
            np.array(
                [
                    [0.11386557, 0.10185906, 0.06306965],
                    [0.38044920, 0.34846911, 0.23548776],
                    [0.17349711, 0.18690409, 0.31901794],
                    [0.10656174, 0.13314825, 0.06450454],
                    [0.24642109, 0.23388536, 0.40625776],
                    [0.30564803, 0.42194543, 0.41894818],
                    [0.38414010, 0.30337780, 0.05881558],
                    [0.13128440, 0.11682332, 0.35780551],
                    [0.28707604, 0.19200780, 0.12518610],
                    [0.08392779, 0.06409174, 0.12816180],
                    [0.34028525, 0.44190577, 0.10665985],
                    [0.46462806, 0.42722924, 0.07207641],
                    [0.07631823, 0.06018898, 0.26258457],
                    [0.14620929, 0.23222248, 0.09296807],
                    [0.20635082, 0.12152088, 0.04669974],
                    [0.57410962, 0.59968182, 0.08713069],
                    [0.30185180, 0.19675858, 0.28565273],
                    [0.14177898, 0.19541060, 0.36711242],
                    [0.86550834, 0.91247072, 0.88567193],
                    [0.55803077, 0.58853268, 0.59040518],
                    [0.34102300, 0.35952246, 0.36250826],
                    [0.18104563, 0.19123690, 0.19353274],
                    [0.08461039, 0.08944568, 0.09150425],
                    [0.03058222, 0.03200864, 0.03278183],
                ]
            ),
            decimal=7,
        )
class TestOptimizationFactoryRawtoacesV1(unittest.TestCase):
    """
    Define :func:`colour.characterisation.aces_it.\
optimisation_factory_rawtoaces_v1` definition unit tests methods.
    """

    def test_optimisation_factory_rawtoaces_v1(self):
        """
        Test :func:`colour.characterisation.aces_it.\
optimisation_factory_rawtoaces_v1` definition.
        """

        # The factory is expected to return a pair of callables.
        factory_items = optimisation_factory_rawtoaces_v1()
        self.assertEqual(len(factory_items), 2)
class TestOptimizationFactoryJzazbz(unittest.TestCase):
    """
    Define :func:`colour.characterisation.aces_it.\
optimisation_factory_Jzazbz` definition unit tests methods.
    """

    def test_optimisation_factory_Jzazbz(self):
        """
        Test :func:`colour.characterisation.aces_it.\
optimisation_factory_Jzazbz` definition.
        """

        # The factory is expected to return a pair of callables.
        factory_items = optimisation_factory_Jzazbz()
        self.assertEqual(len(factory_items), 2)
class TestMatrixIdt(unittest.TestCase):
    """
    Define :func:`colour.characterisation.aces_it.matrix_idt`
    definition unit tests methods.
    """

    def test_matrix_idt(self):
        """
        Test :func:`colour.characterisation.aces_it.matrix_idt`
        definition.
        """

        # The *RAW to ACES* v1 matrix for the same camera and optimized by
        # `Ceres Solver <http://ceres-solver.org/>`__ is as follows:
        #
        # 0.864994 -0.026302 0.161308
        # 0.056527 1.122997 -0.179524
        # 0.023683 -0.202547 1.178864
        np.testing.assert_allclose(
            matrix_idt(MSDS_CANON_EOS_5DMARK_II, SDS_ILLUMINANTS["D55"])[0],
            np.array(
                [
                    [0.84993207, -0.01605594, 0.15143504],
                    [0.05090392, 1.12559930, -0.18498249],
                    [0.02006825, -0.19445149, 1.16206549],
                ]
            ),
            rtol=0.0001,
            atol=0.0001,
        )

        # The *RAW to ACES* v1 matrix for the same camera and optimized by
        # `Ceres Solver <http://ceres-solver.org/>`__ is as follows:
        #
        # 0.888492 -0.077505 0.189014
        # 0.021805 1.066614 -0.088418
        # -0.019718 -0.206664 1.226381
        np.testing.assert_allclose(
            matrix_idt(
                MSDS_CANON_EOS_5DMARK_II, SD_AMPAS_ISO7589_STUDIO_TUNGSTEN
            )[0],
            np.array(
                [
                    [0.85895300, -0.04381920, 0.15978620],
                    [0.01024800, 1.08825364, -0.11392229],
                    [-0.02327674, -0.18044292, 1.15903609],
                ]
            ),
            rtol=0.0001,
            atol=0.0001,
        )

        # Using the alternative *JzAzBz* optimisation factory yields a
        # slightly different matrix but the same white balance multipliers.
        M, RGB_w = matrix_idt(
            MSDS_CANON_EOS_5DMARK_II,
            SDS_ILLUMINANTS["D55"],
            optimisation_factory=optimisation_factory_Jzazbz,
        )
        np.testing.assert_allclose(
            M,
            np.array(
                [
                    [0.84841492, -0.01569765, 0.15799332],
                    [0.05333075, 1.11428542, -0.17523500],
                    [0.02262287, -0.22527728, 1.19646895],
                ]
            ),
            rtol=0.0001,
            atol=0.0001,
        )
        np.testing.assert_allclose(
            RGB_w,
            np.array([2.34141541, 1.00000000, 1.51633759]),
            rtol=0.0001,
            atol=0.0001,
        )

        # Custom optimiser keyword arguments are forwarded to the solver;
        # *Nelder-Mead* converges to a visibly different matrix.
        M, RGB_w = matrix_idt(
            MSDS_CANON_EOS_5DMARK_II,
            SDS_ILLUMINANTS["D55"],
            optimisation_kwargs={"method": "Nelder-Mead"},
        )
        np.testing.assert_allclose(
            M,
            np.array(
                [
                    [0.71327381, 0.19213397, 0.11115511],
                    [-0.05788252, 1.31165598, -0.21730625],
                    [-0.05913103, -0.02787107, 1.10737947],
                ]
            ),
            rtol=0.0001,
            atol=0.0001,
        )
        np.testing.assert_allclose(
            RGB_w,
            np.array([2.34141541, 1.00000000, 1.51633759]),
            rtol=0.0001,
            atol=0.0001,
        )

        # Custom training data with camera sensitivities reshaped to a
        # coarser spectral shape.
        training_data = sds_and_msds_to_msds(
            SDS_COLOURCHECKERS["BabelColor Average"].values()
        )
        # pylint: disable=E1102
        np.testing.assert_allclose(
            matrix_idt(
                reshape_msds(
                    MSDS_CAMERA_SENSITIVITIES["Nikon 5100 (NPL)"],
                    SpectralShape(400, 700, 10),
                ),
                SD_AMPAS_ISO7589_STUDIO_TUNGSTEN,
                training_data=training_data,
            )[0],
            np.array(
                [
                    [0.74041064, 0.10951105, 0.11963256],
                    [-0.00467360, 1.09238438, -0.11398966],
                    [0.06728533, -0.29530438, 1.18589793],
                ]
            ),
            rtol=0.0001,
            atol=0.0001,
        )

        # Explicit *Bradford* chromatic adaptation transform.
        np.testing.assert_allclose(
            matrix_idt(
                MSDS_CANON_EOS_5DMARK_II,
                SDS_ILLUMINANTS["D55"],
                chromatic_adaptation_transform="Bradford",
            )[0],
            np.array(
                [
                    [0.85020607, -0.01371074, 0.14907913],
                    [0.05074081, 1.12898863, -0.18800656],
                    [0.02095822, -0.20110079, 1.16769711],
                ]
            ),
            rtol=0.0001,
            atol=0.0001,
        )

        # With ``additional_data=True`` the intermediate *XYZ* and camera
        # *RGB* values are returned alongside the matrix and multipliers;
        # only the first five rows of each are checked.
        _M, RGB_w, XYZ, RGB = matrix_idt(
            MSDS_CANON_EOS_5DMARK_II,
            SDS_ILLUMINANTS["D55"],
            additional_data=True,
        )
        np.testing.assert_almost_equal(
            RGB_w, np.array([2.34141541, 1.00000000, 1.51633759])
        )
        np.testing.assert_almost_equal(
            XYZ[:5, ...],
            np.array(
                [
                    [0.01743160, 0.01794927, 0.01960625],
                    [0.08556139, 0.08957352, 0.09017387],
                    [0.74560311, 0.78175547, 0.78350814],
                    [0.19005289, 0.19950000, 0.20126062],
                    [0.56264334, 0.59145486, 0.58950505],
                ]
            ),
        )
        np.testing.assert_almost_equal(
            RGB[:5, ...],
            np.array(
                [
                    [0.02075823, 0.01968577, 0.02139352],
                    [0.08957758, 0.08919227, 0.08910910],
                    [0.78102307, 0.78019384, 0.77643020],
                    [0.19950000, 0.19950000, 0.19950000],
                    [0.58984787, 0.59040152, 0.58510766],
                ]
            ),
        )
class TestCamera_RGB_to_ACES2065_1(unittest.TestCase):
    """
    Define :func:`colour.characterisation.aces_it.\
camera_RGB_to_ACES2065_1` definition unit tests methods.
    """

    def test_camera_RGB_to_ACES2065_1(self):
        """
        Test :func:`colour.characterisation.aces_it.\
camera_RGB_to_ACES2065_1` definition.
        """

        # Derive the IDT matrix and white balance multipliers once and
        # reuse them for every conversion below.
        matrix, multipliers = matrix_idt(
            MSDS_CANON_EOS_5DMARK_II, SDS_ILLUMINANTS["D55"]
        )

        ACES_low = camera_RGB_to_ACES2065_1(
            np.array([0.1, 0.2, 0.3]), matrix, multipliers
        )
        np.testing.assert_almost_equal(
            ACES_low, np.array([0.26468115, 0.15288980, 0.49443355])
        )

        ACES_high = camera_RGB_to_ACES2065_1(
            np.array([1.5, 1.5, 1.5]), matrix, multipliers
        )
        np.testing.assert_almost_equal(
            ACES_high, np.array([3.30542136, 1.44643555, 2.42192985])
        )

        # Fourth positional argument enables clipping of the camera values.
        ACES_clipped = camera_RGB_to_ACES2065_1(
            np.array([1.0, 1.0, 1.0]), matrix, multipliers, True
        )
        np.testing.assert_almost_equal(
            ACES_clipped, np.array([2.20361424, 0.96429036, 1.61461990])
        )
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| colour-science/colour | colour/characterisation/tests/test_aces_it.py | Python | bsd-3-clause | 48,076 |
# Copyright 2019 Verily Life Sciences LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import unittest
from typing import List, Tuple, Union # noqa: F401
from ddt import data, ddt, unpack
from purplequery.binary_expression import BinaryExpression
from purplequery.bq_abstract_syntax_tree import (EMPTY_NODE, EvaluatableNode, Field, # noqa: F401
_EmptyNode)
from purplequery.bq_types import BQScalarType
from purplequery.dataframe_node import DataSource, QueryExpression, Select, TableReference
from purplequery.evaluatable_node import (Case, Cast, Count, Exists, Extract, If, InCheck, Not,
NullCheck, Selector, StarSelector, UnaryNegation, Value,
_AggregatingFunctionCall)
from purplequery.grammar import (core_expression, data_source, post_expression, query_expression,
select)
@ddt
class GrammarTest(unittest.TestCase):
    """Unit tests for the purplequery grammar rules.

    Each test feeds a pre-tokenized BigQuery SQL snippet to a grammar rule
    (``select``, ``query_expression``, ``data_source``, ...) and checks the
    shape of the abstract syntax tree it builds, plus that no tokens are
    left over.
    """

    def test_query_expression(self):
        # type: () -> None
        ast, leftover = query_expression(
            ['SELECT', '*', 'FROM', '`my_project.my_dataset.my_table`'])
        self.assertEqual(leftover, [])
        assert isinstance(ast, QueryExpression)
        self.assertEqual(ast.base_query.__class__, Select)

    def test_select(self):
        # type: () -> None
        # A bare SELECT * has every optional clause set to EMPTY_NODE.
        ast, leftover = select(['SELECT', '*', 'FROM', '`my_project.my_dataset.my_table`'])
        self.assertEqual(leftover, [])
        assert isinstance(ast, Select)
        self.assertEqual(ast.modifier, EMPTY_NODE)
        assert isinstance(ast.fields[0], StarSelector)
        self.assertEqual(ast.fields[0].expression, EMPTY_NODE)
        self.assertEqual(ast.fields[0].exception, EMPTY_NODE)
        self.assertEqual(ast.fields[0].replacement, EMPTY_NODE)
        assert isinstance(ast.from_, DataSource)
        assert isinstance(ast.from_.first_from[0], TableReference)
        self.assertEqual(ast.from_.first_from[0].path, ('my_project', 'my_dataset', 'my_table'))
        self.assertEqual(ast.from_.first_from[1], EMPTY_NODE)
        self.assertEqual(ast.from_.joins, [])
        self.assertEqual(ast.where, EMPTY_NODE)
        self.assertEqual(ast.group_by, EMPTY_NODE)
        self.assertEqual(ast.having, EMPTY_NODE)

    def test_select_as(self):
        # type: () -> None
        # The table alias ends up as the second element of first_from.
        ast, leftover = select(['SELECT', '*', 'FROM', '`my_project.my_dataset.my_table`',
                                'AS', 'TableAlias'])
        self.assertEqual(leftover, [])
        assert isinstance(ast, Select)
        assert isinstance(ast.from_, DataSource)
        self.assertEqual(ast.from_.first_from[1], 'TableAlias')

    # Parametrized FROM-clause cases: a lone table, ON / USING joins
    # (single and multiple columns), table aliases, and a FULL join.
    @data(
        dict(
            tokens=['`my_project.my_dataset.my_table`'],
            first_from_path=('my_project', 'my_dataset', 'my_table'),
            first_from_alias=EMPTY_NODE,
            num_joins=0,
            join_type=EMPTY_NODE,
            join_table_path=EMPTY_NODE,
            join_table_alias=EMPTY_NODE,
            join_conditions=EMPTY_NODE
        ),
        dict(
            tokens=['`my_project.my_dataset.my_table`', 'JOIN',
                    '`my_project.my_dataset.my_table2`', 'ON',
                    'my_table', '.', 'a', '=', 'my_table2', '.', 'b'],
            first_from_path=('my_project', 'my_dataset', 'my_table'),
            first_from_alias=EMPTY_NODE,
            num_joins=1,
            join_type=EMPTY_NODE,
            join_table_path=('my_project', 'my_dataset', 'my_table2'),
            join_table_alias=EMPTY_NODE,
            join_conditions=BinaryExpression(Field(('my_table', 'a')),
                                             '=',
                                             Field(('my_table2', 'b'))),
        ),
        dict(
            tokens=['`my_project.my_dataset.my_table`', 'JOIN',
                    '`my_project.my_dataset.my_table2`', 'USING', '(', 'a', ',', 'b', ')'],
            first_from_path=('my_project', 'my_dataset', 'my_table'),
            first_from_alias=EMPTY_NODE,
            num_joins=1,
            join_type=EMPTY_NODE,
            join_table_path=('my_project', 'my_dataset', 'my_table2'),
            join_table_alias=EMPTY_NODE,
            join_conditions=('a', 'b')
        ),
        dict(
            tokens=['`my_project.my_dataset.my_table`', 'JOIN',
                    '`my_project.my_dataset.my_table2`', 'USING', '(', 'a', ')'],
            first_from_path=('my_project', 'my_dataset', 'my_table'),
            first_from_alias=EMPTY_NODE,
            num_joins=1,
            join_type=EMPTY_NODE,
            join_table_path=('my_project', 'my_dataset', 'my_table2'),
            join_table_alias=EMPTY_NODE,
            join_conditions=('a',)
        ),
        dict(
            tokens=['`my_project.my_dataset.my_table`', 'AS', 'table1', 'JOIN',
                    '`my_project.my_dataset.my_table2`', 'AS', 'table2', 'USING', '(', 'a', ')'],
            first_from_path=('my_project', 'my_dataset', 'my_table'),
            first_from_alias='table1',
            num_joins=1,
            join_type=EMPTY_NODE,
            join_table_path=('my_project', 'my_dataset', 'my_table2'),
            join_table_alias='table2',
            join_conditions=('a',)
        ),
        dict(
            tokens=['`my_project.my_dataset.my_table`', 'FULL', 'JOIN',
                    '`my_project.my_dataset.my_table2`', 'USING', '(', 'a', ')'],
            first_from_path=('my_project', 'my_dataset', 'my_table'),
            first_from_alias=EMPTY_NODE,
            num_joins=1,
            join_type='FULL',
            join_table_path=('my_project', 'my_dataset', 'my_table2'),
            join_table_alias=EMPTY_NODE,
            join_conditions=('a',)
        ),
    )
    @unpack
    def test_data_source(self, tokens,  # type: List[str]
                         first_from_path,  # type: Tuple[str, ...]
                         first_from_alias,  # type: Union[_EmptyNode, str]
                         num_joins,  # type: int
                         join_type,  # type: Union[_EmptyNode, str]
                         join_table_path,  # type: Union[_EmptyNode, Tuple[str, ...]]
                         join_table_alias,  # type: Union[_EmptyNode, str]
                         join_conditions  # type: Union[_EmptyNode, Tuple[str, ...], Tuple[Field, ...]]  # noqa: E501
                         ):
        # type: (...) -> None
        ast, leftover = data_source(tokens)
        assert isinstance(ast, DataSource)
        self.assertEqual(leftover, [])
        assert isinstance(ast.first_from[0], TableReference)
        self.assertEqual(ast.first_from[0].path, first_from_path)
        self.assertEqual(ast.first_from[1], first_from_alias)
        self.assertEqual(len(ast.joins), num_joins)
        if num_joins > 0:
            join = ast.joins[0]
            self.assertEqual(join[0], join_type)
            assert isinstance(join[1][0], TableReference)
            self.assertEqual(join[1][0].path, join_table_path)
            self.assertEqual(join[1][1], join_table_alias)
            # BinaryExpression has no __eq__, so compare conditions by repr.
            self.assertEqual(repr(join[2]), repr(join_conditions))

    def test_core_expression(self):
        # type: () -> None
        ast, leftover = core_expression(['a'])
        self.assertEqual(leftover, [])
        assert isinstance(ast, Field)
        self.assertEqual(ast.path, ('a',))

    # Selecting a bare column versus a table-qualified column.
    @data(
        (
            ['SELECT', 'a', 'FROM', '`my_project.my_dataset.my_table`'],
            ('a',)
        ),
        (
            ['SELECT', 'my_table', '.', 'a', 'FROM', '`my_project.my_dataset.my_table`'],
            ('my_table', 'a',)
        )
    )
    @unpack
    def test_select_field(self, tokens, path):
        # type: (List[str], Tuple[str, ...]) -> None
        ast, leftover = select(tokens)
        self.assertEqual(leftover, [])
        assert isinstance(ast, Select)
        assert isinstance(ast.fields[0], Selector)
        field = ast.fields[0].children[0]
        assert isinstance(field, Field)
        self.assertEqual(field.path, path)

    def test_order_by(self):
        # type: () -> None
        ast, leftover = query_expression(['SELECT', '*', 'FROM', 'my_table',
                                          'ORDER', 'BY', 'a', 'ASC'])
        assert isinstance(ast, QueryExpression)
        self.assertEqual(leftover, [])
        assert not isinstance(ast.order_by, _EmptyNode)
        self.assertEqual(len(ast.order_by), 1)
        (field, direction) = ast.order_by[0]
        self.assertEqual(field.path, ('a',))
        self.assertEqual(direction, 'ASC')

    def test_limit_offset(self):
        # type: () -> None
        ast, leftover = query_expression(['SELECT', '*', 'FROM', 'my_table',
                                          'LIMIT', '5', 'OFFSET', '10'])
        assert isinstance(ast, QueryExpression)
        self.assertEqual(leftover, [])
        assert not isinstance(ast.limit, _EmptyNode)
        limit_expression, offset_expression = ast.limit
        assert isinstance(limit_expression, Value)
        self.assertEqual(limit_expression.value, 5)
        assert isinstance(offset_expression, Value)
        self.assertEqual(offset_expression.value, 10)

    def test_group_by(self):
        # type: () -> None
        ast, leftover = select(['SELECT', '*', 'FROM', 'my_table', 'GROUP', 'BY', 'a'])
        self.assertEqual(leftover, [])
        assert isinstance(ast, Select)
        assert not isinstance(ast.group_by, _EmptyNode)
        self.assertEqual(len(ast.group_by), 1)
        field = ast.group_by[0]
        assert isinstance(field, Field)
        self.assertEqual(field.path, ('a',))

    def test_selector_alias(self):
        # type: () -> None
        ast, leftover = select(['SELECT', 'a', 'AS', 'field_name', 'FROM', 'my_table'])
        self.assertEqual(leftover, [])
        assert isinstance(ast, Select)
        self.assertEqual(len(ast.fields), 1)
        selector = ast.fields[0]
        assert isinstance(selector, Selector)
        alias = selector.alias
        self.assertEqual(alias, 'field_name')

    # COUNT(*) counts the literal 1; COUNT(DISTINCT a) sets the distinct
    # flag on the function info.
    @data(
        dict(tokens=['COUNT', '(', '*', ')'],
             countee=Value(1, BQScalarType.INTEGER),
             distinct=False),
        dict(tokens=['COUNT', '(', 'DISTINCT', 'a', ')'],
             countee=Field(('a',)),
             distinct=True),
        dict(tokens=['COUNT', '(', '1', ')'],
             countee=Value(1, BQScalarType.INTEGER),
             distinct=False),
    )
    @unpack
    def test_count(self, tokens,  # type: List[str]
                   countee,  # type: EvaluatableNode
                   distinct,
                   ):
        # type: (...) -> None
        ast, leftover = core_expression(tokens)
        self.assertEqual(leftover, [])
        assert isinstance(ast, _AggregatingFunctionCall)
        self.assertEqual(ast.children, [countee])
        assert isinstance(ast.function_info, Count)
        self.assertEqual(ast.function_info.name(), 'COUNT')
        self.assertEqual(ast.function_info.distinct, distinct)

    # direction is True for IS NULL and False for IS NOT NULL.
    @data(
        (['a', 'IS', 'NULL'], Field(('a',)), True),
        (['b', 'IS', 'NOT', 'NULL'], Field(('b',)), False)
    )
    @unpack
    def test_null_check(self, tokens,  # type: List[str]
                        expression,  # type: Field
                        direction  # type: bool
                        ):
        # type: (...) -> None
        ast, leftover = post_expression(tokens)
        self.assertEqual(leftover, [])
        assert isinstance(ast, NullCheck)
        self.assertEqual(ast.children[0], expression)
        self.assertEqual(ast.direction, direction)

    # direction is True for IN and False for NOT IN.
    @data(
        (['IN'], True),
        (['NOT', 'IN'], False)
    )
    @unpack
    def test_in_check(self, direction,  # type: List[str]
                      bool_direction  # type: bool
                      ):
        # type: (...) -> None
        tokens = ['a'] + direction + ['(', '1', ',', '2', ')']
        expression = Field(('a',))
        elements = [Value(1, BQScalarType.INTEGER), Value(2, BQScalarType.INTEGER)]
        ast, leftover = post_expression(tokens)
        self.assertEqual(leftover, [])
        assert isinstance(ast, InCheck)
        self.assertEqual(ast.children[0], expression)
        self.assertEqual(ast.direction, bool_direction)
        self.assertEqual(ast.children[1:], elements)

    def test_if(self):
        # type: () -> None
        # IF (3 < 5, 0, 1)
        tokens = ['IF', '(', '3', '<', '5', ',', '0', ',', '1', ')']
        ast, leftover = core_expression(tokens)
        self.assertEqual(leftover, [])
        assert isinstance(ast, If)
        condition, then, else_ = ast.children
        assert isinstance(condition, BinaryExpression)
        left, right = condition.children
        self.assertEqual(left, Value(3, BQScalarType.INTEGER))
        self.assertEqual(condition.operator_info.operator, '<')
        self.assertEqual(right, Value(5, BQScalarType.INTEGER))
        self.assertEqual(then, Value(0, BQScalarType.INTEGER))
        self.assertEqual(else_, Value(1, BQScalarType.INTEGER))

    def test_not(self):
        # type: () -> None
        tokens = ['NOT', '2', '=', '2']
        ast, leftover = core_expression(tokens)
        self.assertEqual(leftover, [])
        assert isinstance(ast, Not)
        assert isinstance(ast.children[0], BinaryExpression)
        left, right = ast.children[0].children
        self.assertEqual(left, Value(2, BQScalarType.INTEGER))
        self.assertEqual(ast.children[0].operator_info.operator, '=')
        self.assertEqual(right, Value(2, BQScalarType.INTEGER))

    def test_unary_negation(self):
        # type: () -> None
        tokens = ['-', '2']
        ast, leftover = core_expression(tokens)
        self.assertEqual(leftover, [])
        assert isinstance(ast, UnaryNegation)
        self.assertEqual(ast.children[0], Value(2, BQScalarType.INTEGER))

    def test_case(self):
        # type: () -> None
        # Children alternate WHEN-condition / THEN-value pairs, then ELSE.
        tokens = ['CASE', 'WHEN', 'a', '=', '1', 'THEN', '1',
                  'WHEN', 'a', '=', '2', 'THEN', '2',
                  'ELSE', '3', 'END']
        ast, leftover = core_expression(tokens)
        self.assertEqual(leftover, [])
        assert isinstance(ast, Case)
        first_when, first_then, second_when, second_then, else_ = ast.children
        assert isinstance(first_when, BinaryExpression)
        self.assertEqual(first_when.children[0], Field(('a',)))
        self.assertEqual(first_when.operator_info.operator, '=')
        self.assertEqual(first_when.children[1], Value(1, BQScalarType.INTEGER))
        self.assertEqual(first_then, Value(1, BQScalarType.INTEGER))
        assert isinstance(second_when, BinaryExpression)
        self.assertEqual(second_when.children[0], Field(('a',)))
        self.assertEqual(second_when.operator_info.operator, '=')
        self.assertEqual(second_when.children[1], Value(2, BQScalarType.INTEGER))
        self.assertEqual(second_then, Value(2, BQScalarType.INTEGER))
        self.assertEqual(else_, Value(3, BQScalarType.INTEGER))

    def test_cast(self):
        # type: () -> None
        tokens = ['CAST', '(', '1', 'AS', 'STRING', ')']
        ast, leftover = core_expression(tokens)
        self.assertEqual(leftover, [])
        assert isinstance(ast, Cast)
        self.assertEqual(ast.children[0], Value(1, BQScalarType.INTEGER))
        self.assertEqual(ast.type_, BQScalarType.STRING)

    def test_exists(self):
        # type: () -> None
        tokens = ['EXISTS', '(', 'SELECT', '*', 'FROM', 'Table', ')']
        ast, leftover = core_expression(tokens)
        self.assertEqual(leftover, [])
        assert isinstance(ast, Exists)
        assert isinstance(ast.subquery, QueryExpression)
        assert isinstance(ast.subquery.base_query, Select)
        assert isinstance(ast.subquery.base_query.from_, DataSource)
        assert isinstance(ast.subquery.base_query.from_.first_from[0], TableReference)
        self.assertEqual(ast.subquery.base_query.from_.first_from[0].path, ('Table',))

    def test_extract(self):
        # type: () -> None
        tokens = ['EXTRACT', '(', 'DAY', 'FROM', 'date_field', ')']
        ast, leftover = core_expression(tokens)
        self.assertEqual(leftover, [])
        assert isinstance(ast, Extract)
        self.assertEqual(ast.part, 'DAY')
        assert isinstance(ast.children[0], Field)
        self.assertEqual(ast.children[0].path, ('date_field',))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| verilylifesciences/purplequery | purplequery/grammar_test.py | Python | bsd-3-clause | 16,590 |
#-*- coding: utf-8 -*-
from PIL import Image as PILImage
from datetime import datetime
from django.core import urlresolvers
from django.db import models
from django.utils.translation import ugettext_lazy as _
from filer import settings as filer_settings
from filer.models.filemodels import File
from filer.utils.filer_easy_thumbnails import FilerThumbnailer
from filer.utils.pil_exif import get_exif_for_file
import os
class Image(File):
SIDEBAR_IMAGE_WIDTH = 210
DEFAULT_THUMBNAILS = {
'admin_clipboard_icon': {'size': (32, 32), 'crop': True,
'upscale': True},
'admin_sidebar_preview': {'size': (SIDEBAR_IMAGE_WIDTH, 10000)},
'admin_directory_listing_icon': {'size': (48, 48),
'crop': True, 'upscale': True},
'admin_tiny_icon': {'size': (32, 32), 'crop': True, 'upscale': True},
}
file_type = 'Image'
_icon = "image"
_height = models.IntegerField(null=True, blank=True)
_width = models.IntegerField(null=True, blank=True)
date_taken = models.DateTimeField(_('date taken'), null=True, blank=True,
editable=False)
default_alt_text = models.CharField(_('default alt text'), max_length=255, blank=True, null=True)
default_caption = models.CharField(_('default caption'), max_length=255, blank=True, null=True)
author = models.CharField(_('author'), max_length=255, null=True, blank=True)
must_always_publish_author_credit = models.BooleanField(_('must always publish author credit'), default=False)
must_always_publish_copyright = models.BooleanField(_('must always publish copyright'), default=False)
subject_location = models.CharField(_('subject location'), max_length=64, null=True, blank=True,
default=None)
link = models.CharField(_('External URL'), blank=True, null=True, max_length=255, default=None)
@classmethod
def matches_file_type(cls, iname, ifile, request):
# This was originally in admin/clipboardadmin.py it was inside of a try
# except, I have moved it here outside of a try except because I can't
# figure out just what kind of exception this could generate... all it was
# doing for me was obscuring errors...
# --Dave Butler <croepha@gmail.com>
iext = os.path.splitext(iname)[1].lower()
return iext in ['.jpg', '.jpeg', '.png', '.gif']
def save(self, *args, **kwargs):
if self.date_taken is None:
try:
exif_date = self.exif.get('DateTimeOriginal', None)
if exif_date is not None:
d, t = str.split(exif_date.values)
year, month, day = d.split(':')
hour, minute, second = t.split(':')
self.date_taken = datetime(
int(year), int(month), int(day),
int(hour), int(minute), int(second))
except:
pass
if self.date_taken is None:
self.date_taken = datetime.now()
self.has_all_mandatory_data = self._check_validity()
try:
# do this more efficient somehow?
self.file.seek(0)
self._width, self._height = PILImage.open(self.file).size
except Exception:
# probably the image is missing. nevermind.
pass
super(Image, self).save(*args, **kwargs)
def _check_validity(self):
if not self.name:
return False
return True
def sidebar_image_ratio(self):
if self.width:
return float(self.width) / float(self.SIDEBAR_IMAGE_WIDTH)
else:
return 1.0
def _get_exif(self):
if hasattr(self, '_exif_cache'):
return self._exif_cache
else:
if self.file:
self._exif_cache = get_exif_for_file(self.file.path)
else:
self._exif_cache = {}
return self._exif_cache
exif = property(_get_exif)
def has_edit_permission(self, request):
return self.has_generic_permission(request, 'edit')
def has_read_permission(self, request):
return self.has_generic_permission(request, 'read')
def has_add_children_permission(self, request):
return self.has_generic_permission(request, 'add_children')
def has_generic_permission(self, request, type):
"""
Return true if the current user has permission on this
image. Return the string 'ALL' if the user has all rights.
"""
user = request.user
if not user.is_authenticated() or not user.is_staff:
return False
elif user.is_superuser:
return True
elif user == self.owner:
return True
elif self.folder:
return self.folder.has_generic_permission(request, type)
else:
return False
@property
def label(self):
if self.name in ['', None]:
return self.original_filename or 'unnamed file'
else:
return self.name
@property
def width(self):
return self._width or 0
@property
def height(self):
return self._height or 0
@property
def icons(self):
_icons = {}
for size in filer_settings.FILER_ADMIN_ICON_SIZES:
try:
thumbnail_options = {
'size': (int(size), int(size)),
'crop': True,
'upscale': True,
'subject_location': self.subject_location}
thumb = self.file.get_thumbnail(thumbnail_options)
_icons[size] = thumb.url
except Exception, e:
# swallow the the exception to avoid to bubble it up
# in the template {{ image.icons.48 }}
pass
return _icons
@property
def thumbnails(self):
_thumbnails = {}
for name, opts in Image.DEFAULT_THUMBNAILS.items():
try:
opts.update({'subject_location': self.subject_location})
thumb = self.file.get_thumbnail(opts)
_thumbnails[name] = thumb.url
except:
# swallow the exception to avoid it bubbling up
# to the template {{ image.icons.48 }}
pass
return _thumbnails
    @property
    def easy_thumbnails_thumbnailer(self):
        """Expose the underlying file as an easy-thumbnails thumbnailer."""
        tn = FilerThumbnailer(file=self.file.file, name=self.file.name,
                              source_storage=self.file.source_storage,
                              thumbnail_storage=self.file.thumbnail_storage)
        return tn
    class Meta:
        # Keep the model registered under the 'filer' app even though it
        # lives in a models submodule.
        app_label = 'filer'
        verbose_name = _('image')
        verbose_name_plural = _('images')
| ryanbagwell/django-filer | filer/models/imagemodels.py | Python | bsd-3-clause | 6,912 |
from phovea_server.dataset_def import ATable, ADataSetProvider, AColumn, AVector, AMatrix
from logging import getLogger
import requests
import phovea_server
import numpy as np
__author__ = 'Samuel Gratzl'
# Module-wide logger and the configuration view of this plugin
# (provides host/port of the OpenCPU server and the session descriptions).
_log = getLogger(__name__)
config = phovea_server.config.view('phovea_data_opencpu')
def _to_url(path):
  """Build the absolute OpenCPU URL for *path* from the configured host/port."""
  return 'http://{host}:{port}/ocpu/{path}'.format(
      host=config.host, port=config.port, path=path)
def assign_ids(ids, idtype):
  """Map the given names to numeric ids of *idtype* via the 'idmanager' plugin."""
  import phovea_server.plugin
  manager = phovea_server.plugin.lookup('idmanager')
  return np.array(manager(ids, idtype))
def create_session(init_script):
  """Run *init_script* in a fresh OpenCPU R session and return the session key.

  The script is wrapped in R code that additionally computes a
  ``phoveaDatasets`` list describing every data.frame/vector/matrix the
  script created (name, size, type and value metadata).  The session key
  is parsed out of OpenCPU's response text.
  """
  import re
  code = """
  parse(text="
  %s
  # generate meta data for phovea
  phoveaDatasets = (function(objs) {
    known_type = function(col) {
      clazz <- class(col)
      if (clazz == 'numeric' || clazz == 'integer' || clazz == 'double' || clazz == 'matrix') {
        if (typeof(col) == 'integer') {
          list(type='int', range=c(min(col),max(col)))
        } else {
          list(type='real', range=c(min(col),max(col)))
        }
      } else if (clazz == 'factor') {
        list(type='categorical', categories=levels(col))
      } else {
        list(type='string')
      }
    }
    columnDescription = function(col, colname) {
      list(name=colname, value=known_type(col))
    }
    tableDescription = function(dataset, data_name) {
      columns = mapply(columnDescription, dataset, colnames(dataset), SIMPLIFY='array')
      list(name=data_name,
           size=dim(dataset),
           type='table',
           columns=columns)
    }
    vectorDescription = function(dataset, data_name) {
      list(name=data_name,
           size=length(dataset),
           type='vector',
           value=known_type(dataset))
    }
    matrixDescription = function(dataset, data_name) {
      list(name=data_name,
           size=dim(dataset),
           type='matrix',
           value=known_type(dataset))
    }
    r = list()
    for (obj in objs) {
      value = get(obj)
      if (is.data.frame(value)) {
        r[[obj]] = tableDescription(value, obj)
      } else if (is.vector(value)) {
        r[[obj]] = vectorDescription(value, obj)
      } else if (is.matrix(value)) {
        r[[obj]] = matrixDescription(value, obj)
      }
    }
    r
  })(ls())
  ")
  """ % (init_script,)
  _log.debug(code)
  output = requests.post(_to_url('library/base/R/eval'), dict(expr=code))
  _log.debug(output.text)
  # OpenCPU reports created resources as /tmp/<session>/R/... paths.
  session = re.search('/tmp/(.*)/R', output.text).group(1)
  return session
def resolve_datasets(session):
  """Fetch and normalize the dataset descriptions computed in *session*.

  Reads the ``phoveaDatasets`` variable created during session
  initialization (see ``create_session``) and converts OpenCPU's JSON
  encoding (where R scalars arrive as one-element lists) into plain
  phovea dataset description dicts.
  """
  # output = requests.get(_to_url('tmp/{s}/console'.format(s=session)))
  # print(output.text)
  # use the already computed list of datasets as part of session initializiation
  output = requests.get(_to_url('tmp/{s}/R/phoveaDatasets/json'.format(s=session)))
  desc = output.json()
  if not desc:
    return []

  # normalize description and remove single list entries
  def to_value(value):
    base = dict(type=value['type'][0])
    if 'range' in value:
      base['range'] = value['range']
    if 'categories' in value:
      base['categories'] = value['categories']
    return base

  def to_desc(d):
    base = dict(name=d['name'][0], type=d['type'][0], size=d['size'])
    if base['type'] == 'table':
      names = d['columns'][0]
      values = d['columns'][1]
      # builtin zip replaces the Python-2-only itertools.izip import
      base['columns'] = [dict(name=name[0], value=to_value(value)) for name, value in zip(names, values)]
    if base['type'] == 'matrix' or base['type'] == 'vector':
      base['value'] = d['value']
    return base

  return [to_desc(d) for d in desc.values()]
def _dim_names(session, variable, expected_length, dim):
  """Fetch the row/column names of *variable* in the given session.

  *dim* is 'row' or 'col' and selects R's rownames()/colnames().
  When R returns fewer names than *expected_length*, placeholder names
  ('Row0', 'Col1', ...) pad the result.
  """
  # note: numpy is already imported module-wide as np; the previous
  # function-local ``import numpy as np`` shadowed it redundantly.
  output = requests.post(_to_url('library/base/R/{dim}names/json'.format(dim=dim)),
                         dict(x='{s}::{v}'.format(s=session, v=variable)))
  data = list(output.json())
  dim_name = dim.capitalize()
  if len(data) < expected_length:
    # generate dummy ids
    for i in range(len(data), expected_length):
      data.append(dim_name + str(i))
  return np.array(data)


def row_names(session, variable, expected_length):
  """Row names of *variable*, padded to *expected_length*."""
  return _dim_names(session, variable, expected_length, 'row')


def col_names(session, variable, expected_length):
  """Column names of *variable*, padded to *expected_length*."""
  return _dim_names(session, variable, expected_length, 'col')


def column_values(session, variable, column):
  """Values of the data.frame column ``variable$column`` as a numpy array."""
  output = requests.post(_to_url('library/base/R/identity/json'),
                         dict(x='{s}::{v}${c}'.format(s=session, v=variable, c=column)))
  data = list(output.json())
  return np.array(data)


def table_values(session, variable, columns):
  """The whole data.frame *variable* as a pandas DataFrame.

  *columns* are OpenCPUColumn objects; their ``column`` attribute gives
  the DataFrame column names.
  """
  import pandas as pd
  output = requests.get(_to_url('tmp/{s}/R/{v}/json'.format(s=session, v=variable)))
  data = list(output.json())
  columns = [c.column for c in columns]
  return pd.DataFrame.from_records(data, columns=columns)


def vector_values(session, variable):
  """The vector *variable* as a numpy array."""
  output = requests.get(_to_url('tmp/{s}/R/{v}/json'.format(s=session, v=variable)))
  data = list(output.json())
  return np.array(data)


def matrix_values(session, variable):
  """The matrix *variable* as a numpy array (same JSON endpoint as vectors)."""
  return vector_values(session, variable)
def discover_sessions(discover):
  """Call the configured discovery function and build session descriptors.

  *discover* is a dict with 'function' (an OpenCPU library path) and
  'arguments'.  The function is expected to return a list of file paths;
  each becomes a session whose init script simply load()s that file.
  """
  import os.path
  output = requests.post(_to_url('{f}/json'.format(f=discover['function'])), discover['arguments'])
  data = list(output.json())
  _log.info('discovered: %s', data)
  def to_desc(d):
    # strip directory and extension to derive the session name
    name = os.path.splitext(os.path.basename(d))[0]
    return dict(name=name, script="""load('{s}')""".format(s=d))
  return [to_desc(d) for d in data]
class OpenCPUColumn(AColumn):
  """A single column of an OpenCPU backed table; values are fetched lazily."""
  def __init__(self, desc, table):
    super(OpenCPUColumn, self).__init__(desc['name'], desc['value']['type'])
    self._desc = desc
    self.column = desc['name']
    self._table = table
    self._values = None  # lazy cache of the column data
  def asnumpy(self, range=None):
    """Column values as a numpy array, optionally restricted to *range*."""
    if self._values is None:
      self._values = self._table.column_values(self.column)
    if range is None:
      return self._values
    return self._values[range.asslice()]
  def dump(self):
    # raw description as produced by resolve_datasets()
    return self._desc
class OpenCPUTable(ATable):
  """An R data.frame in an OpenCPU session exposed as a phovea table."""
  def __init__(self, entry, session, meta, session_name):
    ATable.__init__(self, entry['name'], 'opencpu/' + session_name, 'table', entry.get('id', None))
    self._session = session
    self._variable = entry['name']
    self.idtype = meta.get('idtype', 'Custom')
    self._entry = entry
    self.columns = [OpenCPUColumn(d, self) for d in entry['columns']]
    self.shape = entry['size']
    # lazy caches for names, ids and values
    self._rows = None
    self._row_ids = None
    self._values = None
  def to_description(self):
    """Serializable description including idtype, columns and size."""
    r = super(OpenCPUTable, self).to_description()
    r['idtype'] = self.idtype
    r['columns'] = [d.dump() for d in self.columns]
    r['size'] = self.shape
    return r
  def column_values(self, column):
    # delegate to the module-level helper using this table's session
    return column_values(self._session, self._variable, column)
  def rows(self, range=None):
    """Row names, optionally restricted to *range*."""
    if self._rows is None:
      self._rows = row_names(self._session, self._variable, self.shape[0])
    if range is None:
      return self._rows
    return self._rows[range.asslice()]
  def rowids(self, range=None):
    """Numeric row ids (assigned via the idmanager plugin)."""
    if self._row_ids is None:
      self._row_ids = assign_ids(self.rows(), self.idtype)
    if range is None:
      return self._row_ids
    return self._row_ids[range.asslice()]
  def aspandas(self, range=None):
    """The table as a pandas DataFrame, optionally restricted to *range*."""
    if self._values is None:
      self._values = table_values(self._session, self._variable, self.columns)
    if range is None:
      return self._values
    return self._values.iloc[range.asslice(no_ellipsis=True)]
class OpenCPUVector(AVector):
  """An R vector in an OpenCPU session exposed as a phovea vector."""
  def __init__(self, entry, session, meta, session_name):
    super(OpenCPUVector, self).__init__(entry['name'], 'opencpu/' + session_name, 'vector', entry.get('id', None))
    self._session = session
    self._variable = entry['name']
    self.idtype = meta.get('idtype', 'Custom')
    self._entry = entry
    self.value = entry['value']['type']
    self.shape = entry['size']
    # lazy caches for names, ids and values
    self._rows = None
    self._row_ids = None
    self._values = None
  def to_description(self):
    """Serializable description including idtype, value type and size."""
    r = super(OpenCPUVector, self).to_description()
    r['idtype'] = self.idtype
    r['value'] = self._entry['value']
    r['size'] = self.shape
    return r
  def rows(self, range=None):
    """Element names, optionally restricted to *range*."""
    if self._rows is None:
      self._rows = row_names(self._session, self._variable, self.shape[0])
    if range is None:
      return self._rows
    return self._rows[range.asslice()]
  def rowids(self, range=None):
    """Numeric ids for the element names."""
    if self._row_ids is None:
      self._row_ids = assign_ids(self.rows(), self.idtype)
    if range is None:
      return self._row_ids
    return self._row_ids[range.asslice()]
  def asnumpy(self, range=None):
    """Values as a numpy array, optionally restricted to *range*."""
    if self._values is None:
      self._values = vector_values(self._session, self._variable)
    if range is None:
      return self._values
    # NOTE(review): unlike OpenCPUTable.rows() this indexes range[0] —
    # presumably the range object wraps per-dimension ranges; confirm
    # against the range API.
    return self._values[range[0].asslice()]
class OpenCPUMatrix(AMatrix):
  """An R matrix in an OpenCPU session exposed as a phovea matrix.

  Names, ids and values are fetched lazily and cached per dimension.
  """
  def __init__(self, entry, session, meta, session_name):
    super(OpenCPUMatrix, self).__init__(entry['name'], 'opencpu/' + session_name, 'matrix', entry.get('id', None))
    self._session = session
    self._variable = entry['name']
    self.rowtype = meta.get('rowtype', 'Custom')
    self.coltype = meta.get('coltype', 'Custom')
    self._entry = entry
    self.value = entry['value']['type']
    self.shape = entry['size']
    # lazy caches, one pair per dimension plus the values
    self._rows = None
    self._row_ids = None
    self._cols = None
    self._col_ids = None
    self._values = None
  def to_description(self):
    """Serializable description including both id types, value and size."""
    r = super(OpenCPUMatrix, self).to_description()
    r['rowtype'] = self.rowtype
    r['coltype'] = self.coltype
    r['value'] = self._entry['value']
    r['size'] = self.shape
    return r
  def rows(self, range=None):
    """Row names, optionally restricted to *range*."""
    if self._rows is None:
      self._rows = row_names(self._session, self._variable, self.shape[0])
    if range is None:
      return self._rows
    return self._rows[range.asslice()]
  def rowids(self, range=None):
    """Numeric row ids, optionally restricted to *range*."""
    if self._row_ids is None:
      self._row_ids = assign_ids(self.rows(), self.rowtype)
    if range is None:
      return self._row_ids
    return self._row_ids[range.asslice()]
  def cols(self, range=None):
    """Column names, optionally restricted to *range*.

    Bug fix: pad to shape[1] (the column count); the previous code used
    shape[0], i.e. the row count.
    """
    if self._cols is None:
      self._cols = col_names(self._session, self._variable, self.shape[1])
    if range is None:
      return self._cols
    return self._cols[range.asslice()]
  def colids(self, range=None):
    """Numeric column ids, optionally restricted to *range*.

    Bug fix: the previous code read/wrote the *row* id cache and assigned
    ids to the row names, so column ids were actually row ids.
    """
    if self._col_ids is None:
      self._col_ids = assign_ids(self.cols(), self.coltype)
    if range is None:
      return self._col_ids
    return self._col_ids[range.asslice()]
  def asnumpy(self, range=None):
    """Values as a numpy array, optionally restricted to *range*."""
    if self._values is None:
      self._values = matrix_values(self._session, self._variable)
    if range is None:
      return self._values
    return self._values[range[0].asslice()]
class OpenCPUSession(object):
  """One configured OpenCPU session plus the datasets discovered in it."""
  def __init__(self, desc):
    self._desc = desc
    # runs the configured init script on the OpenCPU server
    self._session = create_session(desc['script'])
    session_name = desc['name']
    entries = resolve_datasets(self._session)
    meta = desc.get('meta', dict())
    def to_dataset(entry):
      # wrap each discovered R object in the matching dataset class;
      # unknown types are dropped
      meta_data = meta.get(entry['name'], dict())
      if entry['type'] == 'table':
        return OpenCPUTable(entry, self._session, meta_data, session_name)
      elif entry['type'] == 'vector':
        return OpenCPUVector(entry, self._session, meta_data, session_name)
      elif entry['type'] == 'matrix':
        return OpenCPUMatrix(entry, self._session, meta_data, session_name)
      return None
    self._entries = [v for v in (to_dataset(entry) for entry in entries) if v is not None]
  def __iter__(self):
    return iter(self._entries)
class OpenCPUProvider(ADataSetProvider):
  """Dataset provider exposing the datasets of all configured OpenCPU sessions.

  NOTE(review): the original docstring referred to a "Calumma REST Api"
  and appears to have been copied from another provider.
  """
  def __init__(self):
    self.c = config
    self._sessions = [OpenCPUSession(desc) for desc in config.sessions]
    if config.discover:
      self._sessions.extend([OpenCPUSession(desc) for desc in discover_sessions(config.discover)])
  def __len__(self):
    # NOTE(review): 'entries' is not defined in this class; presumably it
    # is provided by ADataSetProvider — otherwise this raises
    # AttributeError. Verify against the base class.
    return len(self.entries)
  def __iter__(self):
    import itertools
    return itertools.chain(*self._sessions)
  def __getitem__(self, dataset_id):
    # NOTE(review): raises StopIteration (not KeyError) when the id is
    # unknown — confirm callers expect that.
    return next((e for e in self if e.id == dataset_id))
def create():
  """Plugin entry point: build the OpenCPU dataset provider."""
  return OpenCPUProvider()
| sgratzl/phovea_data_opencpu | phovea_data_opencpu/data_provider.py | Python | bsd-3-clause | 12,141 |
#! /usr/bin/env python3
# Demo of os.fork()/os.wait(): the parent forks a child, the child sleeps
# one second and exits, and the parent reaps it and reports the status.
import os, sys, time
pid = os.getpid()
os.write(1, ("About to fork (pid:%d)\n" % pid).encode())
# fork() returns 0 in the child, the child's pid in the parent, <0 on error.
rc = os.fork()
if rc < 0:
    os.write(2, ("fork failed, returning %d\n" % rc).encode())
    sys.exit(1)
elif rc == 0: # child
    os.write(1, ("Child: My pid==%d. Parent's pid=%d\n" %
                 (os.getpid(), pid)).encode())
    time.sleep(1) # block for 1 second
    os.write(1, "Child ....terminating now with exit code 0\n".encode())
    sys.exit(0)
else: # parent (forked ok)
    os.write(1, ("Parent: My pid=%d. Child's pid=%d\n" %
                 (pid, rc)).encode())
    # wait() returns a (child_pid, status) tuple.
    # NOTE(review): status is the raw wait status (exit code << 8), not
    # the plain exit code the message suggests — confirm intent.
    childPidCode = os.wait()
    os.write(1, ("Parent: Child %d terminated with exit code %d\n" %
                 childPidCode).encode())
| robustUTEP/os-shell | demos/p2-wait.py | Python | bsd-3-clause | 811 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# test_simulated_psfs.py
#
# Copyright 2016 Bruno S <bruno@oac.unc.edu.ar>
#
# This file is part of ProperImage (https://github.com/toros-astro/ProperImage)
# License: BSD-3-Clause
# Full Text: https://github.com/toros-astro/ProperImage/blob/master/LICENSE.txt
#
import os
import shlex
import subprocess
import numpy as np
from astropy.io import fits
from astropy.time import Time
import tinynpydb as npdb
from properimage import simtools as sm
from properimage import propercoadd as pc
# Simulated image side length in pixels.
N = 1024 # side
test_dir = os.path.abspath('./test/test_images/several_stars_gramschmidt')
if not os.path.exists(test_dir):
    os.makedirs(test_dir)
# Fixed observation timestamp written into the FITS headers.
now = '2016-05-17T00:00:00.1234567'
t = Time(now)
SN = 1
# 100 random star fluxes in [10, 20010).
weights = np.random.random(100)*20000 + 10
# Sweep elliptical PSF shapes: x/y FWHM and rotation angle.
for xfwhm in [4, 5, 6]:
    for yfwhm in [2, 3, 7]:
        for theta in [10, 50, 90, 130]:
            filenames = []
            # random star positions kept away from the image borders
            x = np.random.randint(low=30, high=900, size=100)
            y = np.random.randint(low=30, high=900, size=100)
            xy = [(x[i], y[i]) for i in range(100)]
            m = sm.delta_point(N, center=False, xy=xy, weights=weights)
            img_dir = os.path.join(test_dir, str(xfwhm)+'_'+str(yfwhm)+'_'+
                                   str(theta))
            if not os.path.exists(img_dir):
                os.makedirs(img_dir)
            # simulate 12 noisy exposures of the same star field
            for i in range(12):
                im = sm.image(m, N, t_exp=1, X_FWHM=xfwhm, Y_FWHM=yfwhm,
                              theta=theta, SN=SN, bkg_pdf='poisson')
                filenames.append(sm.store_fits(im, t, t_exp=1, i=i,
                                               zero=3.1415, path=img_dir))
            # reference coadd produced with the external SWarp tool
            cmd = ' '
            for files in filenames:
                cmd += ' ' + files
            coadd = os.path.join(img_dir, 'coadd.fits')
            swarp = shlex.split('swarp '+cmd+' -IMAGEOUT_NAME '+coadd)
            subprocess.call(swarp)
            with pc.ImageEnsemble(filenames) as ensemble:
                # S = ensemble.calculate_S(n_procs=4)
                R, S = ensemble.calculate_R(n_procs=4, return_S=True)
                if isinstance(S, np.ma.masked_array):
                    S = S.filled(1.)
                if isinstance(R, np.ma.masked_array):
                    R = R.real.filled(1.)
                # store the S and R coadds next to the simulated frames
                shdu = fits.PrimaryHDU(S)
                shdulist = fits.HDUList([shdu])
                shdulist.writeto(os.path.join(img_dir,'S.fits'), overwrite=True)
                rhdu = fits.PrimaryHDU(R.real)
                rhdulist = fits.HDUList([rhdu])
                rhdulist.writeto(os.path.join(img_dir,'R.fits'), overwrite=True)
| toros-astro/ProperImage | drafts/test_simulated_psfs.py | Python | bsd-3-clause | 2,650 |
# -*- coding: utf-8 -*-
"""Django page CMS ``managers``."""
from pages import settings
from pages.cache import cache
from pages.utils import normalize_url, get_now
from pages.phttp import get_slug
from django.db import models
from django.db.models import Q
from django.db.models import Max
from django.conf import settings as global_settings
from mptt.managers import TreeManager
class FakePage():
    """Stand-in page used for Content objects that have no real page.

    It mimics just enough of the Page interface (``id``, per-instance
    content cache and ``freeze_date``) for the content lookup code.
    """
    def __init__(self):
        # An id no database row can ever have; the content cache is keyed
        # on it. Do not change this value.
        self.id = -999
        self._content_dict = {}
        self.freeze_date = None
    def invalidate(self, content_type):
        """Drop the cached content of *content_type* for this fake page."""
        cache_key = ContentManager.PAGE_CONTENT_DICT_KEY % (
            self.id, content_type, 0)
        cache.delete(cache_key)
        self._content_dict = None
fake_page = FakePage()
class PageManager(TreeManager):
    """
    Page manager provide several filters to obtain pages :class:`QuerySet`
    that respect the page attributes and project settings.
    """
    # Defined at class-creation time only when the setting is enabled, so
    # every manager operation is transparently restricted to the current
    # site.
    if settings.PAGE_HIDE_SITES:
        def get_query_set(self):
            """Restrict operations to pages on the current site."""
            return super(PageManager, self).get_query_set().filter(
                sites=global_settings.SITE_ID)
    def on_site(self, site_id=None):
        """Return a :class:`QuerySet` of pages that are published on the site
        defined by the ``SITE_ID`` setting.
        :param site_id: specify the id of the site object to filter with.
        """
        if settings.PAGE_USE_SITE_ID:
            if not site_id:
                site_id = global_settings.SITE_ID
            return self.filter(sites=site_id)
        return self.all()
    def root(self):
        """Return a :class:`QuerySet` of pages without parent."""
        return self.on_site().filter(parent__isnull=True)
    def navigation(self):
        """Creates a :class:`QuerySet` of the published root pages."""
        return self.on_site().filter(
            status=self.model.PUBLISHED).filter(parent__isnull=True)
    def hidden(self):
        """Creates a :class:`QuerySet` of the hidden pages."""
        return self.on_site().filter(status=self.model.HIDDEN)
    def filter_published(self, queryset):
        """Filter the given pages :class:`QuerySet` to obtain only published
        page."""
        if settings.PAGE_USE_SITE_ID:
            queryset = queryset.filter(sites=global_settings.SITE_ID)
        queryset = queryset.filter(status=self.model.PUBLISHED)
        # Optionally honour the publication window settings.
        if settings.PAGE_SHOW_START_DATE:
            queryset = queryset.filter(publication_date__lte=get_now())
        if settings.PAGE_SHOW_END_DATE:
            queryset = queryset.filter(
                Q(publication_end_date__gt=get_now()) |
                Q(publication_end_date__isnull=True)
            )
        return queryset
    def published(self):
        """Creates a :class:`QuerySet` of published
        :class:`Page <pages.models.Page>`."""
        return self.filter_published(self)
    def drafts(self):
        """Creates a :class:`QuerySet` of drafts using the page's
        :attr:`Page.publication_date`."""
        pub = self.on_site().filter(status=self.model.DRAFT)
        if settings.PAGE_SHOW_START_DATE:
            pub = pub.filter(publication_date__gte=get_now())
        return pub
    def expired(self):
        """Creates a :class:`QuerySet` of expired using the page's
        :attr:`Page.publication_end_date`."""
        return self.on_site().filter(
            publication_end_date__lte=get_now())
    def from_path(self, complete_path, lang, exclude_drafts=True):
        """Return a :class:`Page <pages.models.Page>` according to
        the page's path."""
        if complete_path.endswith("/"):
            complete_path = complete_path[:-1]
        # just return the root page
        if complete_path == '':
            root_pages = self.root()
            if root_pages:
                return root_pages[0]
            else:
                return None
        slug = get_slug(complete_path)
        from pages.models import Content
        # Candidate pages are those whose (current or old) slug matches.
        page_ids = Content.objects.get_page_ids_by_slug(slug)
        pages_list = self.on_site().filter(id__in=page_ids)
        if exclude_drafts:
            pages_list = pages_list.exclude(status=self.model.DRAFT)
        if len(pages_list) == 1:
            if(settings.PAGE_USE_STRICT_URL and
                pages_list[0].get_complete_slug(lang) != complete_path):
                    return None
            return pages_list[0]
        # if more than one page is matching the slug,
        # we need to use the full URL
        if len(pages_list) > 1:
            for page in pages_list:
                if page.get_complete_slug(lang) == complete_path:
                    return page
        return None
    def from_slug(self, slug):
        """Return the page whose latest slug content matches *slug*.

        Raises ValueError when no content matches.
        """
        from pages.models import Content
        content = Content.objects.get_content_slug_by_slug(slug)
        if content is None:
            raise ValueError("Slug '%s' didn't match any content." % slug)
        return content.page
class ContentManager(models.Manager):
    """:class:`Content <pages.models.Content>` manager methods"""
    # Cache key template: page id, placeholder/content type, frozen flag.
    PAGE_CONTENT_DICT_KEY = "page_content_dict_%d_%s_%d"
    def set_or_create_content(self, page, language, ctype, body):
        """Set or create a :class:`Content <pages.models.Content>` for a
        particular page and language.
        :param page: the concerned page object.
        :param language: the wanted language.
        :param ctype: the content type.
        :param body: the content of the Content object.
        """
        try:
            # update the latest revision in place
            content = self.filter(page=page, language=language,
                type=ctype).latest('creation_date')
            content.body = body
        except self.model.DoesNotExist:
            content = self.model(page=page, language=language, body=body,
                type=ctype)
        content.save()
        return content
    def create_content_if_changed(self, page, language, ctype, body):
        """Create a :class:`Content <pages.models.Content>` for a particular
        page and language only if the content has changed from the last
        time.
        :param page: the concerned page object.
        :param language: the wanted language.
        :param ctype: the content type.
        :param body: the content of the Content object.
        """
        try:
            content = self.filter(
                page=page, language=language,
                type=ctype).latest('creation_date')
            # unchanged body: keep the existing revision
            if content.body == body:
                return content
        except self.model.DoesNotExist:
            pass
        content = self.create(
            page=page, language=language, body=body,
            type=ctype)
        # Delete old revisions
        if settings.PAGE_CONTENT_REVISION_DEPTH:
            oldest_content = self.filter(
                page=page, language=language,
                type=ctype
            ).order_by('-creation_date')[settings.PAGE_CONTENT_REVISION_DEPTH:]
            for c in oldest_content:
                c.delete()
        return content
    def get_content_object(self, page, language, ctype):
        """Gets the latest published :class:`Content <pages.models.Content>`
        for a particular page, language and placeholder type."""
        params = {
            'language': language,
            'type': ctype,
            # the fake page stores page-less content under page=None
            'page': None if page is fake_page else page
        }
        if page.freeze_date:
            # frozen pages only see revisions created before the freeze
            params['creation_date__lte'] = page.freeze_date
        return self.filter(**params).latest()
    def get_content(self, page, language, ctype, language_fallback=False):
        """Gets the latest content string for a particular page, language and
        placeholder.
        :param page: the concerned page object.
        :param language: the wanted language.
        :param ctype: the content type.
        :param language_fallback: fallback to another language if ``True``.
        """
        if page is None:
            page = fake_page
        if " " in ctype:
            raise ValueError("Ctype cannot contain spaces.")
        if not language:
            language = settings.PAGE_DEFAULT_LANGUAGE
        frozen = int(bool(page.freeze_date))
        key = self.PAGE_CONTENT_DICT_KEY % (page.id, ctype, frozen)
        # Spaces do not work with memcache
        key = key.replace(' ', '-')
        # two cache layers: per-page dict first, then the shared cache
        if page._content_dict is None:
            page._content_dict = dict()
        if page._content_dict.get(key, None):
            content_dict = page._content_dict.get(key)
        else:
            content_dict = cache.get(key)
        # fill a dict object for each language, that will create
        # P * L queries.
        # L == number of language, P == number of placeholder in the page.
        # Once generated the result is cached.
        if not content_dict:
            content_dict = {}
            for lang in settings.PAGE_LANGUAGES:
                try:
                    content = self.get_content_object(page, lang[0], ctype)
                    content_dict[lang[0]] = content.body
                except self.model.DoesNotExist:
                    content_dict[lang[0]] = ''
            page._content_dict[key] = content_dict
            cache.set(key, content_dict)
        if language in content_dict and content_dict[language]:
            return content_dict[language]
        if language_fallback:
            # first language with a non-empty body wins
            for lang in settings.PAGE_LANGUAGES:
                if lang[0] in content_dict and content_dict[lang[0]]:
                    return content_dict[lang[0]]
        return ''
    def get_content_slug_by_slug(self, slug):
        """Returns the latest :class:`Content <pages.models.Content>`
        slug object that match the given slug for the current site domain.
        :param slug: the wanted slug.
        """
        content = self.filter(type='slug', body=slug)
        if settings.PAGE_USE_SITE_ID:
            content = content.filter(page__sites__id=global_settings.SITE_ID)
        try:
            content = content.latest('creation_date')
        except self.model.DoesNotExist:
            return None
        else:
            return content
    def get_page_ids_by_slug(self, slug):
        """Return all page's id matching the given slug.
        This function also returns pages that have an old slug
        that match.
        :param slug: the wanted slug.
        """
        ids = self.filter(type='slug', body=slug).values('page_id').annotate(
            max_creation_date=Max('creation_date')
        )
        return [content['page_id'] for content in ids]
class PageAliasManager(models.Manager):
    """:class:`PageAlias <pages.models.PageAlias>` manager."""
    def from_path(self, request, path, lang):
        """
        Resolve a request to an alias. returns a
        :class:`PageAlias <pages.models.PageAlias>` if the url matches
        no page at all. The aliasing system supports plain
        aliases (``/foo/bar``) as well as aliases containing GET parameters
        (like ``index.php?page=foo``).
        :param request: the request object
        :param path: the complete path to the page
        :param lang: not used
        """
        from pages.models import PageAlias
        url = normalize_url(path)
        # §1: try with complete query string
        query = request.META.get('QUERY_STRING')
        if query:
            url = url + '?' + query
        try:
            alias = PageAlias.objects.get(url=url)
            return alias
        except PageAlias.DoesNotExist:
            pass
        # §2: try with path only
        url = normalize_url(path)
        try:
            alias = PageAlias.objects.get(url=url)
            return alias
        except PageAlias.DoesNotExist:
            pass
        # §3: not alias found, we give up
        return None
| batiste/django-page-cms | pages/managers.py | Python | bsd-3-clause | 11,982 |
"""
byceps.services.board.category_query_service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from typing import Optional, Sequence
from ...database import db
from .models.category import Category as DbCategory
from .transfer.models import (
BoardID,
Category,
CategoryID,
CategoryWithLastUpdate,
)
def count_categories_for_board(board_id: BoardID) -> int:
    """Return the number of categories for that board."""
    query = DbCategory.query.for_board(board_id)
    return query.count()


def find_category_by_id(category_id: CategoryID) -> Optional[Category]:
    """Return the category with that id, or `None` if not found."""
    db_category = DbCategory.query.get(category_id)
    return None if db_category is None else _db_entity_to_category(db_category)


def find_category_by_slug(board_id: BoardID, slug: str) -> Optional[Category]:
    """Return the category for that board and slug, or `None` if not found."""
    db_category = DbCategory.query \
        .for_board(board_id) \
        .filter_by(slug=slug) \
        .first()
    return None if db_category is None else _db_entity_to_category(db_category)
def get_categories(board_id: BoardID) -> Sequence[Category]:
    """Return all categories for that board, ordered by position."""
    categories = DbCategory.query \
        .for_board(board_id) \
        .order_by(DbCategory.position) \
        .all()
    return [_db_entity_to_category(category) for category in categories]


def get_categories_excluding(
    board_id: BoardID, category_id: CategoryID
) -> Sequence[Category]:
    """Return all categories for that board except for the specified one."""
    categories = DbCategory.query \
        .for_board(board_id) \
        .filter(DbCategory.id != category_id) \
        .order_by(DbCategory.position) \
        .all()
    return [_db_entity_to_category(category) for category in categories]


def get_categories_with_last_updates(
    board_id: BoardID
) -> Sequence[CategoryWithLastUpdate]:
    """Return the categories for that board.

    Include the creator of the last posting in each category.
    """
    # Hidden categories are excluded; the last updater is eagerly loaded
    # to avoid one query per category.
    categories_with_last_update = DbCategory.query \
        .for_board(board_id) \
        .filter_by(hidden=False) \
        .options(
            db.joinedload(DbCategory.last_posting_updated_by),
        ) \
        .all()

    return [
        _db_entity_to_category_with_last_update(category)
        for category in categories_with_last_update
    ]
def _db_entity_to_category(category: DbCategory) -> Category:
    """Convert a database row into a `Category` transfer object."""
    # NOTE: positional construction — the argument order must match the
    # field order of the transfer model.
    return Category(
        category.id,
        category.board_id,
        category.position,
        category.slug,
        category.title,
        category.description,
        category.topic_count,
        category.posting_count,
        category.hidden,
    )


def _db_entity_to_category_with_last_update(
    category: DbCategory
) -> CategoryWithLastUpdate:
    """Convert a database row into a `CategoryWithLastUpdate` transfer object."""
    # Same field order as `_db_entity_to_category`, plus the last-update info.
    return CategoryWithLastUpdate(
        category.id,
        category.board_id,
        category.position,
        category.slug,
        category.title,
        category.description,
        category.topic_count,
        category.posting_count,
        category.hidden,
        category.last_posting_updated_at,
        category.last_posting_updated_by,
    )
| m-ober/byceps | byceps/services/board/category_query_service.py | Python | bsd-3-clause | 3,382 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import xml.dom.minidom
from devil.utils import cmd_helper
from pylib.constants import host_paths
# Location of the bundled FindBugs distribution and its main jar.
_FINDBUGS_HOME = os.path.join(host_paths.DIR_SOURCE_ROOT, 'third_party',
                              'findbugs')
_FINDBUGS_JAR = os.path.join(_FINDBUGS_HOME, 'lib', 'findbugs.jar')
# Maximum JVM heap size (in MB) for the FindBugs process.
_FINDBUGS_MAX_HEAP = 768
# Chromium-specific FindBugs plugin.
_FINDBUGS_PLUGIN_PATH = os.path.join(
    host_paths.DIR_SOURCE_ROOT, 'tools', 'android', 'findbugs_plugin', 'lib',
    'chromiumPlugin.jar')
def _ParseXmlResults(results_doc):
  """Split a FindBugs XML report into (errors, warnings) sets."""
  errors = set()
  warnings = set()
  # Only element children of the document root are of interest.
  for en in (n for n in results_doc.documentElement.childNodes
             if n.nodeType == xml.dom.Node.ELEMENT_NODE):
    if en.tagName == 'Errors':
      errors.update(_ParseErrors(en))
    if en.tagName == 'BugInstance':
      warnings.add(_ParseBugInstance(en))
  return errors, warnings
def _GetMessage(node):
for c in (n for n in node.childNodes
if n.nodeType == xml.dom.Node.ELEMENT_NODE):
if c.tagName == 'Message':
if (len(c.childNodes) == 1
and c.childNodes[0].nodeType == xml.dom.Node.TEXT_NODE):
return c.childNodes[0].data
return None
def _GetTextContent(node):
if (len(node.childNodes) == 1
and node.childNodes[0].nodeType == xml.dom.Node.TEXT_NODE):
return node.childNodes[0].data
return ''
def _ParseErrors(node):
  """Turn the element children of an <Errors> node into FindBugsError objects."""
  errors = set()
  for error_node in (n for n in node.childNodes
                     if n.nodeType == xml.dom.Node.ELEMENT_NODE):
    error_text = '(unable to determine error text)'
    if error_node.tagName == 'Error':
      # collect all non-empty <ErrorMessage> texts
      error_message_nodes = (
          n for n in error_node.childNodes
          if (n.nodeType == xml.dom.Node.ELEMENT_NODE
              and n.tagName == 'ErrorMessage'))
      text_pieces = [_GetTextContent(n) for n in error_message_nodes]
      if text_pieces:
        error_text = ', '.join(t for t in text_pieces if t)
    # NOTE(review): every element child produces an entry keyed by its tag
    # name — including non-<Error> tags (e.g. <MissingClass>), which get
    # the placeholder text. Confirm this is intended.
    errors.add(FindBugsError(error_node.tagName, error_text))
  return errors
def _ParseBugInstance(node):
  """Convert a <BugInstance> element into a FindBugsWarning."""
  bug = FindBugsWarning(node.getAttribute('type'))
  msg_parts = []
  for c in (n for n in node.childNodes
            if n.nodeType == xml.dom.Node.ELEMENT_NODE):
    if c.tagName == 'Class':
      msg_parts.append(_GetMessage(c))
    elif c.tagName == 'Method':
      msg_parts.append(_GetMessage(c))
    elif c.tagName == 'Field':
      msg_parts.append(_GetMessage(c))
    elif c.tagName == 'SourceLine':
      # SourceLine carries the location information for the warning.
      bug.file_name = c.getAttribute('sourcefile')
      if c.hasAttribute('start'):
        bug.start_line = int(c.getAttribute('start'))
      if c.hasAttribute('end'):
        bug.end_line = int(c.getAttribute('end'))
      msg_parts.append(_GetMessage(c))
    elif c.tagName == 'ShortMessage':
      msg_parts.append(_GetTextContent(c))
  # drop the parts for which no message could be extracted
  bug.message = tuple(m for m in msg_parts if m)
  return bug
class FindBugsError(object):
  """Value object for an analysis error reported by FindBugs.

  NOTE: ``__cmp__`` and the ``cmp()`` builtin are Python-2-only; this
  module targets the Python 2 build tooling.
  """

  def __init__(self, error_type='', error_val=''):
    self.error_type = error_type
    self.error_val = error_val

  def __cmp__(self, other):
    # lexicographic ordering: type first, then value
    return (cmp(self.error_type, other.error_type)
            or cmp(self.error_val, other.error_val))

  def __eq__(self, other):
    return self.__dict__ == other.__dict__

  def __hash__(self):
    return hash((self.error_type, self.error_val))

  def __ne__(self, other):
    return not self == other

  def __str__(self):
    return '%s: %s' % (self.error_type, self.error_val)
class FindBugsWarning(object):
  """Value object for a single FindBugs warning (<BugInstance>).

  NOTE: ``__cmp__`` and the ``cmp()`` builtin are Python-2-only.
  """

  def __init__(self, bug_type='', end_line=0, file_name='', message=None,
               start_line=0):
    self.bug_type = bug_type
    self.end_line = end_line
    self.file_name = file_name
    # message must be an (immutable) tuple so instances stay hashable
    if message is None:
      self.message = tuple()
    else:
      self.message = message
    self.start_line = start_line

  def __cmp__(self, other):
    # order primarily by location, then by type and message
    return (cmp(self.file_name, other.file_name)
            or cmp(self.start_line, other.start_line)
            or cmp(self.end_line, other.end_line)
            or cmp(self.bug_type, other.bug_type)
            or cmp(self.message, other.message))

  def __eq__(self, other):
    return self.__dict__ == other.__dict__

  def __hash__(self):
    return hash((self.bug_type, self.end_line, self.file_name, self.message,
                 self.start_line))

  def __ne__(self, other):
    return not self == other

  def __str__(self):
    return '%s: %s' % (self.bug_type, '\n  '.join(self.message))
def Run(exclude, classes_to_analyze, system_classes, auxiliary_classes,
        output_file, findbug_args, jars):
  """Run FindBugs.

  Args:
    exclude: the exclude xml file, refer to FindBugs's -exclude command option.
    classes_to_analyze: the list of classes need to analyze, refer to FindBugs'
      -onlyAnalyze command line option.
    system_classes: system classes to help analysis.
    auxiliary_classes: other classes to help analysis. Refer to FindBugs'
      -auxclasspath command line option.
    output_file: An optional path to dump XML results to.
    findbug_args: A list of additional command line options to pass to Findbugs.
    jars: the jars (or class paths) to analyze; each is made absolute before
      being appended to the command line.

  Returns:
    A 3-tuple (command line string that was run, set of parsed errors,
    set of parsed warnings), where the last two come from _ParseXmlResults
    on the FindBugs XML output.
  """
  # TODO(jbudorick): Get this from the build system.
  all_aux_classes = []
  all_aux_classes.extend(system_classes or [])
  all_aux_classes.extend(
      os.path.abspath(classes)
      for classes in auxiliary_classes or [])
  cmd = ['java',
         '-classpath', '%s:' % _FINDBUGS_JAR,
         '-Xmx%dm' % _FINDBUGS_MAX_HEAP,
         '-Dfindbugs.home="%s"' % _FINDBUGS_HOME,
         '-jar', _FINDBUGS_JAR,
         '-textui', '-sortByClass',
         '-pluginList', _FINDBUGS_PLUGIN_PATH, '-xml:withMessages']
  if system_classes:
    cmd.extend(['-auxclasspath', ':'.join(all_aux_classes)])
  if classes_to_analyze:
    cmd.extend(['-onlyAnalyze', classes_to_analyze])
  if exclude:
    cmd.extend(['-exclude', os.path.abspath(exclude)])
  if output_file:
    cmd.extend(['-output', output_file])
  if findbug_args:
    cmd.extend(findbug_args)
  cmd.extend(os.path.abspath(j) for j in jars or [])
  # When dumping to a file, parse the results back from disk; otherwise
  # parse FindBugs' stdout directly.
  if output_file:
    _, _, stderr = cmd_helper.GetCmdStatusOutputAndError(cmd)
    results_doc = xml.dom.minidom.parse(output_file)
  else:
    _, raw_out, stderr = cmd_helper.GetCmdStatusOutputAndError(cmd)
    results_doc = xml.dom.minidom.parseString(raw_out)
  # FindBugs chatters on stderr; surface it at debug level only.
  for line in stderr.splitlines():
    logging.debug('  %s', line)
  current_errors_set, current_warnings_set = _ParseXmlResults(results_doc)
  return (' '.join(cmd), current_errors_set, current_warnings_set)
| chrisdickinson/nojs | build/android/pylib/utils/findbugs.py | Python | bsd-3-clause | 6,601 |
import logging.config
# Set up a verbose debugger level for tracing
TRACE_LEVEL_NUM = 5
logging.addLevelName(TRACE_LEVEL_NUM, "TRACE")


def trace(self, message, *args, **kwargs):
    """Log *message* at the custom TRACE level (numeric level 5)."""
    if not self.isEnabledFor(TRACE_LEVEL_NUM):
        return
    # Logger._log takes the positional format args as a single tuple.
    self._log(TRACE_LEVEL_NUM, message, args, **kwargs)


# Attach as a method so every Logger instance gains .trace().
logging.Logger.trace = trace
def configure_logging(filename=None):
    """Configure root logging with a console handler and optional log file.

    Args:
        filename: if not None, additionally write timestamped log output
            to this file.
    """
    time_format = '[%(asctime)s] :: %(name)s - %(levelname)s - %(message)s'
    simple_format = '%(name)s - %(message)s'

    handlers = {
        # This is what gets printed out to the console
        'console': {
            'class': 'logging.StreamHandler',
            'formatter': 'simple',
            'level': 'DEBUG',
        },
    }
    root_handlers = ['console']
    if filename is not None:
        # Timestamped file log alongside the console output.
        handlers['file'] = {
            'class': 'logging.FileHandler',
            'formatter': 'time',
            'filename': filename,
            'level': 'DEBUG',
        }
        root_handlers.append('file')

    config = {
        'version': 1,
        'formatters': {
            'time': {'format': time_format},
            'simple': {'format': simple_format},
        },
        'handlers': handlers,
        'loggers': {
            '__main__': {'level': 'DEBUG'},
            'neurogen.calibration': {'level': 'ERROR'},
            'cochlear.calibration': {'level': 'ERROR'},
            'experiment': {'level': 'ERROR'},
            'cochlear': {'level': 'DEBUG'},
            'cochlear.dpoae_experiment': {'level': 'DEBUG'},
            'cochlear.nidaqmx': {'level': 'DEBUG'},
        },
        'root': {
            'handlers': root_handlers,
        },
    }
    logging.config.dictConfig(config)
| bburan/cochlear | cochlear/__init__.py | Python | bsd-3-clause | 1,762 |
from django.conf.urls import patterns, url
from . views import SubscribersIntersection, FetchGroupView, FetchGroupMembersMonitorView, GetIntersectionsView
# URL routes for the subscribers-intersection views.
# NOTE(review): patterns() was deprecated in Django 1.8 and removed in 1.10,
# so this module targets an older Django release -- confirm before upgrading.
urlpatterns = patterns('',
    # Landing page for the intersection tool.
    url(r'^$', SubscribersIntersection.as_view(), name="subscribers_intersection"),
    url(r'^fetch_group/$', FetchGroupView.as_view(), name="fetch_group"),
    # <social> is a word (network identifier); group ids are numeric.
    url(r'^fetch_group_members_monitor/(?P<social>\w+)/(?P<group_id>\d+)/$', FetchGroupMembersMonitorView.as_view(), name="fetch_group_members_monitor"), # noqa
    url(r'^get_intersections/(?P<social>\w+)/(?P<group_id1>\d+)/(?P<group_id2>\d+)/$', GetIntersectionsView.as_view(), name="get_intersections"), # noqa
)
| Andertaker/django-intersections | intersections/urls.py | Python | bsd-3-clause | 659 |
# -*- coding: utf-8 -*-
"""
gitdict.author
"""
from .exceptions import NoGlobalAuthor
from time import altzone, daylight, timezone
from time import time as curtime
from pygit2 import Signature
import subprocess
def get_default_author():
    """Find the default author on this system.

    :returns:
        The default author as defined from git config --global user.name
        and git config --global user.email.
    :rtype: :class:`DictAuthor <DictAuthor>`
    :raises: :class:`NoGlobalAuthor <NoGlobalAuthor>`
    """
    # TODO libgit2 provides an interface for this, but pygit2 does not. Should
    # patch pygit2 to provide it. In the interim, we must use subprocess.
    try:
        git_args = ['git', 'config', '--global']
        # universal_newlines=True makes check_output return text on both
        # Python 2 and 3 (on 3 it would otherwise return bytes, and the
        # resulting DictAuthor would carry bytes into pygit2.Signature).
        name = subprocess.check_output(git_args + ['user.name'],
                                       universal_newlines=True).rstrip()
        email = subprocess.check_output(git_args + ['user.email'],
                                        universal_newlines=True).rstrip()
        return DictAuthor(name, email)
    except subprocess.CalledProcessError:  # thankfully, error code is returned
        raise NoGlobalAuthor()
class DictAuthor(object):
    """Wrapper for a name and email to use when committing to git.

    :param name: The name to use when committing
    :type name: string
    :param email: The email to use when committing
    :type email: string
    """

    def __init__(self, name, email):
        self._name = name
        self._email = email

    @property
    def name(self):
        """The author's name."""
        return self._name

    @property
    def email(self):
        """The author's email."""
        return self._email

    def signature(self, time=None, offset=None):
        """Generate a pygit2.Signature.

        :param time:
            (optional) the time for the signature, in UTC seconds. Defaults to
            current time.
        :type time: int
        :param offset:
            (optional) the time offset for the signature, in minutes.
            Defaults to the system offset.
        :type offset: int
        :returns: a signature
        :rtype: pygit2.Signature
        """
        # Explicit `is None` checks: the previous `offset or ...` fallback
        # silently replaced a legitimate offset of 0 (UTC) with the system
        # offset, and `time or ...` did the same for epoch 0.
        if offset is None:
            # Floor division keeps the result an int under Python 3.
            # NOTE(review): time.timezone/altzone are seconds *west* of UTC,
            # while pygit2 offsets are minutes east -- confirm the intended
            # sign before relying on the default.
            offset = (altzone if daylight else timezone) // 60
        if time is None:
            time = int(curtime())
        return Signature(self.name, self.email, time, offset)
| talos/gitdict | gitdict/author.py | Python | bsd-3-clause | 2,243 |
from __future__ import absolute_import
import six
import copy
import logging
from django.conf import settings
from sentry.similarity.features import FeatureSet
from sentry.similarity.encoder import Encoder
logger = logging.getLogger(__name__)
# We need this list of known labels generated from all grouping components to
# be able to compare events with different contributing components.
#
# This is currently just mushing all component names from all strategy
# configurations together, but in theory we could split this up into a mapping
# of (config_name) -> (set of labels). Even better would be to be able to
# statically determine this for any given strategy configuration without this
# list (requires refactor of grouping)
#
# (<component ID>, <shingle label>) -> <redis prefix>
_KNOWN_COMPONENT_LABEL_SUFFIXES = {
    ("message", "character-5-shingle"): "a",
    ("symbol", "ident-shingle"): "b",
    ("context-line", "ident-shingle"): "c",
    ("frame", "frame-ident"): "d",
    ("filename", "ident-shingle"): "e",
    ("module", "ident-shingle"): "f",
    ("function", "ident-shingle"): "g",
    ("lineno", "ident-shingle"): "h",
    ("stacktrace", "frames-pairs"): "i",
    ("type", "ident-shingle"): "j",
    ("value", "character-5-shingle"): "k",
    ("fingerprint", "ident-shingle"): "l",
    ("stacktrace", "frames-ident"): "m",
}

# The redis prefixes must be unique per label; guard against an accidental
# duplicate when a new entry is added above.
assert len(set(_KNOWN_COMPONENT_LABEL_SUFFIXES.values())) == len(_KNOWN_COMPONENT_LABEL_SUFFIXES)
class GroupingBasedFeatureSet(FeatureSet):
    """Similarity features computed from grouping-strategy components.

    Exposes one feature label per (grouping config id, component id,
    shingle label) triple, with a short alias built from the configuration
    mapping and the one-letter redis prefix table above.
    """

    def __init__(self, index, configurations=None):
        # index: backend used to store/query the extracted features.
        self.index = index
        if configurations is None:
            configurations = settings.SENTRY_SIMILARITY_GROUPING_CONFIGURATIONS_TO_INDEX
        self.configurations = configurations

        # This is intentionally non-configurable and only exists because we
        # subclass from FeatureSet. TODO: Remove FeatureSet subclassing!
        # eg: Replace with ABC hierarchy or kill old similarity.
        self.encoder = Encoder()
        self.expected_extraction_errors = ()
        self.expected_encoding_errors = ()

        # One (initially empty) feature slot per configuration and known
        # component/shingle-label pair; extract() asserts labels exist here.
        self.features = {
            (config_id, component_id, shingle_label): None
            for config_id in self.configurations
            for component_id, shingle_label in _KNOWN_COMPONENT_LABEL_SUFFIXES
        }
        # Alias each label as "<configurations[config_id]>:<prefix letter>".
        self.aliases = {
            (config_id, component_id, shingle_label): "{}:{}".format(
                self.configurations[config_id],
                _KNOWN_COMPONENT_LABEL_SUFFIXES[component_id, shingle_label],
            )
            for config_id, component_id, shingle_label in self.features
        }

    def extract(self, event):
        """Extract similarity features from *event*.

        Returns a dict mapping (config id, component id, shingle label) to a
        sorted list of encoded feature values, merged across all grouping
        variants of every configured strategy.
        """
        results = {}
        # backup data to work around mutations in get_grouping_variants
        data_bak = copy.deepcopy(event._data)
        for configuration in self.configurations:
            variants = event.get_grouping_variants(
                force_config=configuration, normalize_stacktraces=True,
            )
            # Restore the pristine payload before the next configuration runs.
            # NOTE(review): the same data_bak object is re-assigned each pass,
            # so a later pass may mutate it too -- confirm whether a per-pass
            # deepcopy is needed.
            event._data = data_bak
            for variant in variants.values():
                for (component_id, shingle_label), features in variant.encode_for_similarity():
                    label = (configuration, component_id, shingle_label)
                    assert label in self.features
                    # Union features across variants sharing the same label.
                    results.setdefault(label, set()).update(features)
        return {label: sorted(features) for label, features in six.iteritems(results)}
| beeftornado/sentry | src/sentry/similarity/featuresv2.py | Python | bsd-3-clause | 3,446 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import subprocess
from telemetry.core.platform import desktop_platform_backend
from telemetry.core.platform import proc_util
class PosixPlatformBackend(desktop_platform_backend.DesktopPlatformBackend):
  """Shared POSIX implementation of process inspection via 'ps'."""

  # This is an abstract class. It is OK to have abstract methods.
  # pylint: disable=W0223

  def _RunCommand(self, args):
    """Runs |args| and returns its stdout."""
    return subprocess.Popen(args, stdout=subprocess.PIPE).communicate()[0]

  def _GetFileContents(self, path):
    """Returns the entire contents of the file at |path|."""
    with open(path, 'r') as f:
      return f.read()

  def _GetPsOutput(self, columns, pid=None):
    """Returns output of the 'ps' command as a list of lines.

    Subclass should override this function.

    Args:
      columns: A list of required columns, e.g., ['pid', 'pss'].
      pid: If not None, returns only the information of the process
        with the pid.
    """
    args = ['ps']
    # `pid is not None` (not `!=`) so the identity check is idiomatic and
    # a pid of 0 is still treated as an explicit pid.
    args.extend(['-p', str(pid)] if pid is not None else ['-e'])
    for c in columns:
      # Appending '=' to a column name suppresses its header line.
      args.extend(['-o', c + '='])
    return self._RunCommand(args).splitlines()

  def GetChildPids(self, pid):
    """Returns a list of child pids of |pid|."""
    ps_output = self._GetPsOutput(['pid', 'ppid', 'state'])
    processes = [line.split() for line in ps_output]
    return proc_util.GetChildPids(processes, pid)

  def GetCommandLine(self, pid):
    """Returns the command line of |pid|, or None if unavailable."""
    command = self._GetPsOutput(['command'], pid)
    return command[0] if command else None

  def GetFlushUtilityName(self):
    """Returns the name of the binary used to flush the system cache."""
    return 'clear_system_cache'
| mogoweb/chromium-crosswalk | tools/telemetry/telemetry/core/platform/posix_platform_backend.py | Python | bsd-3-clause | 1,655 |