| repo_name (string, 5–92 chars) | path (string, 4–232 chars) | copies (string, 19 classes) | size (string, 4–7 chars) | content (string, 721–1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,222,525,072B) | line_mean (float64, 6.51–99.8) | line_max (int64, 15–997) | alpha_frac (float64, 0.25–0.96) | autogenerated (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|
jfunction/capetown_loadshedding_map | convert_json.py | 1 | 1792 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# convert_json.py
#
# Copyright 2014 Jared <jarednorman@hotmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import json
import collections
def main():
    # Open in text mode: json expects str, not bytes, on Python 3.
    with open('capetown_tables_json_1.json', 'r') as f_in:
        d = json.JSONDecoder(object_pairs_hook=collections.OrderedDict).decode(f_in.read())
stages = sorted(d.keys())
for stage in stages:
time_dict = d[stage]
times = sorted(time_dict.keys())
for time in times:
days_dict = time_dict[time]
start_time = int(time.split(':00 to ')[0])
#end_time = start_time + 2.5
days = ["Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Sunday"]
for i, day in enumerate(days):
shedding_zones = str(days_dict[day])
if not shedding_zones:
shedding_zones = []
else:
shedding_zones = shedding_zones.split(', ')
days_dict[day] = shedding_zones
#time_dict[start_time] = time_dict.pop(time)
    # Text mode: json.dumps returns str, which cannot be written to a binary file.
    with open('capetown_tables_json_2.json', 'w') as f_out:
        f_out.write(json.dumps(d, indent=2))
return 0
if __name__ == '__main__':
main()
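# Illustration only (hypothetical data): a stage/time bin such as
#   {"Stage 1": {"00:00 to 02:30": {"Monday": "1, 5, 9", ...}}}
# is rewritten so each day maps to a list of zone strings:
#   {"Stage 1": {"00:00 to 02:30": {"Monday": ["1", "5", "9"], ...}}}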
| gpl-2.0 | 7,618,190,367,658,015,000 | 32.185185 | 85 | 0.679129 | false |
schristakidis/p2ner | p2ner/components/produceroverlay/centralproducerclient/centralproducerclient/messages/peerremovemessage.py | 1 | 1519 |
# -*- coding: utf-8 -*-
# Copyright 2012 Loris Corazza, Sakis Christakidis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from p2ner.base.ControlMessage import ControlMessage
from p2ner.base.Consts import MessageCodes as MSG
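# Control messages handled by the central producer client: a peer announcing
# that it has stopped, and a report that peers have died. Both remove the
# affected peer(s) from the overlay's neighbour list.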
class ClientStoppedMessage(ControlMessage):
type = "sidmessage"
code = MSG.CLIENT_STOPPED
ack = True
def trigger(self, message):
if self.stream.id != message.streamid:
return False
return True
def action(self, message, peer):
self.log.debug('received client stopped message from %s',peer)
self.overlay.removeNeighbour(peer)
class ClientDied(ControlMessage):
type = "peerlistmessage"
code = MSG.CLIENT_DIED
ack = True
def trigger(self, message):
return message.streamid == self.stream.id
def action(self, message, peer):
for p in message.peer:
self.log.debug('received clientDied message for %s from %s',p,peer)
self.overlay.removeNeighbour(p)
| apache-2.0 | 3,916,878,670,359,468,500 | 32.755556 | 79 | 0.695194 | false |
aerospike/aerospike-client-python | test/new_tests/test_data.py | 1 | 4069 |
import pytest
try:
    import cPickle as pickle  # Python 2
except ImportError:
    import pickle  # Python 3
class SomeClass(object):
pass
pos_data = [
(('test', 'demo', 1), {'age': 1, 'name': 'name1'}),
(('test', 'demo', 2), {'age': 2, 'name': 'Mr John', 'bmi': 3.55}),
(('test', 'demo', 'boolean_key'), {'is_present': True}),
(('test', 'demo', 'string'), {'place': "New York", 'name': 'John'}),
(('test', 'demo', u"bb"), {'a': [u'aa', 2, u'aa', 4, u'cc', 3, 2, 1]}),
(('test', u'demo', 1), {'age': 1, 'name': 'name1'}),
(('test', 'demo', 1), {"is_present": None}),
(('test', 'unknown_set', 1), {
'a': {'k': [bytearray("askluy3oijs", "utf-8")]}}),
# Bytearray
(("test", "demo", bytearray(
"asd;as[d'as;d", "utf-8")), {"name": "John"}),
(('test', 'demo', 'bytes_key'), {'bytes': bytearray('John', 'utf-8')}),
# List Data
(('test', 'demo', 'list_key'), {'names': ['John', 'Marlen', 'Steve']}),
(('test', 'demo', 'list_key'), {'names': [1, 2, 3, 4, 5]}),
(('test', 'demo', 'list_key'), {
'names': [1.5, 2.565, 3.676, 4, 5.89]}),
(('test', 'demo', 'list_key'), {'names': ['John', 'Marlen', 1024]}),
(('test', 'demo', 'list_key_unicode'), {
'a': [u'aa', u'bb', 1, u'bb', u'aa']}),
(('test', 'demo', 'objects'), {'objects': [
pickle.dumps(SomeClass()), pickle.dumps(SomeClass())]}),
# Map Data
(('test', 'demo', 'map_key'), {'names': {'name': 'John', 'age': 24}}),
(('test', 'demo', 'map_key_float'), {
"double_map": {"1": 3.141, "2": 4.123, "3": 6.285}}),
(('test', 'demo', 'map_key_unicode'), {
'a': {u'aa': u'11'}, 'b': {u'bb': u'22'}}),
# (('test', 'demo', 1),
# {'odict': OrderedDict(sorted({'banana': 3, 'apple': 4, 'pear': 1, 'orange': 2}.items(),
# key=lambda t: t[0]))}),
# Tuple Data
(('test', 'demo', 'tuple_key'), {'tuple_seq': tuple('abc')}),
# Set Data
(('test', 'demo', 'set_key'), {"set_data": set([1, 2])}),
(('test', 'demo', 'fset_key'), {
"fset_data": frozenset(["Frankfurt", "Basel", "Freiburg"])}),
# Hybrid
(('test', 'demo', 'multiple_bins'), {
'i': ["nanslkdl", 1, bytearray("asd;as[d'as;d", "utf-8")],
's': {"key": "asd';q;'1';"},
'b': 1234,
'l': '!@#@#$QSDAsd;as'
}),
(('test', 'demo', 'list_map_key'), {
'names': ['John', 'Marlen', 'Steve'],
'names_and_age': [{'name': 'John',
'age': 24}, {'name': 'Marlen',
'age': 25}]
}),
(('test', 'demo', 'map_tuple_key'), {
'seq': {'bb': tuple('abc')}
}),
]
key_neg = [
((None, 'demo', 1), -2, "namespace must be a string"),
((12.34, 'demo', 1), -2, "namespace must be a string"),
((35, 'demo', 1), -2, "namespace must be a string"),
(([], 'demo', 1), -2, "namespace must be a string"),
(({}, 'demo', 1), -2, "namespace must be a string"),
(((), 'demo', 1), -2, "namespace must be a string"),
(None, -2, 'key is invalid'),
(['test', 'demo', 'key_as_list'], -2, "key is invalid"),
(('test', 123, 1), -2, "set must be a string"),
(('test', 12.36, 1), -2, "set must be a string"),
(('test', [], 1), -2, "set must be a string"),
(('test', {}, 1), -2, "set must be a string"),
(('test', (), 1), -2, "set must be a string"),
(('test', 'demo', None),
-2, 'either key or digest is required'),
(('test', 'demo'),
-2, 'key tuple must be (Namespace, Set, Key) or (Namespace, Set, None, Digest)'),
]
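# Hypothetical usage sketch (not part of this module): suites typically consume
# these records through parametrization, e.g.
#
#   @pytest.mark.parametrize("key, record", pos_data)
#   def test_put_get(as_connection, key, record):
#       as_connection.put(key, record)
#       _, _, bins = as_connection.get(key)
#
# where as_connection is assumed to be a connected client fixture.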
| apache-2.0 | 4,281,508,143,284,940,000 | 43.228261 | 112 | 0.390759 | false |
endrebak/epic | tests/run/test_merge_chip_and_input.py | 1 | 4225 |
import pytest
import pandas as pd
import numpy as np
import logging
from io import StringIO
from joblib import delayed, Parallel
@pytest.fixture
def input_data():
pass
@pytest.fixture
def expected_result():
pass
def merge_chip_and_input(windows, nb_cpu):
"""Merge lists of chromosome bin df chromosome-wise.
Returns a list of dataframes, one per chromosome, with the collective count
per bin for all files.
Keyword Arguments:
windows -- OrderedDict where the keys are files, the values are lists of
dfs, one per chromosome.
nb_cpu -- cores to use
"""
windows = iter(windows)
merged = next(windows)
for chromosome_dfs in windows:
merged = merge_two_bin_dfs(merged, chromosome_dfs, nb_cpu)
return merged
# @pytest.mark.unit
# def test_merge_two_bin_files(sample1_dfs, sample2_dfs):
# """TODO: Need to test that the lists might not have the same/all chromosomes.
# It might be possible that there are no sig islands on one chromosome in one
# file, while there are in the others. Solve by taking in dict with chromos
# instead of list with files?
# You will probably be asked about a bug due to this some time.
# """
# print("Read run epic code. Begin there!\n" * 5)
# result = merge_chip_and_input([sample2_dfs, sample2_dfs], 1)
# print(result)
# assert 1
def merge_two_bin_dfs(sample1_dfs, sample2_dfs, nb_cpu):
merged_chromosome_dfs = Parallel(n_jobs=nb_cpu)(
delayed(_merge_two_bin_dfs)(df1, df2)
for df1, df2 in zip(sample1_dfs, sample2_dfs))
return merged_chromosome_dfs
def _merge_two_bin_dfs(df1, df2):
    # Outer merge keeps bins that appear in only one of the two samples.
    merged_df = df1.merge(df2, how="outer", on=["Chromosome", "Bin"],
                          suffixes=("_x", "_y"))
    # Bins missing from one sample contribute a count of zero.
    merged_df = merged_df.fillna(0)
    merged_df["Count"] = merged_df["Count_x"] + merged_df["Count_y"]
    merged_df = merged_df.drop(["Count_x", "Count_y"], axis=1)
    return merged_df
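# Worked example (hypothetical values): merging a sample holding {chrM bin 400:
# Count 1} with one holding {chrM bin 400: Count 1, chrM bin 600: Count 2}
# yields {chrM bin 400: Count 2, chrM bin 600: Count 2} -- the outer join plus
# fillna(0) lets a bin absent from one sample simply contribute zero.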
@pytest.fixture
def sample1_dfs():
return [pd.read_table(
StringIO(u"""
Count Chromosome Bin
1 chrM 400
1 chrM 2600
1 chrM 3600
1 chrM 3800
1 chrM 12800
1 chrM 14200"""),
sep="\s+",
header=0), pd.read_table(
StringIO(u"""Count Chromosome Bin
1 chrX 2820000
1 chrX 2854800
1 chrX 3001400
1 chrX 3354400
1 chrX 3489400
1 chrX 3560200
1 chrX 4011200
1 chrX 4644600
1 chrX 4653600
1 chrX 4793400
1 chrX 5136800
1 chrX 5572800
1 chrX 5589400
1 chrX 5792000
1 chrX 5961800
1 chrX 6951000
1 chrX 7125800
1 chrX 7199000
1 chrX 7443200
1 chrX 7606000
1 chrX 7627800
1 chrX 8035600
1 chrX 8073600
1 chrX 8367800
1 chrX 9021000
1 chrX 9472400
1 chrX 9620800
1 chrX 9652000
1 chrX 9801000
1 chrX 9953800"""),
sep="\s+",
header=0)]
@pytest.fixture
def sample2_dfs():
return [pd.read_table(
StringIO(u"""
Count Chromosome Bin
1 chrM 400
1 chrM 2600
1 chrM 3600
1 chrM 3800
1 chrM 12800
1 chrM 14200"""),
header=0,
sep="\s+", ), pd.read_table(
StringIO(u"""Count Chromosome Bin
1 chrX 2820000
1 chrX 2854800
1 chrX 3001400
1 chrX 3354400
1 chrX 3489400
1 chrX 3560200
1 chrX 4011200
1 chrX 4644600
1 chrX 4653600
1 chrX 4793400
1 chrX 5136800
1 chrX 5572800
1 chrX 5589400
1 chrX 5792000
1 chrX 5961800
1 chrX 6951000
1 chrX 7125800
1 chrX 7199000
1 chrX 7443200
1 chrX 7606000
1 chrX 7627800
1 chrX 8035600
1 chrX 8073600
1 chrX 8367800
1 chrX 9021000
1 chrX 9472400
1 chrX 9620800
1 chrX 9652000
1 chrX 9801000
1 chrX 9953800"""),
sep="\s+",
header=0)]
| mit | -4,803,652,179,347,965,000 | 23.005682 | 83 | 0.569704 | false |
GkAntonius/feynman | feynman/core/vertex.py | 1 | 5315 |
from copy import deepcopy
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.patches as mpa
import matplotlib.text as mpt
from . import Drawable
from .. import vectors
from .. import colors
from ..constants import tau
__all__ = ['Vertex', 'Verticle']
class Vertex(Drawable):
"""
A vertex. Usually represented as a disc.
Parameters
----------
xy: [x, y]
Coordinates of the vertex.
dxy:
Coordinates shift, so that the position is given by xy + dxy.
dx:
Coordinates shift in the x direction.
dy:
Coordinates shift in the y direction.
angle:
Angle from xy, so that the position is given by
xy + radius * [cos(angle), sin(angle)].
Angle is given in units of tau=2pi.
radius:
Radius from xy, so that the position is given by
xy + radius * [cos(angle), sin(angle)].
**kwargs:
Any matplotlib line style argument.
"""
_xy = np.zeros(2)
_style = dict()
_lines = list()
_diagram = None
def __init__(self, xy=(0,0), **kwargs):
dx = np.array(kwargs.pop('dx', 0.))
dy = np.array(kwargs.pop('dy', 0.))
dxy = np.array(kwargs.pop('dxy', (0.,0.)))
angle = np.array(kwargs.pop('angle', 0.))
radius = np.array(kwargs.pop('radius', 0.))
        # The builtin complex replaces np.complex, which newer NumPy removed.
        cxy = (complex(*xy) + complex(*dxy) + complex(dx, dy)
               + radius * np.e ** (1j * tau * angle))
self.xy = np.array([cxy.real, cxy.imag])
#self.xy = ( xy + dxy + np.array([dx, dy])
# + radius * np.array([np.cos(angle*tau), np.sin(angle*tau)]))
self.style = dict(
marker='o',
linestyle='',
markersize=10,
color='k',
zorder=20,
)
self.style.update(kwargs)
# TODO Should be able to get the lines connected to that vertex.
self.texts = list()
@property
def style(self):
"""
A dictionary of matplotlib line style,
such as marker, markersize, color, etc.
"""
return self._style
@style.setter
def style(self, dictionary):
self._style = dictionary
@property
def x(self): return self._xy[0]
@x.setter
def x(self, val): self._xy[0] = val
@property
def y(self): return self._xy[1]
@y.setter
def y(self, val): self._xy[1] = val
@property
def xy(self):
return self._xy
@property
def ccenter(self):
        return complex()
    @property
    def xcc(self):
        return np.real(self.ccenter)
    @property
    def ycc(self):
        return np.imag(self.ccenter)
@property
def cxy(self):
        return complex(self.x - self.xcc, self.y - self.ycc)
@cxy.setter
def cxy(self, c):
self.x = np.real(c-self.xcc) + self.xcc
self.y = np.imag(c-self.ycc) + self.ycc
@xy.setter
def xy(self, xy):
self._xy = np.array(xy)
assert self.xy.ndim == 1, "Wrong dimension for line xy."
assert self.xy.size == 2, "Wrong dimension for line xy."
# User
def set_xy(self, xy):
"""Set the position of the vertex."""
self.xy = xy
def get_marker(self):
"""Returns a matplotlib.lines.Line2D instance."""
return mpl.lines.Line2D([self.xy[0]],[self.xy[1]], **self.style)
# TODO
# Change x, y for dx, dy
def text(self, s, x=-.025, y=+.025, **kwargs):
"""
Add text near the vertex.
Parameters
----------
s: Text string.
x: (-0.025)
x position, relative to the vertex.
        y: (+0.025)
y position, relative to the vertex.
fontsize: (14)
The font size.
**kwargs:
Any other style specification for a matplotlib.text.Text instance.
"""
default = dict(
verticalalignment='center',
horizontalalignment='center',
fontsize=14
)
for key, val in default.items():
kwargs.setdefault(key, val)
self.texts.append((s, x, y, kwargs))
def get_texts(self):
"""Return a list of matplotlib.text.Text instances."""
texts = list()
for (s, x, y, kwargs) in self.texts:
xtext, ytext = self.xy + np.array([x,y])
texts.append(mpt.Text(xtext, ytext, s, **kwargs))
return texts
def draw(self, ax):
marker = self.get_marker()
ax.add_line(marker)
for text in self.get_texts():
ax.add_artist(text)
return
@property
def lines(self):
"""The lines attached to it."""
return self._lines
@lines.setter
def lines(self, value):
self._lines = value
@property
def diagram(self):
"""The diagram it belongs to."""
return self._diagram
def chunk(self, *args, **kwargs):
self.diagram.add_chunk(self, *args, **kwargs)
def scale(self, x):
"""Apply a scaling factor to the size of the marker."""
self.style['markersize'] *= x
# =========================================================================== #
# This is just an alias to maintain backward compatibility
Verticle = Vertex
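# Minimal usage sketch (names are illustrative; a surrounding feynman Diagram
# normally supplies the matplotlib axes):
#   v1 = Vertex(xy=(.2, .5))
#   v2 = Vertex(v1.xy, dx=.4)                  # shifted right of v1
#   v3 = Vertex(v1.xy, angle=.25, radius=.3)   # a quarter turn above v1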
| gpl-3.0 | 4,032,931,072,829,275,000 | 23.159091 | 79 | 0.539229 | false |
Ebag333/Pyfa | gui/builtinStatsViews/rechargeViewFull.py | 1 | 5430 |
# =============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
import wx
from gui.statsView import StatsView
from gui.bitmapLoader import BitmapLoader
from gui.utils.numberFormatter import formatAmount
import gui.mainFrame
import gui.builtinStatsViews.resistancesViewFull as rvf
from service.fit import Fit
class RechargeViewFull(StatsView):
name = "rechargeViewFull"
def __init__(self, parent):
StatsView.__init__(self)
self.parent = parent
self.mainFrame = gui.mainFrame.MainFrame.getInstance()
self.mainFrame.Bind(rvf.EFFECTIVE_HP_TOGGLED, self.toggleEffective)
self.effective = True
def getHeaderText(self, fit):
return "Recharge rates"
def getTextExtentW(self, text):
width, height = self.parent.GetTextExtent(text)
return width
def toggleEffective(self, event):
self.effective = event.effective
sFit = Fit.getInstance()
self.refreshPanel(sFit.getFit(self.mainFrame.getActiveFit()))
event.Skip()
def populatePanel(self, contentPanel, headerPanel):
contentSizer = contentPanel.GetSizer()
self.panel = contentPanel
self.headerPanel = headerPanel
sizerTankStats = wx.FlexGridSizer(3, 5)
for i in range(4):
sizerTankStats.AddGrowableCol(i + 1)
contentSizer.Add(sizerTankStats, 0, wx.EXPAND, 0)
# Add an empty label first for correct alignment.
sizerTankStats.Add(wx.StaticText(contentPanel, wx.ID_ANY, ""), 0)
toolTipText = {"shieldPassive": "Passive shield recharge", "shieldActive": "Active shield boost",
"armorActive": "Armor repair amount", "hullActive": "Hull repair amount"}
for tankType in ("shieldPassive", "shieldActive", "armorActive", "hullActive"):
bitmap = BitmapLoader.getStaticBitmap("%s_big" % tankType, contentPanel, "gui")
tooltip = wx.ToolTip(toolTipText[tankType])
bitmap.SetToolTip(tooltip)
sizerTankStats.Add(bitmap, 0, wx.ALIGN_CENTER)
toolTipText = {"reinforced": "Reinforced", "sustained": "Sustained"}
for stability in ("reinforced", "sustained"):
bitmap = BitmapLoader.getStaticBitmap("regen%s_big" % stability.capitalize(), contentPanel, "gui")
tooltip = wx.ToolTip(toolTipText[stability])
bitmap.SetToolTip(tooltip)
sizerTankStats.Add(bitmap, 0, wx.ALIGN_CENTER)
for tankType in ("shieldPassive", "shieldActive", "armorActive", "hullActive"):
if stability == "reinforced" and tankType == "shieldPassive":
sizerTankStats.Add(wx.StaticText(contentPanel, wx.ID_ANY, ""))
continue
tankTypeCap = tankType[0].capitalize() + tankType[1:]
lbl = wx.StaticText(contentPanel, wx.ID_ANY, "0.0", style=wx.ALIGN_RIGHT)
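                    # Stored as e.g. self.labelTankReinforcedShieldActive so
                    # that refreshPanel can look each label up via getattr.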
setattr(self, "labelTank%s%s" % (stability.capitalize(), tankTypeCap), lbl)
box = wx.BoxSizer(wx.HORIZONTAL)
box.Add(lbl, 0, wx.EXPAND)
box.Add(wx.StaticText(contentPanel, wx.ID_ANY, " HP/s"), 0, wx.EXPAND)
sizerTankStats.Add(box, 0, wx.ALIGN_CENTRE)
contentPanel.Layout()
def refreshPanel(self, fit):
        # If we did anything interesting, we'd update our labels to reflect the new fit's stats here
for stability in ("reinforced", "sustained"):
if stability == "reinforced" and fit is not None:
tank = fit.effectiveTank if self.effective else fit.tank
elif stability == "sustained" and fit is not None:
tank = fit.effectiveSustainableTank if self.effective else fit.sustainableTank
else:
tank = None
for name in ("shield", "armor", "hull"):
lbl = getattr(self, "labelTank%s%sActive" % (stability.capitalize(), name.capitalize()))
if tank is not None:
lbl.SetLabel("%.1f" % tank["%sRepair" % name])
else:
lbl.SetLabel("0.0")
if fit is not None:
label = getattr(self, "labelTankSustainedShieldPassive")
value = fit.effectiveTank["passiveShield"] if self.effective else fit.tank["passiveShield"]
label.SetLabel(formatAmount(value, 3, 0, 9))
else:
value = 0
label = getattr(self, "labelTankSustainedShieldPassive")
label.SetLabel("0")
label.SetToolTip(wx.ToolTip("%.3f" % value))
self.panel.Layout()
self.headerPanel.Layout()
RechargeViewFull.register()
| gpl-3.0 | -1,832,182,773,703,005,200 | 41.093023 | 110 | 0.618785 | false |
MartinHjelmare/cam_acq | camacq/__main__.py | 1 | 7569 |
"""Main module."""
from __future__ import print_function
import argparse
import logging
import os
import re
import socket
import sys
import camacq.bootstrap as bootstrap
import camacq.config as config_util
from camacq.const import (CONFIG_DIR, COORD_FILE, END_10X, END_40X, END_63X,
FIELDS_X, FIELDS_Y, FIRST_JOB, GAIN_ONLY, HOST,
IMAGING_DIR, INIT_GAIN, INPUT_GAIN, LOG_LEVEL,
OBJECTIVE, PORT, TEMPLATE_FILE)
def check_dir_arg(path):
"""Check that argument is a directory."""
# remove if not needed
if os.path.isdir(path):
return path
else:
raise argparse.ArgumentTypeError(
'String {} is not a path to a directory'.format(path))
def check_file_arg(path):
"""Check that argument is a file."""
# remove if not needed
if os.path.isfile(path):
return path
else:
raise argparse.ArgumentTypeError(
'String {} is not a path to a file'.format(path))
def check_well_arg(arg):
"""Check that argument is valid well."""
try:
return re.match(r'^U\d\d--V\d\d$', arg).group(0)
except AttributeError:
raise argparse.ArgumentTypeError(
'String {} does not match required format'.format(arg))
def check_field_arg(arg):
"""Check that argument is valid field."""
try:
return re.match(r'^X\d\d--Y\d\d$', arg).group(0)
except AttributeError:
raise argparse.ArgumentTypeError(
'String {} does not match required format'.format(arg))
def check_socket_address(value):
"""Check that value is a valid address."""
try:
socket.getaddrinfo(value, None)
return value
except OSError:
raise argparse.ArgumentTypeError(
'String {} is not a valid domain name or ip address'.format(value))
def check_log_level(loglevel):
"""Validate log level and return it if valid."""
# assuming loglevel is bound to the string value obtained from the
# command line argument. Convert to upper case to allow the user to
# specify --log=DEBUG or --log=debug
numeric_level = getattr(logging, loglevel.upper(), None)
if not isinstance(numeric_level, int):
raise argparse.ArgumentTypeError(
'String {} is not a valid log level'.format(loglevel))
else:
return numeric_level
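# e.g. check_log_level("debug") returns 10, i.e. logging.DEBUG.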
def check_obj(value):
"""Check that value is a objective lens."""
if value in [END_10X, END_40X, END_63X]:
return value
else:
raise argparse.ArgumentTypeError(
'String {} is not one of: {}, {}, {}'.format(
value, *[END_10X, END_40X, END_63X]))
def parse_command_line():
"""Parse the provided command line."""
parser = argparse.ArgumentParser(
description='Control microscope through client-server program.')
parser.add_argument(
IMAGING_DIR,
type=check_dir_arg,
help='the path to the directory where images are exported')
parser.add_argument(
'-g',
'--init-gain',
dest=INIT_GAIN,
type=check_file_arg,
help='the path to the csv file with start gain values')
parser.add_argument(
'-W',
'--last-well',
type=check_well_arg,
help='the id of the last well in the experiment, e.g. U11--V07')
parser.add_argument(
'--x-fields',
dest=FIELDS_X,
type=int,
help='the number (int) of fields on x axis in each well, e.g. 2')
parser.add_argument(
'--y-fields',
dest=FIELDS_Y,
type=int,
help='the number (int) of fields on y axis in each well, e.g. 2')
parser.add_argument(
'-j',
'--first-job',
dest=FIRST_JOB,
type=int,
help=('the integer marking the order of the first experiment job in\
the patterns'))
parser.add_argument(
'-c',
'--coord-file',
dest=COORD_FILE,
type=check_file_arg,
help='the path to the csv file with selected coordinates')
parser.add_argument(
'-t',
'--template-file',
dest=TEMPLATE_FILE,
type=check_file_arg,
help='the path to the csv file with template layout')
parser.add_argument(
'-G',
'--input-gain',
dest=INPUT_GAIN,
type=check_file_arg,
help='the path to the csv file with calculated gain values')
parser.add_argument(
'-H',
'--host',
dest=HOST,
type=check_socket_address,
help='the address of the host server, i.e. the microscope')
parser.add_argument(
'-P',
'--port',
dest=PORT,
type=int,
help='the tcp port of the host server, i.e. the microscope')
parser.add_argument(
'-O',
'--objective',
dest=OBJECTIVE,
type=check_obj,
help='select what objective to use as last objective in experiment')
parser.add_argument(
'--gain-only',
dest=GAIN_ONLY,
action='store_true',
help='an option to activate only running the gain job')
parser.add_argument(
'--log-level',
dest=LOG_LEVEL,
type=check_log_level,
help='an option to specify lowest log level to log')
parser.add_argument(
'-C',
'--config',
dest=CONFIG_DIR,
default=config_util.get_default_config_dir(),
help='the path to camacq configuration directory')
args = parser.parse_args()
if args.imaging_dir:
args.imaging_dir = os.path.normpath(args.imaging_dir)
if args.init_gain:
args.init_gain = os.path.normpath(args.init_gain)
if args.coord_file:
args.coord_file = os.path.normpath(args.coord_file)
if args.template_file:
args.template_file = os.path.normpath(args.template_file)
if args.input_gain:
args.input_gain = os.path.normpath(args.input_gain)
if args.config_dir:
args.config_dir = os.path.normpath(args.config_dir)
cmd_args_dict = vars(args)
    # Keep only arguments that were actually provided; dict.items() works on
    # Python 3, where the old iteritems() no longer exists.
    cmd_args_dict = {
        key: val for key, val in cmd_args_dict.items() if val}
return cmd_args_dict
def ensure_config_path(config_dir):
"""Validate the configuration directory."""
# Test if configuration directory exists
if not os.path.isdir(config_dir):
if config_dir != config_util.get_default_config_dir():
print(('Fatal Error: Specified configuration directory does '
'not exist {} ').format(config_dir))
sys.exit(1)
try:
os.mkdir(config_dir)
except OSError:
print(('Fatal Error: Unable to create default configuration '
'directory {} ').format(config_dir))
sys.exit(1)
def ensure_config_file(config_dir):
"""Ensure configuration file exists."""
config_path = config_util.ensure_config_exists(config_dir)
if config_path is None:
print('Error getting configuration path')
sys.exit(1)
return config_path
def main():
"""Main function."""
# Parse command line arguments
cmd_args = parse_command_line()
config_dir = os.path.join(os.getcwd(), cmd_args[CONFIG_DIR])
ensure_config_path(config_dir)
config_file = ensure_config_file(config_dir)
center = bootstrap.setup_file(config_file, cmd_args)
if not center:
print('Could not load config file at:', config_file)
sys.exit(1)
center.start()
return center.exit_code
if __name__ == '__main__':
main()
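# Hypothetical invocation (paths and values are illustrative only):
#   python -m camacq /data/imaging_dir --last-well U11--V07 \
#       --x-fields 2 --y-fields 2 -H 127.0.0.1 -P 8895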
| gpl-3.0 | 8,189,907,536,866,915,000 | 30.27686 | 79 | 0.601136 | false |
mattilyra/gensim | docs/src/conf.py | 1 | 7457 |
# -*- coding: utf-8 -*-
#
# gensim documentation build configuration file, created by
# sphinx-quickstart on Wed Mar 17 13:42:21 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
html_theme = 'gensim_theme'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinxcontrib.napoleon', 'sphinx.ext.imgmath', 'sphinxcontrib.programoutput']
autoclass_content = "both"
napoleon_google_docstring = False # Disable support for google-style docstring
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'indextoc'
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {'index': './_templates/indexcontent.html'}
# General information about the project.
project = u'gensim'
copyright = u'2009-now, Radim Řehůřek <me(at)radimrehurek.com>'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.4'
# The full version, including alpha/beta/rc tags.
release = '3.4.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# main_colour = "#ffbbbb"
html_theme_options = {
# "rightsidebar": "false",
# "stickysidebar": "true",
# "bodyfont": "'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', 'sans-serif'",
# "headfont": "'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', 'sans-serif'",
# "sidebarbgcolor": "fuckyou",
# "footerbgcolor": "#771111",
# "relbarbgcolor": "#993333",
# "sidebartextcolor": "#000000",
# "sidebarlinkcolor": "#330000",
# "codebgcolor": "#fffff0",
# "headtextcolor": "#000080",
# "headbgcolor": "#f0f0ff",
# "bgcolor": "#ffffff",
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['.']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "gensim"
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = ''
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {} # {'index': ['download.html', 'globaltoc.html', 'searchbox.html', 'indexsidebar.html']}
# html_sidebars = {'index': ['globaltoc.html', 'searchbox.html']}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
html_domain_indices = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'gensimdoc'
html_show_sphinx = False
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', 'gensim.tex', u'gensim Documentation', u'Radim Řehůřek', 'manual')]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
suppress_warnings = ['image.nonlocal_uri', 'ref.citation', 'ref.footnote']
| lgpl-2.1 | -371,278,304,731,654,850 | 32.868182 | 114 | 0.707153 | false |
sinnwerkstatt/landmatrix | apps/landmatrix/models/deal.py | 1 | 45041 |
import json
from typing import Optional, Set
from django.contrib.postgres.fields import ArrayField as _ArrayField, JSONField
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models, transaction
from django.db.models import Sum, F
from django.utils import timezone
from django.utils.translation import ugettext as _
from apps.landmatrix.models import Investor
from apps.landmatrix.models.country import Country
from apps.landmatrix.models.mixins import OldDealMixin, FromDictMixin
from apps.landmatrix.models.versions import Version, register_version, Revision
class ArrayField(_ArrayField):
def value_to_string(self, obj):
return self.value_from_object(obj)
class DealQuerySet(models.QuerySet):
def active(self):
return self.filter(status__in=(2, 3))
def public(self):
return self.active().filter(is_public=True)
def visible(self, user=None, subset="PUBLIC"):
        # TODO: which users should be allowed to get the unfiltered queryset?
if not user or not user.is_authenticated:
return self.public()
if subset == "PUBLIC":
return self.public()
elif subset == "ACTIVE":
return self.active()
return self
def get_deal_country_rankings(self, country_id: int = None):
rankings = (
self.exclude(country=None)
.values("country_id")
.annotate(Sum("deal_size"))
.order_by("-deal_size__sum")
)
if country_id:
for i, rank in enumerate(rankings, start=1):
if rank["country_id"] == country_id:
return i
return
return rankings
def get_investor_country_rankings(self, country_id: int = None):
rankings = (
DealTopInvestors.objects.filter(deal__in=self)
.values(country_id=F("investor__country__id"))
.annotate(deal_size__sum=Sum("deal__deal_size"))
.order_by("-deal_size__sum")
)
if country_id:
for i, rank in enumerate(rankings, start=1):
if rank["country_id"] == country_id:
return i
return
return rankings
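    # Usage sketch (hypothetical country id):
    #   Deal.objects.visible(user, "ACTIVE").get_deal_country_rankings(104)
    # returns that country's rank by summed deal size, or the full ranking
    # queryset when no country_id is given.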
class DealVersion(Version):
def to_dict(self, use_object=False):
deal = self.retrieve_object() if use_object else self.fields
return {
"id": self.id,
"deal": deal,
"revision": self.revision,
"object_id": self.object_id,
}
@register_version(DealVersion)
class Deal(models.Model, FromDictMixin, OldDealMixin):
""" Deal """
""" Locations """
# is a foreign key
""" General info """
# Land area
country = models.ForeignKey(
Country,
verbose_name=_("Target country"),
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name="deals",
)
intended_size = models.DecimalField(
_("Intended size (in ha)"),
help_text=_("ha"),
max_digits=12,
decimal_places=2,
blank=True,
null=True,
)
contract_size = JSONField(
_("Size under contract (leased or purchased area, in ha)"),
help_text=_("ha"),
blank=True,
null=True,
)
production_size = JSONField(
_("Size in operation (production, in ha)"),
help_text=_("ha"),
blank=True,
null=True,
)
land_area_comment = models.TextField(_("Comment on land area"), blank=True)
# Intention of investment
INTENTION_CHOICES = (
(
_("Agriculture"),
(
("BIOFUELS", _("Biofuels")),
("FOOD_CROPS", _("Food crops")),
("FODDER", _("Fodder")),
("LIVESTOCK", _("Livestock")),
("NON_FOOD_AGRICULTURE", _("Non-food agricultural commodities")),
("AGRICULTURE_UNSPECIFIED", _("Agriculture unspecified")),
),
),
(
_("Forestry"),
(
("TIMBER_PLANTATION", _("Timber plantation")),
("FOREST_LOGGING", _("Forest logging / management")),
("CARBON", _("For carbon sequestration/REDD")),
("FORESTRY_UNSPECIFIED", _("Forestry unspecified")),
),
),
(
_("Other"),
(
("MINING", _("Mining")),
("OIL_GAS_EXTRACTION", _("Oil / Gas extraction")),
("TOURISM", _("Tourism")),
("INDUSTRY", _("Industry")),
("CONVERSATION", _("Conservation")),
("LAND_SPECULATION", _("Land speculation")),
("RENEWABLE_ENERGY", _("Renewable energy")),
("OTHER", _("Other")),
),
),
)
intention_of_investment = JSONField(
_("Intention of investment"), choices=INTENTION_CHOICES, blank=True, null=True
)
intention_of_investment_comment = models.TextField(
_("Comment on intention of investment"), blank=True
)
# Nature of the deal
NATURE_OF_DEAL_CHOICES = (
("OUTRIGHT_PURCHASE", _("Outright purchase")),
("LEASE", _("Lease")),
("CONCESSION", _("Concession")),
(
"EXPLOITATION_PERMIT",
_("Exploitation permit / license / concession (for mineral resources)"),
),
("PURE_CONTRACT_FARMING", _("Pure contract farming")),
("OTHER", _("Other")),
)
nature_of_deal = ArrayField(
models.CharField(_("Nature of the deal"), max_length=100),
choices=NATURE_OF_DEAL_CHOICES,
blank=True,
null=True,
)
nature_of_deal_comment = models.TextField(
_("Comment on nature of the deal"), blank=True
)
# # Negotiation status
NEGOTIATION_STATUS_CHOICES = (
(
_("Intended"),
(
("EXPRESSION_OF_INTEREST", "Expression of interest"),
("UNDER_NEGOTIATION", "Under negotiation"),
("MEMORANDUM_OF_UNDERSTANDING", "Memorandum of understanding"),
),
),
(
_("Concluded"),
(
("ORAL_AGREEMENT", "Oral agreement"),
("CONTRACT_SIGNED", "Contract signed"),
),
),
(
_("Failed"),
(
("NEGOTIATIONS_FAILED", "Negotiations failed"),
("CONTRACT_CANCELED", "Contract canceled"),
),
),
("CONTRACT_EXPIRED", "Contract expired"),
("CHANGE_OF_OWNERSHIP", "Change of ownership"),
)
negotiation_status = JSONField(
_("Negotiation status"),
choices=NEGOTIATION_STATUS_CHOICES,
blank=True,
null=True,
)
negotiation_status_comment = models.TextField(
_("Comment on negotiation status"), blank=True
)
# # Implementation status
IMPLEMENTATION_STATUS_CHOICES = (
("PROJECT_NOT_STARTED", "Project not started"),
("STARTUP_PHASE", "Startup phase (no production)"),
("IN_OPERATION", "In operation (production)"),
("PROJECT_ABANDONED", "Project abandoned"),
)
implementation_status = JSONField(
_("Implementation status"),
choices=IMPLEMENTATION_STATUS_CHOICES,
blank=True,
null=True,
)
implementation_status_comment = models.TextField(
_("Comment on implementation status"), blank=True
)
# Purchase price
purchase_price = models.DecimalField(
_("Purchase price"), max_digits=18, decimal_places=2, blank=True, null=True
)
purchase_price_currency = models.ForeignKey(
"Currency",
verbose_name=_("Purchase price currency"),
on_delete=models.PROTECT,
blank=True,
null=True,
related_name="deal_purchase_price",
)
HA_AREA_CHOICES = (
("PER_HA", _("per ha")),
("PER_AREA", _("for specified area")),
)
purchase_price_type = models.CharField(
_("Purchase price area type"),
max_length=100,
choices=HA_AREA_CHOICES,
blank=True,
null=True,
)
purchase_price_area = models.DecimalField(
_("Purchase price area"),
max_digits=18,
decimal_places=2,
blank=True,
null=True,
)
purchase_price_comment = models.TextField(
_("Comment on purchase price"), blank=True
)
# Leasing fees
annual_leasing_fee = models.DecimalField(
_("Annual leasing fee"), max_digits=18, decimal_places=2, blank=True, null=True
)
annual_leasing_fee_currency = models.ForeignKey(
"Currency",
verbose_name=_("Annual leasing fee currency"),
on_delete=models.PROTECT,
blank=True,
null=True,
related_name="deal_annual_leasing_fee",
)
annual_leasing_fee_type = models.CharField(
_("Annual leasing fee area type"),
max_length=100,
choices=HA_AREA_CHOICES,
blank=True,
null=True,
)
annual_leasing_fee_area = models.DecimalField(
_("Annual leasing fee area"),
max_digits=18,
decimal_places=2,
blank=True,
null=True,
)
annual_leasing_fee_comment = models.TextField(
_("Comment on leasing fee"), blank=True
)
# Contract farming
# started implementing #113 . but not urgent, defering.
# YES_IN_PLANNING_NO_CHOICES = (
# ("", _("Unknown")),
# ("YES", _("Yes")),
# ("IN_PLANNING", _("In Planning")),
# ("NO", _("No")),
# )
# contract_farming = models.CharField(choices=YES_IN_PLANNING_NO_CHOICES, default="")
contract_farming = models.NullBooleanField()
on_the_lease_state = models.NullBooleanField(_("On leased / purchased"))
on_the_lease = JSONField(
_("On leased area/farmers/households"),
blank=True,
null=True,
)
off_the_lease_state = models.NullBooleanField(
_("Not on leased / purchased (out-grower)")
)
off_the_lease = JSONField(
_("Not on leased area/farmers/households (out-grower)"),
help_text=_("ha"),
blank=True,
null=True,
)
contract_farming_comment = models.TextField(
_("Comment on contract farming"), blank=True
)
""" Contracts """
# is a foreign key
""" Employment """
total_jobs_created = models.NullBooleanField(_("Jobs created (total)"))
total_jobs_planned = models.IntegerField(
_("Planned number of jobs (total)"), help_text=_("jobs"), blank=True, null=True
)
total_jobs_planned_employees = models.IntegerField(
_("Planned employees (total)"), help_text=_("employees"), blank=True, null=True
)
total_jobs_planned_daily_workers = models.IntegerField(
_("Planned daily/seasonal workers (total)"),
help_text=_("workers"),
blank=True,
null=True,
)
total_jobs_current = JSONField(
_("Current total number of jobs/employees/ daily/seasonal workers"),
blank=True,
null=True,
)
total_jobs_created_comment = models.TextField(
_("Comment on jobs created (total)"), blank=True
)
foreign_jobs_created = models.NullBooleanField(_("Jobs created (foreign)"))
foreign_jobs_planned = models.IntegerField(
_("Planned number of jobs (foreign)"),
help_text=_("jobs"),
blank=True,
null=True,
)
foreign_jobs_planned_employees = models.IntegerField(
_("Planned employees (foreign)"),
help_text=_("employees"),
blank=True,
null=True,
)
foreign_jobs_planned_daily_workers = models.IntegerField(
_("Planned daily/seasonal workers (foreign)"),
help_text=_("workers"),
blank=True,
null=True,
)
foreign_jobs_current = JSONField(
_("Current foreign number of jobs/employees/ daily/seasonal workers"),
blank=True,
null=True,
)
foreign_jobs_created_comment = models.TextField(
_("Comment on jobs created (foreign)"), blank=True
)
domestic_jobs_created = models.NullBooleanField(_("Jobs created (domestic)"))
domestic_jobs_planned = models.IntegerField(
_("Planned number of jobs (domestic)"),
help_text=_("jobs"),
blank=True,
null=True,
)
domestic_jobs_planned_employees = models.IntegerField(
_("Planned employees (domestic)"),
help_text=_("employees"),
blank=True,
null=True,
)
domestic_jobs_planned_daily_workers = models.IntegerField(
_("Planned daily/seasonal workers (domestic)"),
help_text=_("workers"),
blank=True,
null=True,
)
domestic_jobs_current = JSONField(
_("Current domestic number of jobs/employees/ daily/seasonal workers"),
blank=True,
null=True,
)
domestic_jobs_created_comment = models.TextField(
_("Comment on jobs created (domestic)"), blank=True
)
""" Investor info """
operating_company = models.ForeignKey(
Investor,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name="deals",
)
ACTOR_MAP = (
(
"GOVERNMENT_OR_STATE_INSTITUTIONS",
_(
"Government / state institutions (government, ministries, departments, agencies etc.)"
),
),
(
"TRADITIONAL_LAND_OWNERS_OR_COMMUNITIES",
_("Traditional land-owners / communities"),
),
(
"TRADITIONAL_LOCAL_AUTHORITY",
_("Traditional local authority (e.g. Chiefdom council / Chiefs)"),
),
("BROKER", _("Broker")),
("INTERMEDIARY", _("Intermediary")),
("OTHER", _("Other (please specify)")),
)
involved_actors = JSONField(
_("Actors involved in the negotiation / admission process"),
choices=ACTOR_MAP,
blank=True,
null=True,
)
project_name = models.CharField(
_("Name of investment project"), max_length=255, blank=True
)
investment_chain_comment = models.TextField(
_("Comment on investment chain"), blank=True
)
""" Data sources """
# is a foreign key
""" Local communities / indigenous peoples """
name_of_community = ArrayField(
models.CharField(_("Name of community"), max_length=255), blank=True, null=True
)
name_of_indigenous_people = ArrayField(
models.CharField(_("Name of indigenous people"), max_length=255),
blank=True,
null=True,
)
people_affected_comment = models.TextField(
_("Comment on communities / indigenous peoples affected"), blank=True
)
RECOGNITION_STATUS_CHOICES = (
(
"INDIGENOUS_RIGHTS_RECOGNIZED",
_(
"Indigenous Peoples traditional or customary rights recognized by government"
),
),
(
"INDIGENOUS_RIGHTS_NOT_RECOGNIZED",
_(
"Indigenous Peoples traditional or customary rights not recognized by government"
),
),
(
"COMMUNITY_RIGHTS_RECOGNIZED",
_("Community traditional or customary rights recognized by government"),
),
(
"COMMUNITY_RIGHTS_NOT_RECOGNIZED",
_("Community traditional or customary rights not recognized by government"),
),
)
recognition_status = ArrayField(
models.CharField(
_("Recognition status of community land tenure"),
max_length=100,
),
choices=RECOGNITION_STATUS_CHOICES,
blank=True,
null=True,
)
recognition_status_comment = models.TextField(
_("Comment on recognition status of community land tenure"), blank=True
)
COMMUNITY_CONSULTATION_CHOICES = (
("NOT_CONSULTED", _("Not consulted")),
("LIMITED_CONSULTATION", _("Limited consultation")),
("FPIC", _("Free, Prior and Informed Consent (FPIC)")),
("OTHER", _("Other")),
)
community_consultation = models.CharField(
_("Community consultation"),
max_length=100,
choices=COMMUNITY_CONSULTATION_CHOICES,
blank=True,
null=True,
)
community_consultation_comment = models.TextField(
_("Comment on consultation of local community"), blank=True
)
COMMUNITY_REACTION_CHOICES = (
("CONSENT", _("Consent")),
("MIXED_REACTION", _("Mixed reaction")),
("REJECTION", _("Rejection")),
)
community_reaction = models.CharField(
_("Community reaction"),
max_length=100,
choices=COMMUNITY_REACTION_CHOICES,
blank=True,
null=True,
)
community_reaction_comment = models.TextField(
_("Comment on community reaction"), blank=True
)
land_conflicts = models.NullBooleanField(_("Presence of land conflicts"))
land_conflicts_comment = models.TextField(
_("Comment on presence of land conflicts"), blank=True
)
displacement_of_people = models.NullBooleanField(_("Displacement of people"))
displaced_people = models.IntegerField(
_("Number of people actually displaced"), blank=True, null=True
)
displaced_households = models.IntegerField(
_("Number of households actually displaced"), blank=True, null=True
)
displaced_people_from_community_land = models.IntegerField(
_("Number of people displaced out of their community land"),
blank=True,
null=True,
)
displaced_people_within_community_land = models.IntegerField(
_("Number of people displaced staying on community land"), blank=True, null=True
)
displaced_households_from_fields = models.IntegerField(
_('Number of households displaced "only" from their agricultural fields'),
blank=True,
null=True,
)
displaced_people_on_completion = models.IntegerField(
_("Number of people facing displacement once project is fully implemented"),
blank=True,
null=True,
)
displacement_of_people_comment = models.TextField(
_("Comment on displacement of people"), blank=True
)
NEGATIVE_IMPACTS_CHOICES = (
("ENVIRONMENTAL_DEGRADATION", _("Environmental degradation")),
("SOCIO_ECONOMIC", _("Socio-economic")),
("CULTURAL_LOSS", _("Cultural loss")),
("EVICTION", _("Eviction")),
("DISPLACEMENT", _("Displacement")),
("VIOLENCE", _("Violence")),
("OTHER", _("Other")),
)
negative_impacts = ArrayField(
models.CharField(_("Negative impacts for local communities"), max_length=100),
choices=NEGATIVE_IMPACTS_CHOICES,
blank=True,
null=True,
)
negative_impacts_comment = models.TextField(
_("Comment on negative impacts for local communities"), blank=True
)
promised_compensation = models.TextField(
_("Promised compensation (e.g. for damages or resettlements)"), blank=True
)
received_compensation = models.TextField(
_("Received compensation (e.g. for damages or resettlements)"), blank=True
)
BENEFITS_CHOICES = (
("HEALTH", _("Health")),
("EDUCATION", _("Education")),
(
"PRODUCTIVE_INFRASTRUCTURE",
_("Productive infrastructure (e.g. irrigation, tractors, machinery...)"),
),
("ROADS", _("Roads")),
("CAPACITY_BUILDING", _("Capacity building")),
("FINANCIAL_SUPPORT", _("Financial support")),
("COMMUNITY_SHARES", _("Community shares in the investment project")),
("OTHER", _("Other")),
)
promised_benefits = ArrayField(
models.CharField(_("Promised benefits for local communities"), max_length=100),
choices=BENEFITS_CHOICES,
blank=True,
null=True,
)
promised_benefits_comment = models.TextField(
_("Comment on promised benefits for local communities"), blank=True
)
materialized_benefits = ArrayField(
models.CharField(
_("Materialized benefits for local communities"),
max_length=100,
),
choices=BENEFITS_CHOICES,
blank=True,
null=True,
)
materialized_benefits_comment = models.TextField(
_("Comment on materialized benefits for local communities"), blank=True
)
presence_of_organizations = models.TextField(
_(
"Presence of organizations and actions taken (e.g. farmer organizations, NGOs, etc.)"
),
blank=True,
)
""" Former use """
FORMER_LAND_OWNER_CHOICES = (
("STATE", _("State")),
("PRIVATE_SMALLHOLDERS", _("Private (smallholders)")),
("PRIVATE_LARGE_SCALE", _("Private (large-scale farm)")),
("COMMUNITY", _("Community")),
("INDIGENOUS_PEOPLE", _("Indigenous people")),
("OTHER", _("Other")),
)
former_land_owner = ArrayField(
models.CharField(max_length=100),
verbose_name=_("Former land owner"),
choices=FORMER_LAND_OWNER_CHOICES,
blank=True,
null=True,
)
former_land_owner_comment = models.TextField(
_("Comment on former land owner"), blank=True
)
FORMER_LAND_USE_CHOICES = (
("COMMERCIAL_AGRICULTURE", _("Commercial (large-scale) agriculture")),
("SMALLHOLDER_AGRICULTURE", _("Smallholder agriculture")),
("SHIFTING_CULTIVATION", _("Shifting cultivation")),
("PASTORALISM", _("Pastoralism")),
("HUNTING_GATHERING", _("Hunting/Gathering")),
("FORESTRY", _("Forestry")),
("CONSERVATION", _("Conservation")),
("OTHER", _("Other")),
)
former_land_use = ArrayField(
models.CharField(max_length=100),
verbose_name=_("Former land use"),
choices=FORMER_LAND_USE_CHOICES,
blank=True,
null=True,
)
former_land_use_comment = models.TextField(
_("Comment on former land use"), blank=True
)
FORMER_LAND_COVER_CHOICES = (
("CROPLAND", _("Cropland")),
("FOREST_LAND", _("Forest land")),
("PASTURE", _("Pasture")),
("RANGELAND", _("Shrub land/Grassland (Rangeland)")),
("MARGINAL_LAND", _("Marginal land")),
("WETLAND", _("Wetland")),
(
"OTHER_LAND",
_("Other land (e.g. developed land – specify in comment field)"),
),
)
former_land_cover = ArrayField(
models.CharField(max_length=100),
verbose_name=_("Former land cover"),
choices=FORMER_LAND_COVER_CHOICES,
blank=True,
null=True,
)
former_land_cover_comment = models.TextField(
_("Comment on former land cover"), blank=True
)
""" Produce info """
crops = JSONField(_("Crops area/yield/export"), blank=True, null=True)
crops_comment = models.TextField(_("Comment on crops"), blank=True)
animals = JSONField(_("Livestock area/yield/export"), blank=True, null=True)
animals_comment = models.TextField(_("Comment on livestock"), blank=True)
mineral_resources = JSONField(
_("Mineral resources area/yield/export"), blank=True, null=True
)
mineral_resources_comment = models.TextField(
_("Comment on mineral resources"), blank=True
)
contract_farming_crops = JSONField(
_("Contract farming crops"), help_text=_("ha"), blank=True, null=True
)
contract_farming_crops_comment = models.TextField(
_("Comment on contract farming crops"), blank=True
)
contract_farming_animals = JSONField(
_("Contract farming livestock"), help_text=_("ha"), blank=True, null=True
)
contract_farming_animals_comment = models.TextField(
_("Comment on contract farming livestock"), blank=True
)
has_domestic_use = models.NullBooleanField(_("Has domestic use"))
domestic_use = models.FloatField(
_("Domestic use"),
help_text="%",
blank=True,
null=True,
validators=[MinValueValidator(0), MaxValueValidator(100)],
)
has_export = models.NullBooleanField(_("Has export"))
export = models.FloatField(
_("Export"),
help_text="%",
blank=True,
null=True,
validators=[MinValueValidator(0), MaxValueValidator(100)],
)
export_country1 = models.ForeignKey(
Country,
verbose_name=_("Country 1"),
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name="+",
)
export_country1_ratio = models.FloatField(
_("Country 1 ratio"),
help_text="%",
blank=True,
null=True,
validators=[MinValueValidator(0), MaxValueValidator(100)],
)
export_country2 = models.ForeignKey(
Country,
verbose_name=_("Country 2"),
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name="+",
)
export_country2_ratio = models.FloatField(
_("Country 2 ratio"),
help_text="%",
blank=True,
null=True,
validators=[MinValueValidator(0), MaxValueValidator(100)],
)
export_country3 = models.ForeignKey(
Country,
verbose_name=_("Country 3"),
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name="+",
)
export_country3_ratio = models.FloatField(
_("Country 3 ratio"),
help_text="%",
blank=True,
null=True,
validators=[MinValueValidator(0), MaxValueValidator(100)],
)
use_of_produce_comment = models.TextField(
verbose_name=_("Comment on use of produce"), blank=True
)
in_country_processing = models.NullBooleanField(
_("In country processing of produce")
)
in_country_processing_comment = models.TextField(
_("Comment on in country processing of produce"), blank=True
)
in_country_processing_facilities = models.TextField(
_(
"Processing facilities / production infrastructure of the project"
" (e.g. oil mill, ethanol distillery, biomass power plant etc.)"
),
blank=True,
)
in_country_end_products = models.TextField(
_("In-country end products of the project"), blank=True
)
"""Water"""
water_extraction_envisaged = models.NullBooleanField(
_("Water extraction envisaged")
)
water_extraction_envisaged_comment = models.TextField(
_("Comment on water extraction envisaged"), blank=True
)
WATER_SOURCE_CHOICES = (
("GROUNDWATER", "Groundwater"),
("SURFACE_WATER", "Surface water"),
("RIVER", "River"),
("LAKE", "Lake"),
)
source_of_water_extraction = ArrayField(
models.CharField(max_length=100),
verbose_name=_("Source of water extraction"),
choices=WATER_SOURCE_CHOICES,
blank=True,
null=True,
)
source_of_water_extraction_comment = models.TextField(
_("Comment on source of water extraction"), blank=True
)
how_much_do_investors_pay_comment = models.TextField(
_("Comment on how much do investors pay for water"), blank=True
)
water_extraction_amount = models.IntegerField(
_("Water extraction amount"), help_text=_("m3/year"), blank=True, null=True
)
water_extraction_amount_comment = models.TextField(
_("Comment on how much water is extracted"), blank=True
)
use_of_irrigation_infrastructure = models.NullBooleanField(
_("Use of irrigation infrastructure")
)
use_of_irrigation_infrastructure_comment = models.TextField(
_("Comment on use of irrigation infrastructure"), blank=True
)
water_footprint = models.TextField(
_("Water footprint of the investment project"), blank=True
)
""" Gender-related info """
gender_related_information = models.TextField(
_("Comment on gender-related info"), blank=True
)
""" Guidelines & Principles """
YPN_CHOICES = (("YES", "Yes"), ("PARTIALLY", "Partially"), ("NO", "No"))
vggt_applied = models.CharField(
_(
"Application of Voluntary Guidelines on the Responsible Governance of Tenure (VGGT)"
),
max_length=100,
choices=YPN_CHOICES,
blank=True,
null=True,
)
vggt_applied_comment = models.TextField(_("Comment on VGGT"), blank=True)
prai_applied = models.CharField(
_("Application of Principles for Responsible Agricultural Investments (PRAI)"),
max_length=100,
choices=YPN_CHOICES,
blank=True,
null=True,
)
prai_applied_comment = models.TextField(_("Comment on PRAI"), blank=True)
""" Overall comment """
overall_comment = models.TextField(_("Overall comment"), blank=True)
""" Meta Info """
fully_updated = models.BooleanField(default=False)
confidential = models.BooleanField(default=False)
CONFIDENTIAL_REASON_CHOICES = (
("TEMPORARY_REMOVAL", _("Temporary removal from PI after criticism")),
("RESEARCH_IN_PROGRESS", _("Research in progress")),
("LAND_OBSERVATORY_IMPORT", _("Land Observatory Import")),
)
confidential_reason = models.CharField(
max_length=100, choices=CONFIDENTIAL_REASON_CHOICES, null=True, blank=True
)
confidential_comment = models.TextField(
_("Comment why this deal is private"), blank=True
)
# Meta info
"previous_identifier"
"assign_to_user"
"tg_feedback_comment"
""" # CALCULATED FIELDS # """
is_public = models.BooleanField(default=False)
has_known_investor = models.BooleanField(default=False)
NOT_PUBLIC_REASON_CHOICES = (
("CONFIDENTIAL", "Confidential flag"),
("NO_COUNTRY", "No country"),
("HIGH_INCOME_COUNTRY", "High-income country"),
("NO_DATASOURCES", "No datasources"),
("NO_OPERATING_COMPANY", "No operating company"),
("NO_KNOWN_INVESTOR", "No known investor"),
)
not_public_reason = models.CharField(
max_length=100, blank=True, choices=NOT_PUBLIC_REASON_CHOICES
)
parent_companies = models.ManyToManyField(
Investor, verbose_name=_("Parent companies"), related_name="child_deals"
)
top_investors = models.ManyToManyField(
Investor, verbose_name=_("Top parent companies"), related_name="+"
)
current_contract_size = models.DecimalField(
max_digits=18,
decimal_places=2,
blank=True,
null=True,
)
current_production_size = models.DecimalField(
max_digits=18,
decimal_places=2,
blank=True,
null=True,
)
current_intention_of_investment = ArrayField(
models.CharField(max_length=100),
choices=INTENTION_CHOICES,
blank=True,
null=True,
)
current_negotiation_status = models.CharField(
choices=NEGOTIATION_STATUS_CHOICES, max_length=100, blank=True, null=True
)
current_implementation_status = models.CharField(
choices=IMPLEMENTATION_STATUS_CHOICES, max_length=100, blank=True, null=True
)
current_crops = ArrayField(models.CharField(max_length=100), blank=True, null=True)
current_animals = ArrayField(
models.CharField(max_length=100), blank=True, null=True
)
current_mineral_resources = ArrayField(
models.CharField(max_length=100), blank=True, null=True
)
deal_size = models.DecimalField(
max_digits=18,
decimal_places=2,
blank=True,
null=True,
)
initiation_year = models.IntegerField(
blank=True, null=True, validators=[MinValueValidator(1970)]
)
forest_concession = models.BooleanField(default=False)
transnational = models.NullBooleanField()
geojson = JSONField(blank=True, null=True)
""" # Status """
STATUS_DRAFT = 1
STATUS_LIVE = 2
STATUS_UPDATED = 3
STATUS_DELETED = 4
STATUS_CHOICES = (
(STATUS_DRAFT, _("Draft")),
(STATUS_LIVE, _("Live")),
(STATUS_UPDATED, _("Updated")),
(STATUS_DELETED, _("Deleted")),
)
DRAFT_STATUS_DRAFT = 1
DRAFT_STATUS_REVIEW = 2
DRAFT_STATUS_ACTIVATION = 3
DRAFT_STATUS_REJECTED = 4
DRAFT_STATUS_TO_DELETE = 5
DRAFT_STATUS_CHOICES = (
(DRAFT_STATUS_DRAFT, _("Draft")),
(DRAFT_STATUS_REVIEW, _("Review")),
(DRAFT_STATUS_ACTIVATION, _("Activation")),
(DRAFT_STATUS_REJECTED, _("Rejected")),
(DRAFT_STATUS_TO_DELETE, _("To Delete")),
)
status = models.IntegerField(choices=STATUS_CHOICES, default=1)
draft_status = models.IntegerField(
choices=DRAFT_STATUS_CHOICES, null=True, blank=True
)
""" # Timestamps """
created_at = models.DateTimeField(_("Created"), default=timezone.now)
modified_at = models.DateTimeField(_("Last update"), blank=True, null=True)
fully_updated_at = models.DateTimeField(
_("Last full update"), blank=True, null=True
)
objects = DealQuerySet.as_manager()
def __str__(self):
if self.country:
return f"#{self.id} in {self.country}"
return f"#{self.id}"
@transaction.atomic
def save(
self, recalculate_independent=True, recalculate_dependent=True, *args, **kwargs
):
if recalculate_independent:
self.current_contract_size = self._get_current("contract_size", "area")
self.current_production_size = self._get_current("production_size", "area")
self.current_intention_of_investment = self._get_current(
"intention_of_investment", "choices"
)
self.current_negotiation_status = self._get_current(
"negotiation_status", "choice"
)
self.current_implementation_status = self._get_current(
"implementation_status", "choice"
)
self.current_crops = self._get_current("crops", "choices")
self.current_animals = self._get_current("animals", "choices")
self.current_mineral_resources = self._get_current(
"mineral_resources", "choices"
)
# these only depend on the _get_current calls right above.
self.deal_size = self._calculate_deal_size()
self.initiation_year = self._calculate_initiation_year()
self.forest_concession = self._calculate_forest_concession()
if recalculate_dependent:
# With the help of signals these fields are recalculated on changes to:
# Location, Contract, DataSource
# as well as Investor and InvestorVentureInvolvement
self.has_known_investor = not self._has_no_known_investor()
self.not_public_reason = self._calculate_public_state()
self.is_public = self.not_public_reason == ""
            # Note: this can raise for unsaved Deals, because M2M relations
            # require the Deal to have a primary key before investors can be set.
self._calculate_parent_companies()
self.transnational = self._calculate_transnational()
self.geojson = self._combine_geojson()
super().save(*args, **kwargs)
def save_revision(self, date_created, user, comment) -> Revision:
rev = Revision.objects.create(
date_created=date_created,
user=user,
comment=comment,
)
for submodel in (
list(self.locations.all())
+ list(self.contracts.all())
+ list(self.datasources.all())
):
Version.create_from_obj(submodel, rev.id)
Version.create_from_obj(self, rev.id)
return rev
def _get_current(self, attribute, field):
attributes: list = self.__getattribute__(attribute)
if not attributes:
return None
# prioritize "current" checkbox if present
current = [x for x in attributes if x.get("current")]
if current:
return current[0].get(field)
        else:
            # No entry is flagged "current"; this indicates inconsistent data.
            raise Exception(
                f"Deal {self}: expected a 'current' entry in "
                f"'{attribute}', got: {attributes}"
            )
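    # Illustrative note (not part of the original model): the JSON attribute
    # lists consumed by _get_current are expected to look roughly like this,
    # with exactly one entry carrying the "current" flag:
    #
    #   self.contract_size = [
    #       {"date": "2010", "area": 1000.0},
    #       {"date": "2012", "area": 1500.0, "current": True},
    #   ]
    #
    # _get_current("contract_size", "area") would then return 1500.0.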
def _calculate_deal_size(self):
negotiation_status = self.current_negotiation_status
if not negotiation_status:
return 0
intended_size = self.intended_size or 0.0
contract_size = self.current_contract_size or 0.0
production_size = self.current_production_size or 0.0
# 1) IF Negotiation status IS Intended
if negotiation_status in (
"EXPRESSION_OF_INTEREST",
"UNDER_NEGOTIATION",
"MEMORANDUM_OF_UNDERSTANDING",
):
# USE Intended size OR Contract size OR Production size (in the given order)
value = intended_size or contract_size or production_size
# 2) IF Negotiation status IS Concluded
elif negotiation_status in ("ORAL_AGREEMENT", "CONTRACT_SIGNED"):
# USE Contract size or Production size (in the given order)
value = contract_size or production_size
# 3) IF Negotiation status IS Failed (Negotiations failed)
elif negotiation_status == "NEGOTIATIONS_FAILED":
# USE Intended size OR Contract size OR Production size (in the given order)
value = intended_size or contract_size or production_size
# 4) IF Negotiation status IS Failed (Contract canceled)
elif negotiation_status == "CONTRACT_CANCELED":
# USE Contract size OR Production size (in the given order)
value = contract_size or production_size
# 5) IF Negotiation status IS Contract expired
elif negotiation_status == "CONTRACT_EXPIRED":
# USE Contract size OR Production size (in the given order)
value = contract_size or production_size
# 6) IF Negotiation status IS Change of ownership
elif negotiation_status == "CHANGE_OF_OWNERSHIP":
# USE Contract size OR Production size (in the given order)
value = contract_size or production_size
else:
value = 0.0
return value
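    # Illustrative worked example (assumed values, not from the original):
    # with negotiation status CONTRACT_SIGNED, intended_size=2000.0,
    # contract_size=None and production_size=800.0, rule 2 applies and
    # deal_size becomes 800.0 (contract size missing, production size used).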
def _calculate_initiation_year(self):
        valid_negotiation_status = (
[
int(x["date"][:4])
for x in self.negotiation_status
if x.get("date")
and x["choice"]
in (
"UNDER_NEGOTIATION",
"ORAL_AGREEMENT",
"CONTRACT_SIGNED",
"NEGOTIATIONS_FAILED",
"CONTRACT_CANCELED",
)
]
if self.negotiation_status
else []
)
valid_implementation_status = (
[
int(x["date"][:4])
for x in self.implementation_status
if x.get("date")
and x["choice"]
in (
"STARTUP_PHASE",
"IN_OPERATION",
"PROJECT_ABANDONED",
)
]
if self.implementation_status
else []
)
        dates = valid_implementation_status + valid_negotiation_status
return min(dates) if dates else None
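    # Illustrative example (assumed values): a negotiation entry dated
    # "2009-05" with choice CONTRACT_SIGNED and an implementation entry dated
    # "2011" with choice IN_OPERATION yield initiation_year == 2009, the
    # minimum of all qualifying years.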
def _calculate_forest_concession(self) -> bool:
return bool(
self.nature_of_deal
and "CONCESSION" in self.nature_of_deal
and self.current_intention_of_investment
and "FOREST_LOGGING" in self.current_intention_of_investment
)
def _calculate_transnational(self) -> Optional[bool]:
if not self.country_id:
# unknown if we have no target country
return None
# by definition True, if no operating company exists (or it is deleted)
if not self.operating_company_id:
return True
oc = Investor.objects.get(id=self.operating_company_id)
if oc.status == Investor.STATUS_DELETED:
return True
investors_countries = self.parent_companies.exclude(
country_id=None
).values_list("country_id", flat=True)
if not len(investors_countries):
            # deals without investors, or without investor countries,
            # are treated as transnational
return True
# `True` if we have investors in other countries else `False`
return bool(set(investors_countries) - {self.country_id})
def _combine_geojson(self, locations=None):
locs = locations if locations else self.locations.all()
features = []
for loc in locs:
if loc.point:
point = {
"type": "Feature",
"geometry": (json.loads(loc.point.geojson)),
"properties": {
"id": loc.id,
"name": loc.name,
"type": "point",
"spatial_accuracy": loc.level_of_accuracy,
},
}
features += [point]
if loc.areas:
feats = loc.areas["features"]
for feat in feats:
feat["properties"]["name"] = loc.name
feat["properties"]["id"] = loc.id
if (
feat["geometry"]["type"] == "MultiPolygon"
and len(feat["geometry"]["coordinates"]) == 1
):
feat["geometry"]["type"] = "Polygon"
feat["geometry"]["coordinates"] = feat["geometry"][
"coordinates"
][0]
features += [feat]
if not features:
return None
return {"type": "FeatureCollection", "features": features}
def _calculate_parent_companies(self) -> None:
if self.operating_company_id:
oc = Investor.objects.filter(
id=self.operating_company_id,
status__in=[Investor.STATUS_LIVE, Investor.STATUS_UPDATED],
).first()
if oc:
parent_companies = oc.get_parent_companies()
self.parent_companies.set(parent_companies)
top_inv = [x for x in parent_companies if x.is_top_investor]
self.top_investors.set(top_inv)
return
self.parent_companies.set([])
self.top_investors.set([])
def _calculate_public_state(self) -> str:
"""
:return: A string with a value if not public, or empty if public
"""
if self.confidential:
# 1. Flag "confidential"
return "CONFIDENTIAL"
if not self.country_id:
# No Country
return "NO_COUNTRY"
        # The explicit Country query below is intentional: `country` is not
        # necessarily populated even when `country_id` is set.
if Country.objects.get(id=self.country_id).high_income:
# High Income Country
return "HIGH_INCOME_COUNTRY"
if not self.datasources.exists():
# No DataSource
return "NO_DATASOURCES"
if not self.operating_company_id:
# 3. No operating company
return "NO_OPERATING_COMPANY"
if not self.has_known_investor:
# 4. Unknown operating company AND no known operating company parents
return "NO_KNOWN_INVESTOR"
return ""
def _has_no_known_investor(self) -> bool:
if not self.operating_company_id:
return True
oc = Investor.objects.get(id=self.operating_company_id)
# if the Operating Company is known, we have a known investor and exit.
if not oc.is_actually_unknown:
return False
# only if no known Investor exists, we return True
return not oc.investors.filter(investor__is_actually_unknown=False).exists()
class DealParentCompanies(models.Model):
deal = models.ForeignKey(Deal, on_delete=models.CASCADE, related_name="+")
investor = models.ForeignKey(Investor, on_delete=models.CASCADE, related_name="+")
class Meta:
managed = False
db_table = "landmatrix_deal_parent_companies"
def __str__(self):
return f"#{self.deal_id} - {self.investor.name}"
class DealTopInvestors(models.Model):
deal = models.ForeignKey(Deal, on_delete=models.CASCADE, related_name="+")
investor = models.ForeignKey(Investor, on_delete=models.CASCADE, related_name="+")
class Meta:
managed = False
db_table = "landmatrix_deal_top_investors"
def __str__(self):
return f"#{self.deal_id} - {self.investor.name}"
|
agpl-3.0
| -6,320,456,179,746,793,000
| 33.431193
| 102
| 0.579847
| false
|
zentralopensource/zentral
|
zentral/utils/mt_models.py
|
1
|
10233
|
import copy
from datetime import datetime
import hashlib
from django.core.exceptions import FieldDoesNotExist
from django.contrib.postgres.fields import JSONField
from django.utils.functional import cached_property
from django.utils.timezone import is_aware, make_naive
from django.db import IntegrityError, models, transaction
class MTOError(Exception):
def __init__(self, message):
self.message = message
class Hasher(object):
def __init__(self):
self.fields = {}
@staticmethod
def is_empty_value(v):
return v is None or v == [] or v == {}
def add_field(self, k, v):
if not isinstance(k, str) or not k:
raise ValueError("Invalid field name {}".format(k))
if k in self.fields:
raise ValueError("Field {} already added".format(k))
if self.is_empty_value(v):
return
elif isinstance(v, int):
v = str(v)
elif isinstance(v, datetime):
if is_aware(v):
v = make_naive(v)
v = v.isoformat()
elif isinstance(v, list):
            assert all(isinstance(e, str) and len(e) == 40 for e in v)
elif not isinstance(v, str):
raise ValueError("Invalid field value {} for field {}".format(v, k))
self.fields[k] = v
def hexdigest(self):
h = hashlib.sha1()
for k in sorted(self.fields.keys()):
h.update(k.encode('utf-8'))
v = self.fields[k]
if isinstance(v, bytes):
h.update(v)
elif isinstance(v, str):
h.update(v.encode('utf-8'))
elif isinstance(v, list):
for e in sorted(v):
h.update(e.encode('utf-8'))
return h.hexdigest()
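# Minimal usage sketch (not part of the original module); shows how Hasher
# produces a stable digest over named fields. The helper name is made up
# for illustration only.
def _hasher_example():
    h = Hasher()
    h.add_field("name", "munki")
    h.add_field("count", 3)        # ints are stringified before hashing
    h.add_field("empty", None)     # empty values are silently skipped
    return h.hexdigest()           # 40-character sha1 hex digest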
def prepare_commit_tree(tree):
if not isinstance(tree, dict):
raise MTOError("Commit tree is not a dict")
if tree.get('mt_hash', None):
return
h = Hasher()
for k, v in list(tree.items()):
if h.is_empty_value(v):
tree.pop(k)
else:
if isinstance(v, dict):
prepare_commit_tree(v)
v = v['mt_hash']
elif isinstance(v, list):
hash_list = []
for subtree in v:
prepare_commit_tree(subtree)
subtree_mt_hash = subtree['mt_hash']
if subtree_mt_hash in hash_list:
raise MTOError("Duplicated subtree in key {}".format(k))
else:
hash_list.append(subtree_mt_hash)
v = hash_list
elif isinstance(v, datetime) and is_aware(v):
tree[k] = v = make_naive(v)
h.add_field(k, v)
tree['mt_hash'] = h.hexdigest()
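# Illustrative sketch (assumed data): prepare_commit_tree mutates the tree
# in place, dropping empty values and adding an 'mt_hash' to every subtree:
#
#   tree = {"name": "pkg", "tags": [], "source": {"module": "io"}}
#   prepare_commit_tree(tree)
#   # tree == {"name": "pkg",
#   #          "source": {"module": "io", "mt_hash": "..."},
#   #          "mt_hash": "..."}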
def cleanup_commit_tree(tree):
tree.pop('mt_hash', None)
for k, v in tree.items():
if isinstance(v, dict):
cleanup_commit_tree(v)
elif isinstance(v, list):
for subtree in v:
cleanup_commit_tree(subtree)
class MTObjectManager(models.Manager):
def commit(self, tree, **extra_obj_save_kwargs):
prepare_commit_tree(tree)
created = False
try:
obj = self.get(mt_hash=tree['mt_hash'])
except self.model.DoesNotExist:
obj = self.model()
m2m_fields = []
for k, v in tree.items():
if k == 'mt_hash': # special excluded field
obj.mt_hash = v
elif isinstance(v, dict):
try:
f = obj.get_mt_field(k, many_to_one=True)
except MTOError:
                    # not a many-to-one relation; possibly a JSONField
f = obj.get_mt_field(k)
if isinstance(f, JSONField):
t = copy.deepcopy(v)
cleanup_commit_tree(t)
setattr(obj, k, t)
else:
raise MTOError('Cannot set field "{}" to dict value'.format(k))
else:
fk_obj, _ = f.related_model.objects.commit(v)
setattr(obj, k, fk_obj)
elif isinstance(v, list):
f = obj.get_mt_field(k, many_to_many=True)
                objs = []
                for sv in v:
                    m2m_obj, _ = f.related_model.objects.commit(sv)
                    objs.append(m2m_obj)
                m2m_fields.append((k, objs))
else:
obj.get_mt_field(k)
setattr(obj, k, v)
try:
with transaction.atomic():
obj.save(**extra_obj_save_kwargs)
                for k, objs in m2m_fields:
                    getattr(obj, k).set(objs)
obj.full_clean()
except IntegrityError as integrity_error:
# the object has been concurrently created ?
try:
obj = self.get(mt_hash=tree['mt_hash'])
except self.model.DoesNotExist:
# that was not a key error:
raise integrity_error
else:
                if obj.hash(recursive=False) != obj.mt_hash:
                    raise MTOError('Obj {} hash mismatch!'.format(obj))
created = True
return obj, created
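# Illustrative usage sketch (hypothetical model name): committing the same
# tree twice is idempotent thanks to the mt_hash cache lookup.
#
#   obj, created = SomeMTModel.objects.commit(tree)   # created == True
#   obj, created = SomeMTModel.objects.commit(tree)   # created == False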
class AbstractMTObject(models.Model):
mt_hash = models.CharField(max_length=40, unique=True)
mt_created_at = models.DateTimeField(auto_now_add=True)
mt_excluded_fields = None
class Meta:
abstract = True
objects = MTObjectManager()
@cached_property
def mt_excluded_field_set(self):
        excluded = ['id', 'mt_hash', 'mt_created_at']
        if self.mt_excluded_fields:
            excluded.extend(self.mt_excluded_fields)
        return set(excluded)
def get_mt_field(self, name, many_to_one=None, many_to_many=None):
if name in self.mt_excluded_field_set:
raise MTOError("Field '{}' of {} is excluded".format(name,
self._meta.object_name))
try:
f = self._meta.get_field(name)
except FieldDoesNotExist as e:
raise MTOError(str(e))
if f.auto_created:
raise MTOError("Field '{}' of {} auto created".format(name,
self._meta.object_name))
        if many_to_one:
            assert many_to_many is None
            many_to_many = False
        if many_to_many:
            assert many_to_one is None
            many_to_one = False
        if f.many_to_one != many_to_one or f.many_to_many != many_to_many:
raise MTOError("Field '{}' of {} has "
"many_to_one: {}, many_to_many: {}".format(name,
self._meta.object_name,
f.many_to_one, f.many_to_many))
return f
def _iter_mto_fields(self):
for f in self._meta.get_fields():
if f.name not in self.mt_excluded_field_set and not f.auto_created:
v = getattr(self, f.name)
if f.many_to_many:
v = v.all()
yield f, v
def hash(self, recursive=True):
h = Hasher()
for f, v in self._iter_mto_fields():
if f.many_to_one and v:
if recursive:
v = v.hash()
else:
v = v.mt_hash
elif f.many_to_many:
if recursive:
v = [mto.hash() for mto in v]
else:
v = [mto.mt_hash for mto in v]
elif isinstance(f, JSONField) and v:
t = copy.deepcopy(v)
prepare_commit_tree(t)
v = t['mt_hash']
h.add_field(f.name, v)
return h.hexdigest()
def serialize(self, exclude=None):
d = {}
for f, v in self._iter_mto_fields():
if exclude and f.name in exclude:
continue
if f.many_to_one and v:
v = v.serialize()
elif f.many_to_many:
v = [mto.serialize() for mto in v]
elif isinstance(v, datetime):
v = v.isoformat()
elif v and not isinstance(v, (str, int, dict)):
raise ValueError("Can't serialize {}.{} value of type {}".format(self._meta.object_name,
f.name, type(v)))
if Hasher.is_empty_value(v):
continue
else:
d[f.name] = v
return d
def diff(self, mto):
if mto._meta.model != self._meta.model:
raise MTOError("Can only compare to an object of the same model")
diff = {}
# if same objects or same hash, we can optimize and return an empty diff
if self == mto or self.mt_hash == mto.mt_hash:
return diff
for f, v in self._iter_mto_fields():
fdiff = {}
if f.many_to_many:
mto_v_qs = getattr(mto, f.name).all()
# TODO: better
for o in v.exclude(pk__in=[o.id for o in mto_v_qs]):
fdiff.setdefault('added', []).append(o.serialize())
for o in mto_v_qs.exclude(pk__in=[o.id for o in v]):
fdiff.setdefault('removed', []).append(o.serialize())
else:
mto_v = getattr(mto, f.name)
if v != mto_v:
if isinstance(v, AbstractMTObject):
v = v.serialize()
if isinstance(mto_v, AbstractMTObject):
mto_v = mto_v.serialize()
if mto_v:
fdiff['removed'] = mto_v
if v:
fdiff['added'] = v
if fdiff:
diff[f.name] = fdiff
return diff
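# Illustrative diff output (assumed objects): a dict of per-field entries,
# each carrying 'added' and/or 'removed' keys, e.g.
#   {"version": {"removed": "1.0", "added": "1.1"},
#    "tags": {"added": [{"name": "beta"}]}}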
|
apache-2.0
| -2,720,717,764,024,205,000
| 36.621324
| 104
| 0.467703
| false
|
psss/python-nitrate
|
source/immutable.py
|
1
|
50597
|
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Python API for the Nitrate test case management system.
# Copyright (c) 2012 Red Hat, Inc. All rights reserved.
# Author: Petr Splichal <psplicha@redhat.com>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
Immutable Nitrate objects
"""
import re
from six.moves import xmlrpc_client as xmlrpclib
import nitrate.config as config
from nitrate.config import log
from nitrate.base import Nitrate, NitrateNone, _getter, _idify
from nitrate.utils import pretty, color
from nitrate.xmlrpc_driver import NitrateError
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Build Class
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Build(Nitrate):
""" Product build """
# Local cache of Build
_cache = {}
# List of all object attributes (used for init & expiration)
_attributes = ["name", "product"]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Build Properties
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Read-only properties
id = property(_getter("id"), doc="Build id.")
name = property(_getter("name"), doc="Build name.")
product = property(_getter("product"), doc="Relevant product.")
@classmethod
def _cache_lookup(cls, id, **kwargs):
""" Look up cached objects, return found instance and search key """
# Name and product check
if "product" in kwargs and ("name" in kwargs or "build" in kwargs):
product = kwargs.get("product")
if isinstance(product, Product):
product = product.name
name = kwargs.get("name", kwargs.get("build"))
return cls._cache["{0}---in---{1}".format(name, product)], name
return super(Build, cls)._cache_lookup(id, **kwargs)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Build Special
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __init__(self, id=None, name=None, product=None, **kwargs):
""" Initialize by build id or product and build name """
# Backward compatibility for 'build' argument (now called 'name')
name = name if name is not None else kwargs.get("build")
# Initialize (unless already done)
id, ignore, inject, initialized = self._is_initialized(id or name)
if initialized: return
Nitrate.__init__(self, id)
# If inject given, fetch build data from it
if inject:
self._fetch(inject)
# Initialized by build name and product
elif name is not None and product is not None:
self._name = name
# Detect product format
if isinstance(product, Product):
self._product = product
else:
self._product = Product(product)
# Index by name-product (only when the product name is known)
if self.product._name is not NitrateNone:
self._index("{0}---in---{1}".format(
self.name, self.product.name))
# Otherwise just check that the id was provided
elif not id:
raise NitrateError("Need either build id or both build name "
"and product to initialize the Build object.")
def __unicode__(self):
""" Build name for printing """
return self.name
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Build Methods
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _fetch(self, inject=None):
""" Get the missing build data """
Nitrate._fetch(self, inject)
# Directly fetch from the initial object dict
if inject is not None:
log.info("Processing build ID#{0} inject".format(
inject["build_id"]))
# Search by build id
elif self._id is not NitrateNone:
try:
log.info("Fetching build " + self.identifier)
inject = self._server.Build.get(self.id)
except xmlrpclib.Fault as error:
log.debug(error)
raise NitrateError(
"Cannot find build for " + self.identifier)
# Search by build name and product
else:
try:
log.info(u"Fetching build '{0}' of '{1}'".format(
self.name, self.product.name))
inject = self._server.Build.check_build(
self.name, self.product.id)
self._id = inject["build_id"]
except xmlrpclib.Fault as error:
log.debug(error)
raise NitrateError("Build '{0}' not found in '{1}'".format(
self.name, self.product.name))
except KeyError:
if "args" in inject:
log.debug(inject["args"])
raise NitrateError("Build '{0}' not found in '{1}'".format(
self.name, self.product.name))
# Initialize data from the inject and index into cache
log.debug("Initializing Build ID#{0}".format(inject["build_id"]))
log.data(pretty(inject))
self._inject = inject
self._id = inject["build_id"]
self._name = inject["name"]
self._product = Product(
{"id": inject["product_id"], "name": inject["product"]})
self._index("{0}---in---{1}".format(self.name, self.product.name))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Category Class
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Category(Nitrate):
""" Test case category """
# Local cache of Category objects indexed by category id
_cache = {}
# List of all object attributes (used for init & expiration)
_attributes = ["name", "product", "description"]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Category Properties
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Read-only properties
id = property(_getter("id"), doc="Category id.")
name = property(_getter("name"), doc="Category name.")
product = property(_getter("product"), doc="Relevant product.")
description = property(_getter("description"), doc="Category description.")
@property
def synopsis(self):
""" Short category summary (including product info) """
return "{0}, {1}".format(self.name, self.product)
@classmethod
def _cache_lookup(cls, id, **kwargs):
""" Look up cached objects, return found instance and search key """
# Name and product check
if "product" in kwargs and ("name" in kwargs or "category" in kwargs):
product = kwargs.get("product")
if isinstance(product, Product):
product = product.name
name = kwargs.get("name", kwargs.get("category"))
return cls._cache["{0}---in---{1}".format(name, product)], name
return super(Category, cls)._cache_lookup(id, **kwargs)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Category Special
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __init__(self, id=None, name=None, product=None, **kwargs):
""" Initialize by category id or category name and product """
# Backward compatibility for 'category' argument (now called 'name')
name = name if name is not None else kwargs.get("category")
# Initialize (unless already done)
id, ignore, inject, initialized = self._is_initialized(id or name)
if initialized: return
Nitrate.__init__(self, id)
# If inject given, fetch tag data from it
if inject:
self._fetch(inject)
# Initialized by category name and product
elif name is not None and product is not None:
self._name = name
# Detect product format
if isinstance(product, Product):
self._product = product
else:
self._product = Product(product)
# Index by name-product (only when the product name is known)
if self.product._name is not NitrateNone:
self._index("{0}---in---{1}".format(
self.name, self.product.name))
# Otherwise just check that the id was provided
elif not id:
raise NitrateError("Need either category id or both category "
"name and product to initialize the Category object.")
def __unicode__(self):
""" Category name for printing """
return self.name
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Category Methods
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _fetch(self, inject=None):
""" Get the missing category data """
Nitrate._fetch(self, inject)
# Directly fetch from the initial object dict
if inject is not None:
log.info("Processing category ID#{0} inject".format(inject["id"]))
# Search by category id
elif self._id is not NitrateNone:
try:
log.info("Fetching category {0}".format(self.identifier))
inject = self._server.Product.get_category(self.id)
except xmlrpclib.Fault as error:
log.debug(error)
raise NitrateError(
"Cannot find category for " + self.identifier)
# Search by category name and product
else:
try:
log.info(u"Fetching category '{0}' of '{1}'".format(
self.name, self.product.name))
inject = self._server.Product.check_category(
self.name, self.product.id)
except xmlrpclib.Fault as error:
log.debug(error)
raise NitrateError("Category '{0}' not found in"
" '{1}'".format(self.name, self.product.name))
# Initialize data from the inject and index into cache
log.debug("Initializing category ID#{0}".format(inject["id"]))
log.data(pretty(inject))
self._inject = inject
self._id = inject["id"]
self._name = inject["name"]
self._product = Product(
{"id": inject["product_id"], "name": inject["product"]})
self._index("{0}---in---{1}".format(self.name, self.product.name))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# PlanType Class
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class PlanType(Nitrate):
""" Plan type """
# Local cache of PlanType objects indexed by plan type id
_cache = {}
# By default we cache PlanType objects for ever
_expiration = config.NEVER_EXPIRE
# List of all object attributes (used for init & expiration)
_attributes = ["name"]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# PlanType Properties
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Read-only properties
id = property(_getter("id"), doc="Test plan type id")
name = property(_getter("name"), doc="Test plan type name")
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# PlanType Decorated
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@classmethod
def _cache_lookup(cls, id, **kwargs):
""" Look up cached objects, return found instance and search key """
# Search cache by plan type name
if "name" in kwargs:
return cls._cache[kwargs["name"]], kwargs["name"]
        # Otherwise perform default search by id
return super(PlanType, cls)._cache_lookup(id, **kwargs)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# PlanType Special
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __init__(self, id=None, name=None):
""" Initialize by test plan type id or name """
# Initialize (unless already done)
id, name, inject, initialized = self._is_initialized(id or name)
if initialized: return
Nitrate.__init__(self, id)
# If inject given, fetch data from it
if inject:
self._fetch(inject)
# Initialize by name
elif name is not None:
self._name = name
self._index(name)
# Otherwise just check that the test plan type id was provided
elif not id:
raise NitrateError(
"Need either id or name to initialize the PlanType object")
def __unicode__(self):
""" PlanType name for printing """
return self.name
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# PlanType Methods
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _fetch(self, inject=None):
""" Get the missing test plan type data """
Nitrate._fetch(self, inject)
# Directly fetch from the initial object dict
if inject is not None:
log.info("Processing PlanType ID#{0} inject".format(inject["id"]))
# Search by test plan type id
elif self._id is not NitrateNone:
try:
log.info("Fetching test plan type " + self.identifier)
inject = self._server.TestPlan.get_plan_type(self.id)
except xmlrpclib.Fault as error:
log.debug(error)
raise NitrateError(
"Cannot find test plan type for " + self.identifier)
# Search by test plan type name
else:
try:
log.info(u"Fetching test plan type '{0}'".format(self.name))
inject = self._server.TestPlan.check_plan_type(self.name)
except xmlrpclib.Fault as error:
log.debug(error)
raise NitrateError("PlanType '{0}' not found".format(
self.name))
# Initialize data from the inject and index into cache
log.debug("Initializing PlanType ID#{0}".format(inject["id"]))
log.data(pretty(inject))
self._inject = inject
self._id = inject["id"]
self._name = inject["name"]
self._index(self.name)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Priority Class
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Priority(Nitrate):
""" Test case priority """
_priorities = ['P0', 'P1', 'P2', 'P3', 'P4', 'P5']
def __init__(self, priority):
"""
Takes numeric priority id (1-5) or priority name which is one of:
P1, P2, P3, P4, P5
"""
if isinstance(priority, int):
if priority < 1 or priority > 5:
raise NitrateError(
"Not a valid Priority id: '{0}'".format(priority))
self._id = priority
else:
try:
self._id = self._priorities.index(priority)
except ValueError:
raise NitrateError("Invalid priority '{0}'".format(priority))
def __unicode__(self):
""" Return priority name for printing """
return self.name
@property
def id(self):
""" Numeric priority id """
return self._id
@property
def name(self):
""" Human readable priority name """
return self._priorities[self._id]
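    # Illustrative examples (not in the original source):
    #   Priority(3).name  -> 'P3'
    #   Priority('P2').id -> 2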
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Product Class
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Product(Nitrate):
""" Product """
# Local cache of Product
_cache = {}
# List of all object attributes (used for init & expiration)
_attributes = ["name"]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Product Properties
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Read-only properties
id = property(_getter("id"), doc="Product id")
name = property(_getter("name"), doc="Product name")
@classmethod
def _cache_lookup(cls, id, **kwargs):
""" Look up cached objects, return found instance and search key """
# Search the cache by product name
if "name" in kwargs:
name = kwargs.get("name")
return cls._cache[name], name
return super(Product, cls)._cache_lookup(id, **kwargs)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Product Special
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __init__(self, id=None, name=None):
"""
Initialize the Product by id or name
Examples:
Product(60)
Product(id=60)
Product("Red Hat Enterprise Linux 6")
Product(name="Red Hat Enterprise Linux 6")
"""
# Initialize (unless already done)
id, name, inject, initialized = self._is_initialized(id or name)
if initialized: return
Nitrate.__init__(self, id)
# If inject given, fetch test case data from it
if inject:
self._fetch(inject)
# Initialize by name
elif name is not None:
self._name = name
self._index(name)
# Otherwise just check that the product id was provided
elif not id:
raise NitrateError("Need id or name to initialize Product")
def __unicode__(self):
""" Product name for printing """
return self.name
@staticmethod
def search(**query):
""" Search for products """
return [Product(hash["id"])
for hash in Nitrate()._server.Product.filter(dict(query))]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Product Methods
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _fetch(self, inject=None):
""" Fetch product data from the server """
Nitrate._fetch(self, inject)
# Directly fetch from the initial object dict
if inject is not None:
log.debug("Initializing Product ID#{0}".format(inject["id"]))
log.data(pretty(inject))
self._id = inject["id"]
self._name = inject["name"]
# Search by product id
elif self._id is not NitrateNone:
try:
log.info("Fetching product " + self.identifier)
inject = self._server.Product.filter({'id': self.id})[0]
log.debug("Initializing product " + self.identifier)
log.data(pretty(inject))
self._inject = inject
self._name = inject["name"]
except IndexError:
raise NitrateError(
"Cannot find product for " + self.identifier)
# Search by product name
else:
try:
log.info(u"Fetching product '{0}'".format(self.name))
inject = self._server.Product.filter({'name': self.name})[0]
log.debug(u"Initializing product '{0}'".format(self.name))
log.data(pretty(inject))
self._inject = inject
self._id = inject["id"]
except IndexError:
raise NitrateError(
"Cannot find product for '{0}'".format(self.name))
# Index the fetched object into cache
self._index(self.name)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# PlanStatus Class
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class PlanStatus(Nitrate):
""" Test plan status (is_active field) """
_statuses = ["DISABLED", "ENABLED"]
_colors = ["red", "green"]
def __init__(self, status):
"""
Takes bool, numeric status id or status name.
0 ... False ... DISABLED
1 ... True .... ENABLED
"""
if isinstance(status, int):
            if status not in [0, 1]:
raise NitrateError(
"Not a valid plan status id: '{0}'".format(status))
# Save id (and convert possible bool to int)
self._id = int(status)
else:
try:
self._id = self._statuses.index(status)
except ValueError:
raise NitrateError("Invalid plan status '{0}'".format(status))
def __unicode__(self):
""" Return plan status name for printing """
return self.name
def __nonzero__(self):
""" Boolean status representation """
return self._id != 0
@property
def id(self):
""" Numeric plan status id """
return self._id
@property
def name(self):
""" Human readable plan status name """
return color(self._statuses[self.id], color=self._colors[self.id],
enabled=config.Coloring().enabled())
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# RunStatus Class
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class RunStatus(Nitrate):
""" Test run status """
_statuses = ['RUNNING', 'FINISHED']
def __init__(self, status):
"""
Takes numeric status id, status name or stop date.
A 'None' value is considered to be a 'no stop date' running:
0 ... RUNNING ... 'None'
1 ... FINISHED ... '2011-07-27 15:14'
"""
if isinstance(status, int):
if status not in [0, 1]:
raise NitrateError(
"Not a valid run status id: '{0}'".format(status))
self._id = status
else:
# Running or no stop date
if status == "RUNNING" or status == "None" or status is None:
self._id = 0
# Finished or some stop date
elif status == "FINISHED" or re.match("^[-0-9: ]+$", status):
self._id = 1
else:
raise NitrateError("Invalid run status '{0}'".format(status))
def __unicode__(self):
""" Return run status name for printing """
return self.name
@property
def id(self):
""" Numeric runstatus id """
return self._id
@property
def name(self):
""" Human readable runstatus name """
return self._statuses[self._id]
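    # Illustrative examples (not in the original source):
    #   RunStatus(None).name              -> 'RUNNING'
    #   RunStatus('2011-07-27 15:14').id  -> 1  (a stop date means FINISHED)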
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# CaseStatus Class
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CaseStatus(Nitrate):
""" Test case status """
_casestatuses = ['PAD', 'PROPOSED', 'CONFIRMED', 'DISABLED', 'NEED_UPDATE']
def __init__(self, casestatus):
"""
Takes numeric status id (1-4) or status name which is one of:
PROPOSED, CONFIRMED, DISABLED, NEED_UPDATE
"""
if isinstance(casestatus, int):
if casestatus < 1 or casestatus > 4:
raise NitrateError(
"Not a valid casestatus id: '{0}'".format(casestatus))
self._id = casestatus
else:
try:
self._id = self._casestatuses.index(casestatus)
except ValueError:
raise NitrateError(
"Invalid casestatus '{0}'".format(casestatus))
def __unicode__(self):
""" Return casestatus name for printing """
return self.name
@property
def id(self):
""" Numeric casestatus id """
return self._id
@property
def name(self):
""" Human readable casestatus name """
return self._casestatuses[self._id]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Status Class
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Status(Nitrate):
"""
Test case run status.
Used for easy converting between id and name.
"""
_statuses = ['PAD', 'IDLE', 'PASSED', 'FAILED', 'RUNNING', 'PAUSED',
'BLOCKED', 'ERROR', 'WAIVED']
_colors = [None, "blue", "lightgreen", "lightred", "green", "yellow",
"red", "magenta", "lightcyan"]
def __init__(self, status):
"""
Takes numeric status id (1-8) or status name which is one of:
IDLE, PASSED, FAILED, RUNNING, PAUSED, BLOCKED, ERROR, WAIVED
"""
if isinstance(status, int):
if status < 1 or status > 8:
raise NitrateError(
"Not a valid Status id: '{0}'".format(status))
self._id = status
else:
try:
self._id = self._statuses.index(status)
except ValueError:
raise NitrateError("Invalid status '{0}'".format(status))
def __unicode__(self):
""" Return status name for printing """
return self.name
@property
def id(self):
""" Numeric status id """
return self._id
@property
def _name(self):
""" Status name, plain without coloring """
return self._statuses[self.id]
@property
def name(self):
""" Human readable status name """
return color(self._name, color=self._colors[self.id],
enabled=config.Coloring().enabled())
@property
def shortname(self):
""" Short same-width status string (4 chars) """
return color(self._name[0:4], color=self._colors[self.id],
enabled=config.Coloring().enabled())
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# User Class
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class User(Nitrate):
""" User """
# Local cache of User objects indexed by user id
_cache = {}
# List of all object attributes (used for init & expiration)
_attributes = ["name", "login", "email"]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# User Properties
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Read-only properties
id = property(_getter("id"), doc="User id.")
login = property(_getter("login"), doc="Login username.")
email = property(_getter("email"), doc="User email address.")
name = property(_getter("name"), doc="User first name and last name.")
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# User Decorated
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@classmethod
def _cache_lookup(cls, id, **kwargs):
""" Look up cached objects, return found instance and search key """
# Return current user
if id is None and 'login' not in kwargs and 'email' not in kwargs:
return cls._cache["i-am-current-user"], "current user"
# Search by login & email
if "login" in kwargs:
return cls._cache[kwargs["login"]], kwargs["login"]
if "email" in kwargs:
return cls._cache[kwargs["email"]], kwargs["email"]
# Default search by id
return super(User, cls)._cache_lookup(id, **kwargs)
@staticmethod
def search(**query):
""" Search for users """
return [User(hash)
for hash in Nitrate()._server.User.filter(dict(query))]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# User Special
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __new__(cls, id=None, *args, **kwargs):
""" Create a new object, handle caching if enabled """
# Convert login or email into name for better logging
if "login" in kwargs or "email" in kwargs:
name = kwargs.get("login", kwargs.get("email"))
return Nitrate.__new__(cls, id=id, name=name, *args, **kwargs)
else:
return Nitrate.__new__(cls, id=id, *args, **kwargs)
def __init__(self, id=None, login=None, email=None):
"""
Initialize by user id, login or email
Defaults to the current user if no id, login or email provided.
If xmlrpc initial object dict provided as the first argument,
data are initialized directly from it.
"""
# Initialize (unless already done)
id, name, inject, initialized = self._is_initialized(
id or login or email)
if initialized: return
Nitrate.__init__(self, id, prefix="UID")
# If inject given, fetch data from it
if inject:
self._fetch(inject)
# Otherwise initialize by login or email
elif name is not None:
if "@" in name:
self._email = name
else:
self._login = name
self._index(name)
def __unicode__(self):
""" User login for printing """
return self.name if self.name is not None else u"No Name"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# User Methods
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _fetch(self, inject=None):
""" Fetch user data from the server """
Nitrate._fetch(self, inject)
if inject is None:
# Search by id
if self._id is not NitrateNone:
try:
log.info("Fetching user " + self.identifier)
inject = self._server.User.filter({"id": self.id})[0]
except IndexError:
raise NitrateError(
"Cannot find user for " + self.identifier)
# Search by login
elif self._login is not NitrateNone:
try:
log.info(
"Fetching user for login '{0}'".format(self.login))
inject = self._server.User.filter(
{"username": self.login})[0]
except IndexError:
raise NitrateError("No user found for login '{0}'".format(
self.login))
# Search by email
elif self._email is not NitrateNone:
try:
log.info("Fetching user for email '{0}'".format(
self.email))
inject = self._server.User.filter({"email": self.email})[0]
except IndexError:
raise NitrateError("No user found for email '{0}'".format(
self.email))
# Otherwise initialize to the current user
else:
log.info("Fetching the current user")
inject = self._server.User.get_me()
self._index("i-am-current-user")
# Initialize data from the inject and index into cache
log.debug("Initializing user UID#{0}".format(inject["id"]))
log.data(pretty(inject))
self._inject = inject
self._id = inject["id"]
self._login = inject["username"]
self._email = inject["email"]
if inject["first_name"] and inject["last_name"]:
self._name = inject["first_name"] + " " + inject["last_name"]
else:
self._name = None
self._index(self.login, self.email)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Version Class
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Version(Nitrate):
""" Product version """
# Local cache of Version
_cache = {}
# List of all object attributes (used for init & expiration)
_attributes = ["name", "product"]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Version Properties
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Read-only properties
id = property(_getter("id"), doc="Version id")
name = property(_getter("name"), doc="Version name")
product = property(_getter("product"), doc="Version product")
@classmethod
def _cache_lookup(cls, id, **kwargs):
""" Look up cached objects, return found instance and search key """
# Search cache by the version name and product
if "product" in kwargs and ("version" in kwargs or "name" in kwargs):
product = kwargs.get("product")
if isinstance(product, Product):
product = product.name
name = kwargs.get("name", kwargs.get("version"))
return cls._cache["{0}---in---{1}".format(name, product)], name
# Default search by id otherwise
return super(Version, cls)._cache_lookup(id, **kwargs)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Version Special
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __init__(self, id=None, name=None, product=None, **kwargs):
""" Initialize by version id or product and version """
# Backward compatibility for 'version' argument (now called 'name')
name = name if name is not None else kwargs.get("version")
# Initialize (unless already done)
id, ignore, inject, initialized = self._is_initialized(id)
if initialized: return
Nitrate.__init__(self, id)
# If inject given, fetch tag data from it
if inject:
self._fetch(inject)
# Initialize by version name and product
elif name is not None and product is not None:
self._name = name
# Convert product into object if necessary
if isinstance(product, Product):
self._product = product
else:
self._product = Product(product)
# Index by name/product (but only when the product name is known)
if self.product._name is not NitrateNone:
self._index("{0}---in---{1}".format(
self.name, self.product.name))
# Otherwise just make sure the version id was provided
elif not id:
raise NitrateError("Need either version id or both product "
"and version name to initialize the Version object.")
def __unicode__(self):
""" Version name for printing """
return self.name
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Version Methods
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _fetch(self, inject=None):
""" Fetch version data from the server """
Nitrate._fetch(self, inject)
# Directly fetch from the initial object dict
if inject is not None:
log.debug("Processing Version ID#{0} inject".format(inject["id"]))
# Search by version id
elif self._id is not NitrateNone:
try:
log.info("Fetching version {0}".format(self.identifier))
inject = self._server.Product.filter_versions(
{'id': self.id})[0]
except IndexError:
raise NitrateError(
"Cannot find version for {0}".format(self.identifier))
# Search by product and name
else:
try:
log.info(u"Fetching version '{0}' of '{1}'".format(
self.name, self.product.name))
inject = self._server.Product.filter_versions(
{'product': self.product.id, 'value': self.name})[0]
except IndexError:
raise NitrateError(
"Cannot find version for '{0}'".format(self.name))
# Initialize data from the inject and index into cache
log.debug("Initializing Version ID#{0}".format(inject["id"]))
log.data(pretty(inject))
self._inject = inject
self._id = inject["id"]
self._name = inject["value"]
self._product = Product(inject["product_id"])
# Index by product name & version name (if product is cached)
if self.product._name is not NitrateNone:
self._index("{0}---in---{1}".format(self.name, self.product.name))
# Otherwise index by id only
else:
self._index()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Component Class
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Component(Nitrate):
""" Test case component """
# Local cache of Component objects indexed by component id plus
    # additionally by name-in-product pairs
_cache = {}
# List of all object attributes (used for init & expiration)
_attributes = ["name", "product"]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Component Properties
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Read-only properties
id = property(_getter("id"), doc="Component id.")
name = property(_getter("name"), doc="Component name.")
product = property(_getter("product"), doc="Relevant product.")
@property
def synopsis(self):
""" Short component summary (including product info) """
return "{0}, {1}".format(self.name, self.product)
@classmethod
def _cache_lookup(cls, id, **kwargs):
""" Look up cached objects, return found instance and search key """
# Name and product check
if 'product' in kwargs and 'name' in kwargs:
product = kwargs.get("product")
if isinstance(product, Product):
product = product.name
name = kwargs.get("name")
return cls._cache["{0}---in---{1}".format(name, product)], name
return super(Component, cls)._cache_lookup(id, **kwargs)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Component Special
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __init__(self, id=None, name=None, product=None, **kwargs):
""" Initialize by component id or component name and product """
# Initialize (unless already done)
id, ignore, inject, initialized = self._is_initialized(id)
if initialized: return
Nitrate.__init__(self, id)
# If inject given, fetch component data from it
if inject:
self._fetch(inject)
# Initialized by product and component name
elif name is not None and product is not None:
# Detect product format
if isinstance(product, Product):
self._product = product
else:
self._product = Product(product)
self._name = name
# Index by name-product (only when the product name is known)
if self.product._name is not NitrateNone:
self._index("{0}---in---{1}".format(
self.name, self.product.name))
# Otherwise just check that the id was provided
elif id is None:
raise NitrateError("Need either component id or both product "
"and component name to initialize the Component object.")
def __unicode__(self):
""" Component name for printing """
return self.name
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Component Methods
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _fetch(self, inject=None):
""" Get the missing component data """
Nitrate._fetch(self, inject)
# Directly fetch from the initial object dict
if inject is not None:
log.info("Processing component ID#{0} inject".format(inject["id"]))
# Search by component id
elif self._id is not NitrateNone:
try:
log.info("Fetching component " + self.identifier)
inject = self._server.Product.get_component(self.id)
except xmlrpclib.Fault as error:
log.debug(error)
raise NitrateError(
"Cannot find component for " + self.identifier)
# Search by component name and product
else:
try:
log.info(u"Fetching component '{0}' of '{1}'".format(
self.name, self.product.name))
inject = self._server.Product.check_component(
self.name, self.product.id)
except xmlrpclib.Fault as error:
log.debug(error)
raise NitrateError("Component '{0}' not found in"
" '{1}'".format(self.name, self.product.name))
# Initialize data from the inject and index into cache
log.debug("Initializing component ID#{0}".format(inject["id"]))
log.data(pretty(inject))
self._inject = inject
self._id = inject["id"]
self._name = inject["name"]
self._product = Product(
{"id": inject["product_id"], "name": inject["product"]})
self._index("{0}---in---{1}".format(self.name, self.product.name))
@staticmethod
def search(**query):
""" Search for components """
return [Component(hash) for hash in
Nitrate()._server.Product.filter_components(dict(query))]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Bug Class
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Bug(Nitrate):
""" Bug related to a test case or a case run """
# Local cache of Bug objects indexed by internal bug id
_cache = {}
# List of all object attributes (used for init & expiration)
_attributes = ["bug", "system", "testcase", "caserun"]
# Prefixes for bug systems, identifier width
_prefixes = {1: "BZ"}
_identifier_width = 7
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Bug Properties
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Read-only properties
id = property(_getter("id"), doc="Bug id (internal).")
bug = property(_getter("bug"), doc="Bug (external id).")
system = property(_getter("system"), doc="Bug system.")
testcase = property(_getter("testcase"), doc="Test case.")
caserun = property(_getter("caserun"), doc="Case run.")
@property
def synopsis(self):
""" Short summary about the bug """
# Summary in the form: BUG#123456 (BZ#123, TC#456, CR#789)
return "{0} ({1})".format(self.identifier, ", ".join([str(self)] +
[obj.identifier for obj in (self.testcase, self.caserun)
if obj is not None]))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Bug Special
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __init__(self, id=None, bug=None, system=1, **kwargs):
"""
Initialize the bug
Provide external bug id, optionally bug system (Bugzilla by default).
"""
# Initialize (unless already done)
id, ignore, inject, initialized = self._is_initialized(id)
if initialized: return
Nitrate.__init__(self, id, prefix="BUG")
# If inject given, fetch bug data from it
if inject:
self._fetch(inject)
# Initialized by bug id and system id
elif bug is not None and system is not None:
self._bug = bug
self._system = system
# Otherwise just check that the id was provided
elif id is None:
raise NitrateError("Need bug id to initialize the Bug object.")
def __eq__(self, other):
"""
Custom bug comparison
Primarily decided by id. If unknown, compares by bug id & bug system.
"""
# Decide by internal id
if self._id is not NitrateNone and other._id is not NitrateNone:
return self.id == other.id
# Compare external id and bug system id
return self.bug == other.bug and self.system == other.system
def __unicode__(self):
""" Bug name for printing """
try:
prefix = self._prefixes[self.system]
except KeyError:
prefix = "BZ"
return u"{0}#{1}".format(prefix, str(self.bug).rjust(
self._identifier_width, "0"))
def __hash__(self):
""" Construct the uniqe hash from bug id and bug system id """
return _idify([self.system, self.bug])
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Bug Methods
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _fetch(self, inject=None):
""" Fetch bug info from the server """
Nitrate._fetch(self, inject)
# No direct xmlrpc function for fetching so far
if inject is None:
raise NotImplementedError("Direct bug fetching not implemented")
# Process provided inject
self._id = int(inject["id"])
self._bug = int(inject["bug_id"])
self._system = int(inject["bug_system_id"])
self._testcase = TestCase(int(inject["case_id"]))
if inject["case_run_id"] is not None:
self._caserun = CaseRun(int(inject["case_run_id"]))
# Index the fetched object into cache
self._index()
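# Illustrative usage (added sketch, not part of the original module; the bug
# id below is a made-up value):
#
#     bug = Bug(bug=123456, system=1)
#     print(unicode(bug))    # -> BZ#0123456 (id padded to 7 digits)
#     bug.synopsis           # short summary incl. related test case / run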
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Tag Class
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Tag(Nitrate):
""" Tag Class """
# List of all object attributes (used for init & expiration)
_attributes = ["name"]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Tag Properties
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Read-only properties
id = property(_getter("id"), doc="Tag id")
name = property(_getter("name"), doc="Tag name")
# Local cache for Tag
_cache = {}
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Tag Special
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __init__(self, id=None, name=None):
""" Initialize by tag id or tag name """
# Initialize (unless already done)
id, name, inject, initialized = self._is_initialized(id or name)
if initialized: return
Nitrate.__init__(self, id)
# If inject given, fetch tag data from it
if inject:
self._fetch(inject)
# Initialize by name
elif name is not None:
self._name = name
self._index(name)
# Otherwise just check that the tag name or id was provided
elif not id:
raise NitrateError("Need either tag id or tag name "
"to initialize the Tag object.")
def __unicode__(self):
""" Tag name for printing """
return self.name
def __hash__(self):
""" Use tag name for hashing """
# This is necessary until BZ#1084301 is fixed
return hash(self.name)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Tag Methods
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _fetch(self, inject=None):
""" Fetch tag data from the server """
Nitrate._fetch(self, inject)
# Directly fetch from the initial object dict
if inject is not None:
log.debug("Initializing Tag ID#{0}".format(inject["id"]))
log.data(pretty(inject))
self._id = inject["id"]
self._name = inject["name"]
# Search by tag id
elif self._id is not NitrateNone:
try:
log.info("Fetching tag " + self.identifier)
inject = self._server.Tag.get_tags({'ids': [self.id]})
log.debug("Initializing tag " + self.identifier)
log.data(pretty(inject))
self._inject = inject
self._name = inject[0]["name"]
except IndexError:
raise NitrateError(
"Cannot find tag for {0}".format(self.identifier))
# Search by tag name
else:
try:
log.info(u"Fetching tag '{0}'".format(self.name))
inject = self._server.Tag.get_tags({'names': [self.name]})
log.debug(u"Initializing tag '{0}'".format(self.name))
log.data(pretty(inject))
self._inject = inject
self._id = inject[0]["id"]
except IndexError:
raise NitrateError(
"Cannot find tag '{0}'".format(self.name))
# Index the fetched object into cache
self._index(self.name)
# We need to import mutable here because of cyclic import
from nitrate.mutable import TestCase, CaseRun
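# Illustrative usage (added sketch, not part of the original module; the tag
# name is a made-up value, and a configured Nitrate server is assumed):
#
#     tag = Tag(name="regression")
#     print(tag.id)            # lazily fetched from the server by name
#     print(Tag(tag.id).name)  # the same tag, this time fetched by id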
|
lgpl-2.1
| 6,383,373,787,954,833,000
| 36.730798
| 79
| 0.480661
| false
|
fracpete/python-weka-wrapper
|
python/weka/flow/container.py
|
1
|
7925
|
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# container.py
# Copyright (C) 2015 Fracpete (pythonwekawrapper at gmail dot com)
import re
from weka.core.dataset import Instances
class Container(object):
"""
Container for storing multiple objects and passing them around together in the flow.
"""
def __init__(self):
"""
Initializes the container.
"""
self._data = {}
self._allowed = []
def get(self, name):
"""
Returns the stored data.
:param name: the name of the item to return
:type name: str
:return: the data
:rtype: object
"""
return self._data[name]
def set(self, name, value):
"""
Stores the given data (if not None).
:param name: the name of the item to store
:type name: str
:param value: the value to store
:type value: object
"""
if value is not None:
self._data[name] = value
@property
def allowed(self):
"""
        Returns all the allowed keys.
:return: the list of allowed keys.
:rtype: list
"""
return self._allowed
def is_valid(self):
"""
Checks whether the container is valid.
:return: True if the container is valid
:rtype: bool
"""
return True
def __str__(self):
"""
Returns the content of the container as string.
:return: the content
:rtype: str
"""
return str(self._data)
def generate_help(self):
"""
Generates a help string for this container.
:return: the help string
:rtype: str
"""
result = []
result.append(self.__class__.__name__)
result.append(re.sub(r'.', '=', self.__class__.__name__))
result.append("")
result.append("Supported value names:")
for a in self.allowed:
result.append(a)
return '\n'.join(result)
def print_help(self):
"""
        Prints a help string for this container to stdout.
"""
print(self.generate_help())
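# Illustrative sketch (not in the original file): Container is a thin wrapper
# around a dict, so a subclass only needs to fill self._allowed and override
# is_valid(). For example:
#
#     c = Container()
#     c._allowed = ["Payload"]     # normally populated by a subclass __init__
#     c.set("Payload", 42)         # set() silently ignores None values
#     print(c.get("Payload"))      # -> 42
#     c.print_help()               # lists the supported value names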
class ModelContainer(Container):
"""
Container for models.
"""
def __init__(self, model=None, header=None):
"""
Initializes the container.
:param model: the model to store (eg Classifier or Clusterer)
:type model: object
:param header: the header instances
:type header: Instances
"""
super(ModelContainer, self).__init__()
self.set("Model", model)
if header is not None:
header = Instances.template_instances(header)
self.set("Header", header)
self._allowed = ["Model", "Header"]
def is_valid(self):
"""
Checks whether the container is valid.
:return: True if the container is valid
:rtype: bool
"""
return ("Model" in self._data) or ("Model" in self._data and "Header" in self._data)
class AttributeSelectionContainer(Container):
"""
    Container for attribute selection results.
"""
def __init__(self, original=None, reduced=None, num_atts=None, selected=None, results=None):
"""
Initializes the container.
:param original: the original dataset
:type original: Instances
:param reduced: the reduced dataset
:type reduced: Instances
:param num_atts: the number of attributes
:type num_atts: int
:param selected: the list of selected attribute indices (0-based)
:type selected: list
:param results: the generated results string
:type results: str
"""
super(AttributeSelectionContainer, self).__init__()
self.set("Original", original)
self.set("Reduced", reduced)
self.set("NumAttributes", num_atts)
self.set("Selected", selected)
self.set("Results", results)
self._allowed = ["Original", "Reduced", "NumAttributes", "Selected", "Results"]
def is_valid(self):
"""
Checks whether the container is valid.
:return: True if the container is valid
:rtype: bool
"""
return ("Reduced" in self._data) and ("NumAttributes" in self._data) and ("Selected" in self._data)
class ClassificationContainer(Container):
"""
Container for predictions (classifiers).
"""
def __init__(self, inst=None, classification=None, label=None, distribution=None):
"""
Initializes the container.
:param inst: the instance used for making the prediction
:type inst: Instance
:param classification: the classification (numeric value or 0-based label index)
:type classification: float
:param label: classification label (for nominal classes)
:type label: str
:param distribution: the class distribution
:type distribution: ndarray
"""
super(ClassificationContainer, self).__init__()
self.set("Instance", inst)
self.set("Classification", classification)
self.set("Label", label)
self.set("Distribution", distribution)
self._allowed = ["Instance", "Classification", "Label", "Distribution"]
def is_valid(self):
"""
Checks whether the container is valid.
:return: True if the container is valid
:rtype: bool
"""
return ("Instance" in self._data) and ("Classification" in self._data)
class ClusteringContainer(Container):
"""
Container for predictions (clusterers).
"""
def __init__(self, inst=None, cluster=None, distribution=None):
"""
Initializes the container.
:param inst: the instance used for making the prediction
:type inst: Instance
:param cluster: the cluster
:type cluster: int
:param distribution: the class distribution
:type distribution: ndarray
"""
super(ClusteringContainer, self).__init__()
self.set("Instance", inst)
self.set("Cluster", cluster)
self.set("Distribution", distribution)
self._allowed = ["Instance", "Cluster", "Distribution"]
def is_valid(self):
"""
Checks whether the container is valid.
:return: True if the container is valid
:rtype: bool
"""
return ("Instance" in self._data) and ("Cluster" in self._data)
|
gpl-3.0
| -7,964,519,696,073,500,000
| 28.243542
| 107
| 0.592303
| false
|
AxisPhilly/lobbying.ph-django
|
lobbyingph/migrations/0010_auto__chg_field_filing_total_exp_indirect_comm__chg_field_filing_total.py
|
1
|
10840
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Filing.total_exp_indirect_comm'
db.alter_column('lobbyingph_filing', 'total_exp_indirect_comm', self.gf('django.db.models.fields.DecimalField')(max_digits=12, decimal_places=2))
# Changing field 'Filing.total_exp_direct_comm'
db.alter_column('lobbyingph_filing', 'total_exp_direct_comm', self.gf('django.db.models.fields.DecimalField')(max_digits=12, decimal_places=2))
# Changing field 'Filing.total_exp_other'
db.alter_column('lobbyingph_filing', 'total_exp_other', self.gf('django.db.models.fields.DecimalField')(max_digits=12, decimal_places=2))
def backwards(self, orm):
# Changing field 'Filing.total_exp_indirect_comm'
db.alter_column('lobbyingph_filing', 'total_exp_indirect_comm', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2))
# Changing field 'Filing.total_exp_direct_comm'
db.alter_column('lobbyingph_filing', 'total_exp_direct_comm', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2))
# Changing field 'Filing.total_exp_other'
db.alter_column('lobbyingph_filing', 'total_exp_other', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2))
models = {
'lobbyingph.agency': {
'Meta': {'ordering': "['name']", 'object_name': 'Agency'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'lobbyingph.bill': {
'Meta': {'ordering': "['name']", 'object_name': 'Bill'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'})
},
'lobbyingph.category': {
'Meta': {'ordering': "['name']", 'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'lobbyingph.exp_direct_comm': {
'Meta': {'object_name': 'Exp_Direct_Comm'},
'agency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Agency']", 'null': 'True', 'blank': 'True'}),
'bill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Bill']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Category']"}),
'filing': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Filing']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Issue']", 'null': 'True', 'blank': 'True'}),
'officials': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lobbyingph.Official']", 'null': 'True', 'blank': 'True'}),
'other_desc': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'lobbyingph.exp_indirect_comm': {
'Meta': {'object_name': 'Exp_Indirect_Comm'},
'agency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Agency']", 'null': 'True', 'blank': 'True'}),
'bill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Bill']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Category']"}),
'filing': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Filing']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Issue']", 'null': 'True', 'blank': 'True'}),
'officials': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lobbyingph.Official']", 'null': 'True', 'blank': 'True'}),
'other_desc': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.SmallIntegerField', [], {})
},
'lobbyingph.exp_other': {
'Meta': {'object_name': 'Exp_Other'},
'filing': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Filing']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'lobbyingph.filing': {
'Meta': {'object_name': 'Filing'},
'firms': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lobbyingph.Firm']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lobbyists': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lobbyingph.Lobbyist']", 'null': 'True', 'blank': 'True'}),
'principal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Principal']", 'null': 'True', 'blank': 'True'}),
'quarter': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'total_exp_direct_comm': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '12', 'decimal_places': '2'}),
'total_exp_indirect_comm': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '12', 'decimal_places': '2'}),
'total_exp_other': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '12', 'decimal_places': '2'}),
'year': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today'})
},
'lobbyingph.firm': {
'Meta': {'ordering': "['name']", 'object_name': 'Firm'},
'address1': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'address2': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'address3': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
'lobbyingph.issue': {
'Meta': {'object_name': 'Issue'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'lobbyingph.lobbyist': {
'Meta': {'ordering': "['name']", 'object_name': 'Lobbyist'},
'address1': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'address2': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'address3': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
'firm': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Firm']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
'lobbyingph.official': {
'Meta': {'ordering': "['last_name']", 'object_name': 'Official'},
'agency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Agency']", 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'lobbyingph.principal': {
'Meta': {'ordering': "['name']", 'object_name': 'Principal'},
'address1': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'address2': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'address3': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '10'})
}
}
complete_apps = ['lobbyingph']
|
mit
| 3,768,532,456,759,209,500
| 73.765517
| 176
| 0.556642
| false
|
ganga-devs/ganga
|
ganga/GangaCore/Lib/Virtualization/Docker.py
|
1
|
4847
|
##########################################################################
# Ganga Project. https://github.com/ganga-devs/ganga
#
##########################################################################
from os import getcwd, path
from GangaCore.GPIDev.Schema import Schema, Version, SimpleItem
from GangaCore.Utility.Config import getConfig
from GangaCore.GPIDev.Adapters.IVirtualization import IVirtualization
class Docker(IVirtualization):
"""
The job will be run inside a container using Docker or UDocker as the virtualization method. Docker
    is tried first; if it is not installed or permissions do not allow it, UDocker is installed and used.
j=Job()
j.virtualization = Docker("fedora:latest")
    The mode in which UDocker runs can be modified. The P1 mode works almost everywhere but might
not give the best performance. See https://github.com/indigo-dc/udocker for more details about
Udocker.
If the image is a private image, the username and password of the deploy token can be given like
j.virtualization.tokenuser = 'gitlab+deploy-token-123'
j.virtualization.tokenpassword = 'gftrh84dgel-245^ghHH'
    Note that images stored in a Docker repository hosted by GitHub at present don't work with uDocker,
    as uDocker has not been updated to the latest version of the API.
Directories can be mounted from the host to the container using key-value pairs to the mounts option.
j.virtualization.mounts = {'/cvmfs':'/cvmfs'}
"""
_name = 'Docker'
_schema = IVirtualization._schema.inherit_copy()
_schema.datadict['mode'] = SimpleItem(defvalue="P1", doc='Mode of container execution')
    def __init__(self, image, mode="P1"):
super().__init__(image)
self.mode = mode
def modify_script(self, script, sandbox=False):
"""Overides parent's modify_script function
Arguments other than self:
script - Script that need to be modified
Return value: modified script"""
extra = 'virtualization_image = ' + repr(self.image) + '\n'
extra = extra + 'virtualization_user = ' + repr(self.tokenuser) + '\n'
extra = extra + 'virtualization_password = ' + repr(self.tokenpassword) + '\n'
extra = extra + 'virtualization_mounts = ' + repr(self.mounts) + '\n'
extra = extra + 'virtualization_options = ' + repr(self.options) + '\n'
if sandbox:
extra = extra + 'virtualization_udockerlocation = ' + repr(getcwd())
extra = extra + 'runenv[\'UDOCKER_DIR\']=' + repr(path.join(getcwd(),'.udocker'))
else:
extra = extra + 'virtualization_udockerlocation = ' + \
repr(getConfig('Configuration')['UDockerlocation']) + '\n'
extra = extra + """
from Virtualization import checkDocker, checkUDocker, checkSingularity, installUdocker
options = []
if execmd[0].startswith('./'):
execmd[0] = "/work_dir/"+execmd[0]
if (checkDocker()):
print("Using Docker")
if virtualization_user:
buildcommand = ['docker', 'login', '--username='+virtualization_user, '--password='+virtualization_password]
rc = subprocess.call(buildcommand, env=runenv, shell=False)
for k,v in virtualization_mounts.items():
if os.path.isdir(k):
options = options + ['-v' , k + ':' + v]
else:
print('Requested directory %s is not available and no bind will be made to container' % k)
options = options + virtualization_options
execmd = ['docker', 'run', '--rm', '-v', workdir+":"+"/work_dir"] + options + [virtualization_image] + execmd
else:
print("Docker not available or no permission to run docker demon, will attempt UDocker.")
location = os.path.expanduser(virtualization_udockerlocation)
binary = os.path.join(location,'udocker')
if not (checkUDocker(location)):
try:
installUdocker(location)
except OSError as x:
failurereport(statusfile, 'PROBLEM WITH UDOCKER: %s' % str(x))
runenv["PROOT_NO_SECCOMP"]="1"
runenv['UDOCKER_DIR']=os.path.join(location,'.udocker')
if virtualization_user:
buildcommand = [binary, 'login', '--username='+virtualization_user, '--password='+virtualization_password]
rc = subprocess.call(buildcommand, env=runenv, shell=False)
for k,v in virtualization_mounts.items():
if os.path.isdir(k):
options = options + ['--volume='+ k + ':' + v]
else:
print('Requested directory %s is not available and no bind will be made to container' % k)
options = options + virtualization_options
execmd = [binary, '--quiet', 'run', '--rm', '--volume', workdir+":"+"/work_dir"] + options + [virtualization_image] + execmd
"""
script = script.replace('###VIRTUALIZATION###',extra)
return script
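# Hedged usage sketch (not part of the original file): within a Ganga session
# the class is attached to a job, mirroring the class docstring above:
#
#     j = Job()
#     j.virtualization = Docker("fedora:latest", mode="P1")
#
# The backend later calls modify_script() to splice the container bootstrap
# code into the generated job script at the ###VIRTUALIZATION### marker.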
|
gpl-2.0
| -4,102,511,025,632,554,500
| 44.726415
| 128
| 0.629874
| false
|
koonsolo/MysticMine
|
monorail/pickupsview.py
|
1
|
11068
|
import random
import pygame
from koon.geo import Vec2D
import koon.geo as geo
from koon.gfx import SpriteFilm, Font, LoopAnimationTimer, PingPongTimer, Timer
from koon.res import resman
import pickups
import event
import tiles
class PickupView:
def __init__( self ):
self.pos = None
self.jump_pos = None
def get_z( self ):
if self.pos is None:
return -999
else:
return self.pos.y + 64
z = property( get_z )
def get_pos( self, frame ):
self.pos = None
if self.model.container is None or not hasattr( self.model.container, "views" ): return None
self.pos = self.model.container.views[0].get_pickup_pos( frame )
if self.model.jump_cnt is not None:
if self.jump_pos is None:
self.jump_pos = self.pos
x = geo.lin_ipol( self.model.jump_cnt, self.jump_pos.x, self.pos.x )
y = geo.lin_ipol( self.model.jump_cnt, self.jump_pos.y, self.pos.y )
height = self.model.jump_cnt
if self.model.jump_cnt > 0.5:
height = 1.0 - self.model.jump_cnt
self.pos = Vec2D( x, y - 30 * height)
else:
self.jump_pos = None
return self.pos
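# Subclass contract (illustrative comment, not in the original source): each
# view keeps its pickup model, loads a sprite via resman, and draws it at
# get_pos(frame) shifted by the frame offsets, e.g.
#
#     class CoinView(PickupView):              # hypothetical subclass
#         def __init__(self, model):
#             PickupView.__init__(self)
#             self.model = model
#             self.sprite = resman.get("game.copper_sprite").clone()
#         def draw(self, frame):
#             if self.get_pos(frame) is not None:
#                 self.sprite.draw(frame.surface, self.get_pos(frame)
#                                  + Vec2D(frame.X_OFFSET, frame.Y_OFFSET))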
class TorchView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.torch_sprite").clone()
def draw( self, frame ):
if self.get_pos( frame ) is not None:
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET - 20) )
class KeyView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.key_sprite")
self.animTimer = LoopAnimationTimer( 25, 0, 19 )
def draw( self, frame ):
if self.get_pos( frame ) is not None:
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET - 20) )
class MirrorView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.mirror_sprite").clone()
self.animTimer = LoopAnimationTimer( 25, 0, 9 )
def draw( self, frame ):
if self.get_pos( frame ) is not None:
self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET - 10) )
class OilerView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.oiler_sprite").clone()
def draw( self, frame ):
if self.get_pos( frame ) is not None and self.model.goldcar is None: # only draw on tile
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET) )
class MultiplierView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
def draw( self, frame ):
if self.get_pos( frame ) is None: return
font = Font(size = 28, color = (255,0,0))
pos = self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET)
if self.model.goldcar is not None:
pos += Vec2D(0, 20)
font.draw("x2", frame.surface, pos.get_tuple(), Font.CENTER, Font.MIDDLE)
class BalloonView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.balloon_sprite")
def draw( self, frame ):
if self.get_pos( frame ) is not None:
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET - 20) )
class GhostView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.ghost_sprite").clone()
def draw( self, frame ):
if self.get_pos( frame ) is not None and self.model.goldcar is None: # only draw on tile
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET - 20) )
class CopperCoinView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.copper_sprite").clone()
self.animTimer = LoopAnimationTimer( 25, 0, self.sprite.max_x )
self.animTimer.set_frame( 0, random.randint(0,self.sprite.max_x-1) )
def draw( self, frame ):
if self.get_pos( frame ) is not None:
self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET) )
class GoldBlockView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.gold_sprite").clone()
self.animTimer = LoopAnimationTimer( 25, 0, 15 )
def draw( self, frame ):
if self.get_pos( frame ) is not None:
self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET) )
class RockBlockView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.rock_sprite").clone()
self.animTimer = LoopAnimationTimer( 25, 0, 15 )
def draw( self, frame ):
if self.get_pos( frame ) is not None:
self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET + 10) )
class DiamondView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.diamond_sprite").clone()
self.animTimer = LoopAnimationTimer( 25, 0, 4 )
def draw( self, frame ):
if self.get_pos( frame ) is not None:
self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET) )
class DynamiteView (PickupView):
class Sparkle:
def __init__( self, pos ):
self.pos = pos
self.life = 10 + int(random.random() * 2)
self.move = Vec2D( random.uniform( -2.5, 2.5 ), random.uniform( -2.5, 0.0 ) )
self.surf = resman.get("game.sparkle_surf")
width, height = self.surf.get_size()
self.center = Vec2D( width/2, height/2 )
def game_tick( self ):
self.life -= 1
self.pos += self.move
self.move.y += 0.1
def is_dead( self ):
return self.life <= 0
def draw( self, frame ):
pos = self.pos + self.center + Vec2D( frame.X_OFFSET, frame.Y_OFFSET )
self.surf.draw( frame.surface, pos )
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.dynamite_sprite").clone()
self.sprite_delta = 1
self.prev_life = 1.0
w, h = self.sprite.get_size()
self.sparkle_offset = Vec2D( 7, -h + 24 )
self.sparkle_line = Vec2D( 0, -22 )
self.sparkles = []
self.sparkle_timer = Timer( 25 )
def draw( self, frame ):
if self.get_pos(frame) is None: return
# no time... must implement... bad code...
if self.model.life < pickups.Dynamite.DEC * 18 and\
self.model.life != self.prev_life:
self.prev_life = self.model.life
self.sprite.nr += self.sprite_delta
if self.sprite.nr < 0:
self.sprite.nr = 0
self.sprite_delta = 1
elif self.sprite.nr >= 4:
self.sprite.nr = 3
self.sprite_delta = -1
event.Event.dynamite_tick()
while self.sparkle_timer.do_tick( frame.time_sec ):
self.sparkle_tick( frame )
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D( frame.X_OFFSET, frame.Y_OFFSET ) )
for sparkle in self.sparkles:
sparkle.draw( frame )
def sparkle_tick( self, frame ):
if self.model.life > pickups.Dynamite.DEC * 18:
for i in range(3):
pos = self.get_pos(frame) + self.sparkle_offset + self.sparkle_line * self.model.life
self.sparkles.append( DynamiteView.Sparkle( pos ) )
new_sparkles = []
for sparkle in self.sparkles:
sparkle.game_tick()
if not sparkle.is_dead():
new_sparkles.append( sparkle )
self.sparkles = new_sparkles
class LampView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.lamp_sprite").clone()
#self.animTimer = LoopAnimationTimer( 25, 0, 4 )
def draw( self, frame ):
if self.get_pos( frame ) is not None:
#self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET) )
class AxeView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.axe_sprite").clone()
        # ping-pong animation: the axe swings back and forth
self.animTimer = PingPongTimer( 25, 0, 8 )
def draw( self, frame ):
if self.get_pos( frame ) is not None:
self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET) )
class FlagView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.flag%d_sprite" % (model.goldcar.nr+1))
self.animTimer = LoopAnimationTimer( 20, 0, 8 )
def draw( self, frame ):
if self.get_pos( frame ) is not None:
self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET - 20) )
class LeprechaunView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.leprechaun_sprite").clone()
#self.animTimer = LoopAnimationTimer( 25, 0, 4 )
def draw( self, frame ):
if self.get_pos( frame ) is not None:
#self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET - 20) )
|
mit
| -8,202,370,288,542,892,000
| 34.703226
| 111
| 0.58502
| false
|
sadig/DC2
|
components/dc2-admincenter/dc2/admincenter/lib/auth/kerberos.py
|
1
|
2306
|
# -*- coding: utf-8 -*-
#
# (DC)² - DataCenter Deployment Control
# Copyright (C) 2010, 2011, 2012, 2013, 2014 Stephan Adig <sh@sourcecode.de>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import sys
import os
try:
    import web
except ImportError as e:
    print('you did not install web.py')
    print(e)
    sys.exit(1)
try:
    import krbV
except ImportError as e:
    print('you don\'t have python-krbV installed')
    print(e)
    sys.exit(1)
try:
    from dc2.lib.auth.kerberos.authentication import run
    from dc2.lib.auth.kerberos.authentication import krb5_format_principal_name
    from dc2.lib.auth.kerberos.authentication import get_ccache_name
except ImportError as e:
    print("You didn't install dc2.lib")
    print(e)
    sys.exit(1)
from exceptions import KerberosAuthError
ENCODING = 'UTF-8'
def do_kinit(username=None, password=None):
if username is None or password is None:
raise ValueError('Username and Password can\'t be None')
if username == '' or password == '':
raise ValueError('Username and Password can\'t be empty strings')
realm = krbV.default_context().default_realm.decode(ENCODING)
principal = krb5_format_principal_name(username, realm)
ccache_name = get_ccache_name()
(stdout, stderr, returncode) = run(
['/usr/bin/kinit', principal],
env={'KRB5CCNAME': ccache_name},
stdin=password, raiseonerr=False)
os.environ['KRB5CCNAME'] = ccache_name
web.ctx.session.krb5ccname = ccache_name
if returncode != 0:
raise KerberosAuthError(principal=principal, message=unicode(stderr))
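# Hedged usage sketch (added for illustration; credentials are placeholders).
# do_kinit() must run inside a web.py request context, since it stores the
# resulting credential cache name on web.ctx.session:
#
#     try:
#         do_kinit('jdoe', 's3cret')
#     except KerberosAuthError as error:
#         print('Kerberos login failed: %s' % error)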
|
gpl-2.0
| -8,005,599,435,126,051,000
| 32.405797
| 79
| 0.708894
| false
|
juju/python-libjuju
|
juju/provisioner.py
|
1
|
11307
|
import os
import re
import shlex
import tempfile
import uuid
from subprocess import CalledProcessError
import paramiko
from .client import client
arches = [
[re.compile(r"amd64|x86_64"), "amd64"],
[re.compile(r"i?[3-9]86"), "i386"],
[re.compile(r"(arm$)|(armv.*)"), "armhf"],
[re.compile(r"aarch64"), "arm64"],
[re.compile(r"ppc64|ppc64el|ppc64le"), "ppc64el"],
[re.compile(r"s390x?"), "s390x"],
]
def normalize_arch(rawArch):
"""Normalize the architecture string."""
for arch in arches:
if arch[0].match(rawArch):
return arch[1]
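# Illustrative mappings (comment only, not in the original source):
#     normalize_arch("x86_64")  -> "amd64"
#     normalize_arch("i686")    -> "i386"
#     normalize_arch("aarch64") -> "arm64"
#     normalize_arch("sparc")   -> None (no pattern matches)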
DETECTION_SCRIPT = """#!/bin/bash
set -e
os_id=$(grep '^ID=' /etc/os-release | tr -d '"' | cut -d= -f2)
if [ "$os_id" = 'centos' ]; then
os_version=$(grep '^VERSION_ID=' /etc/os-release | tr -d '"' | cut -d= -f2)
echo "centos$os_version"
else
lsb_release -cs
fi
uname -m
grep MemTotal /proc/meminfo
cat /proc/cpuinfo
"""
INITIALIZE_UBUNTU_SCRIPT = """set -e
(id ubuntu &> /dev/null) || useradd -m ubuntu -s /bin/bash
umask 0077
temp=$(mktemp)
echo 'ubuntu ALL=(ALL) NOPASSWD:ALL' > $temp
install -m 0440 $temp /etc/sudoers.d/90-juju-ubuntu
rm $temp
su ubuntu -c 'install -D -m 0600 /dev/null ~/.ssh/authorized_keys'
export authorized_keys="{}"
if [ ! -z "$authorized_keys" ]; then
su ubuntu -c 'echo $authorized_keys >> ~/.ssh/authorized_keys'
fi
"""
class SSHProvisioner:
"""Provision a manually created machine via SSH."""
user = ""
host = ""
private_key_path = ""
def __init__(self, user, host, private_key_path):
self.host = host
self.user = user
self.private_key_path = private_key_path
def _get_ssh_client(self, host, user, key):
"""Return a connected Paramiko ssh object.
:param str host: The host to connect to.
:param str user: The user to connect as.
:param str key: The private key to authenticate with.
:return: object: A paramiko.SSHClient
:raises: :class:`paramiko.ssh_exception.SSHException` if the
connection failed
"""
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
pkey = None
# Read the private key into a paramiko.RSAKey
if os.path.exists(key):
with open(key, 'r') as f:
pkey = paramiko.RSAKey.from_private_key(f)
#######################################################################
# There is a bug in some versions of OpenSSH 4.3 (CentOS/RHEL5) where #
# the server may not send the SSH_MSG_USERAUTH_BANNER message except #
# when responding to an auth_none request. For example, paramiko will #
# attempt to use password authentication when a password is set, but #
# the server could deny that, instead requesting keyboard-interactive.#
# The hack to workaround this is to attempt a reconnect, which will #
# receive the right banner, and authentication can proceed. See the #
# following for more info: #
# https://github.com/paramiko/paramiko/issues/432 #
# https://github.com/paramiko/paramiko/pull/438 #
#######################################################################
try:
ssh.connect(host, port=22, username=user, pkey=pkey)
except paramiko.ssh_exception.SSHException as e:
if 'Error reading SSH protocol banner' == str(e):
# Once more, with feeling
ssh.connect(host, port=22, username=user, pkey=pkey)
else:
# Reraise the original exception
raise e
return ssh
def _run_command(self, ssh, cmd, pty=True):
"""Run a command remotely via SSH.
:param object ssh: The SSHClient
        :param cmd: The command to execute, either a string or a
            `shlex.split`-style argument list
:param bool pty: Whether to allocate a pty
:return: tuple: The stdout and stderr of the command execution
:raises: :class:`CalledProcessError` if the command fails
"""
if isinstance(cmd, str):
cmd = shlex.split(cmd)
if type(cmd) is not list:
cmd = [cmd]
cmds = ' '.join(cmd)
stdin, stdout, stderr = ssh.exec_command(cmds, get_pty=pty)
retcode = stdout.channel.recv_exit_status()
if retcode > 0:
output = stderr.read().strip()
raise CalledProcessError(returncode=retcode, cmd=cmd,
output=output)
return (
stdout.read().decode('utf-8').strip(),
stderr.read().decode('utf-8').strip()
)
def _init_ubuntu_user(self):
"""Initialize the ubuntu user.
:return: bool: If the initialization was successful
:raises: :class:`paramiko.ssh_exception.AuthenticationException`
if the authentication fails
"""
ssh = None
try:
# Run w/o allocating a pty, so we fail if sudo prompts for a passwd
ssh = self._get_ssh_client(
self.host,
self.user,
self.private_key_path,
)
stdout, stderr = self._run_command(ssh, "sudo -n true", pty=False)
except paramiko.ssh_exception.AuthenticationException as e:
raise e
finally:
if ssh:
ssh.close()
# Infer the public key
public_key = None
public_key_path = "{}.pub".format(self.private_key_path)
if not os.path.exists(public_key_path):
raise FileNotFoundError(
"Public key '{}' doesn't exist.".format(public_key_path)
)
with open(public_key_path, "r") as f:
public_key = f.readline()
script = INITIALIZE_UBUNTU_SCRIPT.format(public_key)
try:
ssh = self._get_ssh_client(
self.host,
self.user,
self.private_key_path,
)
self._run_command(
ssh,
["sudo", "/bin/bash -c " + shlex.quote(script)],
pty=True
)
except paramiko.ssh_exception.AuthenticationException as e:
raise e
finally:
ssh.close()
return True
def _detect_hardware_and_os(self, ssh):
"""Detect the target hardware capabilities and OS series.
:param object ssh: The SSHClient
        :return: dict: The detected 'series', 'arch', 'cpu-cores' and 'mem'.
"""
info = {
'series': '',
'arch': '',
'cpu-cores': '',
'mem': '',
}
stdout, stderr = self._run_command(
ssh,
["sudo", "/bin/bash -c " + shlex.quote(DETECTION_SCRIPT)],
pty=True,
)
lines = stdout.split("\n")
info['series'] = lines[0].strip()
info['arch'] = normalize_arch(lines[1].strip())
memKb = re.split(r'\s+', lines[2])[1]
# Convert megabytes -> kilobytes
info['mem'] = round(int(memKb) / 1024)
# Detect available CPUs
        recorded = {}
        physical_id = ""
        for line in lines[3:]:
            # Remember which physical CPU we are in, so that each
            # socket's core count is only recorded once.
            if line.find("physical id") == 0:
                physical_id = line.split(":")[1].strip()
            elif line.find("cpu cores") == 0:
                cores = line.split(":")[1].strip()
                if physical_id not in recorded.keys():
                    info['cpu-cores'] += cores
                    recorded[physical_id] = True
return info
def provision_machine(self):
"""Perform the initial provisioning of the target machine.
        :return: client.AddMachineParams: Parameters describing the machine
:raises: :class:`paramiko.ssh_exception.AuthenticationException`
if the upload fails
"""
params = client.AddMachineParams()
if self._init_ubuntu_user():
try:
ssh = self._get_ssh_client(
self.host,
self.user,
self.private_key_path
)
hw = self._detect_hardware_and_os(ssh)
params.series = hw['series']
params.instance_id = "manual:{}".format(self.host)
params.nonce = "manual:{}:{}".format(
self.host,
str(uuid.uuid4()), # a nop for Juju w/manual machines
)
params.hardware_characteristics = {
'arch': hw['arch'],
'mem': int(hw['mem']),
'cpu-cores': int(hw['cpu-cores']),
}
params.addresses = [{
'value': self.host,
'type': 'ipv4',
'scope': 'public',
}]
except paramiko.ssh_exception.AuthenticationException as e:
raise e
finally:
ssh.close()
return params
async def install_agent(self, connection, nonce, machine_id):
"""
:param object connection: Connection to Juju API
:param str nonce: The nonce machine specification
:param str machine_id: The id assigned to the machine
:return: bool: If the initialization was successful
"""
# The path where the Juju agent should be installed.
data_dir = "/var/lib/juju"
# Disabling this prevents `apt-get update` from running initially, so
# charms will fail to deploy
disable_package_commands = False
client_facade = client.ClientFacade.from_connection(connection)
results = await client_facade.ProvisioningScript(
data_dir=data_dir,
disable_package_commands=disable_package_commands,
machine_id=machine_id,
nonce=nonce,
)
self._run_configure_script(results.script)
def _run_configure_script(self, script):
"""Run the script to install the Juju agent on the target machine.
:param str script: The script returned by the ProvisioningScript API
:raises: :class:`paramiko.ssh_exception.AuthenticationException`
if the upload fails
"""
_, tmpFile = tempfile.mkstemp()
with open(tmpFile, 'w') as f:
f.write(script)
try:
# get ssh client
ssh = self._get_ssh_client(
self.host,
"ubuntu",
self.private_key_path,
)
# copy the local copy of the script to the remote machine
sftp = paramiko.SFTPClient.from_transport(ssh.get_transport())
sftp.put(
tmpFile,
tmpFile,
)
# run the provisioning script
stdout, stderr = self._run_command(
ssh,
"sudo /bin/bash {}".format(tmpFile),
)
except paramiko.ssh_exception.AuthenticationException as e:
raise e
finally:
os.remove(tmpFile)
ssh.close()
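# Hedged usage sketch (not part of the original module; host, user and key
# path are placeholders):
#
#     provisioner = SSHProvisioner(
#         user="ubuntu",
#         host="203.0.113.10",
#         private_key_path="/home/me/.ssh/id_rsa",
#     )
#     params = provisioner.provision_machine()
#     # ...register `params` through the Juju API, then:
#     # await provisioner.install_agent(connection, params.nonce, machine_id)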
|
apache-2.0
| -8,809,007,503,847,249,000
| 30.761236
| 79
| 0.533386
| false
|
BoPeng/simuPOP
|
docs/mutatorVSP.py
|
1
|
3347
|
#!/usr/bin/env python
#
# $File: mutatorVSP.py $
#
# This file is part of simuPOP, a forward-time population genetics
# simulation environment. Please visit http://simupop.sourceforge.net
# for details.
#
# Copyright (C) 2004 - 2010 Bo Peng (bpeng@mdanderson.org)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script is an example in the simuPOP user's guide. Please refer to
# the user's guide (http://simupop.sourceforge.net/manual) for a detailed
# description of this example.
#
import simuPOP as sim
def fragileX(geno):
    '''A disease model where an individual has an increased risk of
    being affected if the number of tandem repeats exceeds 50.
'''
# Alleles A1, A2.
maxRep = max(geno)
if maxRep < 50:
return 0
else:
# individuals with allele >= 70 will surely be affected
return min(1, (maxRep - 50)*0.05)
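# Sanity-check values for the model above (illustrative comment only):
#     fragileX([30, 45])  -> 0     (both repeat counts below 50)
#     fragileX([60, 40])  -> 0.5   ((60 - 50) * 0.05)
#     fragileX([75, 10])  -> 1     (capped; certain from 70 repeats upward)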
def avgAllele(pop):
    'Get average allele by affection status.'
sim.stat(pop, alleleFreq=(0,1), subPops=[(0,0), (0,1)],
numOfAffected=True, vars=['alleleNum', 'alleleNum_sp'])
avg = []
for alleleNum in [\
pop.dvars((0,0)).alleleNum[0], # first locus, unaffected
pop.dvars((0,1)).alleleNum[0], # first locus, affected
pop.dvars().alleleNum[1], # second locus, overall
]:
alleleSum = numAllele = 0
for idx,cnt in enumerate(alleleNum):
alleleSum += idx * cnt
numAllele += cnt
if numAllele == 0:
avg.append(0)
else:
avg.append(alleleSum * 1.0 /numAllele)
# unaffected, affected, loc2
pop.dvars().avgAllele = avg
return True
pop = sim.Population(10000, loci=[1, 1])
pop.setVirtualSplitter(sim.AffectionSplitter())
pop.evolve(
initOps=[
sim.InitSex(),
sim.InitGenotype(genotype=[50, 50])
],
matingScheme=sim.RandomMating(),
postOps=[
        # determine affection status for each offspring (duringMating)
sim.PyPenetrance(func=fragileX, loci=0),
        # second locus is unrelated to affection status; rate is high to save time
sim.StepwiseMutator(rates=1e-3, loci=1),
# unaffected offspring, mutation rate is high to save some time
sim.StepwiseMutator(rates=1e-3, loci=0, subPops=[(0, 0)]),
# affected offspring have high probability of mutating upward
sim.StepwiseMutator(rates=1e-2, loci=0, subPops=[(0, 1)],
incProb=0.7, mutStep=3),
# number of affected
sim.PyOperator(func=avgAllele, step=20),
sim.PyEval(r"'Gen: %3d #Aff: %d AvgRepeat: %.2f (unaff), %.2f (aff), %.2f (unrelated)\n'"
+ " % (gen, numOfAffected, avgAllele[0], avgAllele[1], avgAllele[2])",
step=20),
],
gen = 101
)
|
gpl-2.0
| 4,371,186,255,659,703,300
| 35.380435
| 97
| 0.647445
| false
|
glennmatthews/cot
|
COT/vm_description/ovf/hardware.py
|
1
|
24156
|
#!/usr/bin/env python
#
# hardware.py - OVFHardware class
#
# June 2016, Glenn F. Matthews
# Copyright (c) 2013-2016, 2019 the COT project developers.
# See the COPYRIGHT.txt file at the top-level directory of this distribution
# and at https://github.com/glennmatthews/cot/blob/master/COPYRIGHT.txt.
#
# This file is part of the Common OVF Tool (COT) project.
# It is subject to the license terms in the LICENSE.txt file found in the
# top-level directory of this distribution and at
# https://github.com/glennmatthews/cot/blob/master/LICENSE.txt. No part
# of COT, including this file, may be copied, modified, propagated, or
# distributed except according to the terms contained in the LICENSE.txt file.
"""Representation of OVF hardware definitions.
**Classes and Exceptions**
.. autosummary::
:nosignatures:
OVFHardware
OVFHardwareDataError
"""
import copy
import logging
from COT.data_validation import natural_sort
from COT.xml_file import XML
from .item import OVFItem, OVFItemDataError
logger = logging.getLogger(__name__)
class OVFHardwareDataError(Exception):
"""The input data used to construct an :class:`OVFHardware` is not sane."""
class OVFHardware(object):
"""Helper class for :class:`~COT.vm_description.ovf.ovf.OVF`.
Represents all hardware items defined by this OVF;
i.e., the contents of all Items in the VirtualHardwareSection.
Fundamentally it's just a dict of
:class:`~COT.vm_description.ovf.item.OVFItem` objects
with a bunch of helper methods.
"""
def __init__(self, ovf):
"""Construct an OVFHardware object describing all Items in the OVF.
Args:
ovf (OVF): OVF instance to extract hardware information from.
Raises:
OVFHardwareDataError: if any data errors are seen
"""
self.ovf = ovf
self.item_dict = {}
valid_profiles = set(ovf.config_profiles)
item_count = 0
for item in ovf.virtual_hw_section:
namespace = ovf.namespace_for_item_tag(item.tag)
if not namespace:
continue
item_count += 1
# We index the dict by InstanceID as it's the one property of
# an Item that uniquely identifies this set of hardware items.
instance = item.find(namespace + self.ovf.INSTANCE_ID).text
# Pre-sanity check - are all of the profiles associated with this
# item properly defined in the OVF DeploymentOptionSection?
item_profiles = set(item.get(self.ovf.ITEM_CONFIG, "").split())
unknown_profiles = item_profiles - valid_profiles
if unknown_profiles:
raise OVFHardwareDataError("Unknown profile(s) {0} for "
"Item instance {1}"
.format(unknown_profiles, instance))
if instance not in self.item_dict:
self.item_dict[instance] = OVFItem(self.ovf, item)
else:
try:
self.item_dict[instance].add_item(item)
except OVFItemDataError as exc:
logger.debug(exc)
# Mask away the nitty-gritty details from our caller
raise OVFHardwareDataError("Data conflict for instance {0}"
.format(instance))
logger.debug(
"OVF contains %s hardware Item elements describing %s "
"unique devices", item_count, len(self.item_dict))
# Treat the current state as golden:
for ovfitem in self.item_dict.values():
ovfitem.modified = False
def update_xml(self):
"""Regenerate all Items under the VirtualHardwareSection, if needed.
Will do nothing if no Items have been changed.
"""
modified = False
if len(self.item_dict) != len(XML.find_all_children(
self.ovf.virtual_hw_section,
set([self.ovf.ITEM, self.ovf.STORAGE_ITEM,
self.ovf.ETHERNET_PORT_ITEM]))):
modified = True
else:
for ovfitem in self.item_dict.values():
if ovfitem.modified:
modified = True
break
if not modified:
logger.verbose("No changes to hardware definition, "
"so no XML update is required")
return
# Delete the existing Items:
delete_count = 0
for item in list(self.ovf.virtual_hw_section):
if (item.tag == self.ovf.ITEM or
item.tag == self.ovf.STORAGE_ITEM or
item.tag == self.ovf.ETHERNET_PORT_ITEM):
self.ovf.virtual_hw_section.remove(item)
delete_count += 1
logger.debug("Cleared %d existing items from VirtualHWSection",
delete_count)
# Generate the new XML Items, in appropriately sorted order by Instance
ordering = [self.ovf.INFO, self.ovf.SYSTEM, self.ovf.ITEM]
for instance in natural_sort(self.item_dict):
logger.debug("Writing Item(s) with InstanceID %s", instance)
ovfitem = self.item_dict[instance]
new_items = ovfitem.generate_items()
logger.spam("Generated %d items", len(new_items))
for item in new_items:
XML.add_child(self.ovf.virtual_hw_section, item, ordering)
logger.verbose("Updated XML VirtualHardwareSection, now contains %d "
"Items representing %d devices",
len(self.ovf.virtual_hw_section.findall(self.ovf.ITEM)),
len(self.item_dict))
def find_unused_instance_id(self, start=1):
"""Find the first available ``InstanceID`` number.
Args:
start (int): First InstanceID value to consider (disregarding all
lower InstanceIDs, even if available).
Returns:
str: An instance ID that is not yet in use.
"""
instance = int(start)
while str(instance) in self.item_dict.keys():
instance += 1
logger.debug("Found unused InstanceID %d", instance)
return str(instance)
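    # Illustrative behaviour (comment added for clarity): if instances "1",
    # "2" and "4" are in use, find_unused_instance_id() returns "3", while
    # find_unused_instance_id(start=4) skips ahead and returns "5".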
def new_item(self, resource_type, profile_list=None):
"""Create a new OVFItem of the given type.
Args:
resource_type (str): String such as 'cpu' or 'harddisk' - used as
a key to
:data:`~COT.vm_description.ovf.name_helper.OVFNameHelper1.RES_MAP`
profile_list (list): Profiles the new item should belong to
Returns:
tuple: ``(instance_id, ovfitem)``
"""
instance = self.find_unused_instance_id()
ovfitem = OVFItem(self.ovf)
ovfitem.set_property(self.ovf.INSTANCE_ID, instance, profile_list)
ovfitem.set_property(self.ovf.RESOURCE_TYPE,
self.ovf.RES_MAP[resource_type],
profile_list)
# ovftool freaks out if we leave out the ElementName on an Item,
# so provide a simple default value.
ovfitem.set_property(self.ovf.ELEMENT_NAME, resource_type,
profile_list)
self.item_dict[instance] = ovfitem
ovfitem.modified = True
logger.info("Created new %s under profile(s) %s, InstanceID is %s",
resource_type, profile_list, instance)
return (instance, ovfitem)
def delete_item(self, item):
"""Delete the given Item from the hardware.
Args:
item (OVFItem): Item to delete
"""
instance = item.get_value(self.ovf.INSTANCE_ID)
if self.item_dict[instance] == item:
del self.item_dict[instance]
# TODO: error handling - currently a no-op if item not in item_dict
def clone_item(self, parent_item, profile_list):
"""Clone an OVFItem to create a new instance.
Args:
parent_item (OVFItem): Instance to clone from
profile_list (list): List of profiles to clone into
Returns:
tuple: ``(instance_id, ovfitem)``
"""
instance = self.find_unused_instance_id(start=parent_item.instance_id)
logger.spam("Cloning existing Item %s with new instance ID %s",
parent_item, instance)
ovfitem = copy.deepcopy(parent_item)
# Delete any profiles from the parent that we don't need now,
# otherwise we'll get an error when trying to set the instance ID
# on our clone due to self-inconsistency (#64).
for profile in self.ovf.config_profiles:
if ovfitem.has_profile(profile) and profile not in profile_list:
ovfitem.remove_profile(profile)
ovfitem.set_property(self.ovf.INSTANCE_ID, instance, profile_list)
ovfitem.modified = True
self.item_dict[instance] = ovfitem
logger.spam("Added clone of %s under %s, instance is %s",
parent_item, profile_list, instance)
return (instance, ovfitem)
def item_match(self, item, resource_type, properties, profile_list):
"""Check whether the given item matches the given filters.
Args:
item (OVFItem): Item to validate
resource_type (str): Resource type string like 'scsi' or 'serial'
properties (dict): Properties and their values to match
profile_list (list): List of profiles to filter on
Returns:
bool: True if the item matches all filters, False if not.
"""
if resource_type and (self.ovf.RES_MAP[resource_type] !=
item.get_value(self.ovf.RESOURCE_TYPE)):
return False
if profile_list:
for profile in profile_list:
if not item.has_profile(profile):
return False
for (prop, value) in properties.items():
if item.get_value(prop) != value:
return False
return True
def find_all_items(self, resource_type=None, properties=None,
profile_list=None):
"""Find all items matching the given type, properties, and profiles.
Args:
resource_type (str): Resource type string like 'scsi' or 'serial'
properties (dict): Properties and their values to match
profile_list (list): List of profiles to filter on
Returns:
list: Matching OVFItem instances
"""
items = [self.item_dict[instance] for instance in
natural_sort(self.item_dict)]
filtered_items = []
if properties is None:
properties = {}
for item in items:
if self.item_match(item, resource_type, properties, profile_list):
filtered_items.append(item)
logger.spam("Found %s Items of type %s with properties %s and"
" profiles %s", len(filtered_items), resource_type,
properties, profile_list)
return filtered_items
def find_item(self, resource_type=None, properties=None, profile=None):
"""Find the only OVFItem of the given :attr:`resource_type`.
Args:
resource_type (str): Resource type string like 'scsi' or 'serial'
properties (dict): Properties and their values to match
profile (str): Single profile ID to search within
Returns:
OVFItem: Matching instance, or None
Raises:
LookupError: if more than one such Item exists.
"""
matches = self.find_all_items(resource_type, properties, [profile])
if len(matches) > 1:
raise LookupError(
"Found multiple matching '{0}' Items (instances {1})"
.format(resource_type, [m.instance_id for m in matches]))
elif len(matches) == 0:
return None
else:
return matches[0]
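    # Usage sketch (illustrative comment; `hw` and the profile id are
    # hypothetical):
    #
    #     item = hw.find_item(resource_type='scsi', profile='2CPU-2GB')
    #     if item is not None:
    #         print(item.get_value(hw.ovf.INSTANCE_ID))
    #
    # find_item raises LookupError if more than one Item matches.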
def get_item_count(self, resource_type, profile):
"""Get the number of Items of the given type for the given profile.
Wrapper for :meth:`get_item_count_per_profile`.
Args:
resource_type (str): Resource type string like 'scsi' or 'serial'
profile (str): Single profile identifier string to look up.
Returns:
int: Number of items of this type in this profile.
"""
return (self.get_item_count_per_profile(resource_type, [profile])
[profile])
def get_item_count_per_profile(self, resource_type, profile_list):
"""Get the number of Items of the given type per profile.
Items present under "no profile" will be counted against
the total for each profile.
Args:
resource_type (str): Resource type string like 'scsi' or 'serial'
profile_list (list): List of profiles to filter on
(default: apply across all profiles)
Returns:
dict: mapping profile strings to the number of items under each
profile.
"""
count_dict = {}
if not profile_list:
# Get the count under all profiles
profile_list = self.ovf.config_profiles + [None]
for profile in profile_list:
count_dict[profile] = 0
for ovfitem in self.find_all_items(resource_type):
for profile in profile_list:
if ovfitem.has_profile(profile):
count_dict[profile] += 1
for (profile, count) in count_dict.items():
logger.spam("Profile '%s' has %s %s Item(s)",
profile, count, resource_type)
return count_dict
def _update_existing_item_profiles(self, resource_type,
count, profile_list):
"""Change profile membership of existing items as needed.
Helper method for :meth:`set_item_count_per_profile`.
Args:
resource_type (str): 'cpu', 'harddisk', etc.
count (int): Desired number of items
profile_list (list): List of profiles to filter on
(default: apply across all profiles)
Returns:
tuple: (count_dict, items_to_add, last_item)
"""
count_dict = self.get_item_count_per_profile(resource_type,
profile_list)
items_seen = dict.fromkeys(profile_list, 0)
last_item = None
# First, iterate over existing Items.
# Once we've seen "count" items under a profile, remove all subsequent
# items from this profile.
# If we don't have enough items under a profile, add any items found
# under other profiles to this profile as well.
for ovfitem in self.find_all_items(resource_type):
last_item = ovfitem
for profile in profile_list:
if ovfitem.has_profile(profile):
if items_seen[profile] >= count:
# Too many items - remove this one!
ovfitem.remove_profile(profile)
else:
items_seen[profile] += 1
else:
if count_dict[profile] < count:
# Add this profile to this Item
ovfitem.add_profile(profile)
count_dict[profile] += 1
items_seen[profile] += 1
# How many new Items do we need to create in total?
items_to_add = 0
for profile in profile_list:
delta = count - items_seen[profile]
if delta > items_to_add:
items_to_add = delta
return count_dict, items_to_add, last_item
def _update_cloned_item(self, new_item, new_item_profiles, item_count):
"""Update a cloned item to make it distinct from its parent.
Helper method for :meth:`set_item_count_per_profile`.
Args:
new_item (OVFItem): Newly cloned Item
new_item_profiles (list): Profiles new_item should belong to
item_count (int): How many Items of this type (including this
item) now exist. Used with
:meth:`COT.platform.Platform.guess_nic_name`
Returns:
            OVFItem: Updated :attr:`new_item`
Raises:
NotImplementedError: No support yet for updating ``Address``
NotImplementedError: If updating ``AddressOnParent`` but the
prior value varies across config profiles.
NotImplementedError: if ``AddressOnParent`` is not an integer.
"""
resource_type = new_item.hardware_type
address = new_item.get(self.ovf.ADDRESS)
if address:
raise NotImplementedError("Don't know how to ensure a unique "
"Address value when cloning an Item "
"of type {0}".format(resource_type))
address_on_parent = new_item.get(self.ovf.ADDRESS_ON_PARENT)
if address_on_parent:
address_list = new_item.get_all_values(self.ovf.ADDRESS_ON_PARENT)
if len(address_list) > 1:
raise NotImplementedError("AddressOnParent is not common "
"across all profiles but has "
"multiple values {0}. COT can't "
"handle this yet."
.format(address_list))
address_on_parent = address_list[0]
# Currently we only handle integer addresses
try:
address_on_parent = int(address_on_parent)
address_on_parent += 1
new_item.set_property(self.ovf.ADDRESS_ON_PARENT,
str(address_on_parent),
new_item_profiles)
except ValueError:
raise NotImplementedError("Don't know how to ensure a "
"unique AddressOnParent value "
"given base value '{0}'"
.format(address_on_parent))
if resource_type == 'ethernet':
# Update ElementName to reflect the NIC number
element_name = self.ovf.platform.guess_nic_name(item_count)
new_item.set_property(self.ovf.ELEMENT_NAME, element_name,
new_item_profiles)
return new_item
def set_item_count_per_profile(self, resource_type, count, profile_list):
"""Set the number of items of a given type under the given profile(s).
If the new count is greater than the current count under this
profile, then additional instances that already exist under
another profile will be added to this profile, starting with
the lowest-sequence instance not already present, and only as
a last resort will new instances be created.
If the new count is less than the current count under this profile,
then the highest-numbered instances will be removed preferentially.
Args:
resource_type (str): 'cpu', 'harddisk', etc.
count (int): Desired number of items
profile_list (list): List of profiles to filter on
(default: apply across all profiles)
"""
if not profile_list:
# Set the profile list for all profiles, including the default
profile_list = self.ovf.config_profiles + [None]
count_dict, items_to_add, last_item = \
self._update_existing_item_profiles(
resource_type, count, profile_list)
logger.debug("Creating %d new items", items_to_add)
while items_to_add > 0:
# Which profiles does this Item need to belong to?
new_item_profiles = []
for profile in profile_list:
if count_dict[profile] < count:
new_item_profiles.append(profile)
count_dict[profile] += 1
if last_item is None:
logger.notice("No existing items of type %s found. "
"Will create new %s from scratch.",
resource_type, resource_type)
(_, new_item) = self.new_item(resource_type, new_item_profiles)
else:
(_, new_item) = self.clone_item(last_item, new_item_profiles)
# Check/update other properties of the clone that should be unique:
# TODO - we assume that the count is the same across profiles
new_item = self._update_cloned_item(
new_item, new_item_profiles, count_dict[new_item_profiles[0]])
last_item = new_item
items_to_add -= 1
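    # A minimal usage sketch (the profile name is hypothetical): ensure
    # exactly four NICs exist under the given profile, reusing Items from
    # other profiles first and cloning/creating new ones only as needed:
    #
    #     hw.set_item_count_per_profile('ethernet', 4, ['2CPU-2GB-4NIC'])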
def set_value_for_all_items(self, resource_type, prop_name, new_value,
profile_list, create_new=False):
"""Set a property to the given value for all items of the given type.
If no items of the given type exist, will create a new ``Item`` if
:attr:`create_new` is set to ``True``; otherwise will log a warning
and do nothing.
Args:
resource_type (str): Resource type such as 'cpu' or 'harddisk'
prop_name (str): Property name to update
new_value (str): New value to set the property to
profile_list (list): List of profiles to filter on
(default: apply across all profiles)
create_new (bool): Whether to create a new entry if no items
of this :attr:`resource_type` presently exist.
"""
ovfitem_list = self.find_all_items(resource_type)
if not ovfitem_list:
if not create_new:
logger.warning("No items of type %s found. Nothing to do.",
resource_type)
return
logger.notice("No existing items of type %s found. "
"Will create new %s from scratch.",
resource_type, resource_type)
(_, ovfitem) = self.new_item(resource_type, profile_list)
ovfitem_list = [ovfitem]
for ovfitem in ovfitem_list:
ovfitem.set_property(prop_name, new_value, profile_list)
logger.debug("Updated %s %s to %s under profiles %s",
resource_type, prop_name, new_value, profile_list)
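    # A minimal usage sketch (``VIRTUAL_QUANTITY`` is assumed to be the OVF
    # property name used for CPU counts in this codebase): set every profile
    # to 8 CPUs, creating a CPU Item from scratch if none exists yet:
    #
    #     hw.set_value_for_all_items('cpu', hw.ovf.VIRTUAL_QUANTITY, '8',
    #                                profile_list=None, create_new=True)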
def set_item_values_per_profile(self, resource_type, prop_name, value_list,
profile_list, default=None):
"""Set value(s) for a property of multiple items of a type.
Args:
resource_type (str): Device type such as 'harddisk' or 'cpu'
prop_name (str): Property name to update
value_list (list): List of values to set (one value per item of the
given :attr:`resource_type`)
profile_list (list): List of profiles to filter on
(default: apply across all profiles)
default (str): If there are more matching items than entries in
:attr:`value_list`, set extra items to this value
"""
if profile_list is None:
profile_list = self.ovf.config_profiles + [None]
for ovfitem in self.find_all_items(resource_type):
if len(value_list):
new_value = value_list.pop(0)
else:
new_value = default
for profile in profile_list:
if ovfitem.has_profile(profile):
ovfitem.set_property(prop_name, new_value, [profile])
logger.info("Updated %s property %s to %s under %s",
resource_type, prop_name, new_value, profile_list)
if len(value_list):
logger.warning("After scanning all known %s Items, not all "
"%s values were used - leftover %s",
resource_type, prop_name, value_list)
|
mit
| -7,742,814,765,775,915,000
| 41.603175
| 79
| 0.573812
| false
|
HiSPARC/station-software
|
user/python/Lib/lib-tk/ttk.py
|
2
|
56173
|
"""Ttk wrapper.
This module provides classes to allow using Tk themed widget set.
Ttk is based on a revised and enhanced version of
TIP #48 (http://tip.tcl.tk/48) specified style engine.
Its basic idea is to separate, to the extent possible, the code
implementing a widget's behavior from the code implementing its
appearance. Widget class bindings are primarily responsible for
maintaining the widget state and invoking callbacks, all aspects
of the widgets appearance lies at Themes.
"""
__version__ = "0.3.1"
__author__ = "Guilherme Polo <ggpolo@gmail.com>"
__all__ = ["Button", "Checkbutton", "Combobox", "Entry", "Frame", "Label",
"Labelframe", "LabelFrame", "Menubutton", "Notebook", "Panedwindow",
"PanedWindow", "Progressbar", "Radiobutton", "Scale", "Scrollbar",
"Separator", "Sizegrip", "Style", "Treeview",
# Extensions
"LabeledScale", "OptionMenu",
# functions
"tclobjs_to_py", "setup_master"]
import Tkinter
from Tkinter import _flatten, _join, _stringify, _splitdict
# Verify if Tk is new enough to not need the Tile package
_REQUIRE_TILE = True if Tkinter.TkVersion < 8.5 else False
def _load_tile(master):
if _REQUIRE_TILE:
import os
tilelib = os.environ.get('TILE_LIBRARY')
if tilelib:
# append custom tile path to the list of directories that
# Tcl uses when attempting to resolve packages with the package
# command
master.tk.eval(
'global auto_path; '
'lappend auto_path {%s}' % tilelib)
master.tk.eval('package require tile') # TclError may be raised here
master._tile_loaded = True
def _format_optvalue(value, script=False):
"""Internal function."""
if script:
# if caller passes a Tcl script to tk.call, all the values need to
# be grouped into words (arguments to a command in Tcl dialect)
value = _stringify(value)
elif isinstance(value, (list, tuple)):
value = _join(value)
return value
def _format_optdict(optdict, script=False, ignore=None):
"""Formats optdict to a tuple to pass it to tk.call.
E.g. (script=False):
{'foreground': 'blue', 'padding': [1, 2, 3, 4]} returns:
('-foreground', 'blue', '-padding', '1 2 3 4')"""
opts = []
for opt, value in optdict.iteritems():
if not ignore or opt not in ignore:
opts.append("-%s" % opt)
if value is not None:
opts.append(_format_optvalue(value, script))
return _flatten(opts)
def _mapdict_values(items):
# each value in mapdict is expected to be a sequence, where each item
# is another sequence containing a state (or several) and a value
# E.g. (script=False):
# [('active', 'selected', 'grey'), ('focus', [1, 2, 3, 4])]
# returns:
# ['active selected', 'grey', 'focus', [1, 2, 3, 4]]
opt_val = []
for item in items:
state = item[:-1]
val = item[-1]
        # hacks for backward compatibility
state[0] # raise IndexError if empty
if len(state) == 1:
# if it is empty (something that evaluates to False), then
# format it to Tcl code to denote the "normal" state
state = state[0] or ''
else:
# group multiple states
state = ' '.join(state) # raise TypeError if not str
opt_val.append(state)
if val is not None:
opt_val.append(val)
return opt_val
def _format_mapdict(mapdict, script=False):
"""Formats mapdict to pass it to tk.call.
E.g. (script=False):
{'expand': [('active', 'selected', 'grey'), ('focus', [1, 2, 3, 4])]}
returns:
('-expand', '{active selected} grey focus {1, 2, 3, 4}')"""
opts = []
for opt, value in mapdict.iteritems():
opts.extend(("-%s" % opt,
_format_optvalue(_mapdict_values(value), script)))
return _flatten(opts)
def _format_elemcreate(etype, script=False, *args, **kw):
"""Formats args and kw according to the given element factory etype."""
spec = None
opts = ()
if etype in ("image", "vsapi"):
if etype == "image": # define an element based on an image
# first arg should be the default image name
iname = args[0]
# next args, if any, are statespec/value pairs which is almost
# a mapdict, but we just need the value
imagespec = _join(_mapdict_values(args[1:]))
spec = "%s %s" % (iname, imagespec)
else:
# define an element whose visual appearance is drawn using the
# Microsoft Visual Styles API which is responsible for the
# themed styles on Windows XP and Vista.
# Availability: Tk 8.6, Windows XP and Vista.
class_name, part_id = args[:2]
statemap = _join(_mapdict_values(args[2:]))
spec = "%s %s %s" % (class_name, part_id, statemap)
opts = _format_optdict(kw, script)
elif etype == "from": # clone an element
# it expects a themename and optionally an element to clone from,
# otherwise it will clone {} (empty element)
spec = args[0] # theme name
if len(args) > 1: # elementfrom specified
opts = (_format_optvalue(args[1], script),)
if script:
spec = '{%s}' % spec
opts = ' '.join(opts)
return spec, opts
def _format_layoutlist(layout, indent=0, indent_size=2):
"""Formats a layout list so we can pass the result to ttk::style
    layout and ttk::style settings. Note that the layout doesn't
    necessarily have to be a list.
E.g.:
[("Menubutton.background", None),
("Menubutton.button", {"children":
[("Menubutton.focus", {"children":
[("Menubutton.padding", {"children":
[("Menubutton.label", {"side": "left", "expand": 1})]
})]
})]
}),
("Menubutton.indicator", {"side": "right"})
]
returns:
Menubutton.background
Menubutton.button -children {
Menubutton.focus -children {
Menubutton.padding -children {
Menubutton.label -side left -expand 1
}
}
}
Menubutton.indicator -side right"""
script = []
for layout_elem in layout:
elem, opts = layout_elem
opts = opts or {}
fopts = ' '.join(_format_optdict(opts, True, ("children",)))
head = "%s%s%s" % (' ' * indent, elem, (" %s" % fopts) if fopts else '')
if "children" in opts:
script.append(head + " -children {")
indent += indent_size
newscript, indent = _format_layoutlist(opts['children'], indent,
indent_size)
script.append(newscript)
indent -= indent_size
script.append('%s}' % (' ' * indent))
else:
script.append(head)
return '\n'.join(script), indent
def _script_from_settings(settings):
"""Returns an appropriate script, based on settings, according to
theme_settings definition to be used by theme_settings and
theme_create."""
script = []
# a script will be generated according to settings passed, which
# will then be evaluated by Tcl
for name, opts in settings.iteritems():
# will format specific keys according to Tcl code
if opts.get('configure'): # format 'configure'
s = ' '.join(_format_optdict(opts['configure'], True))
script.append("ttk::style configure %s %s;" % (name, s))
if opts.get('map'): # format 'map'
s = ' '.join(_format_mapdict(opts['map'], True))
script.append("ttk::style map %s %s;" % (name, s))
if 'layout' in opts: # format 'layout' which may be empty
if not opts['layout']:
s = 'null' # could be any other word, but this one makes sense
else:
s, _ = _format_layoutlist(opts['layout'])
script.append("ttk::style layout %s {\n%s\n}" % (name, s))
if opts.get('element create'): # format 'element create'
eopts = opts['element create']
etype = eopts[0]
# find where args end, and where kwargs start
argc = 1 # etype was the first one
while argc < len(eopts) and not hasattr(eopts[argc], 'iteritems'):
argc += 1
elemargs = eopts[1:argc]
elemkw = eopts[argc] if argc < len(eopts) and eopts[argc] else {}
spec, opts = _format_elemcreate(etype, True, *elemargs, **elemkw)
script.append("ttk::style element create %s %s %s %s" % (
name, etype, spec, opts))
return '\n'.join(script)
def _list_from_statespec(stuple):
"""Construct a list from the given statespec tuple according to the
accepted statespec accepted by _format_mapdict."""
nval = []
for val in stuple:
typename = getattr(val, 'typename', None)
if typename is None:
nval.append(val)
else: # this is a Tcl object
val = str(val)
if typename == 'StateSpec':
val = val.split()
nval.append(val)
it = iter(nval)
return [_flatten(spec) for spec in zip(it, it)]
def _list_from_layouttuple(tk, ltuple):
"""Construct a list from the tuple returned by ttk::layout, this is
somewhat the reverse of _format_layoutlist."""
ltuple = tk.splitlist(ltuple)
res = []
indx = 0
while indx < len(ltuple):
name = ltuple[indx]
opts = {}
res.append((name, opts))
indx += 1
while indx < len(ltuple): # grab name's options
opt, val = ltuple[indx:indx + 2]
if not opt.startswith('-'): # found next name
break
opt = opt[1:] # remove the '-' from the option
indx += 2
if opt == 'children':
val = _list_from_layouttuple(tk, val)
opts[opt] = val
return res
def _val_or_dict(tk, options, *args):
"""Format options then call Tk command with args and options and return
the appropriate result.
If no option is specified, a dict is returned. If an option is
specified with the None value, the value for that option is returned.
Otherwise, the function just sets the passed options and the caller
shouldn't be expecting a return value anyway."""
options = _format_optdict(options)
res = tk.call(*(args + options))
if len(options) % 2: # option specified without a value, return its value
return res
return _splitdict(tk, res, conv=_tclobj_to_py)
def _convert_stringval(value):
"""Converts a value to, hopefully, a more appropriate Python object."""
value = unicode(value)
try:
value = int(value)
except (ValueError, TypeError):
pass
return value
def _to_number(x):
if isinstance(x, str):
if '.' in x:
x = float(x)
else:
x = int(x)
return x
def _tclobj_to_py(val):
"""Return value converted from Tcl object to Python object."""
if val and hasattr(val, '__len__') and not isinstance(val, basestring):
if getattr(val[0], 'typename', None) == 'StateSpec':
val = _list_from_statespec(val)
else:
val = map(_convert_stringval, val)
elif hasattr(val, 'typename'): # some other (single) Tcl object
val = _convert_stringval(val)
return val
def tclobjs_to_py(adict):
"""Returns adict with its values converted from Tcl objects to Python
objects."""
for opt, val in adict.items():
adict[opt] = _tclobj_to_py(val)
return adict
def setup_master(master=None):
"""If master is not None, itself is returned. If master is None,
the default master is returned if there is one, otherwise a new
master is created and returned.
If it is not allowed to use the default root and master is None,
RuntimeError is raised."""
if master is None:
if Tkinter._support_default_root:
master = Tkinter._default_root or Tkinter.Tk()
else:
raise RuntimeError(
"No master specified and Tkinter is "
"configured to not support default root")
return master
class Style(object):
"""Manipulate style database."""
_name = "ttk::style"
def __init__(self, master=None):
master = setup_master(master)
if not getattr(master, '_tile_loaded', False):
# Load tile now, if needed
_load_tile(master)
self.master = master
self.tk = self.master.tk
def configure(self, style, query_opt=None, **kw):
"""Query or sets the default value of the specified option(s) in
style.
Each key in kw is an option and each value is either a string or
a sequence identifying the value for that option."""
if query_opt is not None:
kw[query_opt] = None
return _val_or_dict(self.tk, kw, self._name, "configure", style)
def map(self, style, query_opt=None, **kw):
"""Query or sets dynamic values of the specified option(s) in
style.
Each key in kw is an option and each value should be a list or a
tuple (usually) containing statespecs grouped in tuples, or list,
        or something else of your preference. A statespec is composed of
one or more states and then a value."""
if query_opt is not None:
return _list_from_statespec(self.tk.splitlist(
self.tk.call(self._name, "map", style, '-%s' % query_opt)))
return _splitdict(
self.tk,
self.tk.call(self._name, "map", style, *_format_mapdict(kw)),
conv=_tclobj_to_py)
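    # A minimal usage sketch: configure per-state colors, then query them
    # back (the returned statespecs are flattened tuples):
    #
    #     style = Style()
    #     style.map("TButton",
    #               foreground=[("pressed", "red"), ("active", "blue")])
    #     style.map("TButton", query_opt="foreground")
    #     # -> [('pressed', 'red'), ('active', 'blue')]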
def lookup(self, style, option, state=None, default=None):
"""Returns the value specified for option in style.
If state is specified it is expected to be a sequence of one
or more states. If the default argument is set, it is used as
a fallback value in case no specification for option is found."""
state = ' '.join(state) if state else ''
return self.tk.call(self._name, "lookup", style, '-%s' % option,
state, default)
def layout(self, style, layoutspec=None):
"""Define the widget layout for given style. If layoutspec is
omitted, return the layout specification for given style.
layoutspec is expected to be a list or an object different than
None that evaluates to False if you want to "turn off" that style.
If it is a list (or tuple, or something else), each item should be
a tuple where the first item is the layout name and the second item
should have the format described below:
LAYOUTS
        A layout can contain the value None, if it takes no options, or
a dict of options specifying how to arrange the element.
The layout mechanism uses a simplified version of the pack
geometry manager: given an initial cavity, each element is
allocated a parcel. Valid options/values are:
side: whichside
Specifies which side of the cavity to place the
element; one of top, right, bottom or left. If
omitted, the element occupies the entire cavity.
sticky: nswe
Specifies where the element is placed inside its
allocated parcel.
children: [sublayout... ]
Specifies a list of elements to place inside the
element. Each element is a tuple (or other sequence)
where the first item is the layout name, and the other
is a LAYOUT."""
lspec = None
if layoutspec:
lspec = _format_layoutlist(layoutspec)[0]
elif layoutspec is not None: # will disable the layout ({}, '', etc)
lspec = "null" # could be any other word, but this may make sense
# when calling layout(style) later
return _list_from_layouttuple(self.tk,
self.tk.call(self._name, "layout", style, lspec))
def element_create(self, elementname, etype, *args, **kw):
"""Create a new element in the current theme of given etype."""
spec, opts = _format_elemcreate(etype, False, *args, **kw)
self.tk.call(self._name, "element", "create", elementname, etype,
spec, *opts)
def element_names(self):
"""Returns the list of elements defined in the current theme."""
return self.tk.splitlist(self.tk.call(self._name, "element", "names"))
def element_options(self, elementname):
"""Return the list of elementname's options."""
return self.tk.splitlist(self.tk.call(self._name, "element", "options", elementname))
def theme_create(self, themename, parent=None, settings=None):
"""Creates a new theme.
It is an error if themename already exists. If parent is
specified, the new theme will inherit styles, elements and
layouts from the specified parent theme. If settings are present,
they are expected to have the same syntax used for theme_settings."""
script = _script_from_settings(settings) if settings else ''
if parent:
self.tk.call(self._name, "theme", "create", themename,
"-parent", parent, "-settings", script)
else:
self.tk.call(self._name, "theme", "create", themename,
"-settings", script)
def theme_settings(self, themename, settings):
"""Temporarily sets the current theme to themename, apply specified
settings and then restore the previous theme.
Each key in settings is a style and each value may contain the
keys 'configure', 'map', 'layout' and 'element create' and they
are expected to have the same format as specified by the methods
configure, map, layout and element_create respectively."""
script = _script_from_settings(settings)
self.tk.call(self._name, "theme", "settings", themename, script)
def theme_names(self):
"""Returns a list of all known themes."""
return self.tk.splitlist(self.tk.call(self._name, "theme", "names"))
def theme_use(self, themename=None):
"""If themename is None, returns the theme in use, otherwise, set
the current theme to themename, refreshes all widgets and emits
a <<ThemeChanged>> event."""
if themename is None:
# Starting on Tk 8.6, checking this global is no longer needed
# since it allows doing self.tk.call(self._name, "theme", "use")
return self.tk.eval("return $ttk::currentTheme")
# using "ttk::setTheme" instead of "ttk::style theme use" causes
# the variable currentTheme to be updated, also, ttk::setTheme calls
# "ttk::style theme use" in order to change theme.
self.tk.call("ttk::setTheme", themename)
class Widget(Tkinter.Widget):
"""Base class for Tk themed widgets."""
def __init__(self, master, widgetname, kw=None):
"""Constructs a Ttk Widget with the parent master.
STANDARD OPTIONS
class, cursor, takefocus, style
SCROLLABLE WIDGET OPTIONS
xscrollcommand, yscrollcommand
LABEL WIDGET OPTIONS
text, textvariable, underline, image, compound, width
WIDGET STATES
active, disabled, focus, pressed, selected, background,
readonly, alternate, invalid
"""
master = setup_master(master)
if not getattr(master, '_tile_loaded', False):
# Load tile now, if needed
_load_tile(master)
Tkinter.Widget.__init__(self, master, widgetname, kw=kw)
def identify(self, x, y):
"""Returns the name of the element at position x, y, or the empty
string if the point does not lie within any element.
x and y are pixel coordinates relative to the widget."""
return self.tk.call(self._w, "identify", x, y)
def instate(self, statespec, callback=None, *args, **kw):
"""Test the widget's state.
If callback is not specified, returns True if the widget state
matches statespec and False otherwise. If callback is specified,
then it will be invoked with *args, **kw if the widget state
matches statespec. statespec is expected to be a sequence."""
ret = self.tk.getboolean(
self.tk.call(self._w, "instate", ' '.join(statespec)))
if ret and callback:
return callback(*args, **kw)
return ret
def state(self, statespec=None):
"""Modify or inquire widget state.
Widget state is returned if statespec is None, otherwise it is
set according to the statespec flags and then a new state spec
is returned indicating which flags were changed. statespec is
expected to be a sequence."""
if statespec is not None:
statespec = ' '.join(statespec)
return self.tk.splitlist(str(self.tk.call(self._w, "state", statespec)))
class Button(Widget):
"""Ttk Button widget, displays a textual label and/or image, and
evaluates a command when pressed."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Button widget with the parent master.
STANDARD OPTIONS
class, compound, cursor, image, state, style, takefocus,
text, textvariable, underline, width
WIDGET-SPECIFIC OPTIONS
command, default, width
"""
Widget.__init__(self, master, "ttk::button", kw)
def invoke(self):
"""Invokes the command associated with the button."""
return self.tk.call(self._w, "invoke")
class Checkbutton(Widget):
"""Ttk Checkbutton widget which is either in on- or off-state."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Checkbutton widget with the parent master.
STANDARD OPTIONS
class, compound, cursor, image, state, style, takefocus,
text, textvariable, underline, width
WIDGET-SPECIFIC OPTIONS
command, offvalue, onvalue, variable
"""
Widget.__init__(self, master, "ttk::checkbutton", kw)
def invoke(self):
"""Toggles between the selected and deselected states and
invokes the associated command. If the widget is currently
selected, sets the option variable to the offvalue option
and deselects the widget; otherwise, sets the option variable
to the option onvalue.
Returns the result of the associated command."""
return self.tk.call(self._w, "invoke")
class Entry(Widget, Tkinter.Entry):
"""Ttk Entry widget displays a one-line text string and allows that
string to be edited by the user."""
def __init__(self, master=None, widget=None, **kw):
"""Constructs a Ttk Entry widget with the parent master.
STANDARD OPTIONS
class, cursor, style, takefocus, xscrollcommand
WIDGET-SPECIFIC OPTIONS
exportselection, invalidcommand, justify, show, state,
textvariable, validate, validatecommand, width
VALIDATION MODES
none, key, focus, focusin, focusout, all
"""
Widget.__init__(self, master, widget or "ttk::entry", kw)
def bbox(self, index):
"""Return a tuple of (x, y, width, height) which describes the
bounding box of the character given by index."""
return self._getints(self.tk.call(self._w, "bbox", index))
def identify(self, x, y):
"""Returns the name of the element at position x, y, or the
empty string if the coordinates are outside the window."""
return self.tk.call(self._w, "identify", x, y)
def validate(self):
"""Force revalidation, independent of the conditions specified
by the validate option. Returns False if validation fails, True
if it succeeds. Sets or clears the invalid state accordingly."""
return self.tk.getboolean(self.tk.call(self._w, "validate"))
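    # A minimal usage sketch: a digits-only entry using key validation. The
    # callback is registered as a Tcl command, and '%P' substitutes the
    # prospective value of the entry after the edit:
    #
    #     vcmd = (root.register(lambda s: s.isdigit() or s == ""), "%P")
    #     e = Entry(root, validate="key", validatecommand=vcmd)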
class Combobox(Entry):
"""Ttk Combobox widget combines a text field with a pop-down list of
values."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Combobox widget with the parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
exportselection, justify, height, postcommand, state,
textvariable, values, width
"""
Entry.__init__(self, master, "ttk::combobox", **kw)
def current(self, newindex=None):
"""If newindex is supplied, sets the combobox value to the
element at position newindex in the list of values. Otherwise,
returns the index of the current value in the list of values
or -1 if the current value does not appear in the list."""
if newindex is None:
return self.tk.getint(self.tk.call(self._w, "current"))
return self.tk.call(self._w, "current", newindex)
def set(self, value):
"""Sets the value of the combobox to value."""
self.tk.call(self._w, "set", value)
class Frame(Widget):
"""Ttk Frame widget is a container, used to group other widgets
together."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Frame with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
borderwidth, relief, padding, width, height
"""
Widget.__init__(self, master, "ttk::frame", kw)
class Label(Widget):
"""Ttk Label widget displays a textual label and/or image."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Label with parent master.
STANDARD OPTIONS
class, compound, cursor, image, style, takefocus, text,
textvariable, underline, width
WIDGET-SPECIFIC OPTIONS
anchor, background, font, foreground, justify, padding,
relief, text, wraplength
"""
Widget.__init__(self, master, "ttk::label", kw)
class Labelframe(Widget):
"""Ttk Labelframe widget is a container used to group other widgets
together. It has an optional label, which may be a plain text string
or another widget."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Labelframe with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
labelanchor, text, underline, padding, labelwidget, width,
height
"""
Widget.__init__(self, master, "ttk::labelframe", kw)
LabelFrame = Labelframe # Tkinter name compatibility
class Menubutton(Widget):
"""Ttk Menubutton widget displays a textual label and/or image, and
displays a menu when pressed."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Menubutton with parent master.
STANDARD OPTIONS
class, compound, cursor, image, state, style, takefocus,
text, textvariable, underline, width
WIDGET-SPECIFIC OPTIONS
direction, menu
"""
Widget.__init__(self, master, "ttk::menubutton", kw)
class Notebook(Widget):
"""Ttk Notebook widget manages a collection of windows and displays
a single one at a time. Each child window is associated with a tab,
which the user may select to change the currently-displayed window."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Notebook with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
height, padding, width
TAB OPTIONS
state, sticky, padding, text, image, compound, underline
TAB IDENTIFIERS (tab_id)
The tab_id argument found in several methods may take any of
the following forms:
* An integer between zero and the number of tabs
* The name of a child window
* A positional specification of the form "@x,y", which
defines the tab
* The string "current", which identifies the
currently-selected tab
* The string "end", which returns the number of tabs (only
valid for method index)
"""
Widget.__init__(self, master, "ttk::notebook", kw)
def add(self, child, **kw):
"""Adds a new tab to the notebook.
        If child is currently managed by the notebook but hidden, it is
restored to its previous position."""
self.tk.call(self._w, "add", child, *(_format_optdict(kw)))
def forget(self, tab_id):
"""Removes the tab specified by tab_id, unmaps and unmanages the
associated window."""
self.tk.call(self._w, "forget", tab_id)
def hide(self, tab_id):
"""Hides the tab specified by tab_id.
The tab will not be displayed, but the associated window remains
managed by the notebook and its configuration remembered. Hidden
tabs may be restored with the add command."""
self.tk.call(self._w, "hide", tab_id)
def identify(self, x, y):
"""Returns the name of the tab element at position x, y, or the
empty string if none."""
return self.tk.call(self._w, "identify", x, y)
def index(self, tab_id):
"""Returns the numeric index of the tab specified by tab_id, or
the total number of tabs if tab_id is the string "end"."""
return self.tk.getint(self.tk.call(self._w, "index", tab_id))
def insert(self, pos, child, **kw):
"""Inserts a pane at the specified position.
pos is either the string end, an integer index, or the name of
a managed child. If child is already managed by the notebook,
moves it to the specified position."""
self.tk.call(self._w, "insert", pos, child, *(_format_optdict(kw)))
def select(self, tab_id=None):
"""Selects the specified tab.
The associated child window will be displayed, and the
previously-selected window (if different) is unmapped. If tab_id
is omitted, returns the widget name of the currently selected
pane."""
return self.tk.call(self._w, "select", tab_id)
def tab(self, tab_id, option=None, **kw):
"""Query or modify the options of the specific tab_id.
If kw is not given, returns a dict of the tab option values. If option
is specified, returns the value of that option. Otherwise, sets the
options to the corresponding values."""
if option is not None:
kw[option] = None
return _val_or_dict(self.tk, kw, self._w, "tab", tab_id)
def tabs(self):
"""Returns a list of windows managed by the notebook."""
return self.tk.splitlist(self.tk.call(self._w, "tabs") or ())
def enable_traversal(self):
"""Enable keyboard traversal for a toplevel window containing
this notebook.
This will extend the bindings for the toplevel window containing
this notebook as follows:
Control-Tab: selects the tab following the currently selected
one
Shift-Control-Tab: selects the tab preceding the currently
selected one
Alt-K: where K is the mnemonic (underlined) character of any
tab, will select that tab.
Multiple notebooks in a single toplevel may be enabled for
traversal, including nested notebooks. However, notebook traversal
only works properly if all panes are direct children of the
notebook."""
        # The only (welcome) difference I see is about mnemonics, which
        # work after calling this method. Control-Tab and Shift-Control-Tab
        # always work (here at least).
self.tk.call("ttk::notebook::enableTraversal", self._w)
class Panedwindow(Widget, Tkinter.PanedWindow):
"""Ttk Panedwindow widget displays a number of subwindows, stacked
either vertically or horizontally."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Panedwindow with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
orient, width, height
PANE OPTIONS
weight
"""
Widget.__init__(self, master, "ttk::panedwindow", kw)
forget = Tkinter.PanedWindow.forget # overrides Pack.forget
def insert(self, pos, child, **kw):
"""Inserts a pane at the specified positions.
pos is either the string end, and integer index, or the name
of a child. If child is already managed by the paned window,
moves it to the specified position."""
self.tk.call(self._w, "insert", pos, child, *(_format_optdict(kw)))
def pane(self, pane, option=None, **kw):
"""Query or modify the options of the specified pane.
pane is either an integer index or the name of a managed subwindow.
If kw is not given, returns a dict of the pane option values. If
option is specified then the value for that option is returned.
Otherwise, sets the options to the corresponding values."""
if option is not None:
kw[option] = None
return _val_or_dict(self.tk, kw, self._w, "pane", pane)
def sashpos(self, index, newpos=None):
"""If newpos is specified, sets the position of sash number index.
May adjust the positions of adjacent sashes to ensure that
positions are monotonically increasing. Sash positions are further
constrained to be between 0 and the total size of the widget.
Returns the new position of sash number index."""
return self.tk.getint(self.tk.call(self._w, "sashpos", index, newpos))
PanedWindow = Panedwindow # Tkinter name compatibility
class Progressbar(Widget):
"""Ttk Progressbar widget shows the status of a long-running
operation. They can operate in two modes: determinate mode shows the
amount completed relative to the total amount of work to be done, and
indeterminate mode provides an animated display to let the user know
that something is happening."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Progressbar with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
orient, length, mode, maximum, value, variable, phase
"""
Widget.__init__(self, master, "ttk::progressbar", kw)
def start(self, interval=None):
"""Begin autoincrement mode: schedules a recurring timer event
that calls method step every interval milliseconds.
interval defaults to 50 milliseconds (20 steps/second) if omitted."""
self.tk.call(self._w, "start", interval)
def step(self, amount=None):
"""Increments the value option by amount.
amount defaults to 1.0 if omitted."""
self.tk.call(self._w, "step", amount)
def stop(self):
"""Stop autoincrement mode: cancels any recurring timer event
initiated by start."""
self.tk.call(self._w, "stop")
class Radiobutton(Widget):
"""Ttk Radiobutton widgets are used in groups to show or change a
set of mutually-exclusive options."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Radiobutton with parent master.
STANDARD OPTIONS
class, compound, cursor, image, state, style, takefocus,
text, textvariable, underline, width
WIDGET-SPECIFIC OPTIONS
command, value, variable
"""
Widget.__init__(self, master, "ttk::radiobutton", kw)
def invoke(self):
"""Sets the option variable to the option value, selects the
widget, and invokes the associated command.
Returns the result of the command, or an empty string if
no command is specified."""
return self.tk.call(self._w, "invoke")
class Scale(Widget, Tkinter.Scale):
"""Ttk Scale widget is typically used to control the numeric value of
a linked variable that varies uniformly over some range."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Scale with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
command, from, length, orient, to, value, variable
"""
Widget.__init__(self, master, "ttk::scale", kw)
def configure(self, cnf=None, **kw):
"""Modify or query scale options.
Setting a value for any of the "from", "from_" or "to" options
generates a <<RangeChanged>> event."""
if cnf:
kw.update(cnf)
Widget.configure(self, **kw)
if any(['from' in kw, 'from_' in kw, 'to' in kw]):
self.event_generate('<<RangeChanged>>')
def get(self, x=None, y=None):
"""Get the current value of the value option, or the value
corresponding to the coordinates x, y if they are specified.
x and y are pixel coordinates relative to the scale widget
origin."""
return self.tk.call(self._w, 'get', x, y)
class Scrollbar(Widget, Tkinter.Scrollbar):
"""Ttk Scrollbar controls the viewport of a scrollable widget."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Scrollbar with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
command, orient
"""
Widget.__init__(self, master, "ttk::scrollbar", kw)
class Separator(Widget):
"""Ttk Separator widget displays a horizontal or vertical separator
bar."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Separator with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
orient
"""
Widget.__init__(self, master, "ttk::separator", kw)
class Sizegrip(Widget):
"""Ttk Sizegrip allows the user to resize the containing toplevel
window by pressing and dragging the grip."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Sizegrip with parent master.
STANDARD OPTIONS
class, cursor, state, style, takefocus
"""
Widget.__init__(self, master, "ttk::sizegrip", kw)
class Treeview(Widget, Tkinter.XView, Tkinter.YView):
"""Ttk Treeview widget displays a hierarchical collection of items.
Each item has a textual label, an optional image, and an optional list
of data values. The data values are displayed in successive columns
after the tree label."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Treeview with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus, xscrollcommand,
yscrollcommand
WIDGET-SPECIFIC OPTIONS
columns, displaycolumns, height, padding, selectmode, show
ITEM OPTIONS
text, image, values, open, tags
TAG OPTIONS
foreground, background, font, image
"""
Widget.__init__(self, master, "ttk::treeview", kw)
def bbox(self, item, column=None):
"""Returns the bounding box (relative to the treeview widget's
window) of the specified item in the form x y width height.
If column is specified, returns the bounding box of that cell.
If the item is not visible (i.e., if it is a descendant of a
closed item or is scrolled offscreen), returns an empty string."""
return self._getints(self.tk.call(self._w, "bbox", item, column)) or ''
def get_children(self, item=None):
"""Returns a tuple of children belonging to item.
If item is not specified, returns root children."""
return self.tk.splitlist(
self.tk.call(self._w, "children", item or '') or ())
def set_children(self, item, *newchildren):
"""Replaces item's child with newchildren.
Children present in item that are not present in newchildren
are detached from tree. No items in newchildren may be an
ancestor of item."""
self.tk.call(self._w, "children", item, newchildren)
def column(self, column, option=None, **kw):
"""Query or modify the options for the specified column.
If kw is not given, returns a dict of the column option values. If
option is specified then the value for that option is returned.
Otherwise, sets the options to the corresponding values."""
if option is not None:
kw[option] = None
return _val_or_dict(self.tk, kw, self._w, "column", column)
def delete(self, *items):
"""Delete all specified items and all their descendants. The root
item may not be deleted."""
self.tk.call(self._w, "delete", items)
def detach(self, *items):
"""Unlinks all of the specified items from the tree.
The items and all of their descendants are still present, and may
be reinserted at another point in the tree, but will not be
displayed. The root item may not be detached."""
self.tk.call(self._w, "detach", items)
def exists(self, item):
"""Returns True if the specified item is present in the tree,
False otherwise."""
return self.tk.getboolean(self.tk.call(self._w, "exists", item))
def focus(self, item=None):
"""If item is specified, sets the focus item to item. Otherwise,
returns the current focus item, or '' if there is none."""
return self.tk.call(self._w, "focus", item)
def heading(self, column, option=None, **kw):
"""Query or modify the heading options for the specified column.
If kw is not given, returns a dict of the heading option values. If
option is specified then the value for that option is returned.
Otherwise, sets the options to the corresponding values.
Valid options/values are:
text: text
The text to display in the column heading
image: image_name
Specifies an image to display to the right of the column
heading
anchor: anchor
Specifies how the heading text should be aligned. One of
the standard Tk anchor values
command: callback
A callback to be invoked when the heading label is
pressed.
To configure the tree column heading, call this with column = "#0" """
cmd = kw.get('command')
if cmd and not isinstance(cmd, basestring):
# callback not registered yet, do it now
kw['command'] = self.master.register(cmd, self._substitute)
if option is not None:
kw[option] = None
return _val_or_dict(self.tk, kw, self._w, 'heading', column)
def identify(self, component, x, y):
"""Returns a description of the specified component under the
point given by x and y, or the empty string if no such component
is present at that position."""
return self.tk.call(self._w, "identify", component, x, y)
def identify_row(self, y):
"""Returns the item ID of the item at position y."""
return self.identify("row", 0, y)
def identify_column(self, x):
"""Returns the data column identifier of the cell at position x.
The tree column has ID #0."""
return self.identify("column", x, 0)
def identify_region(self, x, y):
"""Returns one of:
heading: Tree heading area.
        separator: Space between two column headings.
tree: The tree area.
cell: A data cell.
* Availability: Tk 8.6"""
return self.identify("region", x, y)
def identify_element(self, x, y):
"""Returns the element at position x, y.
* Availability: Tk 8.6"""
return self.identify("element", x, y)
def index(self, item):
"""Returns the integer index of item within its parent's list
of children."""
return self.tk.getint(self.tk.call(self._w, "index", item))
def insert(self, parent, index, iid=None, **kw):
"""Creates a new item and return the item identifier of the newly
created item.
parent is the item ID of the parent item, or the empty string
to create a new top-level item. index is an integer, or the value
end, specifying where in the list of parent's children to insert
the new item. If index is less than or equal to zero, the new node
is inserted at the beginning, if index is greater than or equal to
the current number of children, it is inserted at the end. If iid
is specified, it is used as the item identifier, iid must not
already exist in the tree. Otherwise, a new unique identifier
is generated."""
opts = _format_optdict(kw)
if iid is not None:
res = self.tk.call(self._w, "insert", parent, index,
"-id", iid, *opts)
else:
res = self.tk.call(self._w, "insert", parent, index, *opts)
return res
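    # A minimal usage sketch: build a small two-level tree with one data
    # column:
    #
    #     tree = Treeview(root, columns=("size",))
    #     tree.heading("size", text="Size")
    #     parent = tree.insert("", "end", text="folder", open=True)
    #     tree.insert(parent, "end", text="file.txt", values=("4 KB",))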
def item(self, item, option=None, **kw):
"""Query or modify the options for the specified item.
If no options are given, a dict with options/values for the item
is returned. If option is specified then the value for that option
is returned. Otherwise, sets the options to the corresponding
values as given by kw."""
if option is not None:
kw[option] = None
return _val_or_dict(self.tk, kw, self._w, "item", item)
def move(self, item, parent, index):
"""Moves item to position index in parent's list of children.
It is illegal to move an item under one of its descendants. If
index is less than or equal to zero, item is moved to the
beginning, if greater than or equal to the number of children,
it is moved to the end. If item was detached it is reattached."""
self.tk.call(self._w, "move", item, parent, index)
reattach = move # A sensible method name for reattaching detached items
def next(self, item):
"""Returns the identifier of item's next sibling, or '' if item
is the last child of its parent."""
return self.tk.call(self._w, "next", item)
def parent(self, item):
"""Returns the ID of the parent of item, or '' if item is at the
top level of the hierarchy."""
return self.tk.call(self._w, "parent", item)
def prev(self, item):
"""Returns the identifier of item's previous sibling, or '' if
item is the first child of its parent."""
return self.tk.call(self._w, "prev", item)
def see(self, item):
"""Ensure that item is visible.
        Sets the open option of all of item's ancestors to True, and scrolls
the widget if necessary so that item is within the visible
portion of the tree."""
self.tk.call(self._w, "see", item)
def selection(self, selop=None, items=None):
"""If selop is not specified, returns selected items."""
if isinstance(items, basestring):
items = (items,)
return self.tk.splitlist(self.tk.call(self._w, "selection", selop, items))
def selection_set(self, items):
"""items becomes the new selection."""
self.selection("set", items)
def selection_add(self, items):
"""Add items to the selection."""
self.selection("add", items)
def selection_remove(self, items):
"""Remove items from the selection."""
self.selection("remove", items)
def selection_toggle(self, items):
"""Toggle the selection state of each item in items."""
self.selection("toggle", items)
def set(self, item, column=None, value=None):
"""Query or set the value of given item.
With one argument, return a dictionary of column/value pairs
for the specified item. With two arguments, return the current
value of the specified column. With three arguments, set the
value of given column in given item to the specified value."""
res = self.tk.call(self._w, "set", item, column, value)
if column is None and value is None:
return _splitdict(self.tk, res,
cut_minus=False, conv=_tclobj_to_py)
else:
return res
def tag_bind(self, tagname, sequence=None, callback=None):
"""Bind a callback for the given event sequence to the tag tagname.
When an event is delivered to an item, the callbacks for each
of the item's tags option are called."""
self._bind((self._w, "tag", "bind", tagname), sequence, callback, add=0)
def tag_configure(self, tagname, option=None, **kw):
"""Query or modify the options for the specified tagname.
If kw is not given, returns a dict of the option settings for tagname.
If option is specified, returns the value for that option for the
specified tagname. Otherwise, sets the options to the corresponding
values for the given tagname."""
if option is not None:
kw[option] = None
return _val_or_dict(self.tk, kw, self._w, "tag", "configure",
tagname)
def tag_has(self, tagname, item=None):
"""If item is specified, returns 1 or 0 depending on whether the
specified item has the given tagname. Otherwise, returns a list of
all items which have the specified tag.
* Availability: Tk 8.6"""
if item is None:
return self.tk.splitlist(
self.tk.call(self._w, "tag", "has", tagname))
else:
return self.tk.getboolean(
self.tk.call(self._w, "tag", "has", tagname, item))
# Extensions
class LabeledScale(Frame, object):
"""A Ttk Scale widget with a Ttk Label widget indicating its
current value.
The Ttk Scale can be accessed through instance.scale, and Ttk Label
can be accessed through instance.label"""
def __init__(self, master=None, variable=None, from_=0, to=10, **kw):
"""Construct a horizontal LabeledScale with parent master, a
variable to be associated with the Ttk Scale widget and its range.
If variable is not specified, a Tkinter.IntVar is created.
WIDGET-SPECIFIC OPTIONS
compound: 'top' or 'bottom'
Specifies how to display the label relative to the scale.
Defaults to 'top'.
"""
self._label_top = kw.pop('compound', 'top') == 'top'
Frame.__init__(self, master, **kw)
self._variable = variable or Tkinter.IntVar(master)
self._variable.set(from_)
self._last_valid = from_
self.label = Label(self)
self.scale = Scale(self, variable=self._variable, from_=from_, to=to)
self.scale.bind('<<RangeChanged>>', self._adjust)
# position scale and label according to the compound option
scale_side = 'bottom' if self._label_top else 'top'
label_side = 'top' if scale_side == 'bottom' else 'bottom'
self.scale.pack(side=scale_side, fill='x')
        tmp = Label(self).pack(side=label_side)  # placeholder
self.label.place(anchor='n' if label_side == 'top' else 's')
# update the label as scale or variable changes
self.__tracecb = self._variable.trace_variable('w', self._adjust)
self.bind('<Configure>', self._adjust)
self.bind('<Map>', self._adjust)
def destroy(self):
"""Destroy this widget and possibly its associated variable."""
try:
self._variable.trace_vdelete('w', self.__tracecb)
except AttributeError:
# widget has been destroyed already
pass
else:
del self._variable
Frame.destroy(self)
self.label = None
self.scale = None
def _adjust(self, *args):
"""Adjust the label position according to the scale."""
def adjust_label():
self.update_idletasks() # "force" scale redraw
x, y = self.scale.coords()
if self._label_top:
y = self.scale.winfo_y() - self.label.winfo_reqheight()
else:
y = self.scale.winfo_reqheight() + self.label.winfo_reqheight()
self.label.place_configure(x=x, y=y)
from_ = _to_number(self.scale['from'])
to = _to_number(self.scale['to'])
if to < from_:
from_, to = to, from_
newval = self._variable.get()
if not from_ <= newval <= to:
# value outside range, set value back to the last valid one
self.value = self._last_valid
return
self._last_valid = newval
self.label['text'] = newval
self.after_idle(adjust_label)
def _get_value(self):
"""Return current scale value."""
return self._variable.get()
def _set_value(self, val):
"""Set new scale value."""
self._variable.set(val)
value = property(_get_value, _set_value)
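# A minimal usage sketch: a LabeledScale with the label below the scale,
# driven through the ``value`` property:
#
#     ls = LabeledScale(root, from_=0, to=100, compound="bottom")
#     ls.pack(fill="x")
#     ls.value = 42    # updates both the scale position and the label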
class OptionMenu(Menubutton):
"""Themed OptionMenu, based after Tkinter's OptionMenu, which allows
the user to select a value from a menu."""
def __init__(self, master, variable, default=None, *values, **kwargs):
"""Construct a themed OptionMenu widget with master as the parent,
the resource textvariable set to variable, the initially selected
value specified by the default parameter, the menu values given by
*values and additional keywords.
WIDGET-SPECIFIC OPTIONS
style: stylename
Menubutton style.
direction: 'above', 'below', 'left', 'right', or 'flush'
Menubutton direction.
command: callback
A callback that will be invoked after selecting an item.
"""
kw = {'textvariable': variable, 'style': kwargs.pop('style', None),
'direction': kwargs.pop('direction', None)}
Menubutton.__init__(self, master, **kw)
self['menu'] = Tkinter.Menu(self, tearoff=False)
self._variable = variable
self._callback = kwargs.pop('command', None)
if kwargs:
raise Tkinter.TclError('unknown option -%s' % (
kwargs.iterkeys().next()))
self.set_menu(default, *values)
def __getitem__(self, item):
if item == 'menu':
return self.nametowidget(Menubutton.__getitem__(self, item))
return Menubutton.__getitem__(self, item)
def set_menu(self, default=None, *values):
"""Build a new menu of radiobuttons with *values and optionally
a default value."""
menu = self['menu']
menu.delete(0, 'end')
for val in values:
menu.add_radiobutton(label=val,
command=Tkinter._setit(self._variable, val, self._callback),
variable=self._variable)
if default:
self._variable.set(default)
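    # A minimal usage sketch: a variable-backed OptionMenu with "b"
    # preselected out of three radiobutton entries:
    #
    #     var = Tkinter.StringVar()
    #     om = OptionMenu(root, var, "b", "a", "b", "c")
    #     om.pack()    # picking an item updates var (and calls command)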
def destroy(self):
"""Destroy this widget and its associated variable."""
try:
del self._variable
except AttributeError:
pass
Menubutton.destroy(self)
|
gpl-3.0
| 4,497,026,299,902,996,000
| 33.461963
| 93
| 0.607747
| false
|
Cryptoc1/sotheresthisgirl
|
tweet.py
|
1
|
1999
|
#!/usr/bin/env python
# Shitty code, deal with it.
import twitter
import time, sys
def main():
consumer_key = "v8xVBZlXBp2AJoWI3VjDjzDkC"
consumer_secret = "Wpoom4N6tpTgfywzCt6y83gvZubbYoT0vL0V8FXzhyXA74218D"
api = twitter.Api(consumer_key=consumer_key,
consumer_secret=consumer_secret,
access_token_key="3200038924-6twEw6XbQ19ibc8Fnt7qI8blFEkSNI5BCqFnPL3",
access_token_secret="bgqS52Hcg53PXhX5qrk3z5k5oK7F6rRg3yIQKzzZO9iXd")
_skip = " [sotheresthisgirl] skipping tweet by: @"
_al_fav = " (most likely already favorited it)"
_faved = " [sotheresthisgirl] faved tweet by: @"
_post = " [sotheresthisgirl] posted update"
# i = 1
# j = 1
k = int(sys.argv[1])
l = int(sys.argv[2])
while True:
# we already finished this off, so don't need it anymore
'''# should post updates with spaces from 0 to 123 spaces
if i < 123:
tmp = "." * i
api.PostUpdate(tmp + "#sotheresthisgirl")
api.PostUpdate("#sotheresthisgirl" + tmp)
print _post
# should post updates with spaces from 0 to 120 spaces
if j < 120:
tmp = "." * j
api.PostUpdate(tmp + "so there's this girl")
api.PostUpdate("so there's this girl" + tmp)
print _post'''
        # should post updates until the number is 122 digits long (because there's a space between the number and the hashtag)
if len(str(k)) < 122:
api.PostUpdate("#sotheresthisgirl " + str(k))
print _post
# should post update until the number is 119 (because the space between, yada, yada..) digits long, yada, yada..."
if len(str(l)) < 119:
api.PostUpdate("so there's this girl " + str(l))
print _post
        # Waits 200 seconds (~3.3 minutes)
time.sleep(200)
# i += 1
# j += 1
k += 1
l += 1
if __name__ == "__main__":
main()
|
mit
| 1,954,938,939,408,985,300
| 34.070175
| 127
| 0.584792
| false
|
simone-campagna/rubik
|
testing/rubik_testing/tests/test_program/rubik_test_interface.py
|
1
|
13352
|
#!/usr/bin/env python3
#
# Copyright 2014 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = "Simone Campagna"
__all__ = [
'RubikTestInterface',
]
import os
import numpy as np
from rubik.conf import VERSION
from rubik.shape import Shape
from rubik.application import logo
from rubik.cubes import api as cb
from ...rubik_test_case import testmethod
from ...rubik_test_program import RubikTestProgram
class RubikTestInterface(RubikTestProgram):
METHOD_NAMES = []
@testmethod
def help(self):
returncode, output, error = self.run_program("--help")
self.assertEqual(returncode, 0)
@testmethod
def usage(self):
returncode, output, error = self.run_program("--usage")
self.assertEqual(returncode, 0)
@testmethod
def logo(self):
returncode, output, error = self.run_program("--logo")
self.assertEqual(returncode, 0)
self.assertEqual(output, "{}\n".format(logo.RUBIK))
@testmethod
def version(self):
returncode, output, error = self.run_program("--version")
self.assertEqual(returncode, 0)
self.assertEqual(output, "rubik {}\n".format(VERSION))
@testmethod
def dry_run(self):
returncode, output, error = self.run_program("-i non_existent.tmp1 -s 4x6 -o non_existent.tmp2 --dry-run")
self.assertEqual(returncode, 0)
@testmethod
def report_dry_run(self):
returncode, output, error = self.run_program("-i non_existent.tmp1 -s 4x6 -o non_existent.tmp2 --dry-run --report")
self.assertEqual(returncode, 0)
@testmethod
def histogram_number(self):
returncode, output, error = self.run_program("-e 'cb.random_cube((4, 5))' --histogram")
self.assertEqual(returncode, 0)
@testmethod
def histogram_percentage(self):
returncode, output, error = self.run_program("-e 'cb.random_cube((4, 5))' --histogram --histogram-percentage")
self.assertEqual(returncode, 0)
@testmethod
def histogram_bins_8(self):
returncode, output, error = self.run_program("-e 'cb.random_cube((4, 5))' --histogram --histogram-bins=8 --histogram-range 0.1 0.9")
self.assertEqual(returncode, 0)
@testmethod
def help_expression(self):
returncode, output, error = self.run_program("--help-expression")
self.assertEqual(returncode, 0)
@testmethod
def help_extractor(self):
returncode, output, error = self.run_program("--help-extractor")
self.assertEqual(returncode, 0)
@testmethod
def help_user_defined_variables(self):
returncode, output, error = self.run_program("--help-user-defined-variables")
self.assertEqual(returncode, 0)
# @testmethod
# def help_numpy(self):
# returncode, output, error = self.run_program("--help-numpy")
# self.assertEqual(returncode, 0)
#
# @testmethod
# def help_cubes(self):
# returncode, output, error = self.run_program("--help-cubes")
# self.assertEqual(returncode, 0)
@testmethod
def help_filenames(self):
returncode, output, error = self.run_program("--help-filenames")
self.assertEqual(returncode, 0)
@testmethod
def help_split(self):
returncode, output, error = self.run_program("--help-split")
self.assertEqual(returncode, 0)
@testmethod
def help_environment_variables(self):
returncode, output, error = self.run_program("--help-environment-variables")
self.assertEqual(returncode, 0)
@testmethod
def help_creating_cubes(self):
returncode, output, error = self.run_program("--help-creating-cubes")
self.assertEqual(returncode, 0)
@testmethod
def help_output(self):
returncode, output, error = self.run_program("--help-output")
self.assertEqual(returncode, 0)
@testmethod
def help_memory_usage(self):
returncode, output, error = self.run_program("--help-memory-usage")
self.assertEqual(returncode, 0)
@testmethod
def help_usage(self):
returncode, output, error = self.run_program("--help-usage")
self.assertEqual(returncode, 0)
# labeled options
def impl_labeled_options(self, shape, dtype, i0_label=None, i1_label=None, i2_label=None, o0_label=None, o1_label=None):
shape = Shape(shape)
dtype = cb.get_dtype(dtype)
file_format = 'raw'
i0_label_definition = ''
i1_label_definition = ''
i2_label_definition = ''
o0_label_definition = ''
o1_label_definition = ''
if i0_label is None:
i0_label = 'i0'
else:
i0_label_definition = '{}='.format(i0_label)
if i1_label is None:
i1_label = 'i1'
else:
i1_label_definition = '{}='.format(i1_label)
if i2_label is None:
i2_label = 'i2'
else:
i2_label_definition = '{}='.format(i2_label)
if o0_label is None:
o0_label = 'o0'
else:
o0_label_definition = '{}='.format(o0_label)
if o1_label is None:
o1_label = 'o1'
else:
o1_label_definition = '{}='.format(o1_label)
lc_filename_format = "lcube_{shape}_{dtype}.{format}"
lc_filename = lc_filename_format.format(shape=shape, dtype=dtype.__name__, format=file_format)
returncode, output, error = self.run_program(
"""-e 'cb.linear_cube("{s}")' -o {lc}""".format(
s=shape,
lc=lc_filename_format,
)
)
self.assertEqual(returncode, 0)
self.assertFileExistsAndHasShape(lc_filename, shape=shape, dtype=dtype)
rc_shape = Shape("100x{}".format(shape))
rc_extractor = "3," + ','.join(':' for d in shape)
rc_filename_format = "rcube_{shape}_{dtype}.{format}"
rc_filename = rc_filename_format.format(shape=rc_shape, dtype=dtype.__name__, format=file_format)
returncode, output, error = self.run_program(
"""-e 'cb.random_cube("{s}")' -o {rc}""".format(
s=rc_shape,
rc=rc_filename_format,
)
)
self.assertEqual(returncode, 0)
cc_filename_format = "ccube_{shape}_{dtype}.{format}"
cc_filename = cc_filename_format.format(shape=shape, dtype=dtype.__name__, format=file_format)
returncode, output, error = self.run_program(
"""-e 'cb.const_cube("{s}", value=0.2)' -o {cc}""".format(
s=shape,
cc=cc_filename_format,
)
)
self.assertEqual(returncode, 0)
self.assertFileExistsAndHasShape(cc_filename, shape=shape, dtype=dtype)
o0_filename_format = "o0cube_{shape}_{dtype}.{format}"
o0_file_format = 'text'
o0_filename = o0_filename_format.format(shape=shape, dtype=dtype.__name__, format=o0_file_format)
o1_filename_format = "o1cube_{shape}_{dtype}.{format}"
o1_file_format = 'csv'
o1_filename = o1_filename_format.format(shape=shape, dtype=dtype.__name__, format=o1_file_format)
command = """-i '{i0ld}{lc}' -i '{i1ld}{rc}' -i '{i2ld}{cc}' -s '{s}' -s '{i1l}={rs}' -x '{i1l}={rcx}' -e '{i0l} + {i1l}' -o '{o0ld}{o0}' -e '{i0l} - {i1l}' -o '{o1ld}{o1}' -Of '{o0l}={o0f}' -Of '{o1l}={o1f}'""".format(
s=shape,
rs=rc_shape,
lc=lc_filename_format,
rc=rc_filename_format,
cc=cc_filename_format,
o0=o0_filename_format,
o0f=o0_file_format,
o1=o1_filename_format,
o1f=o1_file_format,
rcx=rc_extractor,
i0l=i0_label,
i1l=i1_label,
i2l=i2_label,
o0l=o0_label,
o1l=o1_label,
i0ld=i0_label_definition,
i1ld=i1_label_definition,
i2ld=i2_label_definition,
o0ld=o0_label_definition,
o1ld=o1_label_definition,
)
returncode, output, error = self.run_program(command)
self.assertEqual(returncode, 0)
self.assertFileExists(o0_filename)
self.assertFileExists(o1_filename)
self.remove_files(rc_filename, lc_filename, cc_filename, o0_filename, o1_filename)
@testmethod
def labeled_options_4x5_float32(self, shape="4x5", dtype="float32"):
self.impl_labeled_options(shape=shape, dtype=dtype)
@testmethod
def labeled_options_4x5_float32_l_r_c_x_y(self, shape="4x5", dtype="float32"):
self.impl_labeled_options(shape=shape, dtype=dtype, i0_label='l', i1_label='r', i2_label='c', o0_label='x', o1_label='y')
def impl_expression_filename(self, shape, dtype, mode):
shape = Shape(shape)
dtype = cb.get_dtype(dtype)
file_format = 'raw'
out_filename_format = "outcube_{mode}_{{shape}}_{{dtype}}.{{format}}".format(mode=mode)
out_filename = out_filename_format.format(shape=shape, dtype=dtype.__name__, format=file_format)
expr_filename = "expr_{mode}.txt".format(mode=mode)
with open(expr_filename, "w") as f_out:
f_out.write("""\
cube = cb.linear_cube(shape="{s}", dtype="{d}")
cb.write_cube(file_format="{f}", cube=cube, file="{o}")
""".format(s=shape, d=dtype.__name__, o=out_filename_format, f=file_format))
if mode == "f_option":
command = "-f {e}".format(e=expr_filename)
else:
command = "-e '@{e}'".format(e=expr_filename)
returncode, output, error = self.run_program(command)
self.assertEqual(returncode, 0)
self.assertFileExistsAndHasShape(out_filename, shape=shape, dtype=dtype)
@testmethod
def expression_filename_4x5_float32_f_option(self):
self.impl_expression_filename(shape="4x5", dtype="float64", mode="f_option")
@testmethod
def expression_filename_8x3x2_float32_f_option(self):
self.impl_expression_filename(shape="8x3x2", dtype="float64", mode="f_option")
@testmethod
def expression_filename_4x5_float32_at_option(self):
self.impl_expression_filename(shape="4x5", dtype="float64", mode="at_option")
@testmethod
def expression_filename_8x3x2_float32_at_option(self):
self.impl_expression_filename(shape="8x3x2", dtype="float64", mode="at_option")
# view attributes
def impl_view_attribute(self, attribute_name, attribute_value):
returncode, output, error = self.run_program("--view-attribute {}={!r}".format(attribute_name, attribute_value))
self.assertEqual(returncode, 0)
@testmethod
def view_attribute_clip_symmetric(self):
self.impl_view_attribute("clip_symmetric", "True")
@testmethod
def view_attribute_x(self):
self.impl_view_attribute("x", "0.33")
# view attribute files
def impl_view_attributes(self, **attribute_dict):
filename = "view_attributes.txt"
try:
with open(filename, "w") as f_out:
for attribute_name, attribute_value in attribute_dict.items():
f_out.write("{}={!r}\n".format(attribute_name, attribute_value))
returncode, output, error = self.run_program("--view-attribute-file {}".format(filename))
self.assertEqual(returncode, 0)
finally:
os.remove(filename)
@testmethod
def view_attribute_file(self):
self.impl_view_attributes(clip_min=0.3, clip_symmetric=True, y=1.2)
# view list
@testmethod
def view_attribute_list(self):
returncode, output, error = self.run_program("--view-list")
## interface expressions
@testmethod
def read_cube(self):
a = np.array([[1.0, -1.3], [1.3, -0.2]], dtype=cb.get_default_dtype())
a.tofile("file_a.raw")
returncode, output, error = self.run_program("""-e 'read_cube(filename="file_a.raw", shape=("{s}"), dtype="{t!r}")' '_r.sum()' --print""".format(
s=Shape(a.shape),
t=cb.get_dtype_name(a.dtype),
))
self.assertEqual(returncode, 0)
v = float(output.strip())
self.assertAlmostEqual(v, a.sum())
@testmethod
def write_cube(self):
returncode, output, error = self.run_program("""-e '_r = cb.as_dtype(np.array([[1.0, -1.3], [1.3, -0.2]]))' -e 'write_cube(filename="file_b.raw", cube=_r)'""")
self.assertEqual(returncode, 0)
self.assertFileExistsAndHasShape("file_b.raw", Shape("2x2"))
@testmethod
def write_cube_default(self):
returncode, output, error = self.run_program("""-e '_r = cb.as_dtype(np.array([[1.0, -1.3], [1.3, -0.2]]))' -e 'write_cube(filename="file_c.raw")'""")
self.assertEqual(returncode, 0)
self.assertFileExistsAndHasShape("file_c.raw", Shape("2x2"))
|
apache-2.0
| -1,450,249,721,676,834,000
| 37.039886
| 227
| 0.607475
| false
|
sigmavirus24/pip
|
tasks/vendoring/__init__.py
|
1
|
3688
|
""""Vendoring script, python 3.5 needed"""
from pathlib import Path
import re
import shutil
import invoke
TASK_NAME = 'update'
FILE_WHITE_LIST = (
'Makefile',
'vendor.txt',
'__init__.py',
'README.rst',
)
def drop_dir(path):
shutil.rmtree(str(path))
def remove_all(paths):
for path in paths:
if path.is_dir():
drop_dir(path)
else:
path.unlink()
def log(msg):
print('[vendoring.%s] %s' % (TASK_NAME, msg))
def clean_vendor(ctx, vendor_dir):
# Old _vendor cleanup
remove_all(vendor_dir.glob('*.pyc'))
log('Cleaning %s' % vendor_dir)
for item in vendor_dir.iterdir():
if item.is_dir():
shutil.rmtree(str(item))
elif item.name not in FILE_WHITE_LIST:
item.unlink()
else:
log('Skipping %s' % item)
def rewrite_imports(package_dir, vendored_libs):
for item in package_dir.iterdir():
if item.is_dir():
rewrite_imports(item, vendored_libs)
elif item.name.endswith('.py'):
rewrite_file_imports(item, vendored_libs)
def rewrite_file_imports(item, vendored_libs):
"""Rewrite 'import xxx' and 'from xxx import' for vendored_libs"""
text = item.read_text()
# Revendor pkg_resources.extern first
    text = re.sub(r'pkg_resources\.extern', r'pip._vendor', text)
for lib in vendored_libs:
text = re.sub(
r'(\n\s*)import %s' % lib,
r'\1from pip._vendor import %s' % lib,
text,
)
text = re.sub(
r'(\n\s*)from %s' % lib,
r'\1from pip._vendor.%s' % lib,
text,
)
item.write_text(text)
def apply_patch(ctx, patch_file_path):
log('Applying patch %s' % patch_file_path.name)
ctx.run('git apply %s' % patch_file_path)
def vendor(ctx, vendor_dir):
log('Reinstalling vendored libraries')
ctx.run(
'pip install -t {0} -r {0}/vendor.txt --no-compile'.format(
str(vendor_dir),
)
)
remove_all(vendor_dir.glob('*.dist-info'))
remove_all(vendor_dir.glob('*.egg-info'))
# Cleanup setuptools unneeded parts
(vendor_dir / 'easy_install.py').unlink()
drop_dir(vendor_dir / 'setuptools')
drop_dir(vendor_dir / 'pkg_resources' / '_vendor')
drop_dir(vendor_dir / 'pkg_resources' / 'extern')
# Drop interpreter and OS specific msgpack libs.
# Pip will rely on the python-only fallback instead.
remove_all(vendor_dir.glob('msgpack/*.so'))
# Detect the vendored packages/modules
vendored_libs = []
for item in vendor_dir.iterdir():
if item.is_dir():
vendored_libs.append(item.name)
elif item.name not in FILE_WHITE_LIST:
vendored_libs.append(item.name[:-3])
log("Detected vendored libraries: %s" % ", ".join(vendored_libs))
# Global import rewrites
log("Rewriting all imports related to vendored libs")
for item in vendor_dir.iterdir():
if item.is_dir():
rewrite_imports(item, vendored_libs)
elif item.name not in FILE_WHITE_LIST:
rewrite_file_imports(item, vendored_libs)
# Special cases: apply stored patches
log("Apply patches")
patch_dir = Path(__file__).parent / 'patches'
for patch in patch_dir.glob('*.patch'):
apply_patch(ctx, patch)
@invoke.task(name=TASK_NAME)
def main(ctx):
git_root = Path(
ctx.run('git rev-parse --show-toplevel', hide=True).stdout.strip()
)
vendor_dir = git_root / 'pip' / '_vendor'
log('Using vendor dir: %s' % vendor_dir)
clean_vendor(ctx, vendor_dir)
vendor(ctx, vendor_dir)
log('Revendoring complete')
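# Typically run through the invoke CLI from the repository root, e.g.:
#
#   $ invoke update
#
# (the exact invocation can differ depending on how the project's tasks
# namespace is assembled; the task name itself comes from TASK_NAME above)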
|
mit
| 8,446,935,301,462,685,000
| 26.729323
| 74
| 0.594902
| false
|
ctalbert/mozharness
|
mozharness/base/signing.py
|
1
|
6226
|
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****
"""Generic signing methods.
"""
import getpass
import hashlib
import os
import re
import subprocess
from mozharness.base.errors import JarsignerErrorList, ZipErrorList, ZipalignErrorList
from mozharness.base.log import OutputParser, IGNORE, DEBUG, INFO, ERROR, FATAL
UnsignApkErrorList = [{
'regex': re.compile(r'''zip warning: name not matched: '?META-INF/'''),
'level': INFO,
'explanation': r'''This apk is already unsigned.''',
},{
'substr': r'''zip error: Nothing to do!''',
'level': IGNORE,
}] + ZipErrorList
TestJarsignerErrorList = [{
"substr": "jarsigner: unable to open jar file:",
"level": IGNORE,
}] + JarsignerErrorList
# BaseSigningMixin {{{1
class BaseSigningMixin(object):
"""Generic signing helper methods.
"""
def query_filesize(self, file_path):
self.info("Determining filesize for %s" % file_path)
length = os.path.getsize(file_path)
self.info(" %s" % str(length))
return length
# TODO this should be parallelized with the to-be-written BaseHelper!
def query_sha512sum(self, file_path):
self.info("Determining sha512sum for %s" % file_path)
m = hashlib.sha512()
contents = self.read_from_file(file_path, verbose=False,
open_mode='rb')
m.update(contents)
sha512 = m.hexdigest()
self.info(" %s" % sha512)
return sha512
# AndroidSigningMixin {{{1
class AndroidSigningMixin(object):
"""
Generic Android apk signing methods.
Dependent on BaseScript.
"""
# TODO port build/tools/release/signing/verify-android-signature.sh here
key_passphrase = os.environ.get('android_keypass')
store_passphrase = os.environ.get('android_storepass')
def passphrase(self):
if not self.store_passphrase:
self.store_passphrase = getpass.getpass("Store passphrase: ")
if not self.key_passphrase:
self.key_passphrase = getpass.getpass("Key passphrase: ")
def _verify_passphrases(self, keystore, key_alias, error_level=FATAL):
self.info("Verifying passphrases...")
status = self.sign_apk("NOTAREALAPK", keystore,
self.store_passphrase, self.key_passphrase,
key_alias, remove_signature=False,
log_level=DEBUG, error_level=DEBUG,
error_list=TestJarsignerErrorList)
if status == 0:
self.info("Passphrases are good.")
elif status < 0:
self.log("Encountered errors while trying to sign!",
level=error_level)
else:
self.log("Unable to verify passphrases!",
level=error_level)
return status
def verify_passphrases(self):
c = self.config
self._verify_passphrases(c['keystore'], c['key_alias'])
def postflight_passphrase(self):
self.verify_passphrases()
def sign_apk(self, apk, keystore, storepass, keypass, key_alias,
remove_signature=True, error_list=None,
log_level=INFO, error_level=ERROR):
"""
Signs an apk with jarsigner.
"""
jarsigner = self.query_exe('jarsigner')
if remove_signature:
status = self.unsign_apk(apk)
if status:
self.error("Can't remove signature in %s!" % apk)
return -1
if error_list is None:
error_list = JarsignerErrorList[:]
# This needs to run silently, so no run_command() or
# get_output_from_command() (though I could add a
# suppress_command_echo=True or something?)
self.log("(signing %s)" % apk, level=log_level)
try:
p = subprocess.Popen([jarsigner, "-keystore", keystore,
"-storepass", storepass,
"-keypass", keypass,
apk, key_alias],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
except OSError:
self.dump_exception("Error while signing %s (missing %s?):" % (apk, jarsigner))
return -2
except ValueError:
self.dump_exception("Popen called with invalid arguments during signing?")
return -3
parser = OutputParser(config=self.config, log_obj=self.log_obj,
error_list=error_list)
loop = True
while loop:
if p.poll() is not None:
"""Avoid losing the final lines of the log?"""
loop = False
for line in p.stdout:
parser.add_lines(line)
if parser.num_errors:
self.log("(failure)", level=error_level)
else:
self.log("(success)", level=log_level)
return parser.num_errors
def unsign_apk(self, apk, **kwargs):
zip_bin = self.query_exe("zip")
return self.run_command([zip_bin, apk, '-d', 'META-INF/*'],
error_list=UnsignApkErrorList,
success_codes=[0, 12],
return_type='num_errors', **kwargs)
def align_apk(self, unaligned_apk, aligned_apk, error_level=ERROR):
"""
Zipalign apk.
Returns None on success, not None on failure.
"""
dirs = self.query_abs_dirs()
zipalign = self.query_exe("zipalign")
if self.run_command([zipalign, '-f', '4',
unaligned_apk, aligned_apk],
return_type='num_errors',
cwd=dirs['abs_work_dir'],
error_list=ZipalignErrorList):
self.log("Unable to zipalign %s to %s!" % (unaligned_apk, aligned_apk), level=error_level)
return -1
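# A minimal illustrative call (hypothetical paths and alias; in practice these
# methods are mixed into a BaseScript subclass that provides query_exe(),
# run_command(), logging, and so on):
#
#     status = self.sign_apk('fennec.apk', keystore='/path/to/keystore',
#                            storepass=store_pw, keypass=key_pw,
#                            key_alias='release')
#     if status:  # nonzero means jarsigner output contained errors
#         self.fatal('signing failed')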
|
mpl-2.0
| 5,053,648,421,619,437,000
| 36.506024
| 102
| 0.562319
| false
|
mdovgialo/steam-vr-wheel
|
steam_vr_wheel/pyvjoy/_wrapper.py
|
1
|
2789
|
import os
import sys
from ctypes import *
# NOTE: the vJoy exception classes raised below (vJoyNotEnabledException,
# vJoyDriverMismatch, vJoyFailedToAcquireException, and friends) are assumed
# to be provided by a sibling exceptions module of this package.
from .exceptions import *
dll_filename = "vJoyInterface.dll"
dll_path = os.path.dirname(__file__) + os.sep + dll_filename
try:
_vj = cdll.LoadLibrary(dll_path)
except OSError:
sys.exit("Unable to load vJoy SDK DLL. Ensure that %s is present" % dll_filename)
def vJoyEnabled():
"""Returns True if vJoy is installed and enabled"""
result = _vj.vJoyEnabled()
if result == 0:
raise vJoyNotEnabledException()
else:
return True
def DriverMatch():
"""Check if the version of vJoyInterface.dll and the vJoy Driver match"""
result = _vj.DriverMatch()
if result == 0:
raise vJoyDriverMismatch()
else:
return True
def GetVJDStatus(rID):
"""Get the status of a given vJoy Device"""
return _vj.GetVJDStatus(rID)
def AcquireVJD(rID):
"""Attempt to acquire a vJoy Device"""
result = _vj.AcquireVJD(rID)
if result == 0:
#Check status
status = GetVJDStatus(rID)
if status != VJD_STAT_FREE:
raise vJoyFailedToAcquireException("Cannot acquire vJoy Device because it is not in VJD_STAT_FREE")
else:
raise vJoyFailedToAcquireException()
else:
return True
def RelinquishVJD(rID):
"""Relinquish control of a vJoy Device"""
result = _vj.RelinquishVJD(rID)
if result == 0:
raise vJoyFailedToRelinquishException()
else:
return True
def SetBtn(state,rID,buttonID):
"""Sets the state of vJoy Button to on or off. SetBtn(state,rID,buttonID)"""
result = _vj.SetBtn(state,rID,buttonID)
if result == 0:
raise vJoyButtonError()
else:
return True
def SetDiscPov(PovValue, rID, PovID):
"""Write Value to a given discrete POV defined in the specified VDJ"""
if PovValue < -1 or PovValue > 3:
raise vJoyInvalidPovValueException()
if PovID < 1 or PovID > 4:
        raise vJoyInvalidPovIDException()
return _vj.SetDiscPov(PovValue,rID,PovID)
def SetContPov(PovValue, rID, PovID):
"""Write Value to a given continuous POV defined in the specified VDJ"""
if PovValue < -1 or PovValue > 35999:
raise vJoyInvalidPovValueException()
if PovID < 1 or PovID > 4:
        raise vJoyInvalidPovIDException()
return _vj.SetContPov(PovValue,rID,PovID)
def ResetVJD(rID):
"""Reset all axes and buttons to default for specified vJoy Device"""
return _vj.ResetVJD(rID)
def ResetButtons(rID):
"""Reset all buttons to default for specified vJoy Device"""
return _vj.ResetButtons(rID)
def ResetPovs(rID):
    """Reset all POV hats to default for specified vJoy Device"""
    return _vj.ResetPovs(rID)
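# A minimal usage sketch (illustrative; assumes vJoy device 1 exists and is
# configured with at least one button):
#
#     vJoyEnabled()
#     AcquireVJD(1)
#     SetBtn(1, 1, 1)   # press button 1 on device 1
#     SetBtn(0, 1, 1)   # release it
#     RelinquishVJD(1)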
|
mit
| 8,956,066,866,461,069,000
| 21.241667
| 102
| 0.688777
| false
|
excel-analytics/telegram_chat_bot
|
tg_bot/s2s.py
|
1
|
1734
|
from multiprocessing import Process, Queue
# from multiprocessing.queue import Empty
import time
import telepot
import yaml
from seq2seq.runner import decode
config = yaml.load(open('config.yml').read())
in_msg = Queue()
out_msg = Queue()
chat_id = config['chat_id']
reload_msg = '/reload'
def run_tg(bot):
bot.handle = handle
print('I am listening ...')
bot.message_loop()
while 1:
time.sleep(10)
def f(q_to, q_from):
decode(q_to, q_from)
def work_with_model(bot):
while 1:
q_to = Queue()
q_from = Queue()
p = Process(target=f, args=(q_to, q_from))
p.start()
init = q_from.get()
bot.sendMessage(chat_id, init)
while 1:
message = in_msg.get()
if message.startswith(reload_msg):
bot.sendMessage(chat_id, 'Wait a lot.')
break
q_to.put(message)
from_model = q_from.get()
out_msg.put(from_model)
p.terminate()
def handle(msg):
# print(msg)
if 'chat' not in msg:
return
if 'id' not in msg['chat']:
return
if msg['chat']['id'] != chat_id:
return
if 'text' in msg:
in_msg.put(msg['text'].lower())
# print(msg['text'].startswith(reload_msg))
if not msg['text'].startswith(reload_msg):
answer = out_msg.get()
if answer.strip() == '':
answer = '%NO_MSG%'
bot.sendMessage(chat_id, answer, reply_to_message_id=msg['message_id'])
# if __name__ == '__main__':
config = yaml.load(open('config.yml').read())
bot = telepot.Bot(config['telegram'])
p = Process(target=run_tg, args=(bot,))
p.start()
work_with_model(bot)
# p.join()
|
mit
| -9,018,540,958,632,976,000
| 22.753425
| 83
| 0.555363
| false
|
vedujoshi/tempest
|
tempest/api/volume/base.py
|
1
|
13803
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.volume import api_microversion_fixture
from tempest.common import compute
from tempest.common import waiters
from tempest import config
from tempest.lib.common import api_version_utils
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import exceptions
import tempest.test
CONF = config.CONF
class BaseVolumeTest(api_version_utils.BaseMicroversionTest,
tempest.test.BaseTestCase):
"""Base test case class for all Cinder API tests."""
_api_version = 2
credentials = ['primary']
@classmethod
def skip_checks(cls):
super(BaseVolumeTest, cls).skip_checks()
if not CONF.service_available.cinder:
skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
raise cls.skipException(skip_msg)
if cls._api_version == 2:
if not CONF.volume_feature_enabled.api_v2:
msg = "Volume API v2 is disabled"
raise cls.skipException(msg)
elif cls._api_version == 3:
if not CONF.volume_feature_enabled.api_v3:
msg = "Volume API v3 is disabled"
raise cls.skipException(msg)
else:
msg = ("Invalid Cinder API version (%s)" % cls._api_version)
raise exceptions.InvalidConfiguration(msg)
api_version_utils.check_skip_with_microversion(
cls.min_microversion, cls.max_microversion,
CONF.volume.min_microversion, CONF.volume.max_microversion)
@classmethod
def setup_credentials(cls):
cls.set_network_resources()
super(BaseVolumeTest, cls).setup_credentials()
@classmethod
def setup_clients(cls):
super(BaseVolumeTest, cls).setup_clients()
cls.servers_client = cls.os_primary.servers_client
if CONF.service_available.glance:
cls.images_client = cls.os_primary.image_client_v2
if cls._api_version == 3:
cls.backups_client = cls.os_primary.backups_v3_client
cls.volumes_client = cls.os_primary.volumes_v3_client
else:
cls.backups_client = cls.os_primary.backups_v2_client
cls.volumes_client = cls.os_primary.volumes_v2_client
cls.snapshots_client = cls.os_primary.snapshots_v2_client
cls.volumes_extension_client =\
cls.os_primary.volumes_v2_extension_client
cls.availability_zone_client = (
cls.os_primary.volume_v2_availability_zone_client)
cls.volume_limits_client = cls.os_primary.volume_v2_limits_client
cls.messages_client = cls.os_primary.volume_v3_messages_client
cls.versions_client = cls.os_primary.volume_v3_versions_client
cls.groups_client = cls.os_primary.groups_v3_client
cls.group_snapshots_client = cls.os_primary.group_snapshots_v3_client
def setUp(self):
super(BaseVolumeTest, self).setUp()
self.useFixture(api_microversion_fixture.APIMicroversionFixture(
self.request_microversion))
@classmethod
def resource_setup(cls):
super(BaseVolumeTest, cls).resource_setup()
cls.request_microversion = (
api_version_utils.select_request_microversion(
cls.min_microversion,
CONF.volume.min_microversion))
cls.snapshots = []
cls.volumes = []
cls.image_ref = CONF.compute.image_ref
cls.flavor_ref = CONF.compute.flavor_ref
cls.build_interval = CONF.volume.build_interval
cls.build_timeout = CONF.volume.build_timeout
@classmethod
def resource_cleanup(cls):
cls.clear_snapshots()
cls.clear_volumes()
super(BaseVolumeTest, cls).resource_cleanup()
@classmethod
def create_volume(cls, wait_until='available', **kwargs):
"""Wrapper utility that returns a test volume.
:param wait_until: wait till volume status.
"""
if 'size' not in kwargs:
kwargs['size'] = CONF.volume.volume_size
if 'imageRef' in kwargs:
image = cls.images_client.show_image(kwargs['imageRef'])
min_disk = image['min_disk']
kwargs['size'] = max(kwargs['size'], min_disk)
if 'name' not in kwargs:
name = data_utils.rand_name(cls.__name__ + '-Volume')
kwargs['name'] = name
volume = cls.volumes_client.create_volume(**kwargs)['volume']
cls.volumes.append(volume)
waiters.wait_for_volume_resource_status(cls.volumes_client,
volume['id'], wait_until)
return volume
@classmethod
def create_snapshot(cls, volume_id=1, **kwargs):
"""Wrapper utility that returns a test snapshot."""
if 'name' not in kwargs:
name = data_utils.rand_name(cls.__name__ + '-Snapshot')
kwargs['name'] = name
snapshot = cls.snapshots_client.create_snapshot(
volume_id=volume_id, **kwargs)['snapshot']
cls.snapshots.append(snapshot['id'])
waiters.wait_for_volume_resource_status(cls.snapshots_client,
snapshot['id'], 'available')
return snapshot
def create_backup(self, volume_id, backup_client=None, **kwargs):
"""Wrapper utility that returns a test backup."""
if backup_client is None:
backup_client = self.backups_client
if 'name' not in kwargs:
name = data_utils.rand_name(self.__class__.__name__ + '-Backup')
kwargs['name'] = name
backup = backup_client.create_backup(
volume_id=volume_id, **kwargs)['backup']
self.addCleanup(backup_client.delete_backup, backup['id'])
waiters.wait_for_volume_resource_status(backup_client, backup['id'],
'available')
return backup
# NOTE(afazekas): these create_* and clean_* could be defined
# only in a single location in the source, and could be more general.
@staticmethod
def delete_volume(client, volume_id):
"""Delete volume by the given client"""
client.delete_volume(volume_id)
client.wait_for_resource_deletion(volume_id)
def delete_snapshot(self, snapshot_id, snapshots_client=None):
"""Delete snapshot by the given client"""
if snapshots_client is None:
snapshots_client = self.snapshots_client
snapshots_client.delete_snapshot(snapshot_id)
snapshots_client.wait_for_resource_deletion(snapshot_id)
if snapshot_id in self.snapshots:
self.snapshots.remove(snapshot_id)
def attach_volume(self, server_id, volume_id):
"""Attach a volume to a server"""
self.servers_client.attach_volume(
server_id, volumeId=volume_id,
device='/dev/%s' % CONF.compute.volume_device_name)
waiters.wait_for_volume_resource_status(self.volumes_client,
volume_id, 'in-use')
self.addCleanup(waiters.wait_for_volume_resource_status,
self.volumes_client, volume_id, 'available')
self.addCleanup(self.servers_client.detach_volume, server_id,
volume_id)
@classmethod
def clear_volumes(cls):
for volume in cls.volumes:
try:
cls.volumes_client.delete_volume(volume['id'])
except Exception:
pass
for volume in cls.volumes:
try:
cls.volumes_client.wait_for_resource_deletion(volume['id'])
except Exception:
pass
@classmethod
def clear_snapshots(cls):
for snapshot in cls.snapshots:
test_utils.call_and_ignore_notfound_exc(
cls.snapshots_client.delete_snapshot, snapshot)
for snapshot in cls.snapshots:
test_utils.call_and_ignore_notfound_exc(
cls.snapshots_client.wait_for_resource_deletion,
snapshot)
def create_server(self, wait_until='ACTIVE', **kwargs):
name = kwargs.pop(
'name',
data_utils.rand_name(self.__class__.__name__ + '-instance'))
tenant_network = self.get_tenant_network()
body, _ = compute.create_test_server(
self.os_primary,
tenant_network=tenant_network,
name=name,
wait_until=wait_until,
**kwargs)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
waiters.wait_for_server_termination,
self.servers_client, body['id'])
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.servers_client.delete_server, body['id'])
return body
class BaseVolumeAdminTest(BaseVolumeTest):
"""Base test case class for all Volume Admin API tests."""
credentials = ['primary', 'admin']
@classmethod
def setup_clients(cls):
super(BaseVolumeAdminTest, cls).setup_clients()
cls.admin_volume_qos_client = cls.os_admin.volume_qos_v2_client
cls.admin_volume_services_client = \
cls.os_admin.volume_services_v2_client
cls.admin_volume_types_client = cls.os_admin.volume_types_v2_client
cls.admin_volume_manage_client = cls.os_admin.volume_manage_v2_client
cls.admin_volume_client = cls.os_admin.volumes_v2_client
if cls._api_version == 3:
cls.admin_volume_client = cls.os_admin.volumes_v3_client
cls.admin_hosts_client = cls.os_admin.volume_hosts_v2_client
cls.admin_snapshot_manage_client = \
cls.os_admin.snapshot_manage_v2_client
cls.admin_snapshots_client = cls.os_admin.snapshots_v2_client
cls.admin_backups_client = cls.os_admin.backups_v2_client
cls.admin_encryption_types_client = \
cls.os_admin.encryption_types_v2_client
cls.admin_quota_classes_client = \
cls.os_admin.volume_quota_classes_v2_client
cls.admin_quotas_client = cls.os_admin.volume_quotas_v2_client
cls.admin_volume_limits_client = cls.os_admin.volume_v2_limits_client
cls.admin_capabilities_client = \
cls.os_admin.volume_capabilities_v2_client
cls.admin_scheduler_stats_client = \
cls.os_admin.volume_scheduler_stats_v2_client
cls.admin_messages_client = cls.os_admin.volume_v3_messages_client
cls.admin_groups_client = cls.os_admin.groups_v3_client
cls.admin_group_snapshots_client = \
cls.os_admin.group_snapshots_v3_client
cls.admin_group_types_client = cls.os_admin.group_types_v3_client
@classmethod
def resource_setup(cls):
super(BaseVolumeAdminTest, cls).resource_setup()
cls.qos_specs = []
cls.volume_types = []
@classmethod
def resource_cleanup(cls):
cls.clear_qos_specs()
super(BaseVolumeAdminTest, cls).resource_cleanup()
cls.clear_volume_types()
@classmethod
def create_test_qos_specs(cls, name=None, consumer=None, **kwargs):
"""create a test Qos-Specs."""
name = name or data_utils.rand_name(cls.__name__ + '-QoS')
consumer = consumer or 'front-end'
qos_specs = cls.admin_volume_qos_client.create_qos(
name=name, consumer=consumer, **kwargs)['qos_specs']
cls.qos_specs.append(qos_specs['id'])
return qos_specs
@classmethod
def create_volume_type(cls, name=None, **kwargs):
"""Create a test volume-type"""
name = name or data_utils.rand_name(cls.__name__ + '-volume-type')
volume_type = cls.admin_volume_types_client.create_volume_type(
name=name, **kwargs)['volume_type']
cls.volume_types.append(volume_type['id'])
return volume_type
def create_group_type(self, name=None, **kwargs):
"""Create a test group-type"""
name = name or data_utils.rand_name(
self.__class__.__name__ + '-group-type')
group_type = self.admin_group_types_client.create_group_type(
name=name, **kwargs)['group_type']
self.addCleanup(self.admin_group_types_client.delete_group_type,
group_type['id'])
return group_type
@classmethod
def clear_qos_specs(cls):
for qos_id in cls.qos_specs:
test_utils.call_and_ignore_notfound_exc(
cls.admin_volume_qos_client.delete_qos, qos_id)
for qos_id in cls.qos_specs:
test_utils.call_and_ignore_notfound_exc(
cls.admin_volume_qos_client.wait_for_resource_deletion, qos_id)
@classmethod
def clear_volume_types(cls):
for vol_type in cls.volume_types:
test_utils.call_and_ignore_notfound_exc(
cls.admin_volume_types_client.delete_volume_type, vol_type)
for vol_type in cls.volume_types:
test_utils.call_and_ignore_notfound_exc(
cls.admin_volume_types_client.wait_for_resource_deletion,
vol_type)
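# A minimal illustrative subclass (hypothetical; not part of tempest itself):
#
#     class VolumesSmokeTest(BaseVolumeTest):
#         def test_create_volume(self):
#             volume = self.create_volume(wait_until='available')
#             fetched = self.volumes_client.show_volume(
#                 volume['id'])['volume']
#             self.assertEqual('available', fetched['status'])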
|
apache-2.0
| -1,495,445,243,357,334,800
| 39.125
| 79
| 0.62001
| false
|
clchiou/garage
|
py/garage/garage/multiprocessing/backport.py
|
1
|
1205
|
__all__ = [
'BoundedSemaphore',
'UnlimitedSemaphore',
'Timeout',
]
import threading
import time
# NOTE: This module is Python 2 compatible.
class Timeout(Exception):
pass
# Because Python 2 semaphore does not support timeout...
class BoundedSemaphore(object):
def __init__(self, value):
if value < 0:
raise ValueError('semaphore initial value must be >= 0')
self._cond = threading.Condition(threading.Lock())
self._initial_value = value
self._value = value
def acquire(self, timeout):
with self._cond:
endtime = time.time() + timeout
while self._value == 0:
timeout = endtime - time.time()
if timeout <= 0:
raise Timeout
self._cond.wait(timeout)
self._value -= 1
def release(self):
with self._cond:
if self._value >= self._initial_value:
raise ValueError('semaphore is released too many times')
self._value += 1
self._cond.notify()
class UnlimitedSemaphore(object):
def acquire(self, timeout):
pass
def release(self):
pass
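# A minimal usage sketch (illustrative only):
if __name__ == '__main__':
    sem = BoundedSemaphore(1)
    sem.acquire(timeout=1)        # succeeds immediately
    try:
        sem.acquire(timeout=0.1)  # no permits left, so this raises Timeout
    except Timeout:
        print('timed out as expected')
    sem.release()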
|
mit
| 3,029,463,247,928,132,600
| 22.173077
| 72
| 0.561826
| false
|
khosrow/metpx
|
sundew/unittests/unittest_senderAm.py
|
1
|
1082
|
# -*- coding: iso-8859-1 -*-
#############################################################################################
# Name: unittest_senderAm.py
# Author: Jun Hu
# Date: 2012-04-30
# Description: test cases for senderAm class
#############################################################################################
import sys,os,unittest
sys.path.insert(1, '../sundew/lib/')
os.environ['PXROOT']="."
from Logger import Logger
from Client import Client
from CacheManager import CacheManager
from senderAm import senderAm
class unittest_Template(unittest.TestCase):
def setUp(self,logFile='log/Template.log'):
self.logger = Logger(logFile, 'DEBUG', 'Sub')
self.logger = self.logger.getLogger()
def test_Template(self):
self.assertEqual(None, None)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(unittest_Template))
return suite
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(unittest_Template)
unittest.TextTestRunner(verbosity=2).run(suite)
|
gpl-2.0
| 7,791,971,487,421,272,000
| 30.823529
| 93
| 0.575786
| false
|
JulyKikuAkita/PythonPrac
|
cs15211/BuddyStrings.py
|
1
|
3894
|
__source__ = 'https://leetcode.com/problems/buddy-strings/'
# Time: O(N) where N is the length of A and B
# Space: O(1)
#
# Description: Leetcode # 859. Buddy Strings
#
# Given two strings A and B of lowercase letters,
# return true if and only if we can swap two letters in A so that the result equals B.
#
# Example 1:
#
# Input: A = "ab", B = "ba"
# Output: true
# Example 2:
#
# Input: A = "ab", B = "ab"
# Output: false
# Example 3:
#
# Input: A = "aa", B = "aa"
# Output: true
# Example 4:
#
# Input: A = "aaaaaaabc", B = "aaaaaaacb"
# Output: true
# Example 5:
#
# Input: A = "", B = "aa"
# Output: false
#
#
# Note:
#
# 0 <= A.length <= 20000
# 0 <= B.length <= 20000
# A and B consist only of lowercase letters.
#
import unittest
import itertools
# 88.96% 24ms
class Solution(object):
def buddyStrings(self, A, B):
"""
:type A: str
:type B: str
:rtype: bool
"""
if len(A) != len(B):return False
if A == B: # True when A can swap 2 identical chars
seen = set()
for a in A:
if a in seen:
return True
seen.add(a)
return False
else:
pairs = []
for a, b in itertools.izip(A, B):
if a != b:
pairs.append((a, b))
if len(pairs) >= 3:
return False
return len(pairs) == 2 and pairs[0] == pairs[1][::-1]
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
#Thought: https://leetcode.com/problems/buddy-strings/solution/
#
# If swapping A[i] and A[j] would demonstrate that A and B are buddy strings,
# then A[i] == B[j] and A[j] == B[i]. That means among the four free variables A[i], A[j],
# B[i], B[j], there are only two cases: either A[i] == A[j] or not.
#
# 2ms 99.73$
class Solution {
public boolean buddyStrings(String A, String B) {
if (A.length() != B.length()) return false;
if (A.equals(B)) {
int[] count = new int[26];
for (char c : A.toCharArray()) {
count[c-'a']++;
}
for (int cnt : count) {
if (cnt > 1) return true;
}
return false;
} else {
int first = -1, second = -1;
for (int i = 0; i < A.length(); i++) {
if (A.charAt(i) != B.charAt(i)) {
if (first == -1) first = i;
else if (second == -1) second = i;
else return false;
}
}
return (second != -1 && A.charAt(first) == B.charAt(second) && A.charAt(second) == B.charAt(first));
}
}
}
# use char array
# 2ms 98.73%
class Solution {
public boolean buddyStrings(String A, String B) {
if (A == null || B == null) return false;
if (A.length() != B.length()) return false;
if (A.equals(B)) {
int[] alphabets = new int[26];
for (int i = 0; i < A.length(); i++) {
                if (++alphabets[A.charAt(i) - 'a'] > 1) return true;
}
return false;
}
int diffCnt = 0;
char[] arrA = new char[2];
char[] arrB = new char[2];
for (int i = 0; i < A.length(); i++) {
if (A.charAt(i) != B.charAt(i)) {
if (diffCnt == 0) {
arrA[0] = A.charAt(i);
arrB[0] = B.charAt(i);
} else {
arrA[1] = A.charAt(i);
arrB[1] = B.charAt(i);
}
diffCnt++;
}
if (diffCnt > 2) return false;
}
if (arrA[0] == arrB[1] && arrA[1] == arrB[0]) return true;
return false;
}
}
'''
|
apache-2.0
| 4,949,498,763,733,287,000
| 26.041667
| 112
| 0.466102
| false
|
miracle2k/k8s-snapshots
|
k8s_snapshots/kube.py
|
1
|
5737
|
import asyncio
import threading
from typing import (Optional, Iterable, AsyncGenerator, TypeVar, Type,
NamedTuple, Callable)
import pykube
import structlog
from aiochannel import Channel
from k8s_snapshots.context import Context
_logger = structlog.get_logger(__name__)
Resource = TypeVar(
'Resource',
bound=pykube.objects.APIObject,
)
ClientFactory = Callable[[], pykube.HTTPClient]
# Copy of a locally-defined namedtuple in
# pykube.query.WatchQuery.object_stream()
_WatchEvent = NamedTuple('_WatchEvent', [
('type', str),
('object', Resource),
])
class SnapshotRule(pykube.objects.APIObject):
version = "k8s-snapshots.elsdoerfer.com/v1"
endpoint = "snapshotrules"
kind = "SnapshotRule"
class Kubernetes:
"""
Allows for easier mocking of Kubernetes resources.
"""
def __init__(self, client_factory: Optional[ClientFactory] = None):
"""
Parameters
----------
client_factory
Used in threaded operations to create a local
:any:`pykube.HTTPClient` instance.
"""
# Used for threaded operations
self.client_factory = client_factory
def get_or_none(self,
resource_type: Type[Resource],
name: str,
namespace: Optional[str] = None) -> Optional[Resource]:
"""
Sync wrapper for :any:`pykube.query.Query().get_or_none`
"""
resource_query = resource_type.objects(self.client_factory())
if namespace is not None:
resource_query = resource_query.filter(namespace=namespace)
return resource_query.get_or_none(name=name)
def watch(
self,
resource_type: Type[Resource],
) -> Iterable[_WatchEvent]:
"""
Sync wrapper for :any:`pykube.query.Query().watch().object_stream()`
"""
return resource_type.objects(self.client_factory())\
.filter(namespace=pykube.all).watch().object_stream()
def get_resource_or_none_sync(
client_factory: ClientFactory,
resource_type: Type[Resource],
name: str,
namespace: Optional[str] = None) -> Optional[Resource]:
return Kubernetes(client_factory).get_or_none(
resource_type,
name,
namespace,
)
async def get_resource_or_none(client_factory: ClientFactory,
resource_type: Type[Resource],
name: str,
namespace: Optional[str] = None,
*,
loop=None) -> Optional[Resource]:
loop = loop or asyncio.get_event_loop()
def _get():
return get_resource_or_none_sync(
client_factory=client_factory,
resource_type=resource_type,
name=name,
namespace=namespace,
)
return await loop.run_in_executor(
None,
_get,
)
def watch_resources_sync(
client_factory: ClientFactory,
resource_type: pykube.objects.APIObject,
) -> Iterable:
return Kubernetes(client_factory).watch(resource_type=resource_type)
async def watch_resources(ctx: Context,
resource_type: Resource,
*,
delay: int,
allow_missing: bool = False,
loop=None) -> AsyncGenerator[_WatchEvent, None]:
""" Asynchronously watch Kubernetes resources """
async_gen = _watch_resources_thread_wrapper(
ctx.kube_client, resource_type, allow_missing=allow_missing, loop=loop)
    # Workaround a race condition in pykube:
    # https://github.com/kelproject/pykube/issues/138
await asyncio.sleep(delay)
async for item in async_gen:
yield item
async def _watch_resources_thread_wrapper(
client_factory: Callable[[], pykube.HTTPClient],
resource_type: Type[Resource],
allow_missing: bool = False,
*,
loop=None) -> AsyncGenerator[_WatchEvent, None]:
""" Async wrapper for pykube.watch().object_stream() """
loop = loop or asyncio.get_event_loop()
_log = _logger.bind(resource_type_name=resource_type.__name__, )
channel = Channel()
def worker():
try:
_log.debug('watch-resources.worker.start')
while True:
sync_iterator = watch_resources_sync(
client_factory=client_factory, resource_type=resource_type)
_log.debug('watch-resources.worker.watch-opened')
for event in sync_iterator:
# only put_nowait seems to cause SIGSEGV
loop.call_soon_threadsafe(channel.put_nowait, event)
_log.debug('watch-resources.worker.watch-closed')
except pykube.exceptions.HTTPError as e:
# TODO: It's possible that the user creates the resource
# while we are already running. We should pick this up
# automatically, i.e. watch ThirdPartyResource, or just
# check every couple of seconds.
if e.code == 404 and allow_missing:
_log.info('watch-resources.worker.skipped')
else:
_log.exception('watch-resources.worker.error')
except:
_log.exception('watch-resources.worker.error')
finally:
_log.debug('watch-resources.worker.finalized')
channel.close()
thread = threading.Thread(
target=worker,
daemon=True,
)
thread.start()
async for channel_event in channel:
yield channel_event
_log.debug('watch-resources.done')
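# A minimal usage sketch (hypothetical: it assumes the package is importable,
# a reachable cluster, and a kubeconfig at the given path):
if __name__ == '__main__':
    def _client_factory():
        return pykube.HTTPClient(
            pykube.KubeConfig.from_file('~/.kube/config'))
    async def _demo():
        pod = await get_resource_or_none(
            _client_factory, pykube.objects.Pod, 'my-pod',
            namespace='default')
        print(pod)
    asyncio.get_event_loop().run_until_complete(_demo())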
|
bsd-2-clause
| 5,373,276,606,000,399,000
| 30.696133
| 79
| 0.586195
| false
|
ntoll/code-dojo
|
adventure/week3/team3/adventure.py
|
1
|
6088
|
from cmd import Cmd
import re
DIRECTIONS = 'N', 'E', 'S', 'W'
NORTH, EAST, SOUTH, WEST = DIRECTIONS
class Player(object):
def __init__(self, location, name='Player'):
assert isinstance(location, Location)
self.location = location
self.name = name
class Location(object):
def __init__(self, name, description=""):
self.name = name
self.description = description
self.exits = dict()
self.props = []
def __str__(self):
return self.name
def add_direction(self, direction, other_location):
assert direction in DIRECTIONS
self.exits[direction] = other_location
def describe(self):
out = ''
out += "Current location: %s\n%s\n\n" % (self.name, self.description)
for direction, location in self.exits.items():
out += "\t%s (%s)\n" % (location, direction)
if self.props:
plural = len(self.props) > 1
out += "\n%s item%s may come in handy (hint hint):\n\t%s" \
% (['This', 'These'][plural], ['', 's'][plural], '\n\t'.join(prop.aliases[0] for prop in self.props))
return out
class Prop(object):
def __init__(self, name):
self.description = None
self.location = None
self.aliases = [name]
def test_location():
startroot = Location('Start room')
kitchen = Location('Kitchen')
startroot.add_direction(NORTH, kitchen)
def test_player():
lobby = Location('Lobby')
john = Player(lobby, 'John')
def load_universe(content):
location = first_location = None
locations = {}
props = {}
#parts = re.split(r"(?:\n|\r\n|\r){2,}", content.read())
parts = content.read().split('\r\n\r\n')
import pdb
for part in parts:
location = None
prop = None
for line in part.splitlines():
line = line.strip()
if not line or line.startswith('#'):
continue
#if line == 'N:Hall':
# pdb.set_trace()
if not location and not prop:
# first line
if line.startswith(':'):
location = Location(line[1:])
locations[line[1:]] = location
if not first_location:
first_location = location
if line.startswith('*'):
prop = Prop(line[1:])
props[line[1:]] = prop
else:
if location:
#print 'line', line
if not location.description or line[1] != ':':
location.description+= line
else:
direction, destination = line.split(':', 1)
#print 'direction, destination', direction, destination
location.add_direction(direction, destination)
else:
if not prop.location:
items_location = locations[line]
prop.location = items_location
items_location.props.append(prop)
elif not prop.description:
prop.description = line
elif line.startswith("A:"):
# aliases
#A:flashlight
prop.aliases = [x.strip() for x in line[2:].split(',')]
for location in locations.values():
for direction, destination in location.exits.items():
try:
location.add_direction(direction, locations[destination])
except KeyError:
raise SystemError("Your universe file sucks! %s" % destination)
return locations, first_location
class Game(Cmd):
def __init__(self, gamefile, player_name):
Cmd.__init__(self)
self.locations, self.start_room = load_universe(file(gamefile))
self.player = Player(self.start_room, player_name)
print self.player.location.describe()
def do_move(self, direction):
direction = direction.upper()
newroom = self.player.location.exits.get(direction,None)
if newroom == None:
print "No pass around!"
return
self.player.location = self.player.location.exits[direction]
def do_look(self, where):
if where == "":
            print self.player.location.describe()
        else:
            # TODO validate where
            newroom = self.player.location.exits.get(where, None)
            if newroom is None:
                print "Nothing that way to look at!"
            else:
                print newroom.describe()
def do_joke(self, ok):
print "that is not funny. What don't you try a pun?"
if hasattr(self, 'joke'):
print 'this is funny:%s' % self.joke
self.joke = ok
def postcmd(self, stop, x):
#pass
if not hasattr(self, 'joke'):
print self.player.location.describe()
#print self.player.location.describe()
def play(gamefile):
#start_room = _create_universe()
player_name = raw_input('Player name?: ') or 'No name'
g = Game(gamefile, player_name)
g.cmdloop()
''' while True:
if not player.location.exits:
print "No more exits! GAME OVER!"
break
next_direction = raw_input('Where to next? ').upper()
while next_direction not in player.location.exits.keys():
next_direction = raw_input('Where to next? (%s) ' %\
', '.join(player.location.exits.keys())).upper()
player.location = player.location.exits[next_direction]
'''
if __name__ == '__main__':
import sys
if sys.argv[1] == 'test':
test_location()
test_player()
sys.exit(0)
try:
play(sys.argv[1])
except KeyboardInterrupt:
pass
|
mit
| 1,014,150,448,326,991,400
| 29.813472
| 113
| 0.508377
| false
|
thefab/thr
|
thr/redis2http/queue.py
|
1
|
2315
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of thr library released under the MIT license.
# See the LICENSE file for more information.
import six
from thr import DEFAULT_HTTP_PORT
from thr.utils import UnixResolver
# Trick to be able to iter over Queues
class MetaQueues(type):
def __iter__(self):
return self.iterqueues()
class Queues(object):
__metaclass__ = MetaQueues
queues = []
@classmethod
def reset(cls):
cls.queues = []
@classmethod
def add(cls, queue):
cls.queues.append(queue)
@classmethod
def iterqueues(cls):
return iter(cls.queues)
class Queue(object):
def __init__(self, queues, host="localhost", port=6379,
http_host="localhost",
http_port=DEFAULT_HTTP_PORT, workers=1,
unix_domain_socket=None):
self.host = host
self.port = port
self.unix_domain_socket = unix_domain_socket
self.queues = queues
self.http_host = http_host
self.http_port = http_port
self.workers = workers
def add_queue(queues, host="localhost", port=6379, http_host="localhost",
http_port=DEFAULT_HTTP_PORT, workers=1,
unix_domain_socket=None):
"""
Register a Redis queue
Args:
queues: a list Redis queues
Keyword Args:
host: Redis host
port: Redis port
http_host: upstream HTTP host
http_port: upstream http port
workers: number of coroutines popping requests from the queue
unix_domain_socket: unix domain socket file path
"""
if http_host.startswith('/'):
# This is an unix socket
new_http_host = UnixResolver.register_unixsocket(http_host)
else:
new_http_host = http_host
if isinstance(queues, six.string_types):
Queues.add(Queue([queues], host=host, port=port,
http_host=new_http_host,
http_port=http_port, workers=workers,
unix_domain_socket=unix_domain_socket))
else:
Queues.add(Queue(queues, host=host, port=port, http_host=new_http_host,
http_port=http_port, workers=workers,
unix_domain_socket=unix_domain_socket))
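# A minimal usage sketch (illustrative values only):
#
#     add_queue(["thr:queue:hello"], host="localhost", port=6379,
#               http_host="localhost", http_port=8082, workers=2)
#
# after which the registered queues can be iterated with Queues.iterqueues().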
|
mit
| 5,299,596,482,381,203,000
| 27.231707
| 79
| 0.598704
| false
|
driftyco/ionitron-issues
|
tasks/github_issue_submit.py
|
1
|
8606
|
import github_api
import util
from config.config import CONFIG_VARS as cvar
from datetime import datetime, timedelta
def flag_if_submitted_through_github(repo_username, repo_id, issue):
return False # temporarily disabling ionitron
"""
Flags any issue that is submitted through github's UI, and not the Ionic site.
Adds a label, as well as a comment, to force the issue through the custom form.
@return: whether or not the issue was flagged (bool)
"""
if not issue:
return False
number = issue.get('number')
if not number:
return False
user = issue.get('user')
if not user:
return False
if not issue.get('body'):
return False
if is_valid_issue_opened_source(repo_username, repo_id, issue):
return False
context = {
'issue': issue,
'user': user
}
msg = util.get_template('RESUBMIT_TEMPLATE', context)
github_api.create_issue_comment(repo_username, repo_id, number, msg)
return True
def is_valid_issue_opened_source(repo_username, repo_id, issue, issue_comments=None, needs_resubmit_content_id=cvar['NEEDS_RESUBMIT_CONTENT_ID'], test_is_org_member=True):
if has_content_from_custom_submit_form(issue):
return True
if test_is_org_member:
if github_api.is_org_member(repo_username, issue['user']['login']):
return True
if has_needs_resubmit_content_id(repo_username, repo_id, issue, issue_comments=issue_comments, needs_resubmit_content_id=needs_resubmit_content_id):
return True
return False
def has_content_from_custom_submit_form(issue):
body = issue.get('body')
if body:
return 'is-issue-template' in body
return False
def has_needs_resubmit_content_id(repo_username, repo_id, issue, issue_comments=None, needs_resubmit_content_id=cvar['NEEDS_RESUBMIT_CONTENT_ID']):
comment = get_needs_resubmit_comment(repo_username, repo_id, issue, issue_comments=issue_comments, needs_resubmit_content_id=needs_resubmit_content_id)
    return comment is not None
def get_needs_resubmit_comment(repo_username, repo_id, issue, issue_comments=None, needs_resubmit_content_id=cvar['NEEDS_RESUBMIT_CONTENT_ID']):
if issue_comments is None:
issue_comments = github_api.fetch_issue_comments(repo_username, repo_id, issue.get('number'))
if issue_comments and isinstance(issue_comments, list):
for issue_comment in issue_comments:
body = issue_comment.get('body')
if body and needs_resubmit_content_id in body:
return issue_comment
def remove_flag_if_submitted_through_github(repo_username, repo_id, issue, issue_comments=None, is_debug=cvar['DEBUG']):
"""
Removes the notice flag (automated comments and label) if the issue has been
resubmitted through the custom form on the Ionic site.
@param issueNum: the issue number that should be refreshed (string)
@return: whether or not the flag was removed (bool)
"""
if not issue:
return False
number = issue.get('number')
if not number:
return False
if not has_content_from_custom_submit_form(issue):
return False
if not has_needs_resubmit_content_id(repo_username, repo_id, issue, issue_comments=issue_comments):
return False
if not is_debug:
github_api.delete_automated_issue_comments(repo_username, repo_id, number)
return True
def remove_flag_if_not_updated(repo_username, repo_id, issue, issue_comments=None, needs_resubmit_content_id=cvar['NEEDS_RESUBMIT_CONTENT_ID'], remove_form_resubmit_comment_after=cvar['REMOVE_FORM_RESUBMIT_COMMENT_AFTER'], now=datetime.now(), is_debug=cvar['DEBUG']):
if not issue:
return False
number = issue.get('number')
if not number:
return False
if has_content_from_custom_submit_form(issue):
return False
comment = get_needs_resubmit_comment(repo_username, repo_id, issue, issue_comments=issue_comments, needs_resubmit_content_id=needs_resubmit_content_id)
if comment is None:
return False
created_at = util.get_date(comment.get('created_at'))
if created_at is None:
return False
remove_date = created_at + timedelta(days=remove_form_resubmit_comment_after)
if remove_date > now:
return False
if not is_debug:
github_api.delete_automated_issue_comments(repo_username, repo_id, number)
return True
def remove_flag_when_closed(repo_username, repo_id, issue, issue_comments=None, needs_resubmit_content_id=cvar['NEEDS_RESUBMIT_CONTENT_ID'], is_debug=cvar['DEBUG']):
if not issue:
return False
number = issue.get('number')
if not number:
return False
if has_content_from_custom_submit_form(issue):
return False
comment = get_needs_resubmit_comment(repo_username, repo_id, issue, issue_comments=issue_comments, needs_resubmit_content_id=needs_resubmit_content_id)
if comment is None:
return False
comment_id = comment.get('id')
if comment_id is None:
return False
if not is_debug:
github_api.delete_issue_comment(repo_username, repo_id, comment_id, number=number)
return True
def add_label_from_content(repo_username, repo_id, issue):
add_labels = []
title = issue.get('title', '').lower().replace(':', ' ').replace('(', ' ').replace(')', ' ').replace('.', ' ').replace('@', ' ').replace('&', ' ').replace('!', ' ').replace('-', ' ').replace(';', ' ')
body = issue.get('body', '').lower()
body_cleaned = body.replace(' ', '').replace(':', '').replace('*', '').replace('#', '').replace('.', '').replace('(', '').replace(')', '').replace('&', '').replace('!', '').replace(';', '').replace('-', '').replace('<', '').replace('>', '').replace('/', '')
if not has_label(issue, 'docs') and (title.startswith('docs ') or '<span ionic-type>docs</span>' in body):
add_labels.append('docs')
elif not has_label(issue, 'feature') and '<span ionic-type>feat</span>' in body:
add_labels.append('feature')
if not has_label(issue, 'v2') and (title.startswith('v2 ') or (' v2 ' in title) or ('ionic2' in title) or ('ionic 2' in title) or ('ionicv2' in title) or ('ionic2' in body_cleaned) or ('ionicv2' in body_cleaned) or ('ionicversion2' in body_cleaned) or (' v2 ' in body)):
add_labels.append('v2')
elif not has_label(issue, 'v1') and (title.startswith('v1 ') or (' v1 ' in title) or ('ionic1' in title) or ('ionic 1' in title) or ('ionicv1' in title) or ('ionic1' in body_cleaned) or ('ionicv1' in body_cleaned) or ('ionicversion1' in body_cleaned) or (' v1 ' in body)):
add_labels.append('v1')
labels = {
'actionsheet': ['actionsheet', 'action-sheet', 'action sheet'],
'alert': ['alert', 'popup'],
'animation': ['animation', 'animate'],
'checkbox': ['checkbox'],
'footer': ['footer'],
'header': ['header'],
'infinitescroll': ['infinitescroll', 'infinite scroll', 'infinite-scroll'],
'keyboard': ['keyboard'],
'menus': ['menu'],
'modals': ['modal'],
'navigation': ['navigation'],
'platform:android': ['android', 'samsung', 'galaxy', 'moto', 'nexus', 'htc', 'amazon'],
'platform:ios': ['ios', 'iphone', 'ipad', 'ipod'],
'platform:windows': ['wp8', 'windows phone', 'wp10'],
'popover': ['popover'],
'refresher': ['refresher', 'pull-to-refresh', 'ptr', 'pull to refresh', 'pulltorefresh', 'ion-refresher', 'ionrefresher', 'ion refresher'],
'radio': ['radio'],
'range': ['range', 'slider'],
'slidebox': ['slidebox', 'swiper', 'ion-slides', 'ionslides', 'ion slides'],
'select': ['ion-select', 'ionselect', 'ion select'],
'toggle': ['ion-toggle', 'iontoggle', 'ion toggle'],
'virtualscroll': ['virtualscroll', 'virtual scroll', 'virtual-scroll', 'collectionrepeat', 'collection repeat', 'collection-repeat'],
}
for label, keywords in labels.iteritems():
for keyword in keywords:
if keyword in title or keyword in body:
add_labels.append(label)
break
return add_labels
def has_label(issue, label_name):
if not issue:
return False
try:
labels = issue.get('labels')
if not labels or not len(labels):
return False
for label in labels:
if label_name == label.get('name'):
return True
except Exception as ex:
print 'has_label error: %s' % ex
return False
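# --- Hedged usage sketch (illustration only, not part of the bot) ---
# Shows how has_label() and add_label_from_content() behave on a
# hypothetical issue payload; the repo arguments and the fake_issue dict
# below are assumptions for demonstration, not real GitHub data.
def _demo_label_matching():
    fake_issue = {
        'title': 'v2: ion-refresher does not fire on Android',
        'body': 'Pull to refresh stops working after navigation.',
        'labels': [{'name': 'platform:android'}],
    }
    # has_label() only inspects the label names already on the issue.
    assert has_label(fake_issue, 'platform:android')
    assert not has_label(fake_issue, 'v2')
    # add_label_from_content() scans title/body keywords; for this payload
    # it should suggest at least 'v2' and 'refresher'.
    suggested = add_label_from_content('driftyco', 'ionic', fake_issue)
    assert 'v2' in suggested and 'refresher' in suggested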
|
mit
| -6,871,149,245,831,520,000
| 36.585153
| 276
| 0.635603
| false
|
eharney/cinder
|
cinder/tests/unit/api/v3/test_attachments.py
|
1
|
12523
|
# Copyright (C) 2017 HuaWei Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for attachments API.
"""
import ddt
import mock
import webob
from cinder.api import microversions as mv
from cinder.api.v3 import attachments as v3_attachments
from cinder import context
from cinder import exception
from cinder import objects
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
from cinder.volume import api as volume_api
from cinder.volume import rpcapi as volume_rpcapi
@ddt.ddt
class AttachmentsAPITestCase(test.TestCase):
"""Test Case for attachment API."""
def setUp(self):
super(AttachmentsAPITestCase, self).setUp()
self.controller = v3_attachments.AttachmentsController()
self.volume_api = volume_api.API()
self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID,
auth_token=True,
is_admin=True)
self.volume1 = self._create_volume(display_name='fake_volume_1',
project_id=fake.PROJECT_ID)
self.volume2 = self._create_volume(display_name='fake_volume_2',
project_id=fake.PROJECT2_ID)
self.attachment1 = self._create_attachment(
volume_uuid=self.volume1.id, instance_uuid=fake.UUID1)
self.attachment2 = self._create_attachment(
volume_uuid=self.volume1.id, instance_uuid=fake.UUID1)
self.attachment3 = self._create_attachment(
volume_uuid=self.volume1.id, instance_uuid=fake.UUID2)
self.attachment4 = self._create_attachment(
volume_uuid=self.volume2.id, instance_uuid=fake.UUID2)
self.addCleanup(self._cleanup)
def _cleanup(self):
self.attachment1.destroy()
self.attachment2.destroy()
self.attachment3.destroy()
self.attachment4.destroy()
self.volume1.destroy()
self.volume2.destroy()
def _create_volume(self, ctxt=None, display_name=None, project_id=None):
"""Create a volume object."""
ctxt = ctxt or self.ctxt
volume = objects.Volume(ctxt)
volume.display_name = display_name
volume.project_id = project_id
volume.status = 'available'
volume.attach_status = 'attached'
volume.create()
return volume
def test_create_attachment(self):
req = fakes.HTTPRequest.blank('/v3/%s/attachments' %
fake.PROJECT_ID,
version=mv.NEW_ATTACH)
body = {
"attachment":
{
"connector": None,
"instance_uuid": fake.UUID1,
"volume_uuid": self.volume1.id
},
}
attachment = self.controller.create(req, body)
self.assertEqual(self.volume1.id,
attachment['attachment']['volume_id'])
self.assertEqual(fake.UUID1,
attachment['attachment']['instance'])
@mock.patch.object(volume_rpcapi.VolumeAPI, 'attachment_update')
def test_update_attachment(self, mock_update):
fake_connector = {'fake_key': 'fake_value'}
mock_update.return_value = fake_connector
req = fakes.HTTPRequest.blank('/v3/%s/attachments/%s' %
(fake.PROJECT_ID, self.attachment1.id),
version=mv.NEW_ATTACH,
use_admin_context=True)
body = {
"attachment":
{
"connector": {'fake_key': 'fake_value'},
},
}
attachment = self.controller.update(req, self.attachment1.id, body)
self.assertEqual(fake_connector,
attachment['attachment']['connection_info'])
self.assertEqual(fake.UUID1, attachment['attachment']['instance'])
@mock.patch.object(objects.VolumeAttachment, 'get_by_id')
def test_attachment_operations_not_authorized(self, mock_get):
mock_get.return_value = {'project_id': fake.PROJECT2_ID}
req = fakes.HTTPRequest.blank('/v3/%s/attachments/%s' %
(fake.PROJECT_ID, self.attachment1.id),
version=mv.NEW_ATTACH,
use_admin_context=False)
body = {
"attachment":
{
"connector": {'fake_key': 'fake_value'},
},
}
self.assertRaises(exception.NotAuthorized,
self.controller.update, req,
self.attachment1.id, body)
self.assertRaises(exception.NotAuthorized,
self.controller.delete, req,
self.attachment1.id)
@ddt.data(mv.get_prior_version(mv.RESOURCE_FILTER),
mv.RESOURCE_FILTER, mv.LIKE_FILTER)
@mock.patch('cinder.api.common.reject_invalid_filters')
def test_attachment_list_with_general_filter(self, version, mock_update):
url = '/v3/%s/attachments' % fake.PROJECT_ID
req = fakes.HTTPRequest.blank(url,
version=version,
use_admin_context=False)
self.controller.index(req)
if version != mv.get_prior_version(mv.RESOURCE_FILTER):
            support_like = version == mv.LIKE_FILTER
mock_update.assert_called_once_with(req.environ['cinder.context'],
mock.ANY, 'attachment',
support_like)
@ddt.data('reserved', 'attached')
@mock.patch.object(volume_rpcapi.VolumeAPI, 'attachment_delete')
def test_delete_attachment(self, status, mock_delete):
volume1 = self._create_volume(display_name='fake_volume_1',
project_id=fake.PROJECT_ID)
attachment = self._create_attachment(
volume_uuid=volume1.id, instance_uuid=fake.UUID1,
attach_status=status)
req = fakes.HTTPRequest.blank('/v3/%s/attachments/%s' %
(fake.PROJECT_ID, attachment.id),
version=mv.NEW_ATTACH,
use_admin_context=True)
self.controller.delete(req, attachment.id)
volume2 = objects.Volume.get_by_id(self.ctxt, volume1.id)
if status == 'reserved':
self.assertEqual('detached', volume2.attach_status)
self.assertRaises(
exception.VolumeAttachmentNotFound,
objects.VolumeAttachment.get_by_id, self.ctxt, attachment.id)
else:
self.assertEqual('attached', volume2.attach_status)
mock_delete.assert_called_once_with(req.environ['cinder.context'],
attachment.id, mock.ANY)
def _create_attachment(self, ctxt=None, volume_uuid=None,
instance_uuid=None, mountpoint=None,
attach_time=None, detach_time=None,
attach_status=None, attach_mode=None):
"""Create an attachment object."""
ctxt = ctxt or self.ctxt
attachment = objects.VolumeAttachment(ctxt)
attachment.volume_id = volume_uuid
attachment.instance_uuid = instance_uuid
attachment.mountpoint = mountpoint
attachment.attach_time = attach_time
attachment.detach_time = detach_time
attachment.attach_status = attach_status or 'reserved'
attachment.attach_mode = attach_mode
attachment.create()
return attachment
@ddt.data("instance_uuid", "volume_uuid")
def test_create_attachment_without_resource_uuid(self, resource_uuid):
req = fakes.HTTPRequest.blank('/v3/%s/attachments' %
fake.PROJECT_ID,
version=mv.NEW_ATTACH)
body = {
"attachment":
{
"connector": None
}
}
body["attachment"][resource_uuid] = "test_id"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, body)
@ddt.data(False, True)
def test_list_attachments(self, is_detail):
url = '/v3/%s/attachments' % fake.PROJECT_ID
list_func = self.controller.index
if is_detail:
            url = '/v3/%s/attachments/detail' % fake.PROJECT_ID
list_func = self.controller.detail
req = fakes.HTTPRequest.blank(url, version=mv.NEW_ATTACH,
use_admin_context=True)
res_dict = list_func(req)
self.assertEqual(1, len(res_dict))
self.assertEqual(3, len(res_dict['attachments']))
self.assertEqual(self.attachment3.id,
res_dict['attachments'][0]['id'])
def test_list_attachments_with_limit(self):
url = '/v3/%s/attachments?limit=1' % fake.PROJECT_ID
req = fakes.HTTPRequest.blank(url, version=mv.NEW_ATTACH,
use_admin_context=True)
res_dict = self.controller.index(req)
self.assertEqual(1, len(res_dict))
self.assertEqual(1, len(res_dict['attachments']))
def test_list_attachments_with_marker(self):
url = '/v3/%s/attachments?marker=%s' % (fake.PROJECT_ID,
self.attachment3.id)
req = fakes.HTTPRequest.blank(url, version=mv.NEW_ATTACH,
use_admin_context=True)
res_dict = self.controller.index(req)
self.assertEqual(1, len(res_dict))
self.assertEqual(2, len(res_dict['attachments']))
self.assertEqual(self.attachment2.id,
res_dict['attachments'][0]['id'])
@ddt.data("desc", "asc")
def test_list_attachments_with_sort(self, sort_dir):
url = '/v3/%s/attachments?sort_key=id&sort_dir=%s' % (fake.PROJECT_ID,
sort_dir)
req = fakes.HTTPRequest.blank(url, version=mv.NEW_ATTACH,
use_admin_context=True)
res_dict = self.controller.index(req)
self.assertEqual(1, len(res_dict))
self.assertEqual(3, len(res_dict['attachments']))
order_ids = sorted([self.attachment1.id,
self.attachment2.id,
self.attachment3.id])
expect_result = order_ids[2] if sort_dir == "desc" else order_ids[0]
self.assertEqual(expect_result,
res_dict['attachments'][0]['id'])
@ddt.data({'admin': True, 'request_url': '?all_tenants=1', 'count': 4},
{'admin': False, 'request_url': '?all_tenants=1', 'count': 3},
{'admin': True, 'request_url':
'?all_tenants=1&project_id=%s' % fake.PROJECT2_ID,
'count': 1},
{'admin': False, 'request_url': '', 'count': 3},
{'admin': False, 'request_url': '?instance_id=%s' % fake.UUID1,
'count': 2},
{'admin': False, 'request_url': '?instance_id=%s' % fake.UUID2,
'count': 1})
@ddt.unpack
def test_list_attachment_with_tenants(self, admin, request_url, count):
url = '/v3/%s/attachments%s' % (fake.PROJECT_ID, request_url)
req = fakes.HTTPRequest.blank(url, version=mv.NEW_ATTACH,
use_admin_context=admin)
res_dict = self.controller.index(req)
self.assertEqual(1, len(res_dict))
self.assertEqual(count, len(res_dict['attachments']))
|
apache-2.0
| -1,691,232,541,652,164,600
| 42.482639
| 78
| 0.556656
| false
|
Palen/bmat-test
|
bmat/settings.py
|
1
|
2116
|
"""
Django settings for bmat project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%l$-_ks0-o$$u+33yop%@42ogv5!#yv6buqz3!x2%o#wokh6f+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'api',
'media',
'performers',
'stations',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'bmat.urls'
WSGI_APPLICATION = 'bmat.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
|
mit
| 7,632,307,555,584,678,000
| 23.045455
| 71
| 0.714556
| false
|
tsh/Rosalind-solutions-python
|
algorithmic-heights/bfs.py
|
1
|
1414
|
"""
The task is to use breadth-first search to compute single-source shortest distances in an unweighted directed graph.
Given: A simple directed graph with n ≤ 10^3 vertices in the edge list format.
Return: An array D[1..n] where D[i] is the length of a shortest path from the vertex 1 to the vertex i (D[1]=0).
If i is not reachable from 1 set D[i] to −1.
"""
from collections import deque
# Prepare graph
with open('/vagrant/input.txt') as f:
vertices, e = map(int, f.readline().split())
graph = {k: [] for k in range(1, vertices + 1)}
for line in f.readlines():
v1, v2 = map(int, line.split())
graph[v1].append(v2)
# BFS
depths = {}
for goal in range(1, vertices + 1):
seen = set()
queue = deque([1, None]) # None is level separator.
level = 0
while queue and not (len(queue) == 1 and queue[0] is None):
cur = queue.popleft()
if cur == goal:
depths[goal] = level
break
elif cur is None:
level += 1
queue.append(None)
continue
for neighbor in graph[cur]:
if neighbor not in seen:
queue.append(neighbor)
seen.add(neighbor)
else:
depths[goal] = -1
# Write results
D = []
for v in range(1, vertices + 1):
D.append(str(depths[v]))
out = open('/vagrant/out.txt', 'w')
out.write(' '.join(D))
out.close()
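# --- Alternative sketch (uses the same `graph` adjacency dict built above) ---
# The loop above re-runs BFS once per target vertex, costing O(V * (V + E)).
# A single BFS from vertex 1 yields every shortest distance in O(V + E); this
# is a minimal illustration, not wired into the file I/O above.
def all_distances(graph, vertices, source=1):
    dist = {v: -1 for v in range(1, vertices + 1)}  # -1 marks unreachable
    dist[source] = 0
    queue = deque([source])
    while queue:
        cur = queue.popleft()
        for neighbor in graph[cur]:
            if dist[neighbor] == -1:  # first visit is the shortest path
                dist[neighbor] = dist[cur] + 1
                queue.append(neighbor)
    return dist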
|
gpl-3.0
| -2,970,676,157,567,038,000
| 28.375
| 116
| 0.586525
| false
|
tensorflow/estimator
|
tensorflow_estimator/python/estimator/canned/timeseries/head.py
|
1
|
21379
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Timeseries head."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow_estimator.python.estimator import estimator_lib
from tensorflow_estimator.python.estimator.canned import head as head_lib
from tensorflow_estimator.python.estimator.canned import metric_keys
from tensorflow_estimator.python.estimator.canned.timeseries import feature_keys
from tensorflow_estimator.python.estimator.export import export_lib
class _NoStatePredictOutput(export_lib.PredictOutput):
def as_signature_def(self, receiver_tensors):
no_state_receiver_tensors = {
key: value
for key, value in receiver_tensors.items()
if not key.startswith(feature_keys.State.STATE_PREFIX)
}
return super(
_NoStatePredictOutput,
self).as_signature_def(receiver_tensors=no_state_receiver_tensors)
class TimeSeriesRegressionHead(head_lib._Head): # pylint:disable=protected-access
"""Determines input and output signatures for a time series model."""
def __init__(self,
model,
state_manager,
optimizer,
input_statistics_generator=None,
name=None):
"""Creates a `_Head` for time series regression.
Args:
model: A model for time series regression.
state_manager: A state manager.
optimizer: An optimizer.
      input_statistics_generator: An input statistics generator.
name: An optional name for the model.
"""
self.model = model
self.state_manager = state_manager
self.optimizer = optimizer
self.input_statistics_generator = input_statistics_generator
self._name = name
@property
def name(self):
return self._name
# TODO(terrytangyuan): consolidate `model_outputs` and `_Head.LossSpec`
# once `_Head.create_loss` becomes extendable
def create_loss(self, features, mode, logits=None, labels=None):
"""See `_Head`."""
model_outputs = self.state_manager.define_loss(self.model, features, mode)
tf.compat.v1.summary.scalar(
head_lib._summary_key(self._name, metric_keys.MetricKeys.LOSS),
model_outputs.loss)
return model_outputs
@property
def logits_dimension(self):
"""See `_Head`."""
return 1
def _train_ops(self, features):
"""Add training ops to the graph."""
mode = estimator_lib.ModeKeys.TRAIN
with tf.compat.v1.variable_scope(
"model",
# Use ResourceVariables to avoid race conditions.
use_resource=True):
model_outputs = self.create_loss(features, mode)
train_op = self.optimizer.minimize(
model_outputs.loss, global_step=tf.compat.v1.train.get_global_step())
return estimator_lib.EstimatorSpec(
loss=model_outputs.loss, mode=mode, train_op=train_op)
def _evaluate_ops(self, features):
"""Add ops for evaluation (aka filtering) to the graph."""
mode = estimator_lib.ModeKeys.EVAL
with tf.compat.v1.variable_scope("model", use_resource=True):
model_outputs = self.create_loss(features, mode)
metrics = {}
# Just output in-sample predictions for the last chunk seen
for prediction_key, prediction_value in model_outputs.predictions.items():
metrics[prediction_key] = _identity_metric_single(prediction_key,
prediction_value)
metrics[feature_keys.FilteringResults.TIMES] = _identity_metric_single(
feature_keys.FilteringResults.TIMES, model_outputs.prediction_times)
metrics[feature_keys.FilteringResults.STATE_TUPLE] = (
_identity_metric_nested(feature_keys.FilteringResults.STATE_TUPLE,
model_outputs.end_state))
metrics[metric_keys.MetricKeys.LOSS_MEAN] = tf.compat.v1.metrics.mean(
model_outputs.loss, name="average_loss")
return estimator_lib.EstimatorSpec(
loss=model_outputs.loss,
mode=mode,
eval_metric_ops=metrics,
# needed for custom metrics.
predictions=model_outputs.predictions)
def _predict_ops(self, features):
"""Add ops for prediction to the graph."""
with tf.compat.v1.variable_scope("model", use_resource=True):
prediction = self.model.predict(features=features)
prediction[feature_keys.PredictionResults.TIMES] = features[
feature_keys.PredictionFeatures.TIMES]
return estimator_lib.EstimatorSpec(
predictions=prediction, mode=estimator_lib.ModeKeys.PREDICT)
def _serving_ops(self, features):
"""Add ops for serving to the graph."""
with tf.compat.v1.variable_scope("model", use_resource=True):
prediction_outputs = self.model.predict(features=features)
with tf.compat.v1.variable_scope("model", reuse=True):
filtering_outputs = self.create_loss(features,
estimator_lib.ModeKeys.EVAL)
with tf.compat.v1.variable_scope("model", reuse=True):
no_state_features = {
k: v
for k, v in features.items()
if not k.startswith(feature_keys.State.STATE_PREFIX)
}
# Ignore any state management when cold-starting. The model's default
# start state is replicated across the batch.
cold_filtering_outputs = self.model.define_loss(
features=no_state_features, mode=estimator_lib.ModeKeys.EVAL)
return estimator_lib.EstimatorSpec(
mode=estimator_lib.ModeKeys.PREDICT,
export_outputs={
feature_keys.SavedModelLabels.PREDICT:
export_lib.PredictOutput(prediction_outputs),
feature_keys.SavedModelLabels.FILTER:
export_lib.PredictOutput(
state_to_dictionary(filtering_outputs.end_state)),
feature_keys.SavedModelLabels.COLD_START_FILTER:
_NoStatePredictOutput(
state_to_dictionary(cold_filtering_outputs.end_state))
},
# Likely unused, but it is necessary to return `predictions` to satisfy
# the Estimator's error checking.
predictions={})
def _convert_feature_to_tensor(self, name, value):
"""Casts features to the correct dtype based on their name."""
if name in [
feature_keys.TrainEvalFeatures.TIMES,
feature_keys.PredictionFeatures.TIMES
]:
return tf.cast(value, tf.dtypes.int64)
if name == feature_keys.TrainEvalFeatures.VALUES:
return tf.cast(value, self.model.dtype)
if name == feature_keys.PredictionFeatures.STATE_TUPLE:
return value # Correct dtypes are model-dependent
return tf.compat.v1.convert_to_tensor_or_sparse_tensor(value)
def _gather_state(self, features):
"""Returns `features` with state packed, indicates if packing was done."""
prefixed_state_re = re.compile(r"^" + feature_keys.State.STATE_PREFIX +
r"_(\d+)$")
numbered_state = []
for key, tensor in features.items():
search_result = prefixed_state_re.search(key)
if search_result:
numbered_state.append((int(search_result.group(1)), key, tensor))
if not numbered_state:
return features, False
features = features.copy()
for _, key, _ in numbered_state:
del features[key]
    # Sort by the numeric suffix; the key receives the whole
    # (number, key, tensor) tuple.
    numbered_state.sort(key=lambda item: item[0])
features[feature_keys.State.STATE_TUPLE] = tf.nest.pack_sequence_as(
structure=self.model.get_start_state(),
flat_sequence=[tensor for _, _, tensor in numbered_state])
return features, True
def _check_predict_features(self, features):
"""Raises errors if features are not suitable for prediction."""
if feature_keys.PredictionFeatures.TIMES not in features:
raise ValueError("Expected a '{}' feature for prediction.".format(
feature_keys.PredictionFeatures.TIMES))
if feature_keys.PredictionFeatures.STATE_TUPLE not in features:
raise ValueError("Expected a '{}' feature for prediction.".format(
feature_keys.PredictionFeatures.STATE_TUPLE))
times_feature = features[feature_keys.PredictionFeatures.TIMES]
if not times_feature.get_shape().is_compatible_with([None, None]):
raise ValueError(
("Expected shape (batch dimension, window size) for feature '{}' "
"(got shape {})").format(feature_keys.PredictionFeatures.TIMES,
times_feature.get_shape()))
_check_feature_shapes_compatible_with(
features=features,
compatible_with_name=feature_keys.PredictionFeatures.TIMES,
compatible_with_value=times_feature,
ignore=set([
# Model-dependent shapes
feature_keys.PredictionFeatures.STATE_TUPLE
]))
def create_estimator_spec(self, features, mode, labels=None):
"""Performs basic error checking and returns an EstimatorSpec."""
with ops.name_scope(self._name, "head"):
# for better error messages.
if labels is not None and not (isinstance(labels, dict) and labels == {}): # pylint: disable=g-explicit-bool-comparison
raise ValueError(
"The model received a `labels`, which is not supported. "
"Pass '{}' and '{}' as features.".format(
feature_keys.TrainEvalFeatures.TIMES,
feature_keys.TrainEvalFeatures.VALUES))
del labels
features = {
name: self._convert_feature_to_tensor(name=name, value=value)
for name, value in features.items()
}
if self.input_statistics_generator is not None:
input_statistics = self.input_statistics_generator.initialize_graph(
features, update_statistics=(mode == estimator_lib.ModeKeys.TRAIN))
else:
input_statistics = None
self.model.initialize_graph(input_statistics=input_statistics)
# _gather_state requires the model to have its graph initialized (so it
# has access to the structure of the model's state)
features, passed_flat_state = self._gather_state(features)
if (mode == estimator_lib.ModeKeys.TRAIN or
mode == estimator_lib.ModeKeys.EVAL):
_check_train_eval_features(features, self.model)
elif mode == estimator_lib.ModeKeys.PREDICT:
self._check_predict_features(features)
else:
raise ValueError("Unknown mode '{}' passed to model_fn.".format(mode))
self.state_manager.initialize_graph(
model=self.model, input_statistics=input_statistics)
if mode == estimator_lib.ModeKeys.TRAIN:
return self._train_ops(features)
elif mode == estimator_lib.ModeKeys.EVAL:
return self._evaluate_ops(features)
elif mode == estimator_lib.ModeKeys.PREDICT and not passed_flat_state:
return self._predict_ops(features)
elif mode == estimator_lib.ModeKeys.PREDICT and passed_flat_state:
# The mode is PREDICT, but we're actually in export_saved_model for
# serving. We want to return two graphs: one for filtering (state + data
# -> state) and one for predicting (state -> prediction).
return self._serving_ops(features)
class OneShotPredictionHead(TimeSeriesRegressionHead):
"""A time series head which exports a single stateless serving signature.
The serving default signature exported by this head expects `times`, `values`,
and any exogenous features, but no state. `values` has shape `[batch_size,
filter_length, num_features]` and `times` has shape `[batch_size,
total_length]`, where `total_length > filter_length`. Any exogenous features
must have their shapes prefixed by the shape of the `times` feature.
When serving, first performs filtering on the series up to `filter_length`
starting from the default start state for the model, then computes predictions
on the remainder of the series, returning them.
Model state is neither accepted nor returned, so filtering must be performed
each time predictions are requested when using this head.
"""
def _check_predict_features(self, features):
"""Raises errors if features are not suitable for one-shot prediction."""
if feature_keys.PredictionFeatures.TIMES not in features:
raise ValueError("Expected a '{}' feature for prediction.".format(
feature_keys.PredictionFeatures.TIMES))
if feature_keys.TrainEvalFeatures.VALUES not in features:
raise ValueError("Expected a '{}' feature for prediction.".format(
feature_keys.TrainEvalFeatures.VALUES))
if feature_keys.PredictionFeatures.STATE_TUPLE not in features:
raise ValueError("Expected a '{}' feature for prediction.".format(
feature_keys.PredictionFeatures.STATE_TUPLE))
times_feature = features[feature_keys.PredictionFeatures.TIMES]
if not times_feature.get_shape().is_compatible_with([None, None]):
raise ValueError(
("Expected shape (batch dimension, window size) for feature '{}' "
"(got shape {})").format(feature_keys.PredictionFeatures.TIMES,
times_feature.get_shape()))
_check_feature_shapes_compatible_with(
features=features,
compatible_with_name=feature_keys.PredictionFeatures.TIMES,
compatible_with_value=times_feature,
ignore=set([
# Model-dependent shapes
feature_keys.PredictionFeatures.STATE_TUPLE,
# One shot prediction head relies on values being shorter than
# times. Even though we're predicting eventually, we need values for
# the filtering phase.
feature_keys.TrainEvalFeatures.VALUES,
]))
def _evaluate_ops(self, features):
"""Add ops for evaluation (aka filtering) to the graph."""
spec = super(OneShotPredictionHead, self)._evaluate_ops(features)
# No state is fed to OneShotPredictionHead, so we don't return it; it being
# a tuple can cause issues for downstream infrastructure.
del spec.eval_metric_ops[feature_keys.State.STATE_TUPLE]
return spec
def _serving_ops(self, features):
"""Add ops for serving to the graph."""
with tf.compat.v1.variable_scope("model", use_resource=True):
filtering_features = {}
prediction_features = {}
values_length = tf.compat.v1.shape(
features[feature_keys.FilteringFeatures.VALUES])[1]
for key, value in features.items():
if key == feature_keys.State.STATE_TUPLE:
# Ignore state input. The model's default start state is replicated
# across the batch.
continue
if key == feature_keys.FilteringFeatures.VALUES:
filtering_features[key] = value
else:
filtering_features[key] = value[:, :values_length]
prediction_features[key] = value[:, values_length:]
cold_filtering_outputs = self.model.define_loss(
features=filtering_features, mode=estimator_lib.ModeKeys.EVAL)
prediction_features[feature_keys.State.STATE_TUPLE] = (
cold_filtering_outputs.end_state)
with tf.compat.v1.variable_scope("model", reuse=True):
prediction_outputs = self.model.predict(features=prediction_features)
return estimator_lib.EstimatorSpec(
mode=estimator_lib.ModeKeys.PREDICT,
export_outputs={
feature_keys.SavedModelLabels.PREDICT:
_NoStatePredictOutput(prediction_outputs),
},
# Likely unused, but it is necessary to return `predictions` to satisfy
# the Estimator's error checking.
predictions={})
def _check_feature_shapes_compatible_with(features,
compatible_with_name,
compatible_with_value,
ignore=None):
"""Checks all features are compatible with the given time-like feature."""
if ignore is None:
ignore = set()
for name, value in features.items():
if name in ignore:
continue
feature_shape = value.get_shape()
if feature_shape.ndims is None:
continue
if feature_shape.ndims < 2:
raise ValueError(
("Features must have shape (batch dimension, window size, ...) "
"(got rank {} for feature '{}')").format(feature_shape.ndims, name))
if not feature_shape[:2].is_compatible_with(
compatible_with_value.get_shape()):
raise ValueError(
("Features must have shape (batch dimension, window size, ...) "
"where batch dimension and window size match the "
"'{times_feature}' feature (got shape {feature_shape} for "
"feature '{feature_name}' but shape {times_shape} for feature "
"'{times_feature}')").format(
times_feature=compatible_with_name,
feature_shape=feature_shape,
feature_name=name,
times_shape=compatible_with_value.get_shape()))
def _check_train_eval_features(features, model):
"""Raise errors if features are not suitable for training/evaluation."""
if feature_keys.TrainEvalFeatures.TIMES not in features:
raise ValueError("Expected a '{}' feature for training/evaluation.".format(
feature_keys.TrainEvalFeatures.TIMES))
if feature_keys.TrainEvalFeatures.VALUES not in features:
raise ValueError("Expected a '{}' feature for training/evaluation.".format(
feature_keys.TrainEvalFeatures.VALUES))
times_feature = features[feature_keys.TrainEvalFeatures.TIMES]
if not times_feature.get_shape().is_compatible_with([None, None]):
raise ValueError(
("Expected shape (batch dimension, window size) for feature '{}' "
"(got shape {})").format(feature_keys.TrainEvalFeatures.TIMES,
times_feature.get_shape()))
values_feature = features[feature_keys.TrainEvalFeatures.VALUES]
if not values_feature.get_shape().is_compatible_with(
[None, None, model.num_features]):
raise ValueError(
("Expected shape (batch dimension, window size, {num_features}) "
"for feature '{feature_name}', since the model was configured "
"with num_features={num_features} (got shape {got_shape})").format(
num_features=model.num_features,
feature_name=feature_keys.TrainEvalFeatures.VALUES,
                got_shape=values_feature.get_shape()))
_check_feature_shapes_compatible_with(
features=features,
compatible_with_name=feature_keys.TrainEvalFeatures.TIMES,
compatible_with_value=times_feature,
ignore=set([
feature_keys.State.STATE_TUPLE # Model-dependent shapes
]))
def _identity_metric_single(name, input_tensor):
"""A metric which takes on its last updated value.
This keeps evaluation metrics in sync with one another, since update ops are
run separately from their result Tensors. Simply returning (input_tensor,
no_op) as a metric with a value but no update means that a metric will come
from a different batch of data than metrics which cache values in a Variable
(e.g. the default loss metric).
Args:
name: A name for the metric.
input_tensor: Any Tensor.
Returns:
A tuple of (value, update_op).
"""
metric_variable = tf.compat.v1.Variable(
name="{}_identity_metric".format(name),
initial_value=tf.zeros([], dtype=input_tensor.dtype),
collections=[tf.compat.v1.GraphKeys.LOCAL_VARIABLES],
validate_shape=False)
update_op = tf.compat.v1.assign(
metric_variable, input_tensor, validate_shape=False)
# This shape will be correct once the first update runs (but may be
# incomplete, so is not helpful for initializing the variable).
metric_variable.set_shape(input_tensor.get_shape())
return (metric_variable.value(), update_op)
def _identity_metric_nested(name, input_tensors):
"""Create identity metrics for a nested tuple of Tensors."""
update_ops = []
value_tensors = []
for tensor_number, tensor in enumerate(tf.nest.flatten(input_tensors)):
value_tensor, update_op = _identity_metric_single(
name="{}_{}".format(name, tensor_number), input_tensor=tensor)
update_ops.append(update_op)
value_tensors.append(value_tensor)
return (tf.nest.pack_sequence_as(input_tensors, value_tensors),
tf.group(*update_ops))
def state_to_dictionary(state_tuple):
"""Flatten model state into a dictionary with string keys."""
flattened = {}
for state_number, state_value in enumerate(tf.nest.flatten(state_tuple)):
prefixed_state_name = "{}_{:02d}".format(feature_keys.State.STATE_PREFIX,
state_number)
flattened[prefixed_state_name] = state_value
return flattened
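# Hedged example: assuming feature_keys.State.STATE_PREFIX were the string
# "model_state" (an illustrative assumption -- check feature_keys for the
# real literal), state_to_dictionary(((t0, t1), t2)) flattens in tf.nest
# order to
#   {"model_state_00": t0, "model_state_01": t1, "model_state_02": t2}
# which is the naming scheme _gather_state() unpacks on the way back in.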
|
apache-2.0
| -3,726,086,368,253,177,000
| 44.198732
| 126
| 0.669629
| false
|
wolcomm/djangolg
|
djangolg/dialects/base.py
|
1
|
2044
|
# Copyright 2017 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Base dialect class for djangolg."""
from __future__ import print_function
from __future__ import unicode_literals
import inspect
import napalm
from napalm_base import NetworkDriver
class BaseDialect(object):
"""Device base dialect class."""
driver_class = None
name = None
description = None
commands = {}
def __init__(self):
"""Initialise new instance."""
if not isinstance(self.driver_class, NetworkDriver):
if type(self).name:
self.driver_class = napalm.get_network_driver(type(self).name)
else:
raise ValueError
def get_command_syntax(self, method=None, option=None):
"""Get the dialect specific syntax for a given method as a lambda."""
from djangolg.methods.base import BaseMethod
        if not isinstance(method, BaseMethod):
            raise ValueError
syntax = None
if method.name in self.commands:
if option is not None:
if option in self.commands[method.name]:
syntax = self.commands[method.name][option]
else:
syntax = self.commands[method.name]
if syntax:
if inspect.isfunction(syntax):
return syntax
else:
raise TypeError # pragma: no cover
raise NotImplementedError # pragma: no cover
|
apache-2.0
| 8,075,817,260,728,690,000
| 33.644068
| 79
| 0.65362
| false
|
t3dev/odoo
|
addons/stock/tests/test_report.py
|
2
|
1121
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import odoo
import odoo.tests
class TestReports(odoo.tests.TransactionCase):
def test_reports(self):
product1 = self.env['product.product'].create({
'name': 'Mellohi',
'default_code': 'C418',
'type': 'product',
'categ_id': self.env.ref('product.product_category_all').id,
'tracking': 'lot',
'barcode': 'scan_me'
})
lot1 = self.env['stock.production.lot'].create({
'name': 'Volume-Beta',
'product_id': product1.id,
})
report = self.env.ref('stock.label_lot_template')
target = b'\n\n\n^XA\n^FO100,50\n^A0N,44,33^FD[C418]Mellohi^FS\n^FO100,100\n^A0N,44,33^FDLN/SN:Volume-Beta^FS\n^FO100,150^BY3\n^BCN,100,Y,N,N\n^FDVolume-Beta^FS\n^XZ\n\n\n'
rendering, qweb_type = report.render_qweb_text(lot1.id)
self.assertEqual(target, rendering.replace(b' ', b''), 'The rendering is not good')
self.assertEqual(qweb_type, 'text', 'the report type is not good')
|
gpl-3.0
| 436,866,183,249,691,840
| 42.115385
| 180
| 0.597681
| false
|
mgax/airship
|
airship/core.py
|
1
|
8257
|
import os
import sys
import logging
import json
import random
import string
from pipes import quote as shellquote
from path import path
import yaml
from kv import KV
import pkg_resources
import blinker
from .daemons import Supervisor
from . import deployer
log = logging.getLogger(__name__)
CFG_LINKS_FOLDER = 'active'
YAML_EXT = '.yaml'
bucket_run = blinker.Signal()
define_arguments = blinker.Signal()
def random_id(size=6, vocabulary=string.ascii_lowercase + string.digits):
return ''.join(random.choice(vocabulary) for c in range(size))
class Bucket(object):
def __init__(self, id_, airship, config):
self.id_ = id_
self.airship = airship
self.config = config
self.folder = self.airship._bucket_folder(id_)
self.process_types = {}
self._read_procfile()
def _read_procfile(self):
procfile_path = self.folder / 'Procfile'
if procfile_path.isfile():
with procfile_path.open('rb') as f:
for line in f:
(procname, cmd) = line.split(':', 1)
self.process_types[procname.strip()] = cmd.strip()
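    # Example Procfile that _read_procfile would parse (hypothetical
    # contents, for illustration):
    #   web: python app.py
    #   worker: python worker.py --queue default
    # yielding process_types == {'web': 'python app.py',
    #                            'worker': 'python worker.py --queue default'}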
def start(self):
log.info("Activating bucket %r", self.id_)
self.airship.daemons.configure_bucket_running(self)
def stop(self):
self.airship.daemons.configure_bucket_stopped(self)
def destroy(self):
self.airship.daemons.remove_bucket(self.id_)
if self.folder.isdir():
self.folder.rmtree()
self.airship.buckets_db.pop(self.id_, None)
def run(self, command):
os.chdir(self.folder)
environ = dict(os.environ)
environ.update(self.airship.config.get('env') or {})
bucket_run.send(self.airship, bucket=self, environ=environ)
shell_args = ['/bin/bash']
if command:
if command in self.process_types:
procname = command
port_map = self.airship.config.get('port_map', {})
if procname in port_map:
environ['PORT'] = str(port_map[procname])
command = self.process_types[procname]
shell_args += ['-c', command]
os.execve(shell_args[0], shell_args, environ)
_newest = object()
class Airship(object):
""" The airship object implements most operations performed by airship. It
    acts as a container for deployments.
"""
def __init__(self, config):
self.home_path = config['home']
self.var_path = self.home_path / 'var'
self.log_path = self.var_path / 'log'
self.deploy_path = self.var_path / 'deploy'
self.config = config
etc = self.home_path / 'etc'
etc.mkdir_p()
self.buckets_db = KV(etc / 'buckets.db', table='bucket')
self.meta_db = KV(etc / 'buckets.db', table='meta')
self.daemons = Supervisor(etc)
@property
def cfg_links_folder(self):
folder = self.home_path / CFG_LINKS_FOLDER
if not folder.isdir():
folder.makedirs()
return folder
def initialize(self):
self.var_path.mkdir_p()
self.log_path.mkdir_p()
(self.var_path / 'run').mkdir_p()
self.deploy_path.mkdir_p()
self.generate_supervisord_configuration()
def generate_supervisord_configuration(self):
self.daemons.configure(self.home_path)
def _get_bucket_by_id(self, bucket_id):
config = self.buckets_db[bucket_id]
return Bucket(bucket_id, self, config)
def get_bucket(self, name=_newest):
if name is _newest:
name = max(self.buckets_db)
return self._get_bucket_by_id(name)
def _bucket_folder(self, id_):
return self.deploy_path / id_
def _generate_bucket_id(self):
with self.meta_db.lock():
next_id = self.meta_db.get('next_bucket_id', 1)
self.meta_db['next_bucket_id'] = next_id + 1
id_ = 'd%d' % (next_id,)
self._bucket_folder(id_).mkdir()
return id_
    def new_bucket(self, config=None):
        # Avoid a mutable default argument and persist the supplied config.
        bucket_id = self._generate_bucket_id()
        self.buckets_db[bucket_id] = config or {}
bucket = self._get_bucket_by_id(bucket_id)
return bucket
def list_buckets(self):
return {'buckets': [{'id': id_} for id_ in self.buckets_db]}
# we load the entry points here so they can do import-time signal registrations
_plugin_callbacks = [ep.load() for ep in
pkg_resources.iter_entry_points('airship_plugins')]
def load_plugins(airship):
for callback in _plugin_callbacks:
callback(airship)
AIRSHIP_SCRIPT = """#!/bin/bash
exec '{prefix}/bin/airship' '{home}' "$@"
"""
SUPERVISORD_SCRIPT = """#!/bin/bash
exec '{prefix}/bin/supervisord' -c '{home}/etc/supervisor.conf'
"""
SUPERVISORCTL_SCRIPT = """#!/bin/bash
exec '{prefix}/bin/supervisorctl' -c '{home}/etc/supervisor.conf' $@
"""
def init_cmd(airship, args):
log.info("Initializing airship folder at %r.", airship.home_path)
airship_yaml_path = airship.home_path / 'etc' / 'airship.yaml'
if not airship_yaml_path.isfile():
airship_yaml_path.write_text('\n')
airship.initialize()
airship_bin = airship.home_path / 'bin'
airship_bin.makedirs()
kw = {'home': airship.home_path, 'prefix': sys.prefix}
with open(airship_bin / 'airship', 'wb') as f:
f.write(AIRSHIP_SCRIPT.format(**kw))
path(f.name).chmod(0755)
with open(airship_bin / 'supervisord', 'wb') as f:
f.write(SUPERVISORD_SCRIPT.format(**kw))
path(f.name).chmod(0755)
with open(airship_bin / 'supervisorctl', 'wb') as f:
f.write(SUPERVISORCTL_SCRIPT.format(**kw))
path(f.name).chmod(0755)
def list_cmd(airship, args):
print json.dumps(airship.list_buckets(), indent=2)
def destroy_cmd(airship, args):
airship.get_bucket(args.bucket_id or _newest).destroy()
def run_cmd(airship, args):
command = ' '.join(shellquote(a) for a in args.command)
airship.get_bucket(args.bucket_id or _newest).run(command)
def deploy_cmd(airship, args):
try:
deployer.deploy(airship, args.tarfile)
except deployer.DeployError, e:
print "Deployment failed:", e.message
try:
e.bucket.destroy()
except:
print ("Error while cleaning up failed deployment %s."
% e.bucket.id_)
else:
print "Cleaned up failed deployment."
def build_args_parser():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('airship_home')
subparsers = parser.add_subparsers()
def create_command(name, handler):
subparser = subparsers.add_parser(name)
subparser.set_defaults(func=handler)
return subparser
create_command('init', init_cmd)
create_command('list', list_cmd)
destroy_parser = create_command('destroy', destroy_cmd)
destroy_parser.add_argument('-d', '--bucket_id')
run_parser = create_command('run', run_cmd)
run_parser.add_argument('-d', '--bucket_id')
run_parser.add_argument('command', nargs=argparse.REMAINDER)
deploy_parser = create_command('deploy', deploy_cmd)
deploy_parser.add_argument('tarfile')
define_arguments.send(None, create_command=create_command)
return parser
def set_up_logging(airship_home):
log_folder = airship_home / 'var' / 'log'
log_folder.makedirs_p()
handler = logging.FileHandler(log_folder / 'airship.log')
log_format = "%(asctime)s %(levelname)s:%(name)s %(message)s"
handler.setFormatter(logging.Formatter(log_format))
handler.setLevel(logging.DEBUG)
logging.getLogger().addHandler(handler)
def main(raw_arguments=None):
parser = build_args_parser()
args = parser.parse_args(raw_arguments or sys.argv[1:])
airship_home = path(args.airship_home).abspath()
set_up_logging(airship_home)
airship_yaml_path = airship_home / 'etc' / 'airship.yaml'
if airship_yaml_path.isfile():
with airship_yaml_path.open('rb') as f:
config = yaml.load(f)
else:
config = {}
config['home'] = airship_home
airship = Airship(config)
load_plugins(airship)
args.func(airship, args)
if __name__ == '__main__':
main()
|
bsd-2-clause
| -984,668,476,783,880,200
| 28.808664
| 79
| 0.619595
| false
|
shapiromatron/comp523-medcosts
|
myuser/migrations/0001_initial.py
|
1
|
5916
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'MyUser'
db.create_table(u'myuser_myuser', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('password', self.gf('django.db.models.fields.CharField')(max_length=128)),
('last_login', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('is_superuser', self.gf('django.db.models.fields.BooleanField')(default=False)),
('email', self.gf('django.db.models.fields.EmailField')(unique=True, max_length=254, db_index=True)),
('first_name', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
('last_name', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
('is_staff', self.gf('django.db.models.fields.BooleanField')(default=False)),
('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('date_joined', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal(u'myuser', ['MyUser'])
# Adding M2M table for field groups on 'MyUser'
m2m_table_name = db.shorten_name(u'myuser_myuser_groups')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('myuser', models.ForeignKey(orm[u'myuser.myuser'], null=False)),
('group', models.ForeignKey(orm[u'auth.group'], null=False))
))
db.create_unique(m2m_table_name, ['myuser_id', 'group_id'])
# Adding M2M table for field user_permissions on 'MyUser'
m2m_table_name = db.shorten_name(u'myuser_myuser_user_permissions')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('myuser', models.ForeignKey(orm[u'myuser.myuser'], null=False)),
('permission', models.ForeignKey(orm[u'auth.permission'], null=False))
))
db.create_unique(m2m_table_name, ['myuser_id', 'permission_id'])
def backwards(self, orm):
# Deleting model 'MyUser'
db.delete_table(u'myuser_myuser')
# Removing M2M table for field groups on 'MyUser'
db.delete_table(db.shorten_name(u'myuser_myuser_groups'))
# Removing M2M table for field user_permissions on 'MyUser'
db.delete_table(db.shorten_name(u'myuser_myuser_user_permissions'))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'myuser.myuser': {
'Meta': {'ordering': "('last_name',)", 'object_name': 'MyUser'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"})
}
}
complete_apps = ['myuser']
|
mit
| 3,541,253,343,062,683,000
| 61.946809
| 194
| 0.590602
| false
|
YoungKwonJo/mlxtend
|
tests/tests_classifier/test_ensembleclassifier.py
|
1
|
2352
|
import numpy as np
from mlxtend.classifier import EnsembleClassifier
from sklearn.grid_search import GridSearchCV
from sklearn import cross_validation
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn import datasets
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target
def test_EnsembleClassifier():
np.random.seed(123)
clf1 = LogisticRegression()
clf2 = RandomForestClassifier()
clf3 = GaussianNB()
eclf = EnsembleClassifier(clfs=[clf1, clf2, clf3], voting='hard')
scores = cross_validation.cross_val_score(eclf, X, y, cv=5, scoring='accuracy')
scores_mean = (round(scores.mean(), 2))
assert(scores_mean == 0.94)
def test_EnsembleClassifier_weights():
np.random.seed(123)
clf1 = LogisticRegression()
clf2 = RandomForestClassifier()
clf3 = GaussianNB()
eclf = EnsembleClassifier(clfs=[clf1, clf2, clf3], voting='soft', weights=[1,2,10])
scores = cross_validation.cross_val_score(eclf, X, y, cv=5, scoring='accuracy')
scores_mean = (round(scores.mean(), 2))
assert(scores_mean == 0.93)
def test_EnsembleClassifier_gridsearch():
clf1 = LogisticRegression(random_state=1)
clf2 = RandomForestClassifier(random_state=1)
clf3 = GaussianNB()
eclf = EnsembleClassifier(clfs=[clf1, clf2, clf3], voting='soft')
params = {'logisticregression__C': [1.0, 100.0],
'randomforestclassifier__n_estimators': [20, 200],}
grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
grid.fit(iris.data, iris.target)
mean_scores = []
for params, mean_score, scores in grid.grid_scores_:
mean_scores.append(round(mean_score, 2))
assert(mean_scores == [0.95, 0.96, 0.96, 0.95])
def test_EnsembleClassifier_gridsearch_enumerate_names():
clf1 = LogisticRegression(random_state=1)
clf2 = RandomForestClassifier(random_state=1)
eclf = EnsembleClassifier(clfs=[clf1, clf1, clf2], voting='soft')
params = {'logisticregression-1__C': [1.0, 100.0],
'logisticregression-2__C': [1.0, 100.0],
'randomforestclassifier__n_estimators': [5, 20],}
grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
gs = grid.fit(iris.data, iris.target)
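def test_EnsembleClassifier_predict_sketch():
    # Hedged sketch (not from the original suite): a direct fit/predict
    # round trip on the iris data loaded above. The 0.9 training-accuracy
    # threshold is an illustrative assumption, not a library guarantee.
    np.random.seed(123)
    eclf = EnsembleClassifier(clfs=[LogisticRegression(),
                                    RandomForestClassifier(),
                                    GaussianNB()], voting='hard')
    eclf.fit(X, y)
    assert (eclf.predict(X) == y).mean() > 0.9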
|
bsd-3-clause
| 1,804,529,685,603,916,500
| 30.783784
| 87
| 0.690051
| false
|
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Bio/motifs/meme.py
|
1
|
11510
|
# Copyright 2008 by Bartek Wilczynski
# Adapted from Bio.MEME.Parser by Jason A. Hackney. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
from __future__ import print_function
from Bio.Alphabet import IUPAC
from Bio import Seq
from Bio import motifs
def read(handle):
"""Parses the text output of the MEME program into a meme.Record object.
Example:
>>> from Bio.motifs import meme
>>> with open("meme.output.txt") as f:
... record = meme.read(f)
>>> for motif in record:
... for instance in motif.instances:
... print(instance.motif_name, instance.sequence_name, instance.strand, instance.pvalue)
"""
record = Record()
__read_version(record, handle)
__read_datafile(record, handle)
__read_alphabet(record, handle)
__read_sequences(record, handle)
__read_command(record, handle)
for line in handle:
if line.startswith('MOTIF 1'):
break
else:
raise ValueError('Unexpected end of stream')
alphabet = record.alphabet
revcomp = 'revcomp' in record.command
while True:
motif_number, length, num_occurrences, evalue = __read_motif_statistics(line)
name = __read_motif_name(handle)
instances = __read_motif_sequences(handle, name, alphabet, length, revcomp)
motif = Motif(alphabet, instances)
motif.length = length
motif.num_occurrences = num_occurrences
motif.evalue = evalue
motif.name = name
record.append(motif)
assert len(record)==motif_number
__skip_unused_lines(handle)
try:
line = next(handle)
except StopIteration:
raise ValueError('Unexpected end of stream: Expected to find new motif, or the summary of motifs')
if line.startswith("SUMMARY OF MOTIFS"):
break
if not line.startswith('MOTIF'):
raise ValueError("Line does not start with 'MOTIF':\n%s" % line)
return record
class Motif(motifs.Motif):
"""A subclass of Motif used in parsing MEME (and MAST) output.
This subclass defines functions and data specific to MEME motifs.
This includes the motif name, the evalue for a motif, and its number
of occurrences.
"""
def __init__(self, alphabet=None, instances=None):
motifs.Motif.__init__(self, alphabet, instances)
self.evalue = 0.0
self.num_occurrences = 0
self.name = None
class Instance(Seq.Seq):
"""A class describing the instances of a MEME motif, and the data thereof.
"""
def __init__(self, *args, **kwds):
Seq.Seq.__init__(self, *args, **kwds)
self.sequence_name = ""
self.start = 0
self.pvalue = 1.0
self.strand = 0
self.length = 0
self.motif_name = ""
class Record(list):
"""A class for holding the results of a MEME run.
A meme.Record is an object that holds the results from running
MEME. It implements no methods of its own.
The meme.Record class inherits from list, so you can access individual
motifs in the record by their index. Alternatively, you can find a motif
by its name:
>>> from Bio import motifs
>>> with open("meme.output.txt") as f:
... record = motifs.parse(f, 'MEME')
>>> motif = record[0]
>>> print(motif.name)
Motif 1
>>> motif = record['Motif 1']
>>> print(motif.name)
Motif 1
"""
def __init__(self):
"""__init__ (self)"""
self.version = ""
self.datafile = ""
self.command = ""
self.alphabet = None
self.sequences = []
def __getitem__(self, key):
if isinstance(key, str):
for motif in self:
if motif.name == key:
return motif
else:
return list.__getitem__(self, key)
# Everything below is private
def __read_version(record, handle):
for line in handle:
if line.startswith('MEME version'):
break
else:
raise ValueError("Improper input file. File should contain a line starting MEME version.")
line = line.strip()
ls = line.split()
record.version = ls[2]
def __read_datafile(record, handle):
for line in handle:
if line.startswith('TRAINING SET'):
break
else:
raise ValueError("Unexpected end of stream: 'TRAINING SET' not found.")
try:
line = next(handle)
except StopIteration:
raise ValueError("Unexpected end of stream: Expected to find line starting with '****'")
if not line.startswith('****'):
raise ValueError("Line does not start with '****':\n%s" % line)
try:
line = next(handle)
except StopIteration:
raise ValueError("Unexpected end of stream: Expected to find line starting with 'DATAFILE'")
if not line.startswith('DATAFILE'):
raise ValueError("Line does not start with 'DATAFILE':\n%s" % line)
line = line.strip()
line = line.replace('DATAFILE= ', '')
record.datafile = line
def __read_alphabet(record, handle):
try:
line = next(handle)
except StopIteration:
raise ValueError("Unexpected end of stream: Expected to find line starting with 'ALPHABET'")
if not line.startswith('ALPHABET'):
raise ValueError("Line does not start with 'ALPHABET':\n%s" % line)
line = line.strip()
line = line.replace('ALPHABET= ', '')
if line == 'ACGT':
al = IUPAC.unambiguous_dna
else:
al = IUPAC.protein
record.alphabet = al
def __read_sequences(record, handle):
try:
line = next(handle)
except StopIteration:
raise ValueError("Unexpected end of stream: Expected to find line starting with 'Sequence name'")
if not line.startswith('Sequence name'):
raise ValueError("Line does not start with 'Sequence name':\n%s" % line)
try:
line = next(handle)
except StopIteration:
raise ValueError("Unexpected end of stream: Expected to find line starting with '----'")
if not line.startswith('----'):
raise ValueError("Line does not start with '----':\n%s" % line)
for line in handle:
if line.startswith('***'):
break
line = line.strip()
ls = line.split()
record.sequences.append(ls[0])
if len(ls) == 6:
record.sequences.append(ls[3])
else:
raise ValueError("Unexpected end of stream: Expected to find line starting with '***'")
def __read_command(record, handle):
for line in handle:
if line.startswith('command:'):
break
else:
raise ValueError("Unexpected end of stream: Expected to find line starting with 'command'")
line = line.strip()
line = line.replace('command: ', '')
record.command = line
def __read_motif_statistics(line):
    # Depending on the version of MEME, this line looks either like
# MOTIF 1 width = 19 sites = 3 llr = 43 E-value = 6.9e-002
# or like
# MOTIF 1 MEME width = 19 sites = 3 llr = 43 E-value = 6.9e-002
    words = line.split()
    assert words[0] == 'MOTIF'
    motif_number = int(words[1])
    if words[2] == 'MEME':
        key_values = words[3:]
    else:
        key_values = words[2:]
    keys = key_values[::3]
    equal_signs = key_values[1::3]
    values = key_values[2::3]
    assert keys == ['width', 'sites', 'llr', 'E-value']
    for equal_sign in equal_signs:
        assert equal_sign == '='
length = int(values[0])
num_occurrences = int(values[1])
evalue = float(values[3])
return motif_number, length, num_occurrences, evalue
def __read_motif_name(handle):
for line in handle:
if 'sorted by position p-value' in line:
break
else:
raise ValueError('Unexpected end of stream: Failed to find motif name')
line = line.strip()
words = line.split()
name = " ".join(words[0:2])
return name
def __read_motif_sequences(handle, motif_name, alphabet, length, revcomp):
try:
line = next(handle)
except StopIteration:
raise ValueError('Unexpected end of stream: Failed to find motif sequences')
if not line.startswith('---'):
raise ValueError("Line does not start with '---':\n%s" % line)
try:
line = next(handle)
except StopIteration:
raise ValueError("Unexpected end of stream: Expected to find line starting with 'Sequence name'")
if not line.startswith('Sequence name'):
raise ValueError("Line does not start with 'Sequence name':\n%s" % line)
try:
line = next(handle)
except StopIteration:
raise ValueError('Unexpected end of stream: Failed to find motif sequences')
if not line.startswith('---'):
raise ValueError("Line does not start with '---':\n%s" % line)
instances = []
for line in handle:
if line.startswith('---'):
break
line = line.strip()
words = line.split()
if revcomp:
strand = words.pop(1)
else:
strand = '+'
sequence = words[4]
assert len(sequence) == length
instance = Instance(sequence, alphabet)
instance.motif_name = motif_name
instance.sequence_name = words[0]
instance.start = int(words[1])
instance.pvalue = float(words[2])
instance.strand = strand
instance.length = length
instances.append(instance)
else:
raise ValueError('Unexpected end of stream')
return motifs.Instances(instances, alphabet)
def __skip_unused_lines(handle):
for line in handle:
if line.startswith('log-odds matrix'):
break
else:
raise ValueError("Unexpected end of stream: Expected to find line starting with 'log-odds matrix'")
for line in handle:
if line.startswith('---'):
break
else:
raise ValueError("Unexpected end of stream: Expected to find line starting with '---'")
for line in handle:
if line.startswith('letter-probability matrix'):
break
else:
raise ValueError("Unexpected end of stream: Expected to find line starting with 'letter-probability matrix'")
for line in handle:
if line.startswith('---'):
break
else:
raise ValueError("Unexpected end of stream: Expected to find line starting with '---'")
for line in handle:
if line.startswith('Time'):
break
else:
raise ValueError("Unexpected end of stream: Expected to find line starting with 'Time'")
try:
line = next(handle)
except StopIteration:
raise ValueError('Unexpected end of stream: Expected to find blank line')
if line.strip():
raise ValueError("Expected blank line, but got:\n%s" % line)
try:
line = next(handle)
except StopIteration:
raise ValueError("Unexpected end of stream: Expected to find line starting with '***'")
if not line.startswith('***'):
raise ValueError("Line does not start with '***':\n%s" % line)
for line in handle:
if line.strip():
break
else:
raise ValueError("Unexpected end of stream: Expected to find line starting with '***'")
if not line.startswith('***'):
raise ValueError("Line does not start with '***':\n%s" % line)
|
apache-2.0
| 5,834,909,790,318,722,000
| 32.753666
| 117
| 0.611295
| false
|
projectshift/shift-media
|
shiftmedia/paths.py
|
1
|
9544
|
import hashlib
from shiftmedia import exceptions as x
from shiftmedia import utils
class PathBuilder:
def __init__(self, secret_key):
"""
Path builder constructor
Initializes path builder service.
:param secret_key: string - secret key from config
"""
self.secret_key = secret_key
def generate_signature(self, id, filename):
"""
Generate signature
Accepts storage id and a filename to generate hash signature.
Signatures are used to prevent brute force attack against
resizing service.
:param id: string - storage id
:param filename: - string, resize filename
:return: string - signature
"""
sign_me = bytes(id + filename + self.secret_key, 'utf-8')
signature = hashlib.md5()
signature.update(sign_me)
return signature.hexdigest()
def validate_signature(self, id, filename):
"""
Validate signature
Accepts storage id and a filename and validates hash signature.
Signatures are used to prevent brute force attack against
resizing service.
:param id: string - storage id
:param filename: - string, resize filename
:return:
"""
parts = filename.split('-')
if len(parts) != 5:
return False
extension = parts[4][parts[4].index('.'):]
non_signed_filename = '-'.join(parts[:4]) + extension
signature = parts[4].replace(extension, '')
return signature == self.generate_signature(id, non_signed_filename)
def get_auto_crop_filename(
self,
id,
size,
factor,
output_format=None,
upscale=True,
quality=65
):
"""
Get auto crop filename
Encodes parameters for automatic cropping/resizing into a filename.
Resulting filename will contain hash signature.
:param id: string - storage id (used to generate signature)
:param size: string - width x height
:param factor: string - crop factor, fit/fill
:param output_format: string - output format
:param upscale: bool - enlarge smaller original
:param quality: string - differs per format. i.e. 0-100 for jpg
:return: string - signed filename
"""
# validate size
err = False
dimensions = size.lower().split('x')
if len(dimensions) != 2:
err = True
for dimension in dimensions:
if not dimension.isdigit() or int(dimension) <= 0:
err = True
if err:
            err = 'Invalid size provided; must be in 100x200 format'
raise x.InvalidArgumentException(err)
# validate factor
if factor not in ['fit', 'fill']:
err = 'Auto crop factor must be either fit or fill'
raise x.InvalidArgumentException(err)
# validate quality
if not str(quality).isdigit():
err = 'Quality must be numeric'
raise x.InvalidArgumentException(err)
# guess format from original if not specified
if not output_format:
parts = id.split('-')
            output_format = parts[5][parts[5].index('.') + 1:]
# prepare upscale
upscale = 'upscale' if bool(upscale) else 'noupscale'
        # create filename
schema = '{size}-{factor}-{quality}-{upscale}.{format}'
signed_schema = '{size}-{factor}-{quality}-{upscale}-{sig}.{format}'
params = dict(
size=size,
factor=factor,
quality=quality,
upscale=upscale,
format=output_format
)
        unsigned_filename = schema.format(**params)
        # sign
        params['sig'] = self.generate_signature(id, unsigned_filename)
signed_filename = signed_schema.format(**params)
return signed_filename
def get_manual_crop_filename(
self,
id,
sample_size,
target_size,
output_format=None,
upscale=True,
quality=65
):
"""
Get manual crop filename
Encodes parameters for automatic cropping/resizing into a filename.
Resulting filename will contain hash signature.
:param id: string - storage id (used to generate signature)
:param target_size: string - width x height
:param sample_size: string - width x height, must be proportional
:param output_format: string - output format
:param upscale: bool - enlarge smaller original
:param quality: string - differs per format. i.e. 0-100 for jpg
:return: string - signed filename
"""
# validate sample size
err = False
sample_dimensions = sample_size.lower().split('x')
if len(sample_dimensions) != 2:
err = True
for dimension in sample_dimensions:
if not dimension.isdigit() or int(dimension) <= 0:
err = True
if err:
            err = 'Invalid sample size provided; must be in 100x200 format'
raise x.InvalidArgumentException(err)
# validate target size
err = False
target_dimensions = target_size.lower().split('x')
if len(target_dimensions) != 2:
err = True
for dimension in target_dimensions:
if not dimension.isdigit() or int(dimension) <= 0:
err = True
if err:
            err = 'Invalid target size provided; must be in 100x200 format'
raise x.InvalidArgumentException(err)
# validate sample and target sizes being proportional
sw = int(sample_dimensions[0])
sh = int(sample_dimensions[1])
tw = int(target_dimensions[0])
th = int(target_dimensions[1])
if (sw/sh) != (tw/th):
err = 'Sample size and target size must be proportional'
raise x.InvalidArgumentException(err)
# validate quality
if not str(quality).isdigit():
err = 'Quality must be numeric'
raise x.InvalidArgumentException(err)
# guess format from original if not specified
if not output_format:
parts = id.split('-')
            output_format = parts[5][parts[5].index('.') + 1:]
# prepare upscale
upscale = 'upscale' if bool(upscale) else 'noupscale'
# initial filename
schema = '{target}-{sample}-{quality}-{upscale}.{format}'
signed_schema = '{target}-{sample}-{quality}-{upscale}-{sig}.{format}'
params = dict(
sample=sample_size,
target=target_size,
quality=quality,
upscale=upscale,
format=output_format
)
        unsigned_filename = schema.format(**params)
        # sign
        params['sig'] = self.generate_signature(id, unsigned_filename)
signed_filename = signed_schema.format(**params)
return signed_filename
def filename_to_resize_params(self, id, filename):
"""
Filename to parameters
Parses resize filename to a set of usable parameters. Will perform
filename signature checking and throw an exception if requested
resize filename is malformed.
:param id: string - unique storage id
:param filename: string - resize filename
:return: dict of parameters
"""
# validate signature
if not self.validate_signature(id, filename):
err = 'Unable to parse filename: bad signature'
raise x.InvalidArgumentException(err)
# get parts
parts = filename.split('-')
        target_size, sample_size, quality, upscale, rest = parts
target_format = rest[rest.index('.') + 1:]
# detect manual/auto
if sample_size in ['fill', 'fit']:
resize = 'auto'
else:
err = False
sample_size = sample_size.split('x')
for dimension in sample_size:
if not dimension.isdigit() or int(dimension) <= 0:
err = True
break
if err or len(sample_size) != 2:
err = 'Unable to parse filename: bad sample size or crop factor'
raise x.InvalidArgumentException(err)
else:
resize = 'manual'
# validate size
err = False
target_size = target_size.split('x')
for dimension in target_size:
if not dimension.isdigit() or int(dimension) <= 0:
err = True
break
if err or len(target_size) != 2:
err = 'Unable to parse filename: bad target size'
raise x.InvalidArgumentException(err)
# validate quality
if not str(quality).isdigit():
err = 'Quality must be numeric'
raise x.InvalidArgumentException(err)
# prepare upscale
        upscale = (upscale == 'upscale')
# prepare result
result = dict(
id=id,
resize_mode=resize,
target_size='x'.join(target_size),
output_format=target_format,
quality=int(quality),
filename=filename,
upscale=upscale
)
if resize == 'auto':
result['factor'] = sample_size
if resize == 'manual':
result['sample_size'] = 'x'.join(sample_size)
return result
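# --- Usage sketch (illustrative only) ---
# Round-trips an auto-crop filename through the signer and the parser; the
# secret key and storage id below are made up.
if __name__ == '__main__':
    builder = PathBuilder('not-a-real-secret')
    fname = builder.get_auto_crop_filename(
        'demo-id', '100x200', 'fit', output_format='jpg')
    assert builder.validate_signature('demo-id', fname)
    params = builder.filename_to_resize_params('demo-id', fname)
    assert params['resize_mode'] == 'auto' and params['factor'] == 'fit'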
|
mit
| -5,693,655,965,590,283,000
| 32.843972
| 80
| 0.570096
| false
|
tchellomello/home-assistant
|
homeassistant/components/slack/notify.py
|
1
|
8948
|
"""Slack platform for notify component."""
import asyncio
import logging
import os
from urllib.parse import urlparse
from aiohttp import BasicAuth, FormData
from aiohttp.client_exceptions import ClientError
from slack import WebClient
from slack.errors import SlackApiError
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_TARGET,
ATTR_TITLE,
PLATFORM_SCHEMA,
BaseNotificationService,
)
from homeassistant.const import CONF_API_KEY, CONF_ICON, CONF_USERNAME
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client, config_validation as cv
import homeassistant.helpers.template as template
_LOGGER = logging.getLogger(__name__)
ATTR_BLOCKS = "blocks"
ATTR_BLOCKS_TEMPLATE = "blocks_template"
ATTR_FILE = "file"
ATTR_ICON = "icon"
ATTR_PASSWORD = "password"
ATTR_PATH = "path"
ATTR_URL = "url"
ATTR_USERNAME = "username"
ATTR_USERNAME = "username"
CONF_DEFAULT_CHANNEL = "default_channel"
DEFAULT_TIMEOUT_SECONDS = 15
FILE_PATH_SCHEMA = vol.Schema({vol.Required(ATTR_PATH): cv.isfile})
FILE_URL_SCHEMA = vol.Schema(
{
vol.Required(ATTR_URL): cv.url,
vol.Inclusive(ATTR_USERNAME, "credentials"): cv.string,
vol.Inclusive(ATTR_PASSWORD, "credentials"): cv.string,
}
)
DATA_FILE_SCHEMA = vol.Schema(
{vol.Required(ATTR_FILE): vol.Any(FILE_PATH_SCHEMA, FILE_URL_SCHEMA)}
)
DATA_TEXT_ONLY_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_USERNAME): cv.string,
vol.Optional(ATTR_ICON): cv.string,
vol.Optional(ATTR_BLOCKS): list,
vol.Optional(ATTR_BLOCKS_TEMPLATE): list,
}
)
DATA_SCHEMA = vol.All(
cv.ensure_list, [vol.Any(DATA_FILE_SCHEMA, DATA_TEXT_ONLY_SCHEMA)]
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_DEFAULT_CHANNEL): cv.string,
vol.Optional(CONF_ICON): cv.string,
vol.Optional(CONF_USERNAME): cv.string,
}
)
async def async_get_service(hass, config, discovery_info=None):
"""Set up the Slack notification service."""
session = aiohttp_client.async_get_clientsession(hass)
client = WebClient(token=config[CONF_API_KEY], run_async=True, session=session)
try:
await client.auth_test()
except SlackApiError as err:
_LOGGER.error("Error while setting up integration: %s", err)
return
return SlackNotificationService(
hass,
client,
config[CONF_DEFAULT_CHANNEL],
username=config.get(CONF_USERNAME),
icon=config.get(CONF_ICON),
)
@callback
def _async_get_filename_from_url(url):
"""Return the filename of a passed URL."""
parsed_url = urlparse(url)
return os.path.basename(parsed_url.path)
@callback
def _async_sanitize_channel_names(channel_list):
"""Remove any # symbols from a channel list."""
return [channel.lstrip("#") for channel in channel_list]
@callback
def _async_templatize_blocks(hass, value):
"""Recursive template creator helper function."""
if isinstance(value, list):
return [_async_templatize_blocks(hass, item) for item in value]
if isinstance(value, dict):
return {
key: _async_templatize_blocks(hass, item) for key, item in value.items()
}
tmpl = template.Template(value, hass=hass)
return tmpl.async_render()
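# For example (illustrative): passing {"text": "{{ 1 + 1 }}"} through
# _async_templatize_blocks renders every string leaf with the Home Assistant
# template engine, yielding {"text": "2"}.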
class SlackNotificationService(BaseNotificationService):
"""Define the Slack notification logic."""
def __init__(self, hass, client, default_channel, username, icon):
"""Initialize."""
self._client = client
self._default_channel = default_channel
self._hass = hass
self._icon = icon
self._username = username
async def _async_send_local_file_message(self, path, targets, message, title):
"""Upload a local file (with message) to Slack."""
if not self._hass.config.is_allowed_path(path):
_LOGGER.error("Path does not exist or is not allowed: %s", path)
return
parsed_url = urlparse(path)
filename = os.path.basename(parsed_url.path)
try:
await self._client.files_upload(
channels=",".join(targets),
file=path,
filename=filename,
initial_comment=message,
title=title or filename,
)
except SlackApiError as err:
_LOGGER.error("Error while uploading file-based message: %s", err)
async def _async_send_remote_file_message(
self, url, targets, message, title, *, username=None, password=None
):
"""Upload a remote file (with message) to Slack.
Note that we bypass the python-slackclient WebClient and use aiohttp directly,
as the former would require us to download the entire remote file into memory
first before uploading it to Slack.
"""
if not self._hass.config.is_allowed_external_url(url):
_LOGGER.error("URL is not allowed: %s", url)
return
filename = _async_get_filename_from_url(url)
session = aiohttp_client.async_get_clientsession(self.hass)
kwargs = {}
if username and password is not None:
kwargs = {"auth": BasicAuth(username, password=password)}
resp = await session.request("get", url, **kwargs)
try:
resp.raise_for_status()
except ClientError as err:
_LOGGER.error("Error while retrieving %s: %s", url, err)
return
data = FormData(
{
"channels": ",".join(targets),
"filename": filename,
"initial_comment": message,
"title": title or filename,
"token": self._client.token,
},
charset="utf-8",
)
data.add_field("file", resp.content, filename=filename)
try:
await session.post("https://slack.com/api/files.upload", data=data)
except ClientError as err:
_LOGGER.error("Error while uploading file message: %s", err)
async def _async_send_text_only_message(
self, targets, message, title, blocks, username, icon
):
"""Send a text-only message."""
message_dict = {
"blocks": blocks,
"link_names": True,
"text": message,
"username": username,
}
        if icon:
            if icon.lower().startswith(("http://", "https://")):
icon_type = "url"
else:
icon_type = "emoji"
message_dict[f"icon_{icon_type}"] = icon
tasks = {
target: self._client.chat_postMessage(**message_dict, channel=target)
for target in targets
}
results = await asyncio.gather(*tasks.values(), return_exceptions=True)
for target, result in zip(tasks, results):
if isinstance(result, SlackApiError):
_LOGGER.error(
"There was a Slack API error while sending to %s: %s",
target,
result,
)
async def async_send_message(self, message, **kwargs):
"""Send a message to Slack."""
data = kwargs.get(ATTR_DATA)
if data is None:
data = {}
try:
DATA_SCHEMA(data)
except vol.Invalid as err:
_LOGGER.error("Invalid message data: %s", err)
data = {}
title = kwargs.get(ATTR_TITLE)
targets = _async_sanitize_channel_names(
kwargs.get(ATTR_TARGET, [self._default_channel])
)
# Message Type 1: A text-only message
if ATTR_FILE not in data:
if ATTR_BLOCKS_TEMPLATE in data:
blocks = _async_templatize_blocks(self.hass, data[ATTR_BLOCKS_TEMPLATE])
elif ATTR_BLOCKS in data:
blocks = data[ATTR_BLOCKS]
else:
blocks = {}
return await self._async_send_text_only_message(
targets,
message,
title,
blocks,
username=data.get(ATTR_USERNAME, self._username),
icon=data.get(ATTR_ICON, self._icon),
)
# Message Type 2: A message that uploads a remote file
if ATTR_URL in data[ATTR_FILE]:
return await self._async_send_remote_file_message(
data[ATTR_FILE][ATTR_URL],
targets,
message,
title,
username=data[ATTR_FILE].get(ATTR_USERNAME),
password=data[ATTR_FILE].get(ATTR_PASSWORD),
)
# Message Type 3: A message that uploads a local file
return await self._async_send_local_file_message(
data[ATTR_FILE][ATTR_PATH], targets, message, title
)
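# --- Illustrative self-check (not part of the integration) ---
# The pure helpers above can be exercised without a Slack token, assuming the
# integration's imports at the top of this module resolve; the channel names
# and URL below are made up.
if __name__ == "__main__":
    assert _async_sanitize_channel_names(["#general", "random"]) == [
        "general",
        "random",
    ]
    assert _async_get_filename_from_url("https://example.com/img/pic.png") == "pic.png"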
|
apache-2.0
| -6,102,226,352,536,124,000
| 30.507042
| 88
| 0.59667
| false
|
darren-wang/gl
|
glance/api/v1/upload_utils.py
|
1
|
12541
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glance_store as store_api
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import webob.exc
from glance.common import exception
from glance.common import store_utils
from glance.common import utils
import glance.db
from glance import i18n
import glance.registry.client.v1.api as registry
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
def initiate_deletion(req, location_data, id):
"""
Deletes image data from the location of backend store.
:param req: The WSGI/Webob Request object
:param location_data: Location to the image data in a data store
:param id: Opaque image identifier
"""
store_utils.delete_image_location_from_backend(req.context,
id, location_data)
def _kill(req, image_id, from_state):
"""
Marks the image status to `killed`.
:param req: The WSGI/Webob Request object
:param image_id: Opaque image identifier
:param from_state: Permitted current status for transition to 'killed'
"""
# TODO(dosaboy): http://docs.openstack.org/developer/glance/statuses.html
# needs updating to reflect the fact that queued->killed and saving->killed
# are both allowed.
registry.update_image_metadata(req.context, image_id,
{'status': 'killed'},
from_state=from_state)
def safe_kill(req, image_id, from_state):
"""
Mark image killed without raising exceptions if it fails.
Since _kill is meant to be called from exceptions handlers, it should
not raise itself, rather it should just log its error.
:param req: The WSGI/Webob Request object
:param image_id: Opaque image identifier
:param from_state: Permitted current status for transition to 'killed'
"""
try:
_kill(req, image_id, from_state)
except Exception:
LOG.exception(_LE("Unable to kill image %(id)s: ") % {'id': image_id})
def upload_data_to_store(req, image_meta, image_data, store, notifier):
"""
Upload image data to specified store.
Upload image data to the store and cleans up on error.
"""
image_id = image_meta['id']
db_api = glance.db.get_api()
image_size = image_meta.get('size')
try:
# By default image_data will be passed as CooperativeReader object.
# But if 'user_storage_quota' is enabled and 'remaining' is not None
# then it will be passed as object of LimitingReader to
# 'store_add_to_backend' method.
image_data = utils.CooperativeReader(image_data)
remaining = glance.api.common.check_quota(
req.context, image_size, db_api, image_id=image_id)
if remaining is not None:
image_data = utils.LimitingReader(image_data, remaining)
(uri,
size,
checksum,
location_metadata) = store_api.store_add_to_backend(
image_meta['id'],
image_data,
image_meta['size'],
store,
context=req.context)
location_data = {'url': uri,
'metadata': location_metadata,
'status': 'active'}
try:
# recheck the quota in case there were simultaneous uploads that
# did not provide the size
glance.api.common.check_quota(
req.context, size, db_api, image_id=image_id)
except exception.StorageQuotaFull:
with excutils.save_and_reraise_exception():
LOG.info(_LI('Cleaning up %s after exceeding '
'the quota') % image_id)
store_utils.safe_delete_from_backend(
req.context, image_meta['id'], location_data)
def _kill_mismatched(image_meta, attr, actual):
supplied = image_meta.get(attr)
if supplied and supplied != actual:
msg = (_("Supplied %(attr)s (%(supplied)s) and "
"%(attr)s generated from uploaded image "
"(%(actual)s) did not match. Setting image "
"status to 'killed'.") % {'attr': attr,
'supplied': supplied,
'actual': actual})
LOG.error(msg)
safe_kill(req, image_id, 'saving')
initiate_deletion(req, location_data, image_id)
raise webob.exc.HTTPBadRequest(explanation=msg,
content_type="text/plain",
request=req)
# Verify any supplied size/checksum value matches size/checksum
# returned from store when adding image
_kill_mismatched(image_meta, 'size', size)
_kill_mismatched(image_meta, 'checksum', checksum)
# Update the database with the checksum returned
# from the backend store
LOG.debug("Updating image %(image_id)s data. "
"Checksum set to %(checksum)s, size set "
"to %(size)d", {'image_id': image_id,
'checksum': checksum,
'size': size})
update_data = {'checksum': checksum,
'size': size}
try:
try:
state = 'saving'
image_meta = registry.update_image_metadata(req.context,
image_id,
update_data,
from_state=state)
except exception.Duplicate:
image = registry.get_image_metadata(req.context, image_id)
if image['status'] == 'deleted':
raise exception.NotFound()
else:
raise
except exception.NotAuthenticated as e:
# Delete image data due to possible token expiration.
LOG.debug("Authentication error - the token may have "
"expired during file upload. Deleting image data for "
" %s " % image_id)
initiate_deletion(req, location_data, image_id)
raise webob.exc.HTTPUnauthorized(explanation=e.msg, request=req)
except exception.NotFound:
msg = _LI("Image %s could not be found after upload. The image may"
" have been deleted during the upload.") % image_id
LOG.info(msg)
# NOTE(jculp): we need to clean up the datastore if an image
# resource is deleted while the image data is being uploaded
#
# We get "location_data" from above call to store.add(), any
# exceptions that occur there handle this same issue internally,
# Since this is store-agnostic, should apply to all stores.
initiate_deletion(req, location_data, image_id)
raise webob.exc.HTTPPreconditionFailed(explanation=msg,
request=req,
content_type='text/plain')
except store_api.StoreAddDisabled:
msg = _("Error in store configuration. Adding images to store "
"is disabled.")
LOG.exception(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPGone(explanation=msg, request=req,
content_type='text/plain')
except exception.Duplicate as e:
msg = (_("Attempt to upload duplicate image: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
# NOTE(dosaboy): do not delete the image since it is likely that this
# conflict is a result of another concurrent upload that will be
# successful.
notifier.error('image.upload', msg)
raise webob.exc.HTTPConflict(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden upload attempt: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except store_api.StorageFull as e:
msg = (_("Image storage media is full: %s") %
utils.exception_to_str(e))
LOG.error(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
request=req,
content_type='text/plain')
except store_api.StorageWriteDenied as e:
msg = (_("Insufficient permissions on image storage media: %s") %
utils.exception_to_str(e))
LOG.error(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPServiceUnavailable(explanation=msg,
request=req,
content_type='text/plain')
except exception.ImageSizeLimitExceeded as e:
msg = (_("Denying attempt to upload image larger than %d bytes.")
% CONF.image_size_cap)
LOG.warn(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
request=req,
content_type='text/plain')
except exception.StorageQuotaFull as e:
msg = (_("Denying attempt to upload image because it exceeds the "
"quota: %s") % utils.exception_to_str(e))
LOG.warn(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
request=req,
content_type='text/plain')
except webob.exc.HTTPError:
# NOTE(bcwaldon): Ideally, we would just call 'raise' here,
# but something in the above function calls is affecting the
# exception context and we must explicitly re-raise the
# caught exception.
msg = _LE("Received HTTP error while uploading image %s") % image_id
notifier.error('image.upload', msg)
with excutils.save_and_reraise_exception():
LOG.exception(msg)
safe_kill(req, image_id, 'saving')
except (ValueError, IOError) as e:
msg = _("Client disconnected before sending all data to backend")
LOG.warn(msg)
safe_kill(req, image_id, 'saving')
raise webob.exc.HTTPBadRequest(explanation=msg,
content_type="text/plain",
request=req)
except Exception as e:
msg = _("Failed to upload image %s") % image_id
LOG.exception(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPInternalServerError(explanation=msg,
request=req,
content_type='text/plain')
return image_meta, location_data
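# --- Illustrative sketch (not part of glance) ---
# safe_kill() is meant to be called from exception handlers, so a registry
# failure must be logged rather than raised; the mock library and the fake
# request object below are assumptions made for this demo.
if __name__ == '__main__':
    import mock
    with mock.patch.object(registry, 'update_image_metadata',
                           side_effect=RuntimeError('registry down')):
        safe_kill(mock.Mock(context=None), 'image-id', 'saving')  # logs only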
|
apache-2.0
| -4,218,632,327,757,525,500
| 41.225589
| 79
| 0.553225
| false
|
ericnewton/fluo-deploy
|
bin/impl/fluo_deploy/config.py
|
1
|
8132
|
# Copyright 2014 Fluo authors (see AUTHORS)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ConfigParser import ConfigParser
from util import get_num_ephemeral, exit, get_arch, get_ami
import os
from os.path import join
SERVICES = ['zookeeper', 'namenode', 'resourcemanager', 'accumulomaster', 'worker', 'fluo', 'metrics']
class DeployConfig(ConfigParser):
def __init__(self, deploy_path, config_path, hosts_path, cluster_name):
ConfigParser.__init__(self)
self.deploy_path = deploy_path
self.read(config_path)
self.hosts_path = hosts_path
self.cluster_name = cluster_name
self.ephemeral_root = 'ephemeral'
self.mount_root = '/media/' + self.ephemeral_root
self.device_root = '/dev/xvd'
self.metrics_drive_root = 'media-' + self.ephemeral_root
self.node_d = None
self.hosts = None
self.init_nodes()
def verify_config(self, action):
proxy = self.get('general', 'proxy_hostname')
if not proxy:
exit("ERROR - proxy.hostname must be set in fluo-deploy.props")
if proxy not in self.node_d:
exit("ERROR - The proxy (set by property proxy.hostname={0}) cannot be found in 'nodes' section of fluo-deploy.props".format(proxy))
if action != 'launch':
self.proxy_public_ip()
if action in ['launch', 'setup']:
self.get_image_id(self.get('ec2', 'default_instance_type'))
self.get_image_id(self.get('ec2', 'worker_instance_type'))
for service in SERVICES:
if service not in ['fluo', 'metrics']:
if not self.has_service(service):
exit("ERROR - Missing '{0}' service from [nodes] section of fluo-deploy.props".format(service))
def init_nodes(self):
self.node_d = {}
for (hostname, value) in self.items('nodes'):
if hostname in self.node_d:
        exit('Hostname {0} is declared more than once in the [nodes] section'.format(hostname))
service_list = []
for service in value.split(','):
if service in SERVICES:
service_list.append(service)
else:
exit('Unknown service "%s" declared for node %s' % (service, hostname))
self.node_d[hostname] = service_list
def default_num_ephemeral(self):
return get_num_ephemeral(self.get('ec2', 'default_instance_type'))
def worker_num_ephemeral(self):
return get_num_ephemeral(self.get('ec2', 'worker_instance_type'))
def max_ephemeral(self):
return max((self.worker_num_ephemeral(), self.default_num_ephemeral()))
def node_type_map(self):
node_types = {}
node_list = [('default', self.default_num_ephemeral()), ('worker', self.worker_num_ephemeral())]
for (ntype, num_ephemeral) in node_list:
node_types[ntype] = {'mounts': self.mounts(num_ephemeral), 'devices': self.devices(num_ephemeral)}
return node_types
def node_type(self, hostname):
if 'worker' in self.node_d[hostname]:
return 'worker'
return 'default'
def num_ephemeral(self, hostname):
if 'worker' in self.node_d[hostname]:
return self.worker_num_ephemeral()
else:
return self.default_num_ephemeral()
def mounts(self, num_ephemeral):
mounts = []
for i in range(0, num_ephemeral):
mounts.append(self.mount_root + str(i))
return tuple(mounts)
def devices(self, num_ephemeral):
devices = []
for i in range(0, num_ephemeral):
devices.append(self.device_root + chr(ord('b') + i))
return tuple(devices)
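  # For example (illustrative): with 2 ephemeral drives, mounts(2) returns
  # ('/media/ephemeral0', '/media/ephemeral1') and devices(2) returns
  # ('/dev/xvdb', '/dev/xvdc').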
def metrics_drive_ids(self):
drive_ids = []
for i in range(0, self.max_ephemeral()):
drive_ids.append(self.metrics_drive_root + str(i))
return tuple(drive_ids)
def version(self, software_id):
return self.get('general', software_id + '_version')
def sha256(self, software_id):
return self.get('general', software_id + '_sha256')
def get_image_id(self, instance_type):
if get_arch(instance_type) == 'pvm':
exit("ERROR - Configuration contains instance type '{0}' that uses pvm architecture. Only hvm architecture is supported!".format(instance_type))
return get_ami(instance_type, self.get('ec2', 'region'))
def instance_tags(self):
retd = {}
if self.has_option('ec2', 'instance.tags'):
value = self.get('ec2', 'instance.tags')
if value:
for kv in value.split(','):
(key, val) = kv.split(':')
retd[key] = val
return retd
def nodes(self):
return self.node_d
def get_node(self, hostname):
return self.node_d[hostname]
def has_service(self, service):
for (hostname, service_list) in self.node_d.items():
if service in service_list:
return True
return False
def get_host_services(self):
retval = []
for (hostname, service_list) in self.node_d.items():
retval.append((hostname, ' '.join(service_list)))
retval.sort()
return retval
def get_service_private_ips(self, service):
retval = []
for (hostname, service_list) in self.node_d.items():
if service in service_list:
retval.append(self.get_private_ip(hostname))
retval.sort()
return retval
def get_service_hostnames(self, service):
retval = []
for (hostname, service_list) in self.node_d.items():
if service in service_list:
retval.append(hostname)
retval.sort()
return retval
def get_non_proxy(self):
retval = []
proxy_ip = self.get_private_ip(self.get('general', 'proxy_hostname'))
for (hostname, (private_ip, public_ip)) in self.hosts.items():
if private_ip != proxy_ip:
retval.append((private_ip, hostname))
retval.sort()
return retval
def get_private_ip_hostnames(self):
retval = []
for (hostname, (private_ip, public_ip)) in self.hosts.items():
retval.append((private_ip, hostname))
retval.sort()
return retval
def parse_hosts(self):
if not os.path.isfile(self.hosts_path):
exit('ERROR - A hosts file does not exist at %s' % self.hosts_path)
self.hosts = {}
with open(self.hosts_path) as f:
for line in f:
line = line.strip()
if line.startswith("#") or not line:
continue
args = line.split(' ')
if len(args) == 2:
self.hosts[args[0]] = (args[1], None)
elif len(args) == 3:
self.hosts[args[0]] = (args[1], args[2])
else:
exit('ERROR - Bad line %s in hosts %s' % (line, self.hosts_path))
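  # Expected hosts file format (illustrative):
  #   <hostname> <private_ip> [<public_ip>]
  # e.g.
  #   leader1 10.0.0.1 54.10.0.1
  #   worker1 10.0.0.2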
def get_hosts(self):
if self.hosts is None:
self.parse_hosts()
return self.hosts
def get_private_ip(self, hostname):
return self.get_hosts()[hostname][0]
def get_public_ip(self, hostname):
return self.get_hosts()[hostname][1]
def proxy_public_ip(self):
retval = self.get_public_ip(self.get('general', 'proxy_hostname'))
if not retval:
exit("ERROR - Leader {0} does not have a public IP".format(self.get('general', 'proxy_hostname')))
return retval
def proxy_private_ip(self):
return self.get_private_ip(self.get('general', 'proxy_hostname'))
def get_performance_prop(self, prop):
profile = self.get('performance', 'profile')
return self.get(profile, prop)
def print_all(self):
print 'proxy_public_ip = ', self.proxy_public_ip()
for (name, val) in self.items('general'):
print name, '=', val
for (name, val) in self.items('ec2'):
print name, '=', val
def print_property(self, key):
if key == 'proxy.public.ip':
print self.proxy_public_ip()
return
else:
for section in self.sections():
if self.has_option(section, key):
print self.get(section, key)
return
exit("Property '{0}' was not found".format(key))
|
apache-2.0
| 4,391,550,371,819,126,000
| 31.790323
| 151
| 0.641048
| false
|
infothrill/python-viscosity-app
|
viscosity_app/vpn.py
|
1
|
4333
|
"""
This module provides procedures to interact in a programmatic way with the
application "Viscosity" from http://www.sparklabs.com/viscosity/ using the
OS X applescripting interface.
"""
import logging
import time
import applescript
from .observer import Subject
EVT_VPN_STOPPED = 100
EVT_VPN_STARTED = 101
def connect(connection_name):
thescript = """tell application "Viscosity" to connect \"%s\"""" % connection_name
logging.info("VPN: connecting to '%s'", connection_name)
return applescript.AppleScript(thescript).run()
def disconnect_all():
thescript = """tell application "Viscosity" to disconnectall\n"""
logging.debug("disconnecting all viscosity connections")
return applescript.AppleScript(thescript).run()
def disconnect(connection_name):
thescript = """tell application "Viscosity" to disconnect \"%s\"\n""" % connection_name
logging.debug("disconnecting viscosity connection '%s'", connection_name)
return applescript.AppleScript(thescript).run()
def get_active_connection_names():
thescript = """tell application "Viscosity"
set connames to name of connections where state is equal to "Connected"
return connames
end tell"""
try:
names = applescript.AppleScript(thescript).run()
except applescript.ScriptError as exc:
logging.debug("An Apple script error occured while querying active connections", exc_info=exc)
return ()
else:
return names
def get_all_connection_names():
thescript = """tell application "Viscosity"
set connames to name of connections
end tell
return connames"""
logging.debug("getting viscosity connection names")
return applescript.AppleScript(thescript).run()
class VpnConnection(object):
'''
An Applescript based controller for Viscosity.app
(http://www.sparklabs.com/viscosity/)
'''
def __init__(self, connection_name):
super(VpnConnection, self).__init__()
if connection_name not in get_all_connection_names():
raise ValueError("Connection '%s' not found in Viscosity!" % connection_name)
self.__connection_name = connection_name
@property
def name(self):
return self.__connection_name
def connect(self):
_cur_conns = get_active_connection_names()
if self.__connection_name in _cur_conns:
return True
elif len(_cur_conns) > 0:
logging.info("VPN connect(%s): already connected to non-preferred VPN(s): %r", self.__connection_name, _cur_conns)
connect(self.__connection_name)
# wait for it to connect
max_wait = 30 # seconds
current_wait = 0
        while current_wait < max_wait:
            _cur_conns = get_active_connection_names()
            if self.__connection_name in _cur_conns:
                break
            time.sleep(0.5)
            current_wait += 0.5  # advance the timer so the wait can time out
if self.__connection_name in _cur_conns:
logging.info("VPN: connected to '%s'", self.__connection_name)
return True
else:
logging.warn("VPN: failed to connect to '%s'", self.__connection_name)
return False
def disconnect(self):
if self.is_connected():
disconnect(self.__connection_name)
def is_connected(self):
return self.__connection_name in get_active_connection_names()
class VpnControllerSubject(Subject):
'''
A class capable of monitoring a specific Viscosity VPN connection and
notifying observers about changes in the status of the connection.
'''
def __init__(self, vpn):
super(VpnControllerSubject, self).__init__()
self.connection = vpn
def refresh(self):
self.connected = self.connection.is_connected()
@property
def connected(self):
if not hasattr(self, '_connected'):
return None
else:
return self._connected
@connected.setter
def connected(self, value):
oldvalue = self.connected
self._connected = value # pylint: disable=W0201
if oldvalue != value:
if value is True:
self.notifyObservers(EVT_VPN_STARTED, "VPN('%s') is connected" % self.connection.name)
else:
self.notifyObservers(EVT_VPN_STOPPED, "VPN('%s') is disconnected" % self.connection.name)
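# --- Illustrative usage (the connection name below is hypothetical) ---
# Polls a Viscosity connection and notifies observers on state changes; the
# observer registration API is assumed to come from the sibling .observer
# module's Subject class.
if __name__ == '__main__':
    vpn = VpnConnection('Office VPN')  # must exist in Viscosity
    monitor = VpnControllerSubject(vpn)
    monitor.refresh()  # sets .connected and fires EVT_VPN_STARTED/STOPPED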
|
mit
| 6,369,262,081,582,387,000
| 31.825758
| 126
| 0.648973
| false
|
yackj/GameAnalysis
|
test/gameio_test.py
|
1
|
26631
|
import copy
import json
import warnings
import numpy as np
import pytest
from gameanalysis import gameio
from gameanalysis import rsgame
from gameanalysis import utils
SERIAL = gameio.gameserializer(['role'], [['strat1', 'strat2']])
SERIAL2 = gameio.gameserializer(['a', 'b'], [['bar', 'foo'], ['baz']])
GAME = rsgame.samplegame(
[2], [2],
[[2, 0],
[1, 1],
[0, 2]],
[
[[[-1, 0, 1], [0, 0, 0]],
[[9, 10, 11], [21, 20, 19]]],
[[[0, 0, 0, 0], [32, 28, 30, 30]]],
],
)
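# GAME is a one-role, two-player sample game over the three profiles above;
# the first two profiles carry 3 payoff observations each and the third
# carries 4, which is what the "3 to 4 observations per profile" output
# asserted in test_output() below relies on.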
BASEGAME_JSON = {
'players': {
'role': 2,
},
'strategies': {
'role': [
'strat1',
'strat2',
],
},
}
GAME_JSON = {
'players': {
'role': 2,
},
'strategies': {
'role': [
'strat1',
'strat2',
],
},
'profiles': [
{
'role': [
('strat1', 2, 0.0),
],
},
{
'role': [
('strat1', 1, 10.0),
('strat2', 1, 20.0),
],
},
{
'role': [
('strat2', 2, 30.0),
],
},
],
}
SAMPLEGAME_JSON = {
'players': {
'role': 2,
},
'strategies': {
'role': [
'strat1',
'strat2',
],
},
'profiles': [
{
'role': [
('strat1', 2, [-1.0, 0.0, 1.0]),
],
},
{
'role': [
('strat1', 1, [9.0, 10.0, 11.0]),
('strat2', 1, [21.0, 20.0, 19.0]),
],
},
{
'role': [
('strat2', 2, [32.0, 28.0, 30.0, 30.0]),
],
},
],
}
EMPTYGAME_JSON = {
'roles': [
{
'name': 'role',
'strategies': [
'strat1',
'strat2',
],
'count': 2,
},
],
}
SUMMARYGAME_JSON = {
'roles': [
{
'name': 'role',
'strategies': [
'strat1',
'strat2',
],
'count': 2,
},
],
'profiles': [
{
'symmetry_groups': [
{
'payoff': 0,
'count': 2,
'strategy': 'strat1',
'role': 'role',
},
],
},
{
'symmetry_groups': [
{
'payoff': 10,
'count': 1,
'strategy': 'strat1',
'role': 'role',
},
{
'payoff': 20,
'count': 1,
'strategy': 'strat2',
'role': 'role',
},
],
},
{
'symmetry_groups': [
{
'payoff': 30,
'count': 2,
'strategy': 'strat2',
'role': 'role',
},
],
},
],
}
OBSERVATIONGAME_JSON = {
'roles': [
{
'name': 'role',
'strategies': [
'strat1',
'strat2',
],
'count': 2,
},
],
'profiles': [
{
'symmetry_groups': [
{
'strategy': 'strat1',
'id': 0,
'role': 'role',
'count': 2,
},
],
'observations': [
{
'symmetry_groups': [
{
'id': 0,
'payoff': -1,
},
],
},
{
'symmetry_groups': [
{
'id': 0,
'payoff': 0,
},
],
},
{
'symmetry_groups': [
{
'id': 0,
'payoff': 1,
},
],
},
],
},
{
'symmetry_groups': [
{
'strategy': 'strat1',
'id': 1,
'role': 'role',
'count': 1,
},
{
'strategy': 'strat2',
'id': 2,
'role': 'role',
'count': 1,
},
],
'observations': [
{
'symmetry_groups': [
{
'id': 1,
'payoff': 9,
},
{
'id': 2,
'payoff': 21,
},
],
},
{
'symmetry_groups': [
{
'id': 1,
'payoff': 10,
},
{
'id': 2,
'payoff': 20,
},
],
},
{
'symmetry_groups': [
{
'id': 1,
'payoff': 11,
},
{
'id': 2,
'payoff': 19,
},
],
},
],
},
{
'symmetry_groups': [
{
'strategy': 'strat2',
'id': 3,
'role': 'role',
'count': 2,
},
],
'observations': [
{
'symmetry_groups': [
{
'id': 3,
'payoff': 32,
},
],
},
{
'symmetry_groups': [
{
'id': 3,
'payoff': 28,
},
],
},
{
'symmetry_groups': [
{
'id': 3,
'payoff': 30,
},
],
},
{
'symmetry_groups': [
{
'id': 3,
'payoff': 30,
},
],
},
],
},
],
}
FULLGAME_JSON = {
'roles': [
{
'name': 'role',
'strategies': [
'strat1',
'strat2',
],
'count': 2,
},
],
'profiles': [
{
'symmetry_groups': [
{
'strategy': 'strat1',
'id': 0,
'role': 'role',
'count': 2,
},
],
'observations': [
{
'players': [
{
'sid': 0,
'p': -2,
},
{
'sid': 0,
'p': 0,
},
],
},
{
'players': [
{
'sid': 0,
'p': 0,
},
{
'sid': 0,
'p': 0,
},
],
},
{
'players': [
{
'sid': 0,
'p': 0,
},
{
'sid': 0,
'p': 2,
},
],
},
],
},
{
'symmetry_groups': [
{
'strategy': 'strat1',
'id': 1,
'role': 'role',
'count': 1,
},
{
'strategy': 'strat2',
'id': 2,
'role': 'role',
'count': 1,
},
],
'observations': [
{
'players': [
{
'sid': 1,
'p': 9,
},
{
'sid': 2,
'p': 21,
},
],
},
{
'players': [
{
'sid': 1,
'p': 10,
},
{
'sid': 2,
'p': 20,
},
],
},
{
'players': [
{
'sid': 1,
'p': 11,
},
{
'sid': 2,
'p': 19,
},
],
},
],
},
{
'symmetry_groups': [
{
'strategy': 'strat2',
'id': 3,
'role': 'role',
'count': 2,
},
],
'observations': [
{
'players': [
{
'sid': 3,
'p': 32,
},
{
'sid': 3,
'p': 32,
},
],
},
{
'players': [
{
'sid': 3,
'p': 30,
},
{
'sid': 3,
'p': 26,
},
],
},
{
'players': [
{
'sid': 3,
'p': 34,
},
{
'sid': 3,
'p': 26,
},
],
},
{
'players': [
{
'sid': 3,
'p': 28,
},
{
'sid': 3,
'p': 32,
},
],
},
],
},
],
}
@pytest.mark.parametrize('jgame', [BASEGAME_JSON, GAME_JSON, SAMPLEGAME_JSON,
EMPTYGAME_JSON, SUMMARYGAME_JSON,
OBSERVATIONGAME_JSON, FULLGAME_JSON])
def test_basegame_from_json(jgame):
gameio.read_basegame(jgame)
@pytest.mark.parametrize('jgame', [BASEGAME_JSON, GAME_JSON, SAMPLEGAME_JSON,
EMPTYGAME_JSON, SUMMARYGAME_JSON,
OBSERVATIONGAME_JSON, FULLGAME_JSON])
def test_game_from_json(jgame):
gameio.read_game(jgame)
@pytest.mark.parametrize('jgame', [BASEGAME_JSON, GAME_JSON, SAMPLEGAME_JSON,
EMPTYGAME_JSON, SUMMARYGAME_JSON,
OBSERVATIONGAME_JSON, FULLGAME_JSON])
def test_samplegame_from_json(jgame):
gameio.read_samplegame(jgame)
@pytest.mark.parametrize('jgame', [BASEGAME_JSON, GAME_JSON, SAMPLEGAME_JSON,
EMPTYGAME_JSON, SUMMARYGAME_JSON,
OBSERVATIONGAME_JSON, FULLGAME_JSON])
def test_basegame_equality(jgame):
game, serial = gameio.read_basegame(jgame)
assert game == rsgame.basegame_copy(GAME)
assert serial == SERIAL
@pytest.mark.parametrize('jgame', [GAME_JSON, SAMPLEGAME_JSON,
SUMMARYGAME_JSON, OBSERVATIONGAME_JSON,
FULLGAME_JSON])
def test_game_equality(jgame):
game, serial = gameio.read_game(jgame)
assert rsgame.game_copy(game) == rsgame.game_copy(GAME)
assert serial == SERIAL
@pytest.mark.parametrize('jgame', [SAMPLEGAME_JSON, OBSERVATIONGAME_JSON,
FULLGAME_JSON])
def test_samplegame_equality(jgame):
game, serial = gameio.read_samplegame(jgame)
assert game == GAME
assert serial == SERIAL
def test_output():
    EMPTY_JSON = BASEGAME_JSON.copy()
    EMPTY_JSON['profiles'] = []
SAMPLEDGAME_JSON = copy.deepcopy(GAME_JSON)
for prof in SAMPLEDGAME_JSON['profiles']:
for pays in prof.values():
pays[:] = [(s, c, [p]) for s, c, p in pays]
assert BASEGAME_JSON == SERIAL.to_basegame_json(GAME)
assert BASEGAME_JSON == SERIAL.to_basegame_json(rsgame.game_copy(GAME))
assert BASEGAME_JSON == SERIAL.to_basegame_json(rsgame.basegame_copy(GAME))
assert GAME_JSON == SERIAL.to_game_json(GAME)
assert GAME_JSON == SERIAL.to_game_json(rsgame.game_copy(GAME))
    assert EMPTY_JSON == SERIAL.to_game_json(rsgame.basegame_copy(GAME))
assert SAMPLEGAME_JSON == SERIAL.to_samplegame_json(GAME)
assert SAMPLEDGAME_JSON == SERIAL.to_samplegame_json(
rsgame.game_copy(GAME))
    assert EMPTY_JSON == SERIAL.to_samplegame_json(
        rsgame.basegame_copy(GAME))
expected = """
BaseGame:
Roles: role
Players:
2x role
Strategies:
role:
strat1
strat2
"""[1:-1]
assert expected == SERIAL.to_basegame_printstr(GAME)
assert expected == SERIAL.to_basegame_printstr(rsgame.game_copy(GAME))
assert expected == SERIAL.to_basegame_printstr(rsgame.basegame_copy(GAME))
expected = """
BaseGame:
Roles: a, b
Players:
3x a
4x b
Strategies:
a:
bar
foo
b:
baz
"""[1:-1]
assert expected == SERIAL2.to_basegame_printstr(rsgame.basegame(
[3, 4], SERIAL2.num_strategies))
expected = """
Game:
Roles: role
Players:
2x role
Strategies:
role:
strat1
strat2
payoff data for 3 out of 3 profiles
"""[1:-1]
assert expected == SERIAL.to_game_printstr(GAME)
assert expected == SERIAL.to_game_printstr(rsgame.game_copy(GAME))
expected = """
Game:
Roles: role
Players:
2x role
Strategies:
role:
strat1
strat2
payoff data for 0 out of 3 profiles
"""[1:-1]
assert expected == SERIAL.to_game_printstr(rsgame.basegame_copy(GAME))
expected = """
SampleGame:
Roles: role
Players:
2x role
Strategies:
role:
strat1
strat2
payoff data for 3 out of 3 profiles
3 to 4 observations per profile
"""[1:-1]
assert expected == SERIAL.to_samplegame_printstr(GAME)
expected = """
SampleGame:
Roles: role
Players:
2x role
Strategies:
role:
strat1
strat2
payoff data for 3 out of 3 profiles
1 observation per profile
"""[1:-1]
assert expected == SERIAL.to_samplegame_printstr(rsgame.game_copy(GAME))
expected = """
SampleGame:
Roles: role
Players:
2x role
Strategies:
role:
strat1
strat2
payoff data for 0 out of 3 profiles
no observations
"""[1:-1]
assert expected == SERIAL.to_samplegame_printstr(
rsgame.basegame_copy(GAME))
@pytest.mark.parametrize('_', range(20))
def test_sorted_strategy_loading(_):
with open('test/hard_nash_game_1.json') as f:
_, serial = gameio.read_basegame(json.load(f))
assert utils.is_sorted(serial.role_names), \
"loaded json game didn't have sorted roles"
assert all(utils.is_sorted(strats) for strats in serial.strat_names), \
"loaded json game didn't have sorted strategies"
def test_to_from_prof_json():
prof = [6, 5, 3]
json_prof = {'a': {'foo': 5, 'bar': 6}, 'b': {'baz': 3}}
assert SERIAL2.to_prof_json(prof) == json_prof
new_prof = SERIAL2.from_prof_json(json_prof)
assert np.all(new_prof == prof)
assert new_prof.dtype == int
player_prof = {'players': [
{'role': 'a', 'strategy': 'foo', 'payoff': 0},
{'role': 'a', 'strategy': 'foo', 'payoff': 0},
{'role': 'a', 'strategy': 'foo', 'payoff': 0},
{'role': 'a', 'strategy': 'foo', 'payoff': 0},
{'role': 'a', 'strategy': 'foo', 'payoff': 0},
{'role': 'a', 'strategy': 'bar', 'payoff': 0},
{'role': 'a', 'strategy': 'bar', 'payoff': 0},
{'role': 'a', 'strategy': 'bar', 'payoff': 0},
{'role': 'a', 'strategy': 'bar', 'payoff': 0},
{'role': 'a', 'strategy': 'bar', 'payoff': 0},
{'role': 'a', 'strategy': 'bar', 'payoff': 0},
{'role': 'b', 'strategy': 'baz', 'payoff': 0},
{'role': 'b', 'strategy': 'baz', 'payoff': 0},
{'role': 'b', 'strategy': 'baz', 'payoff': 0},
]}
new_prof = SERIAL2.from_prof_json(player_prof)
assert np.all(new_prof == prof)
assert new_prof.dtype == int
def test_to_from_payoff_json_roles():
pay = [1.0, 2.0, 3.0]
json_pay = {'a': {'foo': 2.0, 'bar': 1.0}, 'b': {'baz': 3.0}}
assert SERIAL2.to_payoff_json(pay) == json_pay
new_pay = SERIAL2.from_payoff_json(json_pay)
assert np.allclose(new_pay, pay)
assert new_pay.dtype == float
player_pay = {'players': [
{'role': 'a', 'strategy': 'foo', 'payoff': 4},
{'role': 'a', 'strategy': 'foo', 'payoff': 2},
{'role': 'a', 'strategy': 'foo', 'payoff': 0},
{'role': 'a', 'strategy': 'foo', 'payoff': 4},
{'role': 'a', 'strategy': 'foo', 'payoff': 0},
{'role': 'a', 'strategy': 'bar', 'payoff': 0},
{'role': 'a', 'strategy': 'bar', 'payoff': 2},
{'role': 'a', 'strategy': 'bar', 'payoff': 2},
{'role': 'a', 'strategy': 'bar', 'payoff': 0},
{'role': 'a', 'strategy': 'bar', 'payoff': 2},
{'role': 'a', 'strategy': 'bar', 'payoff': 0},
{'role': 'b', 'strategy': 'baz', 'payoff': 0},
{'role': 'b', 'strategy': 'baz', 'payoff': 6},
{'role': 'b', 'strategy': 'baz', 'payoff': 3},
]}
new_pay = SERIAL2.from_payoff_json(player_pay)
assert np.allclose(new_pay, pay)
assert new_pay.dtype == float
def test_to_from_mix_json():
mix = [.6, .4, 1]
json_mix = {'a': {'foo': .4, 'bar': .6}, 'b': {'baz': 1}}
assert SERIAL2.to_mix_json(mix) == json_mix
new_mix = SERIAL2.from_mix_json(json_mix)
assert np.all(new_mix == mix)
assert new_mix.dtype == float
def test_to_from_subgame_json():
sub = [True, False, True]
json_sub = {'a': ['bar'], 'b': ['baz']}
assert SERIAL2.to_subgame_json(sub) == json_sub
new_sub = SERIAL2.from_subgame_json(json_sub)
assert np.all(new_sub == sub)
assert new_sub.dtype == bool
def test_to_from_prof_str():
prof = [6, 5, 3]
prof_str = 'a: 5 foo, 6 bar; b: 3 baz'
assert np.all(SERIAL2.from_prof_str(prof_str) == prof)
assert set(SERIAL2.to_prof_str(prof)) == set(prof_str)
def test_to_from_samplepay_json():
prof = [3, 0, 4]
spay = [[3, 0, 7], [4, 0, 8], [5, 0, 9]]
json_spay = {'a': {'bar': [3, 4, 5]}, 'b': {'baz': [7, 8, 9]}}
json_spay_0 = {'a': {'bar': [3, 4, 5], 'foo': [0, 0, 0]},
'b': {'baz': [7, 8, 9]}}
assert SERIAL2.to_samplepay_json(spay, prof) == json_spay
assert SERIAL2.to_samplepay_json(spay) == json_spay_0
assert np.allclose(SERIAL2.from_samplepay_json(json_spay), spay)
with pytest.raises(AssertionError):
SERIAL2.from_samplepay_json(
json_spay, np.empty((0, SERIAL2.num_role_strats)))
json_prof_spay = {'a': [('bar', 3, [3, 4, 5])],
'b': [('baz', 4, [7, 8, 9])]}
with pytest.raises(AssertionError):
SERIAL2.from_samplepay_json(
json_prof_spay, np.empty((0, SERIAL2.num_role_strats)))
def test_to_from_profsamplepay_json():
prof = [3, 0, 4]
spay = [[3, 0, 7], [4, 0, 8], [5, 0, 9]]
json_profspay = {'a': [('bar', 3, [3, 4, 5])],
'b': [('baz', 4, [7, 8, 9])]}
assert SERIAL2.to_profsamplepay_json(spay, prof) == json_profspay
p, sp = SERIAL2.from_profsamplepay_json(json_profspay)
assert np.all(p == prof)
assert np.allclose(sp, spay)
def test_to_prof_printstr():
prof = [6, 5, 3]
expected = """
a:
bar: 6
foo: 5
b:
baz: 3
"""[1:]
assert SERIAL2.to_prof_printstr(prof) == expected
def test_to_from_mix_printstr():
mix = [0.3, 0.7, 1]
expected = """
a:
bar: 30.00%
foo: 70.00%
b:
baz: 100.00%
"""[1:]
assert SERIAL2.to_mix_printstr(mix) == expected
def test_to_from_subgame_printstr():
sub = [True, False, True]
expected = """
a:
bar
b:
baz
"""[1:]
assert SERIAL2.to_subgame_printstr(sub) == expected
def test_to_from_role_json():
role = [6, 3]
json_role = {'a': 6, 'b': 3}
assert SERIAL2.to_role_json(role) == json_role
assert np.all(SERIAL2.from_role_json(json_role) == role)
assert SERIAL2.from_role_json(json_role).dtype == float
def test_deviation_payoff_json():
prof = [3, 0, 4]
devpay = [5]
json_devpay = {'a': {'bar': {'foo': 5}}, 'b': {'baz': {}}}
assert SERIAL2.to_deviation_payoff_json(devpay, prof) == json_devpay
prof = [2, 1, 4]
devpay = [5, 4]
json_devpay = {'a': {'bar': {'foo': 5},
'foo': {'bar': 4}}, 'b': {'baz': {}}}
assert SERIAL2.to_deviation_payoff_json(devpay, prof) == json_devpay
def test_to_pay_json():
jprof = SERIAL.to_payoff_json(GAME.payoffs[0], GAME.profiles[0])
assert jprof == {'role': {'strat1': 0}}
jprof = SERIAL.to_payoff_json(GAME.payoffs[0])
assert jprof == {'role': {'strat1': 0, 'strat2': 0}}
jprof = SERIAL.to_payoff_json(GAME.payoffs[1], GAME.profiles[1])
assert jprof == {'role': {'strat1': 10, 'strat2': 20}}
jprof = SERIAL.to_payoff_json(GAME.payoffs[1])
assert jprof == {'role': {'strat1': 10, 'strat2': 20}}
jprof = SERIAL.to_payoff_json(GAME.payoffs[2], GAME.profiles[2])
assert jprof == {'role': {'strat2': 30}}
jprof = SERIAL.to_payoff_json(GAME.payoffs[2])
assert jprof == {'role': {'strat1': 0, 'strat2': 30}}
jprof = SERIAL.to_profpay_json(GAME.payoffs[0], GAME.profiles[0])
assert jprof == {'role': [('strat1', 2, 0)]}
jprof = {k: set(v) for k, v in SERIAL.to_profpay_json(
GAME.payoffs[1], GAME.profiles[1]).items()}
assert jprof == {'role': set([('strat1', 1, 10), ('strat2', 1, 20)])}
jprof = SERIAL.to_profpay_json(GAME.payoffs[2], GAME.profiles[2])
assert jprof == {'role': [('strat2', 2, 30)]}
@pytest.mark.parametrize('jgame', [GAME_JSON, SAMPLEGAME_JSON,
SUMMARYGAME_JSON, OBSERVATIONGAME_JSON,
FULLGAME_JSON])
def test_to_from_payoff_json(jgame):
_, serial = gameio.read_basegame(jgame)
payoffs = np.concatenate([serial.from_payoff_json(p)[None]
for p in jgame['profiles']])
expected = [[0, 0],
[10, 20],
[0, 30]]
assert np.allclose(expected, payoffs)
def test_load_empty_observations():
serial = gameio.gameserializer(['a', 'b'], [['bar', 'foo'], ['baz']])
profile = {
'symmetry_groups': [
{
'strategy': 'bar',
'id': 0,
'role': 'a',
'count': 1,
},
{
'strategy': 'baz',
'id': 1,
'role': 'b',
'count': 1,
},
],
'observations': [],
}
payoff = serial.from_payoff_json(profile)
assert np.allclose(payoff, [np.nan, 0, np.nan], equal_nan=True)
profile = {
'a': {
'bar': []
},
'b': {
'baz': []
},
}
payoff = serial.from_payoff_json(profile)
assert np.allclose(payoff, [np.nan, 0, np.nan], equal_nan=True)
def test_sorted_strategy_warning():
with pytest.raises(UserWarning), warnings.catch_warnings():
warnings.simplefilter('error')
gameio.gameserializer(['role'], [['b', 'a']])
def test_invalid_game():
with pytest.raises(ValueError):
SERIAL.from_basegame_json({})
with pytest.raises(ValueError):
gameio.read_basegame({})
def test_repr():
assert repr(SERIAL) is not None
def test_strat_name():
serial = gameio.gameserializer(['a', 'b'], [['e', 'q', 'w'], ['r', 't']])
for i, s in enumerate(['e', 'q', 'w', 'r', 't']):
assert s == serial.strat_name(i)
def test_index():
serial = gameio.gameserializer(['a', 'b'], [['e', 'q', 'w'], ['r', 't']])
assert 0 == serial.role_index('a')
assert 1 == serial.role_index('b')
assert 0 == serial.role_strat_index('a', 'e')
assert 1 == serial.role_strat_index('a', 'q')
assert 2 == serial.role_strat_index('a', 'w')
assert 3 == serial.role_strat_index('b', 'r')
assert 4 == serial.role_strat_index('b', 't')
def test_serialization():
json.dumps(SERIAL.to_basegame_json(GAME))
json.dumps(SERIAL.to_game_json(GAME))
json.dumps(SERIAL.to_samplegame_json(GAME))
|
apache-2.0
| -856,634,028,782,670,300
| 27.121436
| 79
| 0.377117
| false
|
denisenkom/django
|
tests/serializers_regress/tests.py
|
1
|
21745
|
"""
A test spanning all the capabilities of all the serializers.
This class defines sample data and a dynamically generated
test case that is capable of testing the capabilities of
the serializers. This includes all valid data values, plus
forward, backwards and self references.
"""
from __future__ import unicode_literals
import datetime
import decimal
from unittest import expectedFailure, skipUnless
try:
import yaml
except ImportError:
yaml = None
from django.core import serializers
from django.core.serializers import SerializerDoesNotExist
from django.core.serializers.base import DeserializationError
from django.core.serializers.xml_serializer import DTDForbidden
from django.db import connection, models
from django.http import HttpResponse
from django.test import TestCase
from django.utils import six
from django.utils.encoding import force_text
from django.utils.functional import curry
from .models import (BinaryData, BooleanData, CharData, DateData, DateTimeData, EmailData,
FileData, FilePathData, DecimalData, FloatData, IntegerData, IPAddressData,
GenericIPAddressData, NullBooleanData, PositiveIntegerData,
PositiveSmallIntegerData, SlugData, SmallData, TextData, TimeData,
GenericData, Anchor, UniqueAnchor, FKData, M2MData, O2OData,
FKSelfData, M2MSelfData, FKDataToField, FKDataToO2O, M2MIntermediateData,
Intermediate, BooleanPKData, CharPKData, EmailPKData, FilePathPKData,
DecimalPKData, FloatPKData, IntegerPKData, IPAddressPKData,
GenericIPAddressPKData, PositiveIntegerPKData,
PositiveSmallIntegerPKData, SlugPKData, SmallPKData,
AutoNowDateTimeData, ModifyingSaveData, InheritAbstractModel, BaseModel,
ExplicitInheritBaseModel, InheritBaseModel, ProxyBaseModel,
ProxyProxyBaseModel, BigIntegerData, LengthModel, Tag, ComplexModel,
NaturalKeyAnchor, FKDataNaturalKey)
# A set of functions that can be used to recreate
# test data objects of various kinds.
# The save method is a raw base model save, to make
# sure that the data in the database matches the
# exact test case.
def data_create(pk, klass, data):
instance = klass(id=pk)
instance.data = data
models.Model.save_base(instance, raw=True)
return [instance]
def generic_create(pk, klass, data):
instance = klass(id=pk)
instance.data = data[0]
models.Model.save_base(instance, raw=True)
for tag in data[1:]:
instance.tags.create(data=tag)
return [instance]
def fk_create(pk, klass, data):
instance = klass(id=pk)
setattr(instance, 'data_id', data)
models.Model.save_base(instance, raw=True)
return [instance]
def m2m_create(pk, klass, data):
instance = klass(id=pk)
models.Model.save_base(instance, raw=True)
instance.data = data
return [instance]
def im2m_create(pk, klass, data):
instance = klass(id=pk)
models.Model.save_base(instance, raw=True)
return [instance]
def im_create(pk, klass, data):
instance = klass(id=pk)
instance.right_id = data['right']
instance.left_id = data['left']
if 'extra' in data:
instance.extra = data['extra']
models.Model.save_base(instance, raw=True)
return [instance]
def o2o_create(pk, klass, data):
instance = klass()
instance.data_id = data
models.Model.save_base(instance, raw=True)
return [instance]
def pk_create(pk, klass, data):
instance = klass()
instance.data = data
models.Model.save_base(instance, raw=True)
return [instance]
def inherited_create(pk, klass, data):
instance = klass(id=pk,**data)
# This isn't a raw save because:
# 1) we're testing inheritance, not field behavior, so none
# of the field values need to be protected.
# 2) saving the child class and having the parent created
# automatically is easier than manually creating both.
models.Model.save(instance)
created = [instance]
for klass,field in instance._meta.parents.items():
created.append(klass.objects.get(id=pk))
return created
# A set of functions that can be used to compare
# test data objects of various kinds
def data_compare(testcase, pk, klass, data):
instance = klass.objects.get(id=pk)
if klass == BinaryData and data is not None:
testcase.assertEqual(bytes(data), bytes(instance.data),
"Objects with PK=%d not equal; expected '%s' (%s), got '%s' (%s)" % (
pk, repr(bytes(data)), type(data), repr(bytes(instance.data)),
type(instance.data))
)
else:
testcase.assertEqual(data, instance.data,
"Objects with PK=%d not equal; expected '%s' (%s), got '%s' (%s)" % (
                pk, data, type(data), instance.data, type(instance.data))
)
def generic_compare(testcase, pk, klass, data):
instance = klass.objects.get(id=pk)
testcase.assertEqual(data[0], instance.data)
testcase.assertEqual(data[1:], [t.data for t in instance.tags.order_by('id')])
def fk_compare(testcase, pk, klass, data):
instance = klass.objects.get(id=pk)
testcase.assertEqual(data, instance.data_id)
def m2m_compare(testcase, pk, klass, data):
instance = klass.objects.get(id=pk)
testcase.assertEqual(data, [obj.id for obj in instance.data.order_by('id')])
def im2m_compare(testcase, pk, klass, data):
instance = klass.objects.get(id=pk)
    # nothing else to check; the get() above already fails if the instance
    # does not exist
def im_compare(testcase, pk, klass, data):
instance = klass.objects.get(id=pk)
testcase.assertEqual(data['left'], instance.left_id)
testcase.assertEqual(data['right'], instance.right_id)
if 'extra' in data:
testcase.assertEqual(data['extra'], instance.extra)
else:
testcase.assertEqual("doesn't matter", instance.extra)
def o2o_compare(testcase, pk, klass, data):
instance = klass.objects.get(data=data)
testcase.assertEqual(data, instance.data_id)
def pk_compare(testcase, pk, klass, data):
instance = klass.objects.get(data=data)
testcase.assertEqual(data, instance.data)
def inherited_compare(testcase, pk, klass, data):
instance = klass.objects.get(id=pk)
for key,value in data.items():
testcase.assertEqual(value, getattr(instance,key))
# Define some data types. Each data type is
# actually a pair of functions; one to create
# and one to compare objects of that type
data_obj = (data_create, data_compare)
generic_obj = (generic_create, generic_compare)
fk_obj = (fk_create, fk_compare)
m2m_obj = (m2m_create, m2m_compare)
im2m_obj = (im2m_create, im2m_compare)
im_obj = (im_create, im_compare)
o2o_obj = (o2o_create, o2o_compare)
pk_obj = (pk_create, pk_compare)
inherited_obj = (inherited_create, inherited_compare)
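# Illustrative sketch (editor's addition, never invoked by the suite): each
# pair above is consumed positionally -- index 0 creates fixtures and index 1
# verifies them -- exactly as serializerTest does for the whole test_data
# list further below.
def _example_pair_usage(testcase):
    func, pk, klass, datum = (data_obj, 10, CharData, "Test Char Data")
    func[0](pk, klass, datum)            # create the object via a raw save
    func[1](testcase, pk, klass, datum)  # compare it against the database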
test_data = [
# Format: (data type, PK value, Model Class, data)
(data_obj, 1, BinaryData, six.memoryview(b"\x05\xFD\x00")),
(data_obj, 2, BinaryData, None),
(data_obj, 5, BooleanData, True),
(data_obj, 6, BooleanData, False),
(data_obj, 10, CharData, "Test Char Data"),
(data_obj, 11, CharData, ""),
(data_obj, 12, CharData, "None"),
(data_obj, 13, CharData, "null"),
(data_obj, 14, CharData, "NULL"),
(data_obj, 15, CharData, None),
# (We use something that will fit into a latin1 database encoding here,
# because that is still the default used on many system setups.)
(data_obj, 16, CharData, '\xa5'),
(data_obj, 20, DateData, datetime.date(2006,6,16)),
(data_obj, 21, DateData, None),
(data_obj, 30, DateTimeData, datetime.datetime(2006,6,16,10,42,37)),
(data_obj, 31, DateTimeData, None),
(data_obj, 40, EmailData, "hovercraft@example.com"),
(data_obj, 41, EmailData, None),
(data_obj, 42, EmailData, ""),
(data_obj, 50, FileData, 'file:///foo/bar/whiz.txt'),
# (data_obj, 51, FileData, None),
(data_obj, 52, FileData, ""),
(data_obj, 60, FilePathData, "/foo/bar/whiz.txt"),
(data_obj, 61, FilePathData, None),
(data_obj, 62, FilePathData, ""),
(data_obj, 70, DecimalData, decimal.Decimal('12.345')),
(data_obj, 71, DecimalData, decimal.Decimal('-12.345')),
(data_obj, 72, DecimalData, decimal.Decimal('0.0')),
(data_obj, 73, DecimalData, None),
(data_obj, 74, FloatData, 12.345),
(data_obj, 75, FloatData, -12.345),
(data_obj, 76, FloatData, 0.0),
(data_obj, 77, FloatData, None),
(data_obj, 80, IntegerData, 123456789),
(data_obj, 81, IntegerData, -123456789),
(data_obj, 82, IntegerData, 0),
(data_obj, 83, IntegerData, None),
#(XX, ImageData
(data_obj, 90, IPAddressData, "127.0.0.1"),
(data_obj, 91, IPAddressData, None),
(data_obj, 95, GenericIPAddressData, "fe80:1424:2223:6cff:fe8a:2e8a:2151:abcd"),
(data_obj, 96, GenericIPAddressData, None),
(data_obj, 100, NullBooleanData, True),
(data_obj, 101, NullBooleanData, False),
(data_obj, 102, NullBooleanData, None),
(data_obj, 120, PositiveIntegerData, 123456789),
(data_obj, 121, PositiveIntegerData, None),
(data_obj, 130, PositiveSmallIntegerData, 12),
(data_obj, 131, PositiveSmallIntegerData, None),
(data_obj, 140, SlugData, "this-is-a-slug"),
(data_obj, 141, SlugData, None),
(data_obj, 142, SlugData, ""),
(data_obj, 150, SmallData, 12),
(data_obj, 151, SmallData, -12),
(data_obj, 152, SmallData, 0),
(data_obj, 153, SmallData, None),
(data_obj, 160, TextData, """This is a long piece of text.
It contains line breaks.
Several of them.
The end."""),
(data_obj, 161, TextData, ""),
(data_obj, 162, TextData, None),
(data_obj, 170, TimeData, datetime.time(10,42,37)),
(data_obj, 171, TimeData, None),
(generic_obj, 200, GenericData, ['Generic Object 1', 'tag1', 'tag2']),
(generic_obj, 201, GenericData, ['Generic Object 2', 'tag2', 'tag3']),
(data_obj, 300, Anchor, "Anchor 1"),
(data_obj, 301, Anchor, "Anchor 2"),
(data_obj, 302, UniqueAnchor, "UAnchor 1"),
(fk_obj, 400, FKData, 300), # Post reference
(fk_obj, 401, FKData, 500), # Pre reference
(fk_obj, 402, FKData, None), # Empty reference
(m2m_obj, 410, M2MData, []), # Empty set
(m2m_obj, 411, M2MData, [300,301]), # Post reference
(m2m_obj, 412, M2MData, [500,501]), # Pre reference
(m2m_obj, 413, M2MData, [300,301,500,501]), # Pre and Post reference
(o2o_obj, None, O2OData, 300), # Post reference
(o2o_obj, None, O2OData, 500), # Pre reference
(fk_obj, 430, FKSelfData, 431), # Pre reference
(fk_obj, 431, FKSelfData, 430), # Post reference
(fk_obj, 432, FKSelfData, None), # Empty reference
(m2m_obj, 440, M2MSelfData, []),
(m2m_obj, 441, M2MSelfData, []),
(m2m_obj, 442, M2MSelfData, [440, 441]),
(m2m_obj, 443, M2MSelfData, [445, 446]),
(m2m_obj, 444, M2MSelfData, [440, 441, 445, 446]),
(m2m_obj, 445, M2MSelfData, []),
(m2m_obj, 446, M2MSelfData, []),
(fk_obj, 450, FKDataToField, "UAnchor 1"),
(fk_obj, 451, FKDataToField, "UAnchor 2"),
(fk_obj, 452, FKDataToField, None),
(fk_obj, 460, FKDataToO2O, 300),
(im2m_obj, 470, M2MIntermediateData, None),
#testing post- and prereferences and extra fields
(im_obj, 480, Intermediate, {'right': 300, 'left': 470}),
(im_obj, 481, Intermediate, {'right': 300, 'left': 490}),
(im_obj, 482, Intermediate, {'right': 500, 'left': 470}),
(im_obj, 483, Intermediate, {'right': 500, 'left': 490}),
(im_obj, 484, Intermediate, {'right': 300, 'left': 470, 'extra': "extra"}),
(im_obj, 485, Intermediate, {'right': 300, 'left': 490, 'extra': "extra"}),
(im_obj, 486, Intermediate, {'right': 500, 'left': 470, 'extra': "extra"}),
(im_obj, 487, Intermediate, {'right': 500, 'left': 490, 'extra': "extra"}),
(im2m_obj, 490, M2MIntermediateData, []),
(data_obj, 500, Anchor, "Anchor 3"),
(data_obj, 501, Anchor, "Anchor 4"),
(data_obj, 502, UniqueAnchor, "UAnchor 2"),
(pk_obj, 601, BooleanPKData, True),
(pk_obj, 602, BooleanPKData, False),
(pk_obj, 610, CharPKData, "Test Char PKData"),
# (pk_obj, 620, DatePKData, datetime.date(2006,6,16)),
# (pk_obj, 630, DateTimePKData, datetime.datetime(2006,6,16,10,42,37)),
(pk_obj, 640, EmailPKData, "hovercraft@example.com"),
# (pk_obj, 650, FilePKData, 'file:///foo/bar/whiz.txt'),
(pk_obj, 660, FilePathPKData, "/foo/bar/whiz.txt"),
(pk_obj, 670, DecimalPKData, decimal.Decimal('12.345')),
(pk_obj, 671, DecimalPKData, decimal.Decimal('-12.345')),
(pk_obj, 672, DecimalPKData, decimal.Decimal('0.0')),
(pk_obj, 673, FloatPKData, 12.345),
(pk_obj, 674, FloatPKData, -12.345),
(pk_obj, 675, FloatPKData, 0.0),
(pk_obj, 680, IntegerPKData, 123456789),
(pk_obj, 681, IntegerPKData, -123456789),
(pk_obj, 682, IntegerPKData, 0),
# (XX, ImagePKData
(pk_obj, 690, IPAddressPKData, "127.0.0.1"),
(pk_obj, 695, GenericIPAddressPKData, "fe80:1424:2223:6cff:fe8a:2e8a:2151:abcd"),
# (pk_obj, 700, NullBooleanPKData, True),
# (pk_obj, 701, NullBooleanPKData, False),
(pk_obj, 720, PositiveIntegerPKData, 123456789),
(pk_obj, 730, PositiveSmallIntegerPKData, 12),
(pk_obj, 740, SlugPKData, "this-is-a-slug"),
(pk_obj, 750, SmallPKData, 12),
(pk_obj, 751, SmallPKData, -12),
(pk_obj, 752, SmallPKData, 0),
# (pk_obj, 760, TextPKData, """This is a long piece of text.
# It contains line breaks.
# Several of them.
# The end."""),
# (pk_obj, 770, TimePKData, datetime.time(10,42,37)),
# (pk_obj, 790, XMLPKData, "<foo></foo>"),
(data_obj, 800, AutoNowDateTimeData, datetime.datetime(2006,6,16,10,42,37)),
(data_obj, 810, ModifyingSaveData, 42),
(inherited_obj, 900, InheritAbstractModel, {'child_data':37,'parent_data':42}),
(inherited_obj, 910, ExplicitInheritBaseModel, {'child_data':37,'parent_data':42}),
(inherited_obj, 920, InheritBaseModel, {'child_data':37,'parent_data':42}),
(data_obj, 1000, BigIntegerData, 9223372036854775807),
(data_obj, 1001, BigIntegerData, -9223372036854775808),
(data_obj, 1002, BigIntegerData, 0),
(data_obj, 1003, BigIntegerData, None),
(data_obj, 1004, LengthModel, 0),
(data_obj, 1005, LengthModel, 1),
]
natural_key_test_data = [
    (data_obj, 1100, NaturalKeyAnchor, "Natural Key Anchor"),
(fk_obj, 1101, FKDataNaturalKey, 1100),
(fk_obj, 1102, FKDataNaturalKey, None),
]
# Because Oracle treats the empty string as NULL, Oracle is expected to fail
# when field.empty_strings_allowed is True and the value is None; skip these
# tests.
if connection.features.interprets_empty_strings_as_nulls:
test_data = [data for data in test_data
if not (data[0] == data_obj and
data[2]._meta.get_field('data').empty_strings_allowed and
data[3] is None)]
# Regression test for #8651 -- a FK to an object with PK of 0
# This won't work on MySQL since it won't let you create an object
# with a primary key of 0.
if connection.features.allows_primary_key_0:
test_data.extend([
(data_obj, 0, Anchor, "Anchor 0"),
(fk_obj, 465, FKData, 0),
])
# Dynamically create serializer tests to ensure that all
# registered serializers are automatically tested.
class SerializerTests(TestCase):
def test_get_unknown_serializer(self):
"""
#15889: get_serializer('nonsense') raises a SerializerDoesNotExist
"""
with self.assertRaises(SerializerDoesNotExist):
serializers.get_serializer("nonsense")
with self.assertRaises(KeyError):
serializers.get_serializer("nonsense")
# SerializerDoesNotExist is instantiated with the nonexistent format
with self.assertRaises(SerializerDoesNotExist) as cm:
serializers.get_serializer("nonsense")
self.assertEqual(cm.exception.args, ("nonsense",))
    def test_unregister_unknown_serializer(self):
with self.assertRaises(SerializerDoesNotExist):
serializers.unregister_serializer("nonsense")
    def test_get_unknown_deserializer(self):
with self.assertRaises(SerializerDoesNotExist):
serializers.get_deserializer("nonsense")
def test_json_deserializer_exception(self):
with self.assertRaises(DeserializationError):
for obj in serializers.deserialize("json", """[{"pk":1}"""):
pass
@skipUnless(yaml, "PyYAML not installed")
def test_yaml_deserializer_exception(self):
with self.assertRaises(DeserializationError):
for obj in serializers.deserialize("yaml", "{"):
pass
def test_serialize_proxy_model(self):
BaseModel.objects.create(parent_data=1)
base_objects = BaseModel.objects.all()
proxy_objects = ProxyBaseModel.objects.all()
proxy_proxy_objects = ProxyProxyBaseModel.objects.all()
base_data = serializers.serialize("json", base_objects)
proxy_data = serializers.serialize("json", proxy_objects)
proxy_proxy_data = serializers.serialize("json", proxy_proxy_objects)
self.assertEqual(base_data, proxy_data.replace('proxy', ''))
self.assertEqual(base_data, proxy_proxy_data.replace('proxy', ''))
def serializerTest(format, self):
# Create all the objects defined in the test data
objects = []
instance_count = {}
for (func, pk, klass, datum) in test_data:
with connection.constraint_checks_disabled():
objects.extend(func[0](pk, klass, datum))
    # Get a count of the number of objects created for each class
    for klass in set(item[2] for item in test_data):
        instance_count[klass] = klass.objects.count()
# Add the generic tagged objects to the object list
objects.extend(Tag.objects.all())
# Serialize the test database
serialized_data = serializers.serialize(format, objects, indent=2)
for obj in serializers.deserialize(format, serialized_data):
obj.save()
# Assert that the deserialized data is the same
# as the original source
for (func, pk, klass, datum) in test_data:
func[1](self, pk, klass, datum)
# Assert that the number of objects deserialized is the
# same as the number that was serialized.
for klass, count in instance_count.items():
self.assertEqual(count, klass.objects.count())
if connection.vendor == 'mysql' and six.PY3:
# Existing MySQL DB-API drivers fail on binary data.
serializerTest = expectedFailure(serializerTest)
def naturalKeySerializerTest(format, self):
# Create all the objects defined in the test data
objects = []
instance_count = {}
for (func, pk, klass, datum) in natural_key_test_data:
with connection.constraint_checks_disabled():
objects.extend(func[0](pk, klass, datum))
    # Get a count of the number of objects created for each class
    for klass in set(item[2] for item in natural_key_test_data):
        instance_count[klass] = klass.objects.count()
# Serialize the test database
serialized_data = serializers.serialize(format, objects, indent=2,
use_natural_keys=True)
for obj in serializers.deserialize(format, serialized_data):
obj.save()
# Assert that the deserialized data is the same
# as the original source
for (func, pk, klass, datum) in natural_key_test_data:
func[1](self, pk, klass, datum)
# Assert that the number of objects deserialized is the
# same as the number that was serialized.
for klass, count in instance_count.items():
self.assertEqual(count, klass.objects.count())
def fieldsTest(format, self):
obj = ComplexModel(field1='first', field2='second', field3='third')
obj.save_base(raw=True)
# Serialize then deserialize the test database
serialized_data = serializers.serialize(format, [obj], indent=2, fields=('field1','field3'))
result = next(serializers.deserialize(format, serialized_data))
# Check that the deserialized object contains data in only the serialized fields.
self.assertEqual(result.object.field1, 'first')
self.assertEqual(result.object.field2, '')
self.assertEqual(result.object.field3, 'third')
def streamTest(format, self):
obj = ComplexModel(field1='first',field2='second',field3='third')
obj.save_base(raw=True)
# Serialize the test database to a stream
for stream in (six.StringIO(), HttpResponse()):
serializers.serialize(format, [obj], indent=2, stream=stream)
# Serialize normally for a comparison
string_data = serializers.serialize(format, [obj], indent=2)
# Check that the two are the same
if isinstance(stream, six.StringIO):
self.assertEqual(string_data, stream.getvalue())
else:
self.assertEqual(string_data, stream.content.decode('utf-8'))
for format in serializers.get_serializer_formats():
setattr(SerializerTests, 'test_' + format + '_serializer', curry(serializerTest, format))
setattr(SerializerTests, 'test_' + format + '_natural_key_serializer', curry(naturalKeySerializerTest, format))
setattr(SerializerTests, 'test_' + format + '_serializer_fields', curry(fieldsTest, format))
if format != 'python':
setattr(SerializerTests, 'test_' + format + '_serializer_stream', curry(streamTest, format))
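# Illustrative note (editor's addition): with Django's built-in backends this
# loop generates methods such as SerializerTests.test_json_serializer,
# test_xml_serializer_fields and test_yaml_serializer_stream; for the 'json'
# format the first setattr above is equivalent to:
#
#     SerializerTests.test_json_serializer = curry(serializerTest, 'json')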
class XmlDeserializerSecurityTests(TestCase):
def test_no_dtd(self):
"""
The XML deserializer shouldn't allow a DTD.
This is the most straightforward way to prevent all entity definitions
and avoid both external entities and entity-expansion attacks.
"""
xml = '<?xml version="1.0" standalone="no"?><!DOCTYPE example SYSTEM "http://example.com/example.dtd">'
with self.assertRaises(DTDForbidden):
next(serializers.deserialize('xml', xml))
|
bsd-3-clause
| -7,877,821,534,166,548,000
| 38.826007
| 115
| 0.668935
| false
|
Kleptobismol/scikit-bio
|
skbio/stats/distance/_base.py
|
1
|
41154
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from six import StringIO, string_types
import csv
import warnings
from copy import deepcopy
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from IPython.core.pylabtools import print_figure
from IPython.core.display import Image, SVG
import numpy as np
import pandas as pd
from scipy.spatial.distance import squareform
from skbio._base import SkbioObject
from skbio.stats import p_value_to_str
from skbio.stats._misc import _pprint_strs
class DissimilarityMatrixError(Exception):
"""General error for dissimilarity matrix validation failures."""
pass
class DistanceMatrixError(DissimilarityMatrixError):
"""General error for distance matrix validation failures."""
pass
class MissingIDError(DissimilarityMatrixError):
"""Error for ID lookup that doesn't exist in the dissimilarity matrix."""
def __init__(self, missing_id):
super(MissingIDError, self).__init__()
self.args = ("The ID '%s' is not in the dissimilarity matrix." %
missing_id,)
class DissimilarityMatrix(SkbioObject):
"""Store dissimilarities between objects.
A `DissimilarityMatrix` instance stores a square, hollow, two-dimensional
matrix of dissimilarities between objects. Objects could be, for example,
samples or DNA sequences. A sequence of IDs accompanies the
dissimilarities.
Methods are provided to load and save dissimilarity matrices from/to disk,
as well as perform common operations such as extracting dissimilarities
based on object ID.
Parameters
----------
data : array_like or DissimilarityMatrix
Square, hollow, two-dimensional ``numpy.ndarray`` of dissimilarities
(floats), or a structure that can be converted to a ``numpy.ndarray``
using ``numpy.asarray``. Can instead be a `DissimilarityMatrix` (or
subclass) instance, in which case the instance's data will be used.
Data will be converted to a float ``dtype`` if necessary. A copy will
*not* be made if already a ``numpy.ndarray`` with a float ``dtype``.
ids : sequence of str, optional
Sequence of strings to be used as object IDs. Must match the number of
rows/cols in `data`. If ``None`` (the default), IDs will be
monotonically-increasing integers cast as strings, with numbering
starting from zero, e.g., ``('0', '1', '2', '3', ...)``.
Attributes
----------
data
ids
dtype
shape
size
T
png
svg
See Also
--------
DistanceMatrix
Notes
-----
The dissimilarities are stored in redundant (square-form) format [1]_.
The data are not checked for symmetry, nor guaranteed/assumed to be
symmetric.
References
----------
.. [1] http://docs.scipy.org/doc/scipy/reference/spatial.distance.html
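    Examples
    --------
    A minimal sketch (editor's addition; the values and IDs are arbitrary)::

        dm = DissimilarityMatrix([[0.0, 1.0],
                                  [2.0, 0.0]], ['a', 'b'])
        dm['a']       # row of dissimilarities for object 'a'
        dm['a', 'b']  # 1.0
        dm['b', 'a']  # 2.0 -- symmetry is not assumed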
"""
default_write_format = 'lsmat'
# Used in __str__
_matrix_element_name = 'dissimilarity'
@classmethod
def from_file(cls, lsmat_f, delimiter='\t'):
"""Load dissimilarity matrix from delimited text file.
.. note:: Deprecated in scikit-bio 0.2.0-dev
``from_file`` will be removed in scikit-bio 0.3.0. It is replaced by
``read``, which is a more general method for deserializing
dissimilarity/distance matrices. ``read`` supports multiple file
formats, automatic file format detection, etc. by taking advantage
of scikit-bio's I/O registry system. See :mod:`skbio.io` for more
details.
Creates a ``DissimilarityMatrix`` (or subclass) instance from a
``lsmat`` formatted file. See :mod:`skbio.io.lsmat` for the format
specification.
Parameters
----------
lsmat_f: filepath or filehandle
File to read from.
delimiter : str, optional
String delimiting elements in `lsmat_f`.
Returns
-------
DissimilarityMatrix
Instance of type `cls` containing the parsed contents of `lsmat_f`.
See Also
--------
read
"""
warnings.warn(
"DissimilarityMatrix.from_file and DistanceMatrix.from_file are "
"deprecated and will be removed in scikit-bio 0.3.0. Please "
"update your code to use DissimilarityMatrix.read and "
"DistanceMatrix.read.", DeprecationWarning)
return cls.read(lsmat_f, format='lsmat', delimiter=delimiter)
def to_file(self, out_f, delimiter='\t'):
"""Save dissimilarity matrix to file as delimited text.
.. note:: Deprecated in scikit-bio 0.2.0-dev
``to_file`` will be removed in scikit-bio 0.3.0. It is replaced by
``write``, which is a more general method for serializing
dissimilarity/distance matrices. ``write`` supports multiple file
formats by taking advantage of scikit-bio's I/O registry system.
See :mod:`skbio.io` for more details.
Serializes dissimilarity matrix as a ``lsmat`` formatted file. See
:mod:`skbio.io.lsmat` for the format specification.
Parameters
----------
out_f : filepath or filehandle
File to write to.
delimiter : str, optional
Delimiter used to separate elements in output format.
See Also
--------
write
"""
warnings.warn(
"DissimilarityMatrix.to_file and DistanceMatrix.to_file are "
"deprecated and will be removed in scikit-bio 0.3.0. Please "
"update your code to use DissimilarityMatrix.write and "
"DistanceMatrix.write.", DeprecationWarning)
self.write(out_f, format='lsmat', delimiter=delimiter)
def __init__(self, data, ids=None):
if isinstance(data, DissimilarityMatrix):
data = data.data
data = np.asarray(data, dtype='float')
if ids is None:
ids = (str(i) for i in range(data.shape[0]))
ids = tuple(ids)
self._validate(data, ids)
self._data = data
self._ids = ids
self._id_index = self._index_list(self._ids)
@property
def data(self):
"""Array of dissimilarities.
A square, hollow, two-dimensional ``numpy.ndarray`` of dissimilarities
(floats). A copy is *not* returned.
Notes
-----
This property is not writeable.
"""
return self._data
@property
def ids(self):
"""Tuple of object IDs.
A tuple of strings, one for each object in the dissimilarity matrix.
Notes
-----
This property is writeable, but the number of new IDs must match the
number of objects in `data`.
"""
return self._ids
@ids.setter
def ids(self, ids_):
ids_ = tuple(ids_)
self._validate(self.data, ids_)
self._ids = ids_
self._id_index = self._index_list(self._ids)
@property
def dtype(self):
"""Data type of the dissimilarities."""
return self.data.dtype
@property
def shape(self):
"""Two-element tuple containing the dissimilarity matrix dimensions.
Notes
-----
As the dissimilarity matrix is guaranteed to be square, both tuple
entries will always be equal.
"""
return self.data.shape
@property
def size(self):
"""Total number of elements in the dissimilarity matrix.
Notes
-----
Equivalent to ``self.shape[0] * self.shape[1]``.
"""
return self.data.size
@property
def T(self):
"""Transpose of the dissimilarity matrix.
See Also
--------
transpose
"""
return self.transpose()
def transpose(self):
"""Return the transpose of the dissimilarity matrix.
Notes
-----
A deep copy is returned.
Returns
-------
DissimilarityMatrix
Transpose of the dissimilarity matrix. Will be the same type as
`self`.
"""
return self.__class__(self.data.T.copy(), deepcopy(self.ids))
def index(self, lookup_id):
"""Return the index of the specified ID.
Parameters
----------
lookup_id : str
ID whose index will be returned.
Returns
-------
int
Row/column index of `lookup_id`.
Raises
------
MissingIDError
If `lookup_id` is not in the dissimilarity matrix.
"""
if lookup_id in self:
return self._id_index[lookup_id]
else:
raise MissingIDError(lookup_id)
def redundant_form(self):
"""Return an array of dissimilarities in redundant format.
As this is the native format that the dissimilarities are stored in,
this is simply an alias for `data`.
Returns
-------
ndarray
Two-dimensional ``numpy.ndarray`` of dissimilarities in redundant
format.
Notes
-----
Redundant format is described in [1]_.
Does *not* return a copy of the data.
References
----------
.. [1] http://docs.scipy.org/doc/scipy/reference/spatial.distance.html
"""
return self.data
def copy(self):
"""Return a deep copy of the dissimilarity matrix.
Returns
-------
DissimilarityMatrix
Deep copy of the dissimilarity matrix. Will be the same type as
`self`.
"""
# We deepcopy IDs in case the tuple contains mutable objects at some
# point in the future.
return self.__class__(self.data.copy(), deepcopy(self.ids))
def filter(self, ids, strict=True):
"""Filter the dissimilarity matrix by IDs.
Parameters
----------
ids : iterable of str
IDs to retain. May not contain duplicates or be empty. Each ID must
be present in the dissimilarity matrix.
strict : bool, optional
If `strict` is ``True`` and an ID that is not found in the distance
matrix is found in `ids`, a ``MissingIDError`` exception will be
raised, otherwise the ID will be ignored.
Returns
-------
DissimilarityMatrix
Filtered dissimilarity matrix containing only the IDs specified in
`ids`. IDs will be in the same order as they appear in `ids`.
Raises
------
MissingIDError
If an ID in `ids` is not in the object's list of IDs.
"""
if strict:
idxs = [self.index(id_) for id_ in ids]
else:
# get the indices to slice the inner numpy array
idxs = []
# save the IDs that were found in the distance matrix
found_ids = []
for id_ in ids:
try:
idxs.append(self.index(id_))
found_ids.append(id_)
except MissingIDError:
pass
ids = found_ids
filtered_data = self._data[idxs][:, idxs]
return self.__class__(filtered_data, ids)
def plot(self, cmap=None, title=""):
"""Creates a heatmap of the dissimilarity matrix
Parameters
----------
cmap: str or matplotlib.colors.Colormap, optional
Sets the color scheme of the heatmap
If ``None``, defaults to the colormap specified in the matplotlib
rc file.
title: str, optional
Sets the title label of the heatmap
(Default is blank)
Returns
-------
matplotlib.figure.Figure
Figure containing the heatmap and colorbar of the plotted
dissimilarity matrix.
Examples
--------
.. plot::
Define a dissimilarity matrix with five objects labeled A-E:
>>> from skbio.stats.distance import DissimilarityMatrix
>>> dm = DissimilarityMatrix([[0, 1, 2, 3, 4], [1, 0, 1, 2, 3],
... [2, 1, 0, 1, 2], [3, 2, 1, 0, 1],
... [4, 3, 2, 1, 0]],
... ['A', 'B', 'C', 'D', 'E'])
Plot the dissimilarity matrix as a heatmap:
>>> fig = dm.plot(cmap='Reds', title='Example heatmap')
"""
# based on http://stackoverflow.com/q/14391959/3776794
fig, ax = plt.subplots()
# use pcolormesh instead of pcolor for performance
heatmap = ax.pcolormesh(self.data, cmap=cmap)
fig.colorbar(heatmap)
# center labels within each cell
ticks = np.arange(0.5, self.shape[0])
ax.set_xticks(ticks, minor=False)
ax.set_yticks(ticks, minor=False)
# display data as it is stored in the dissimilarity matrix
# (default is to have y-axis inverted)
ax.invert_yaxis()
ax.set_xticklabels(self.ids, rotation=90, minor=False)
ax.set_yticklabels(self.ids, minor=False)
ax.set_title(title)
return fig
def _repr_png_(self):
return self._figure_data('png')
def _repr_svg_(self):
return self._figure_data('svg')
@property
def png(self):
"""Display heatmap in IPython Notebook as PNG.
"""
return Image(self._repr_png_(), embed=True)
@property
def svg(self):
"""Display heatmap in IPython Notebook as SVG.
"""
return SVG(self._repr_svg_())
def _figure_data(self, format):
fig = self.plot()
data = print_figure(fig, format)
# We MUST close the figure, otherwise IPython's display machinery
# will pick it up and send it as output, resulting in a double display
plt.close(fig)
return data
def __str__(self):
"""Return a string representation of the dissimilarity matrix.
Summary includes matrix dimensions, a (truncated) list of IDs, and
(truncated) array of dissimilarities.
Returns
-------
str
String representation of the dissimilarity matrix.
.. shownumpydoc
"""
return '%dx%d %s matrix\nIDs:\n%s\nData:\n' % (
self.shape[0], self.shape[1], self._matrix_element_name,
_pprint_strs(self.ids)) + str(self.data)
def __eq__(self, other):
"""Compare this dissimilarity matrix to another for equality.
Two dissimilarity matrices are equal if they have the same shape, IDs
(in the same order!), and have data arrays that are equal.
Checks are *not* performed to ensure that `other` is a
`DissimilarityMatrix` instance.
Parameters
----------
other : DissimilarityMatrix
Dissimilarity matrix to compare to for equality.
Returns
-------
bool
``True`` if `self` is equal to `other`, ``False`` otherwise.
.. shownumpydoc
"""
equal = True
# The order these checks are performed in is important to be as
# efficient as possible. The check for shape equality is not strictly
# necessary as it should be taken care of in np.array_equal, but I'd
# rather explicitly bail before comparing IDs or data. Use array_equal
# instead of (a == b).all() because of this issue:
# http://stackoverflow.com/a/10582030
try:
if self.shape != other.shape:
equal = False
elif self.ids != other.ids:
equal = False
elif not np.array_equal(self.data, other.data):
equal = False
except AttributeError:
equal = False
return equal
def __ne__(self, other):
"""Determine whether two dissimilarity matrices are not equal.
Parameters
----------
other : DissimilarityMatrix
Dissimilarity matrix to compare to.
Returns
-------
bool
``True`` if `self` is not equal to `other`, ``False`` otherwise.
See Also
--------
__eq__
.. shownumpydoc
"""
return not self == other
def __contains__(self, lookup_id):
"""Check if the specified ID is in the dissimilarity matrix.
Parameters
----------
lookup_id : str
ID to search for.
Returns
-------
bool
``True`` if `lookup_id` is in the dissimilarity matrix, ``False``
otherwise.
See Also
--------
index
.. shownumpydoc
"""
return lookup_id in self._id_index
def __getitem__(self, index):
"""Slice into dissimilarity data by object ID or numpy indexing.
Extracts data from the dissimilarity matrix by object ID, a pair of
IDs, or numpy indexing/slicing.
Parameters
----------
index : str, two-tuple of str, or numpy index
`index` can be one of the following forms: an ID, a pair of IDs, or
a numpy index.
If `index` is a string, it is assumed to be an ID and a
``numpy.ndarray`` row vector is returned for the corresponding ID.
Note that the ID's row of dissimilarities is returned, *not* its
column. If the matrix is symmetric, the two will be identical, but
this makes a difference if the matrix is asymmetric.
If `index` is a two-tuple of strings, each string is assumed to be
an ID and the corresponding matrix element is returned that
represents the dissimilarity between the two IDs. Note that the
order of lookup by ID pair matters if the matrix is asymmetric: the
first ID will be used to look up the row, and the second ID will be
used to look up the column. Thus, ``dm['a', 'b']`` may not be the
same as ``dm['b', 'a']`` if the matrix is asymmetric.
Otherwise, `index` will be passed through to
``DissimilarityMatrix.data.__getitem__``, allowing for standard
indexing of a ``numpy.ndarray`` (e.g., slicing).
Returns
-------
ndarray or scalar
Indexed data, where return type depends on the form of `index` (see
description of `index` for more details).
Raises
------
MissingIDError
If the ID(s) specified in `index` are not in the dissimilarity
matrix.
Notes
-----
The lookup based on ID(s) is quick.
.. shownumpydoc
"""
if isinstance(index, string_types):
return self.data[self.index(index)]
elif self._is_id_pair(index):
return self.data[self.index(index[0]), self.index(index[1])]
else:
return self.data.__getitem__(index)
def _validate(self, data, ids):
"""Validate the data array and IDs.
Checks that the data is at least 1x1 in size, 2D, square, hollow, and
contains only floats. Also checks that IDs are unique and that the
number of IDs matches the number of rows/cols in the data array.
Subclasses can override this method to perform different/more specific
validation (e.g., see `DistanceMatrix`).
Notes
-----
Accepts arguments instead of inspecting instance attributes to avoid
creating an invalid dissimilarity matrix before raising an error.
Otherwise, the invalid dissimilarity matrix could be used after the
exception is caught and handled.
"""
num_ids = len(ids)
if 0 in data.shape:
raise DissimilarityMatrixError("Data must be at least 1x1 in "
"size.")
elif len(data.shape) != 2:
raise DissimilarityMatrixError("Data must have exactly two "
"dimensions.")
elif data.shape[0] != data.shape[1]:
raise DissimilarityMatrixError("Data must be square (i.e., have "
"the same number of rows and "
"columns).")
elif data.dtype != np.double:
raise DissimilarityMatrixError("Data must contain only floating "
"point values.")
elif np.trace(data) != 0:
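            # a zero trace is a cheap proxy for hollowness: strictly speaking
            # it only guarantees that the diagonal sums to zero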
raise DissimilarityMatrixError("Data must be hollow (i.e., the "
"diagonal can only contain zeros).")
elif num_ids != len(set(ids)):
raise DissimilarityMatrixError("IDs must be unique.")
elif num_ids != data.shape[0]:
raise DissimilarityMatrixError("The number of IDs must match the "
"number of rows/columns in the "
"data.")
def _index_list(self, list_):
return {id_: idx for idx, id_ in enumerate(list_)}
def _is_id_pair(self, index):
return (isinstance(index, tuple) and
len(index) == 2 and
all(map(lambda e: isinstance(e, string_types), index)))
class DistanceMatrix(DissimilarityMatrix):
"""Store distances between objects.
A `DistanceMatrix` is a `DissimilarityMatrix` with the additional
    requirement that the matrix data is symmetric. Additional methods that
    take advantage of this symmetry are provided.
See Also
--------
DissimilarityMatrix
Notes
-----
The distances are stored in redundant (square-form) format [1]_. To
facilitate use with other scientific Python routines (e.g., scipy), the
distances can be retrieved in condensed (vector-form) format using
`condensed_form`.
`DistanceMatrix` only requires that the distances it stores are symmetric.
Checks are *not* performed to ensure the other three metric properties
hold (non-negativity, identity of indiscernibles, and triangle inequality)
[2]_. Thus, a `DistanceMatrix` instance can store distances that are not
metric.
References
----------
.. [1] http://docs.scipy.org/doc/scipy/reference/spatial.distance.html
.. [2] http://planetmath.org/metricspace
"""
# Override here, used in superclass __str__
_matrix_element_name = 'distance'
def condensed_form(self):
"""Return an array of distances in condensed format.
Returns
-------
ndarray
One-dimensional ``numpy.ndarray`` of distances in condensed format.
Notes
-----
Condensed format is described in [1]_.
The conversion is not a constant-time operation, though it should be
relatively quick to perform.
References
----------
.. [1] http://docs.scipy.org/doc/scipy/reference/spatial.distance.html
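        Examples
        --------
        Sketch (editor's addition)::

            dm = DistanceMatrix([[0, 1, 2],
                                 [1, 0, 3],
                                 [2, 3, 0]])
            dm.condensed_form()  # array([ 1.,  2.,  3.])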
"""
return squareform(self._data, force='tovector', checks=False)
def permute(self, condensed=False):
"""Randomly permute both rows and columns in the matrix.
Randomly permutes the ordering of rows and columns in the matrix. The
same permutation is applied to both rows and columns in order to
maintain symmetry and hollowness. Only the rows/columns in the distance
matrix are permuted; the IDs are *not* permuted.
Parameters
----------
condensed : bool, optional
If ``True``, return the permuted distance matrix in condensed
format. Otherwise, return the permuted distance matrix as a new
``DistanceMatrix`` instance.
Returns
-------
DistanceMatrix or ndarray
Permuted distances as a new ``DistanceMatrix`` or as a ``ndarray``
in condensed format.
See Also
--------
condensed_form
Notes
-----
This method does not modify the distance matrix that it is called on.
It is more efficient to pass ``condensed=True`` than permuting the
distance matrix and then converting to condensed format.
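        Examples
        --------
        Sketch (editor's addition); the ordering varies from run to run::

            dm = DistanceMatrix([[0, 1, 2],
                                 [1, 0, 3],
                                 [2, 3, 0]])
            permuted = dm.permute()           # new DistanceMatrix
            vec = dm.permute(condensed=True)  # 1-D condensed ndarray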
"""
order = np.random.permutation(self.shape[0])
permuted = self._data[order][:, order]
if condensed:
return squareform(permuted, force='tovector', checks=False)
else:
return self.__class__(permuted, self.ids)
def _validate(self, data, ids):
"""Validate the data array and IDs.
Overrides the superclass `_validate`. Performs a check for symmetry in
addition to the checks performed in the superclass.
"""
super(DistanceMatrix, self)._validate(data, ids)
if (data.T != data).any():
raise DistanceMatrixError("Data must be symmetric.")
def randdm(num_objects, ids=None, constructor=None, random_fn=None):
"""Generate a distance matrix populated with random distances.
Using the default `random_fn`, distances are randomly drawn from a uniform
distribution over ``[0, 1)``.
Regardless of `random_fn`, the resulting distance matrix is guaranteed to
be symmetric and hollow.
Parameters
----------
num_objects : int
The number of objects in the resulting distance matrix. For example, if
`num_objects` is 3, a 3x3 distance matrix will be returned.
ids : sequence of str or None, optional
A sequence of strings to be used as IDs. ``len(ids)`` must be equal to
`num_objects`. If not provided, IDs will be monotonically-increasing
integers cast as strings (numbering starts at 1). For example,
``('1', '2', '3')``.
constructor : type, optional
`DissimilarityMatrix` or subclass constructor to use when creating the
random distance matrix. The returned distance matrix will be of this
type. If ``None`` (the default), a `DistanceMatrix` instance will be
returned.
random_fn : function, optional
Function to generate random values. `random_fn` must accept two
arguments (number of rows and number of columns) and return a 2D
``numpy.ndarray`` of floats (or something that can be cast to float).
If ``None`` (the default), ``numpy.random.rand`` will be used.
Returns
-------
DissimilarityMatrix
`DissimilarityMatrix` (or subclass) instance of random distances. Type
depends on `constructor`.
See Also
--------
numpy.random.rand
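    Examples
    --------
    Sketch (editor's addition) of supplying a custom `random_fn`; any
    callable matching the (rows, cols) -> 2D array contract above works::

        import numpy as np
        dm = randdm(3,
                    random_fn=lambda r, c: np.random.randint(10, size=(r, c)))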
"""
if constructor is None:
constructor = DistanceMatrix
if random_fn is None:
random_fn = np.random.rand
data = np.tril(random_fn(num_objects, num_objects), -1)
data += data.T
if not ids:
ids = map(str, range(1, num_objects + 1))
return constructor(data, ids)
# helper functions for anosim and permanova
def _preprocess_input(distance_matrix, grouping, column):
"""Compute intermediate results not affected by permutations.
These intermediate results can be computed a single time for efficiency,
regardless of grouping vector permutations (i.e., when calculating the
p-value). These intermediate results are used by both ANOSIM and PERMANOVA.
Also validates and normalizes input (e.g., converting ``DataFrame`` column
into grouping vector).
"""
if not isinstance(distance_matrix, DistanceMatrix):
raise TypeError("Input must be a DistanceMatrix.")
if isinstance(grouping, pd.DataFrame):
if column is None:
raise ValueError(
"Must provide a column name if supplying a DataFrame.")
else:
grouping = _df_to_vector(distance_matrix, grouping, column)
elif column is not None:
raise ValueError(
"Must provide a DataFrame if supplying a column name.")
sample_size = distance_matrix.shape[0]
if len(grouping) != sample_size:
raise ValueError(
"Grouping vector size must match the number of IDs in the "
"distance matrix.")
# Find the group labels and convert grouping to an integer vector
# (factor).
groups, grouping = np.unique(grouping, return_inverse=True)
num_groups = len(groups)
if num_groups == len(grouping):
raise ValueError(
"All values in the grouping vector are unique. This method cannot "
"operate on a grouping vector with only unique values (e.g., "
"there are no 'within' distances because each group of objects "
"contains only a single object).")
if num_groups == 1:
raise ValueError(
"All values in the grouping vector are the same. This method "
"cannot operate on a grouping vector with only a single group of "
"objects (e.g., there are no 'between' distances because there is "
"only a single group).")
tri_idxs = np.triu_indices(sample_size, k=1)
distances = distance_matrix.condensed_form()
return sample_size, num_groups, grouping, tri_idxs, distances
def _df_to_vector(distance_matrix, df, column):
"""Return a grouping vector from a ``DataFrame`` column.
Parameters
----------
    distance_matrix : DistanceMatrix
Distance matrix whose IDs will be mapped to group labels.
df : pandas.DataFrame
``DataFrame`` (indexed by distance matrix ID).
column : str
Column name in `df` containing group labels.
Returns
-------
list
Grouping vector (vector of labels) based on the IDs in
`distance_matrix`. Each ID's label is looked up in the ``DataFrame``
under the column specified by `column`.
Raises
------
ValueError
If `column` is not in the ``DataFrame``, or a distance matrix ID is
not in the ``DataFrame``.
"""
if column not in df:
raise ValueError("Column '%s' not in DataFrame." % column)
grouping = df.loc[distance_matrix.ids, column]
if grouping.isnull().any():
raise ValueError(
"One or more IDs in the distance matrix are not in the data "
"frame.")
return grouping.tolist()
def _run_monte_carlo_stats(test_stat_function, grouping, permutations):
"""Run stat test and compute significance with Monte Carlo permutations."""
if permutations < 0:
raise ValueError(
"Number of permutations must be greater than or equal to zero.")
stat = test_stat_function(grouping)
p_value = np.nan
if permutations > 0:
perm_stats = np.empty(permutations, dtype=np.float64)
for i in range(permutations):
perm_grouping = np.random.permutation(grouping)
perm_stats[i] = test_stat_function(perm_grouping)
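        # the +1 in numerator and denominator is the standard correction
        # that keeps a Monte Carlo p-value from being exactly zero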
p_value = ((perm_stats >= stat).sum() + 1) / (permutations + 1)
return stat, p_value
def _build_results(method_name, test_stat_name, sample_size, num_groups, stat,
p_value, permutations):
"""Return ``pandas.Series`` containing results of statistical test."""
return pd.Series(
data=[method_name, test_stat_name, sample_size, num_groups, stat,
p_value, permutations],
index=['method name', 'test statistic name', 'sample size',
'number of groups', 'test statistic', 'p-value',
'number of permutations'],
name='%s results' % method_name)
class CategoricalStats(object):
"""Base class for categorical statistical methods.
Categorical statistical methods generally test for significant differences
between discrete groups of objects, as determined by a categorical variable
(grouping vector).
See Also
--------
ANOSIM
PERMANOVA
"""
short_method_name = ''
long_method_name = ''
test_statistic_name = ''
def __init__(self, distance_matrix, grouping, column=None):
if not isinstance(distance_matrix, DistanceMatrix):
raise TypeError("Input must be a DistanceMatrix.")
if isinstance(grouping, pd.DataFrame):
if column is None:
raise ValueError("Must provide a column name if supplying a "
"data frame.")
else:
grouping = self._df_to_vector(distance_matrix, grouping,
column)
elif column is not None:
raise ValueError("Must provide a data frame if supplying a column "
"name.")
if len(grouping) != distance_matrix.shape[0]:
raise ValueError("Grouping vector size must match the number of "
"IDs in the distance matrix.")
# Find the group labels and convert grouping to an integer vector
# (factor).
groups, grouping = np.unique(grouping, return_inverse=True)
if len(groups) == len(grouping):
raise ValueError("All values in the grouping vector are unique. "
"This method cannot operate on a grouping vector "
"with only unique values (e.g., there are no "
"'within' distances because each group of "
"objects contains only a single object).")
if len(groups) == 1:
raise ValueError("All values in the grouping vector are the same. "
"This method cannot operate on a grouping vector "
"with only a single group of objects (e.g., "
"there are no 'between' distances because there "
"is only a single group).")
self._dm = distance_matrix
self._grouping = grouping
self._groups = groups
self._tri_idxs = np.triu_indices(self._dm.shape[0], k=1)
def _df_to_vector(self, distance_matrix, df, column):
"""Return a grouping vector from a data frame column.
Parameters
----------
        distance_matrix : DistanceMatrix
Distance matrix whose IDs will be mapped to group labels.
df : pandas.DataFrame
``DataFrame`` (indexed by distance matrix ID).
column : str
Column name in `df` containing group labels.
Returns
-------
list
Grouping vector (vector of labels) based on the IDs in
`distance_matrix`. Each ID's label is looked up in the data frame
under the column specified by `column`.
Raises
------
ValueError
If `column` is not in the data frame, or a distance matrix ID is
not in the data frame.
"""
if column not in df:
raise ValueError("Column '%s' not in data frame." % column)
grouping = df.loc[distance_matrix.ids, column]
if grouping.isnull().any():
raise ValueError("One or more IDs in the distance matrix are not "
"in the data frame.")
return grouping.tolist()
def __call__(self, permutations=999):
"""Execute the statistical method.
Parameters
----------
permutations : int, optional
Number of permutations to use when calculating statistical
significance. Must be >= 0. If 0, the resulting p-value will be
``None``.
Returns
-------
CategoricalStatsResults
Results of the method, including test statistic and p-value.
.. shownumpydoc
"""
if permutations < 0:
raise ValueError("Number of permutations must be greater than or "
"equal to zero.")
stat = self._run(self._grouping)
p_value = None
if permutations > 0:
perm_stats = np.empty(permutations, dtype=np.float64)
for i in range(permutations):
perm_grouping = np.random.permutation(self._grouping)
perm_stats[i] = self._run(perm_grouping)
p_value = ((perm_stats >= stat).sum() + 1) / (permutations + 1)
return CategoricalStatsResults(self.short_method_name,
self.long_method_name,
self.test_statistic_name,
self._dm.shape[0], self._groups, stat,
p_value, permutations)
def _run(self, grouping):
raise NotImplementedError("Subclasses must implement _run().")
class CategoricalStatsResults(object):
"""Statistical method results container.
.. note:: Deprecated in scikit-bio 0.2.1-dev
``CategoricalStatsResults`` will be removed in scikit-bio 0.3.0. It is
replaced by ``pandas.Series`` for storing statistical method results.
Please update your code to use ``skbio.stats.distance.anosim`` or
``skbio.stats.distance.permanova``, which will return a
``pandas.Series``.
Stores the results of running a `CategoricalStats` method a single time,
and provides a way to format the results.
Attributes
----------
short_method_name
long_method_name
test_statistic_name
sample_size
groups
statistic
p_value
permutations
Notes
-----
Users will generally not directly instantiate objects of this class. The
various categorical statistical methods will return an object of this type
when they are run.
"""
def __init__(self, short_method_name, long_method_name,
test_statistic_name, sample_size, groups, statistic, p_value,
permutations):
warnings.warn(
"skbio.stats.distance.CategoricalStatsResults is deprecated and "
"will be removed in scikit-bio 0.3.0. Please update your code to "
"use either skbio.stats.distance.anosim or "
"skbio.stats.distance.permanova, which will return a "
"pandas.Series object.", DeprecationWarning)
self.short_method_name = short_method_name
self.long_method_name = long_method_name
self.test_statistic_name = test_statistic_name
self.sample_size = sample_size
self.groups = groups
self.statistic = statistic
self.p_value = p_value
self.permutations = permutations
def __str__(self):
"""Return pretty-print (fixed width) string."""
rows = (self._format_header(), self._format_data())
max_widths = []
for col_idx in range(len(rows[0])):
max_widths.append(max(map(lambda e: len(e[col_idx]), rows)))
results = []
for row in rows:
padded_row = []
for col_idx, val in enumerate(row):
padded_row.append(val.rjust(max_widths[col_idx]))
results.append(' '.join(padded_row))
return '\n'.join(results) + '\n'
def _repr_html_(self):
"""Return a string containing an HTML table of results.
This method will be called within the IPython Notebook instead of
__repr__ to display results.
"""
header = self._format_header()
data = self._format_data()
return pd.DataFrame([data[1:]], columns=header[1:],
index=[data[0]])._repr_html_()
def summary(self, delimiter='\t'):
"""Return a formatted summary of results as a string.
The string is formatted as delimited text.
Parameters
----------
delimiter : str, optional
String to delimit fields by in formatted output. Default is tab
(TSV).
Returns
-------
str
Delimited-text summary of results.
"""
summary = StringIO()
csv_writer = csv.writer(summary, delimiter=delimiter,
lineterminator='\n')
csv_writer.writerow(self._format_header())
csv_writer.writerow(self._format_data())
return summary.getvalue()
def _format_header(self):
return ('Method name', 'Sample size', 'Number of groups',
self.test_statistic_name, 'p-value', 'Number of permutations')
def _format_data(self):
p_value_str = p_value_to_str(self.p_value, self.permutations)
return (self.short_method_name, '%d' % self.sample_size,
'%d' % len(self.groups), str(self.statistic), p_value_str,
'%d' % self.permutations)
|
bsd-3-clause
| -3,536,415,187,065,937,000
| 32.458537
| 79
| 0.5858
| false
|
Igglyboo/Project-Euler
|
1-99/30-39/Problem35.py
|
1
|
1086
|
from time import perf_counter as clock  # time.clock was removed in Python 3.8
def timer(function):
def wrapper(*args, **kwargs):
start = clock()
print(function(*args, **kwargs))
print("Solution took: %f seconds." % (clock() - start))
return wrapper
@timer
def find_answer():
total = 0
primes = sieve(1000000)
primes.remove(0)
for prime in primes:
p_str = list(str(prime))
p_str.append(p_str.pop(0))
for i in range(len(p_str) - 1):
            current = int(''.join(p_str))
if current not in primes:
break
p_str.append(p_str.pop(0))
else:
total += 1
return total
def sieve(upperlimit):
l = list(range(2, upperlimit + 1))
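    # value v is stored at index v - 2, hence the l[i - 2] bookkeeping below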
# Do p = 2 first so we can change step size to 2*p below
for i in range(4, upperlimit + 1, 2):
l[i - 2] = 0
for p in l:
if p ** 2 > upperlimit:
break
elif p:
for i in range(p * p, upperlimit + 1, 2 * p):
l[i - 2] = 0
return set(l)
if __name__ == "__main__":
find_answer()
|
unlicense
| 2,629,917,463,131,232,000
| 20.294118
| 63
| 0.503683
| false
|
dokterbob/django-shopkit
|
shopkit/core/utils/fields.py
|
1
|
2814
|
# Copyright (C) 2010-2011 Mathijs de Bruin <mathijs@mathijsfietst.nl>
#
# This file is part of django-shopkit.
#
# django-shopkit is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from decimal import Decimal
from django.db.models.fields import DecimalField
class MinMaxDecimalField(DecimalField):
"""
`DecimalField` subclass which allows specifying a minimum and maximum
value. Takes two extra optional parameters, to be specified as a Decimal
or string:
* `max_value`
* `min_value`
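
    Example (editor's illustrative sketch; the name and limits are
    arbitrary)::

        price = MinMaxDecimalField(max_digits=6, decimal_places=2,
                                   min_value='0.00', max_value='9999.99')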
"""
description = 'DecimalField subclass which allows specifying a minimum \
and maximum value.'
def __init__(self, **kwargs):
self.max_value = kwargs.pop('max_value', None)
self.min_value = kwargs.pop('min_value', None)
super(MinMaxDecimalField, self).__init__(**kwargs)
def formfield(self, **kwargs):
if not self.max_value is None:
kwargs['max_value'] = Decimal(self.max_value)
if not self.min_value is None:
kwargs['min_value'] = Decimal(self.min_value)
return super(MinMaxDecimalField, self).formfield(**kwargs)
class PercentageField(MinMaxDecimalField):
"""
Subclass of `DecimalField` with sensible defaults for percentage
discounts:
* `max_value=100`
* `min_value=0`
* `decimal_places=0`
* `max_digits=3`
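
    Example (editor's illustrative sketch)::

        discount = PercentageField()  # whole percents in the range 0-100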
"""
description = 'Subclass of DecimalField with sensible defaults for \
percentage discounts.'
def __init__(self, **kwargs):
kwargs['max_value'] = kwargs.get('max_value', Decimal('100'))
kwargs['min_value'] = kwargs.get('min_value', Decimal('0'))
kwargs['decimal_places'] = kwargs.get('decimal_places', 0)
kwargs['max_digits'] = kwargs.get('max_digits', 3)
super(PercentageField, self).__init__(**kwargs)
# If South is installed, add introspection rules
try:
from south.modelsinspector import add_introspection_rules
    add_introspection_rules([], [r"^shopkit\.core\.utils\.fields\.MinMaxDecimalField"])
    add_introspection_rules([], [r"^shopkit\.core\.utils\.fields\.PercentageField"])
except ImportError:
pass
|
agpl-3.0
| 908,442,149,540,644,500
| 32.105882
| 86
| 0.680526
| false
|
akkana/scripts
|
wpnet.py
|
1
|
17033
|
#!/usr/bin/env python3
# A wrapper script to make it easier to use wpa_cli to connect.
# https://wiki.archlinux.org/index.php/WPA_supplicant#Connecting_with_wpa_cli
# was very helpful.
#
# For extending this to eth0, browse /etc/dhcpcd.conf
# and /usr/share/dhcpcd/hooks/10-wpa_supplicant on raspbian,
# where dhcpcd is the master and is in charge of stopping
# and starting wpa_supplicant.
#
# Copyright 2018 by Akkana Peck: share and enjoy under the GPLv2 or later.
import subprocess
import os, sys
import argparse
import getpass
import urllib.request
import time
verbose = False
"""
To run this as a normal user, not under sudo:
edit /etc/wpa_supplicant/wpa_supplicant.conf
and add a line like:
ctrl_interface_group=adm
using whatever group you think should have network permissions.
Commands this script runs:
** Get the wireless interface:
iw dev
** Start the daemon:
wpa_supplicant -B -i $iface -c /etc/wpa_supplicant/wpa_supplicant.conf
** List known networks:
wpa_cli list_networks
** List available networks:
wpa_cli scan
wpa_cli scan_results
** Define a new SSID:
wpa_cli add_network
(prints new $ID. Then:)
NOT : wpa_cli set_network $ID
** Connect to a new open SSID:
wpa_cli set_network $ID ssid $SSID key_mgmt NONE
** Connect to a new WPA SSID:
wpa_cli set_network $ID ssid $SSID psk $PASSWORD
wpa_cli enable_network $ID
wpa_cli save_config
WORKED:
wpa_supplicant -B -i wlp2s0 -c /etc/wpa_supplicant/wpa_supplicant.conf
wpa_cli list_networks
wpa_cli scan
wpa_cli scan_results
wpa_cli add_network
wpa_cli set_network 1 (this gave an error, I think)
wpa_cli set_network 1 ssid '"LAC-Public Library"'
wpa_cli set_network 1 key_mgmt NONE
(idiot bash lost this command, probably enable?)
wpa_cli save_config
dhclient -v wlp2s0
"""
def run_as_root(cmdargs):
"""Run cmdargs inside sudo, unless we're already root.
return (stdout, stderr) as strings.
"""
if os.getuid() != 0:
cmdargs = ["sudo"] + cmdargs
if verbose:
print("\n** Run:", ' '.join(cmdargs))
proc = subprocess.Popen(cmdargs, shell=False,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# proc.communicate() returns bytes, so change them to strings:
return ( b.decode() for b in proc.communicate() )
def run_cmd(cmdargs):
"""Run and return (stdout, stderr) as strings.
"""
if verbose:
print("\n** Run:", ' '.join(cmdargs))
proc = subprocess.Popen(cmdargs, shell=False,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# proc.communicate() returns bytes, so change them to strings:
return ( b.decode() for b in proc.communicate() )
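# Illustrative call (assumes wpa_cli is installed and the daemon is up):
# out, err = run_cmd(["wpa_cli", "status"])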
def start_wpa_supplicant(iface):
# https://wiki.archlinux.org/index.php/WPA_supplicant
if is_wpa_running():
if verbose:
print("wpa_supplicant is already running")
return
args = ['sudo', 'wpa_supplicant', '-B', '-i', iface,
'-c', '/etc/wpa_supplicant/wpa_supplicant.conf']
if verbose:
print("Starting wpa_supplicant:", ' '.join(args), end='')
subprocess.call(args)
time.sleep(5)
def is_wpa_running():
pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
for pid in pids:
try:
args = open(os.path.join('/proc', pid, 'cmdline'),
'rb').read().decode().split('\0')
if args[0] == 'wpa_supplicant':
return True
except IOError: # proc has already terminated
continue
return False
def start_dhcp(iface):
if verbose:
print("Starting dhcp")
# Can't use run_cmd here because the output takes time
# and the user might want to see it, especially if it fails.
return subprocess.call(['sudo', 'dhclient', '-v', iface])
def get_available_accesspoints(iface):
aps = {}
start_wpa_supplicant(iface)
run_cmd(["wpa_cli", "scan"])
out, err = run_cmd(["wpa_cli", "scan_results"])
stdout_lines = out.split('\n')
for line in stdout_lines:
if not line or line.startswith('Selected') \
or line.startswith('bssid /'):
continue
words = line.strip().split(maxsplit=4)
# Get the ssid if it's not hidden, else use the MAC
if len(words) == 4:
ssid = '[%s]' % words[0]
else:
ssid = words[4]
aps[ssid] = { 'MAC': words[0],
'flags': words[3],
'signal': int(words[2]),
}
return aps
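# The returned dict maps SSID -> details, e.g. (hypothetical values):
# {'MyCafeWifi': {'MAC': 'f8:d1:11:23:c2:2f', 'flags': '[ESS]', 'signal': -64}}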
def get_current():
"""
<iridum>- sudo wpa_cli list_networks
Selected interface 'wlp2s0'
network id / ssid / bssid / flags
0 clink any
1 LAC-Public Library any [CURRENT]
2 CommunityLab any [DISABLED]
3 COAFreeWireless any
4 LAC-Public Library any
"""
start_wpa_supplicant(iface)
networks = {}
out, err = run_cmd(["wpa_cli", "list_networks"])
stdout_lines = out.split('\n')
for line in stdout_lines:
line = line.strip()
if line.endswith('[CURRENT]'):
words = line.split('\t')
return words[1]
return None
def get_known_networks():
start_wpa_supplicant(iface)
networks = {}
out, err = run_cmd(["wpa_cli", "list_networks"])
stdout_lines = out.split('\n')
for line in stdout_lines:
line = line.strip()
if not line:
continue
words = line.split('\t')
if words[0].isdigit():
networks[int(words[0])] = words[1]
return networks
def match_ssid(pat, ssids):
for net in ssids:
if pat in net:
return net
return None
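# e.g. match_ssid('Lib', ['clink', 'LAC-Public Library']) -> 'LAC-Public Library'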
def get_wireless_ifaces():
# For a list of all devices, ls /sys/class/net
ifaces = []
# Get a list of wireless interfaces.
# iwconfig lists wireless interfaces on stdout, wired and lo on stderr.
proc = subprocess.Popen(["iw", "dev"], shell=False,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_lines = proc.communicate()[0].decode().split('\n')
for line in stdout_lines:
line = line.strip()
if line.startswith('Interface'):
ifaces.append(line.split()[1])
# could get MAC and ssid if appropriate
return ifaces
def show_browser_if_redirect():
"""Try to fetch a test URL. If we're redirected to some other URL
(probably a stupid login page), pop up a browser.
"""
# Alas, there's no universal page everyone can use.
# So make one on your own website, or find a trusted page,
# and put that URL in ~/.config/netscheme/testurl
testurl = None
testurlfile = os.path.expanduser("~/.config/netscheme/testurl")
if not os.path.exists(testurlfile):
print("No test URL file; not testing connection")
return
with open(testurlfile) as tufile:
testurl = tufile.read().strip()
with open(testurlfile + ".out") as tufile:
content_from_file = tufile.read()
if not testurl:
if verbose:
print("No test URL set; not checking for redirects")
return
content_from_web = ''
print("Trying to fetch test URL", testurl)
try:
response = urllib.request.urlopen(testurl, timeout=100)
# Were we redirected? In theory response.geturl() will tell us that,
# but in practice, it doesn't, so we have to fetch the content
# of a page and compare it to the expected value.
content_from_web = response.read().decode('utf-8')
# Lots of ways this can fail.
# e.g. ValueError, "unknown url type"
# or BadStatusLine: ''
except Exception as e:
print("Couldn't fetch test URL %s: probably redirected." % testurl, e)
content_from_web = ''
if content_from_web == content_from_file:
print("Looks like we're really connected -- no redirect")
return
print("Couldn't make a test connection -- probably redirected.")
# Don't want to run a browser as root, so figure out if we're root
# and if so, de-escalate privilege.
# os.getuid(), os.geteuid() and psutil.uids() are all zero under sudo,
# but sudo helpfully sets an env variable we can use.
orig_uid = os.getenv("SUDO_UID")
if orig_uid:
print("De-escalating back to UID", orig_uid)
orig_uid = int(orig_uid)
os.setuid(orig_uid)
print("Calling quickbrowse", testurl)
try:
subprocess.call(["quickbrowse", testurl])
except Exception as e:
print("Problem starting a browser", e)
raise e
def show_available_networks():
accesspoints = get_available_accesspoints(iface)
aps = accesspoints.keys()
known_nets = get_known_networks()
# Print the ones we have saved already:
format = "%-20s %4s %7s %s"
print(format % ("SSID", "#", "Signal", "Encryption"))
print(format % ("----", "--", "------", "----------"))
known = []
for i in sorted(known_nets):
if known_nets[i] in aps:
print(format % (known_nets[i],
i,
accesspoints[known_nets[i]]['signal'],
accesspoints[known_nets[i]]['flags']))
known.append(known_nets[i])
'''
Sample flags:
SSID Signal # Encryption
---- ------ -- ----------
LAC-Wireless -86 [WPA2-EAP-CCMP][ESS]
Historical -84 [WPA-PSK-TKIP][WPA2-PSK-CCMP+TKIP][ESS]
LAC PUBLIC -85 [ESS]
Public-LAC -90 [ESS]
NMC-Main -79 [WPA2-PSK-CCMP][ESS]
<iridum>- wpa_cli scan_results ~
Selected interface 'wlp2s0'
bssid / frequency / signal level / flags / ssid
58:bf:ea:92:ba:c0 2437 -48 [WPA2-EAP-CCMP][ESS] LAC-Wireless
6c:70:9f:de:4d:7c 2462 -84 [WPA-PSK-TKIP][WPA2-PSK-CCMP+TKIP][ESS]Historical
58:bf:ea:92:ba:c2 2437 -56 [ESS] LAC PUBLIC
24:01:c7:3a:91:b0 2462 -64 [ESS] Public-LAC
Selected interface 'wlp2s0'
https://askubuntu.com/questions/541704/how-can-one-use-wpa-cli-to-connect-to-a-wpa-network-without-a-password
> scan
OK
CTRL-EVENT-SCAN-RESULTS
> scan_results
bssid / frequency / signal level / flags / ssid
f8:d1:11:23:c2:2f 2412 76 [ESS] BAYINET
f8:d1:11:23:c1:e9 2412 47 [ESS] BAYINET
> add_network
0
> set_network 0 ssid "Public-LAC"
OK
> set_network 0 key_mgmt NONE
OK
> enable_network 0
OK
CTRL-EVENT-SCAN-RESULTS
Trying to associate with f8:d1:11:23:c2:2f (SSID='BAYINET' freq=2412 MHz)
Association request to the driver failed
Associated with f8:d1:11:23:c2:2f
CTRL-EVENT-CONNECTED - Connection to f8:d1:11:23:c2:2f completed (auth) [id=1 id_str=]
> quit
'''
# Print the ones we don't know:
print()
for ap in aps:
if ap not in known:
print(format % (ap,
'',
accesspoints[ap]['signal'],
accesspoints[ap]['flags']))
def connect_to(to_ap):
if verbose:
print("Connecting to", to_ap)
accesspoints = get_available_accesspoints(iface)
aps = list(accesspoints.keys())
known_nets = get_known_networks()
known = [ known_nets[i] for i in known_nets ]
known_index = None
if to_ap not in aps:
# But maybe it's a number for a known network?
if to_ap.isdigit():
known_index = int(to_ap)
if known_index not in known_nets:
print("No network %d known" % known_index)
sys.exit(1)
to_ap = known_nets[known_index]
if to_ap not in aps:
print("Network %d, '%s', not visible" % (known_index,
to_ap))
sys.exit(1)
else:
matched = match_ssid(to_ap, accesspoints.keys())
if not matched:
print("'%s' isn't visible" % to_ap)
sys.exit(1)
to_ap = matched
print("Matched:", matched)
# Now to_ap is an SSID that's known.
if to_ap in known:
if verbose:
print("Great, we see", to_ap, "and we know it already")
if known_index is None:
for i in known_nets:
if known_nets[i] == to_ap:
known_index = i
break
if known_index is None:
print("Internal error, lost track of SSID %s" % to_ap)
if verbose:
print("Enabling network", to_ap)
run_cmd(["wpa_cli", "enable_network", str(known_index)])
if start_dhcp(iface):
print("DHCP failed")
else:
show_browser_if_redirect()
sys.exit(0)
# New network, hasn't been stored yet. But it is seen.
if verbose:
print(to_ap, "must be a new network")
thisap = accesspoints[to_ap]
out, err = run_cmd(["wpa_cli", "add_network"])
# The last (second) line of the output is the new network number.
# But split('\n') gives a bogus empty final line.
# To be safer, try iterating to find a line that's just a single number.
lines = out.split('\n')
netnum_str = None
for line in lines:
if not line:
continue
words = line.split()
if len(words) == 1 and words[0].isdigit():
netnum_str = words[0]
break
if not netnum_str:
print("Unexpected output from wpa_cli add_network:")
print(out)
print("---")
sys.exit(1)
if verbose:
print("new netnum:", netnum_str)
def check_fail(out, err, errmsg=None):
if 'FAIL' in out or 'FAIL' in err:
if errmsg:
print("Error:", errmsg)
if out:
print("==== FAIL: out")
print(out)
if err:
print("==== FAIL: err")
print(err)
sys.exit(1)
if out or err:
print("SUCCESS:")
if out:
print(out)
if err:
print(err)
out, err = run_cmd(["wpa_cli", "set_network", netnum_str, "ssid",
'"%s"' % to_ap])
check_fail(out, err, "Set network")
if 'WPA' in thisap['flags'] or 'PSK' in thisap['flags']:
password = getpass.getpass("Password: ")
out, err = run_cmd(["wpa_cli", "set_network", netnum_str,
"psk", '"%s"' % password])
check_fail(out, err, "Set password")
else:
if verbose:
print("Trying to connect to %s with no password" % to_ap)
out, err = run_cmd(["wpa_cli", "set_network", netnum_str,
"key_mgmt", "NONE"])
check_fail(out, err, "Set key management")
if verbose:
print("Waiting a little ...", end='')
time.sleep(5)
if verbose:
print()
if verbose:
print("Enabling network", netnum_str)
out, err = run_cmd(["wpa_cli", "enable_network", netnum_str])
check_fail(out, err, "Enable network")
if verbose:
print("Waiting a little ...", end='')
time.sleep(5)
if verbose:
print()
if verbose:
print("Saving configuration")
out, err = run_cmd(["wpa_cli", "save_config"])
check_fail(out, err, "Save configuration")
if verbose:
print(out, err, "Saved configuration")
start_dhcp(iface)
show_browser_if_redirect()
sys.exit(0)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-k', "--known", dest="known",
action="store_true", help="List known networks")
parser.add_argument('-a', "--available", dest="available",
action="store_true", help="Show available accesspoints")
parser.add_argument('connect_to', nargs='?',
help="The essid or numeric specifier to connect to")
args = parser.parse_args(sys.argv[1:])
ifaces = get_wireless_ifaces()
if not ifaces:
print("No wireless interface, sorry")
sys.exit(1)
if len(ifaces) > 1:
print("Multiple wireless interfaces:", ' '.join(get_wireless_ifaces()))
print("Using", ifaces[0])
iface = ifaces[0]
if not iface:
print("No interface!")
sys.exit(1)
if verbose:
print("Interface:", iface)
if args.available:
show_available_networks()
sys.exit(0)
if args.known:
known_nets = get_known_networks()
for i in sorted(known_nets.keys()):
print('%3d: %s' % (i, known_nets[i]))
sys.exit(0)
# If no flags specified, then we should have one arg,
# either a numeric specifier or an essid.
if not args.connect_to:
current = get_current()
if current:
print("Connected to", current)
else:
print("Not connected")
sys.exit(0)
connect_to(args.connect_to)
|
gpl-2.0
| 2,445,823,374,214,298,000
| 29.634892
| 109
| 0.573358
| false
|
estaban/pyload
|
module/plugins/accounts/FileserveCom.py
|
1
|
2261
|
# -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
"""
from time import mktime, strptime
from module.plugins.Account import Account
from module.common.json_layer import json_loads
class FileserveCom(Account):
__name__ = "FileserveCom"
__version__ = "0.2"
__type__ = "account"
__description__ = """Fileserve.com account plugin"""
__author_name__ = "mkaay"
__author_mail__ = "mkaay@mkaay.de"
def loadAccountInfo(self, user, req):
data = self.getAccountData(user)
page = req.load("http://app.fileserve.com/api/login/", post={"username": user, "password": data['password'],
"submit": "Submit+Query"})
res = json_loads(page)
if res['type'] == "premium":
validuntil = mktime(strptime(res['expireTime'], "%Y-%m-%d %H:%M:%S"))
return {"trafficleft": res['traffic'], "validuntil": validuntil}
else:
return {"premium": False, "trafficleft": None, "validuntil": None}
def login(self, user, data, req):
page = req.load("http://app.fileserve.com/api/login/", post={"username": user, "password": data['password'],
"submit": "Submit+Query"})
res = json_loads(page)
if not res['type']:
self.wrongPassword()
# log in at fileserve page
req.load("http://www.fileserve.com/login.php",
post={"loginUserName": user, "loginUserPassword": data['password'], "autoLogin": "checked",
"loginFormSubmit": "Login"})
|
gpl-3.0
| -6,783,191,814,099,837,000
| 37.982759
| 116
| 0.597523
| false
|
boegel/easybuild-easyblocks
|
easybuild/easyblocks/m/mtl4.py
|
1
|
1877
|
##
# Copyright 2009-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for MTL4, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import os
from easybuild.easyblocks.generic.tarball import Tarball
class EB_MTL4(Tarball):
"""Support for installing MTL4."""
def sanity_check_step(self):
"""Custom sanity check for MTL4."""
incpref = os.path.join('include', 'boost', 'numeric')
custom_paths = {
'files': [],
'dirs': [os.path.join(incpref, x) for x in ["itl", "linear_algebra", "meta_math", "mtl"]],
}
super(EB_MTL4, self).sanity_check_step(custom_paths=custom_paths)
def make_module_req_guess(self):
"""Adjust CPATH for MTL4."""
guesses = super(EB_MTL4, self).make_module_req_guess()
guesses.update({'CPATH': 'include'})
return guesses
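# Net effect (illustrative): loading the resulting MTL4 module adds
# <installdir>/include to CPATH, so the Boost-style headers checked in
# sanity_check_step are visible to the compiler.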
|
gpl-2.0
| -2,865,124,163,492,093,400
| 32.517857
| 102
| 0.689398
| false
|
saullocastro/pyNastran
|
pyNastran/f06/parse_flutter.py
|
1
|
33532
|
"""
SOL 145 plotter
"""
from __future__ import print_function
from itertools import count
from six import iteritems
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from pyNastran.utils.atmosphere import get_alt_for_density
from pyNastran.utils.log import get_logger2
from pyNastran.utils import object_attributes, object_methods
class FlutterResponse(object):
"""storage object for single subcase SOL 145 results"""
def object_attributes(self, mode='public', keys_to_skip=None):
"""
List the names of attributes of a class as strings. Returns public
attributes as default.
Parameters
----------
obj : instance
the object for checking
mode : str
defines what kind of attributes will be listed
* 'public' - names that do not begin with underscore
* 'private' - names that begin with single underscore
* 'both' - private and public
* 'all' - all attributes that are defined for the object
keys_to_skip : List[str]; default=None -> []
names to not consider to avoid deprecation warnings
Returns
-------
attribute_names : List[str]
sorted list of the names of attributes of a given type or None
if the mode is wrong
"""
return object_attributes(self, mode=mode, keys_to_skip=keys_to_skip)
def object_methods(self, mode='public', keys_to_skip=None):
"""
List the names of methods of a class as strings. Returns public methods
as default.
Parameters
----------
obj : instance
the object for checking
mode : str
defines what kind of methods will be listed
* "public" - names that do not begin with underscore
* "private" - names that begin with single underscore
* "both" - private and public
* "all" - all methods that are defined for the object
keys_to_skip : List[str]; default=None -> []
names to not consider to avoid deprecation warnings
Returns
-------
method : List[str]
sorted list of the names of methods of a given type
or None if the mode is wrong
"""
return object_methods(self, mode=mode, keys_to_skip=keys_to_skip)
def __init__(self, subcase, configuration, xysym, xzsym, mach, density_ratio, method,
modes, results,
f06_units=None, out_units=None,
):
"""
Parameters
----------
subcase : int
the subcase id
method : str
PK, PKNL, ???
modes : List[int]; (default=None -> all)
the modes; typically 1 to N
results : varies
method = PK
List[List[float] * 7] * nmodes
kfreq, 1/kfreq, velocity, damping, freq, eigr, eigi
method = PKNL
List[List[float] * 9] * nmodes
kfreq, 1/kfreq, density, mach, velocity, damping, freq, eigr, eigi
Units
-----
Units are only applicable for quantities that have units
(e.g. units on Mach don't do anything).
All allowable fields are shown below.
f06_units : dict[str] = str (default=None -> no units conversion)
The units to to read from the F06.
PK method:
f06_units = {'velocity' : 'in/s'}
The velocity units are the units for the FLFACT card in the BDF
PKNL method:
f06_units = {'velocity' : 'in/s', 'density' : 'slug/in^3'}
The velocity/density units are the units for the FLFACT card in the BDF
out_units dict[str] = str (default=None -> no units conversion)
The units to store results in.
PK method:
out_units = {'velocity' : 'ft/s'}
PKNL method:
out_units = {
'velocity' : 'ft/s',
'eas' : 'knots',
'density' : 'slinch/in^3',
'altitude' : 'ft'
}
Unused Parameters
-----------------
configuration : str
AEROSG2D...what is this???
XY-SYMMETRY : str
ASYMMETRIC, SYMMETRIC
unused
XZ-SYMMETRY : str
ASYMMETRIC, SYMMETRIC
unused
"""
self.f06_units = f06_units
self.out_units = out_units
self.subcase = subcase
self.configuration = configuration
if method == 'PK':
self.mach = mach
self.xysym = xysym
self.xzsym = xzsym
self.density_ratio = density_ratio
#print('mach=%s' % mach)
self.method = method
self.modes = np.asarray(modes, dtype='int32')
rho_ref = 1.
self.ikfreq = 0
self.ikfreq_inv = 1
#print(results)
results = np.asarray(results, dtype='float64')
if self.method == 'PK':
self.ivelocity = 2
self.idamping = 3
self.ifreq = 4
self.ieigr = 5
self.ieigi = 6
self.results = results
kvel = self._get_unit_factor('velocity')[0]
results[:, :, self.ivelocity] *= kvel
# velocity is the target
self.names = ['kfreq', '1/kfreq', 'velocity', 'damping', 'freq', 'eigr', 'eigi']
elif self.method == 'PKNL':
# velocity is the target
self.names = ['kfreq', '1/kfreq', 'density', 'mach', 'velocity', 'damping', 'freq', 'eigr', 'eigi', 'eas', 'q', 'alt']
self.idensity = 2
self.imach = 3
self.ivelocity = 4
self.idamping = 5
self.ifreq = 6
self.ieigr = 7
self.ieigi = 8
self.ieas = 9
self.iq = 10
self.ialt = 11
# eas = V * sqrt(rho / rhoSL)
vel = results[:, :, self.ivelocity]#.ravel()
rho = results[:, :, self.idensity]#.ravel()
q = 0.5 * rho * vel**2
#eas = (2 * q / rho_ref)**0.5
eas = vel * np.sqrt(rho / rho_ref)
density_units1 = self.f06_units['density']
altitude_units = self.out_units['altitude']
#density_units1 = self.out_units['density']
kdensity = self._get_density_unit_factor(density_units1, 'slug/ft^3')
#kdensity = self._get_altitude_unit_factor(density_units2, 'slug/ft^3')
kalt = self._get_altitude_unit_factor('ft', altitude_units)
kvel = self._get_unit_factor('velocity')[0]
#kpressure = self._get_unit_factor('dynamic_pressure')[0]
kpressure = kdensity * kvel ** 2
make_alt = False
if make_alt:
alt = np.array(
[get_alt_for_density(densityi, nmax=20) * kalt
for densityi in rho.ravel() * kdensity], dtype='float64').reshape(vel.shape)
self.results = np.dstack([results, eas, q * kpressure, alt])
else:
self.results = np.dstack([results, eas, q * kpressure])
else:
raise NotImplementedError(method)
#print(self.results.shape)
# c - cyan
# b - black
# r - red
# g - green
# m - magenta
# y - yellow
#colors = ['b', 'c', 'g', 'k', 'm', 'r'] #, 'y']
# D - wide diamond
# h - hexagon
# * - star
# + - plus
# 3 - 3 pointed star
# o - circle
# d - thin diamond
# 1 - Y shape
# s - square
#shapes = ['D', 'h', '*', 's', 'd', '3', 'o', '1', '2', '4', 'x', '^', '<', '>'] # '+',
#symbol_list = []
#for shape in shapes:
#for color in colors:
#symbol_list.append('%s-%s' % (shape, color))
self.noline = False
self._symbols = []
self.generate_symbols()
def generate_symbols(self):
"""
This symbol list is taken from a series of "good" colors (e.g. not yellow)
and easily distinguishable shapes. Far more combinations that is necessary
is defined
"""
colors = ['r', 'g', 'b', 'k']
symbols = ['o', '*', 'x', 'v', '>', '<', '^']
self._symbols = []
for symbol in symbols:
for color in colors:
self._symbols.append(color + symbol)
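# Produces e.g. ['ro', 'go', 'bo', 'ko', 'r*', 'g*', ...]: 28 color+marker pairs.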
def set_plot_options(self, noline=False):
self.noline = noline
def _get_unit_factor(self, name):
if not self.f06_units or not self.out_units:
return 1.
unit_f06 = self.f06_units[name]
unit_out = self.out_units[name]
#print('unit_f06=%r unit_out=%r' % (unit_f06, unit_out))
if name == 'velocity':
factor = self._get_velocity_unit_factor(unit_f06, unit_out)
elif name == 'altitude':
factor = self._get_altitude_unit_factor(unit_f06, unit_out)
elif name == 'density':
factor = self._get_density_unit_factor(unit_f06, unit_out)
elif name in ['pressure', 'dynamic_pressure']:
factor = self._get_pressure_unit_factor(unit_f06, unit_out)
else:
raise NotImplementedError(name)
if self.out_units is not None:
units = self.out_units[name]
else:
units = 'units'
return factor, units
@staticmethod
def _get_velocity_unit_factor(unit_in, unit_out):
"""TODO: simplify this..."""
if unit_in not in ['in/s', 'ft/s', 'knots', 'm/s']:
msg = 'unit_in=%r not in [in/s, ft/s, knots, m/s]' % unit_in
raise NotImplementedError(msg)
if unit_out not in ['in/s', 'ft/s', 'knots', 'm/s']:
msg = 'unit_out=%r not in [in/s, ft/s, knots, m/s]' % unit_out
raise NotImplementedError(msg)
if unit_in == unit_out:
factor = 1.
elif unit_in == 'in/s':
if unit_out == 'ft/s':
factor = 1./12.
elif unit_out == 'knots':
factor = 1./20.2537
elif unit_out == 'm/s':
factor = 1./39.3700432322835
else:
raise NotImplementedError('unit_out=%r not in [in/s, ft/s, knots, m/s]' % unit_out)
elif unit_in == 'ft/s':
if unit_out == 'in/s':
factor = 12.
elif unit_out == 'knots':
factor = 1./1.687808333407337269
elif unit_out == 'm/s':
factor = 1./3.2808369360236246948
else:
raise NotImplementedError('unit_out=%r not in [in/s, ft/s, knots, m/s]' % unit_out)
elif unit_in == 'knots':
if unit_out == 'in/s':
factor = 20.253700000888049004
elif unit_out == 'ft/s':
factor = 1.687808333407337269
elif unit_out == 'm/s':
factor = 1/1.9438427409666045875
else:
raise NotImplementedError('unit_out=%r not in [in/s, ft/s, knots, m/s]' % unit_out)
elif unit_in == 'm/s':
if unit_out == 'in/s':
factor = 39.37004323228349989
elif unit_out == 'ft/s':
factor = 3.2808369360236246948
elif unit_out == 'knots':
factor = 1.9438427409666045875
else:
raise NotImplementedError('unit_out=%r not in [in/s, ft/s, knots, m/s]' % unit_out)
else:
raise NotImplementedError('unit_in=%r not in [in/s, ft/s, knots, m/s]' % unit_in)
return factor
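# e.g. (illustrative): _get_velocity_unit_factor('ft/s', 'in/s') -> 12.0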
@staticmethod
def _get_altitude_unit_factor(unit_in, unit_out):
"""TODO: simplify this..."""
if unit_in not in ['ft', 'm']:
msg = 'unit_in=%r not in [ft, m]' % unit_in
raise NotImplementedError(msg)
if unit_out not in ['ft', 'm']:
msg = 'unit_out=%r not in [ft, m]' % unit_out
raise NotImplementedError(msg)
if unit_in == unit_out:
factor = 1.
elif unit_in == 'm':
if unit_out == 'ft':
factor = 1./0.3048
else:
raise NotImplementedError('unit_out=%r not in [ft, m]' % unit_out)
elif unit_in == 'ft':
if unit_out == 'm':
factor = 0.3048
else:
raise NotImplementedError('unit_out=%r not in [ft, m]' % unit_out)
else:
raise NotImplementedError('unit_in=%r not in [ft, m]' % unit_in)
return factor
@staticmethod
def _get_pressure_unit_factor(unit_in, unit_out):
"""TODO: simplify this..."""
if unit_in not in ['psi', 'psf']:
msg = 'unit_in=%r not in [psi, psf]' % unit_in
raise NotImplementedError(msg)
if unit_out not in ['psi', 'psf']:
msg = 'unit_out=%r not in [psi, psf]' % unit_out
raise NotImplementedError(msg)
if unit_in == unit_out:
factor = 1.
elif unit_in == 'psi':
if unit_out == 'psf':
factor = 144.
else:
raise NotImplementedError('unit_out=%r not in [psi, psf]' % unit_out)
elif unit_in == 'psf':
if unit_out == 'psi':
factor = 1./144.
else:
raise NotImplementedError('unit_out=%r not in [psi, psf]' % unit_out)
else:
raise NotImplementedError('unit_in=%r not in [psi, psf]' % unit_in)
return factor
@staticmethod
def _get_density_unit_factor(unit_in, unit_out):
"""TODO: simplify this..."""
if unit_in not in ['slinch/in^3', 'slug/ft^3']:
msg = 'unit_in=%r not in [slinch/in^3, slug/ft^3]' % unit_in
raise NotImplementedError(msg)
if unit_out not in ['slinch/in^3', 'slug/ft^3']:
msg = 'unit_out=%r not in [slinch/in^3, slug/ft^3]' % unit_out
raise NotImplementedError(msg)
if unit_in == unit_out:
factor = 1.
elif unit_in == 'slinch/in^3':
if unit_out == 'slug/ft^3':
factor = 12.**4.
else:
msg = 'unit_out=%r not in [slinch/in^3, slug/ft^3]' % unit_out
raise NotImplementedError(msg)
elif unit_in == 'slug/ft^3':
if unit_out == 'slinch/in^3':
factor = 1./12.**4.
else:
msg = 'unit_out=%r not in [slinch/in^3, slug/ft^3]' % unit_out
raise NotImplementedError(msg)
else:
msg = 'unit_in=%r not in [slinch/in^3, slug/ft^3]' % unit_in
raise NotImplementedError(msg)
return factor
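# e.g. (illustrative): _get_density_unit_factor('slinch/in^3', 'slug/ft^3') -> 12.**4 == 20736.0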
@property
def symbols(self):
"""gets the symbols for the lines"""
if not self.noline:
symbols = [symbol + '-' for symbol in self._symbols]
else:
symbols = self._symbols
return symbols
@staticmethod
def _set_xy_limits(xlim, ylim):
"""helper method for ``plot_vg``"""
if xlim:
plt.xlim(xlim)
if ylim:
plt.ylim(ylim)
def plot_vg(self, modes=None,
fig=None,
xlim=None, ylim=None,
show=True, clear=False, legend=True,
png_filename=None, **kwargs):
"""
Make a V-g plot
See ``plot_root_locus`` for arguments
"""
_kvelocity, velocity_units = self._get_unit_factor('velocity')
xlabel = 'Velocity [%s]' % velocity_units
ylabel = 'Damping'
ix = self.ivelocity
iy = self.idamping
scatter = True
self._plot_x_y(ix, iy, xlabel, ylabel, scatter,
modes=modes, fig=fig, xlim=xlim, ylim=ylim,
show=show, clear=clear, legend=legend,
png_filename=png_filename,
**kwargs)
def flutter_speed(self, modes=None):
"""gets the flutter speed (stub; not yet implemented)"""
if modes is None:
modes = self.modes
else:
modes = np.asarray(modes)
def plot_root_locus(self, modes=None,
fig=None,
xlim=None, ylim=None,
show=True, clear=False, legend=True,
png_filename=None,
**kwargs):
"""
Plots a root locus
Parameters
----------
modes : List[int] / int ndarray; (default=None -> all)
the modes; typically 1 to N
fig : plt.figure
figure object
xlim : list[float/None, float/None]
the x plot limits
ylim : list[float/None, float/None]
the y plot limits
show : bool; default=True
show the plot
clear : bool; default=False
clear the plot
legend : bool; default=False
show the legend
kwargs : dict; default=None
key : various matplotlib parameters
value : depends
Legend kwargs
-------------
loc : str
'best'
fancybox : bool; default=False
makes the box look cool
framealpha : float; 0.0 <= alpha <= 1.0
0.0 - fully transparent
1.0 - no transparency / opaque
"""
xlabel = 'Eigenvalue (Real)'
ylabel = 'Eigenvalue (Imaginary)'
ix = self.ieigr
iy = self.ieigi
scatter = True
self._plot_x_y(ix, iy, xlabel, ylabel, scatter,
modes=modes, fig=fig, xlim=xlim, ylim=ylim,
show=show, clear=clear, legend=legend,
png_filename=png_filename,
**kwargs)
def _plot_x_y(self, ix, iy, xlabel, ylabel, scatter, modes=None,
fig=None,
xlim=None, ylim=None,
show=True, clear=False, legend=True,
png_filename=None,
**kwargs):
"""builds the plot"""
self.fix()
if kwargs is None:
kwargs = {}
modes, imodes = self._get_modes_imodes(modes)
if fig is None:
fig = plt.figure()
axes = fig.add_subplot(111)
self._set_xy_limits(xlim, ylim)
symbols = self.symbols
for i, imode, mode in zip(count(), imodes, modes):
symbol = symbols[i]
freq = self.results[imode, :, self.ifreq].ravel()
real = self.results[imode, :, ix].ravel()
imag = self.results[imode, :, iy].ravel()
iplot = np.where(~np.isnan(freq))
#iplot = np.where(freq > 0.0)
axes.plot(real[iplot], imag[iplot], symbol, label='Mode %i' % mode, markersize=0)
if scatter:
s = np.linspace(.75, 50., len(real))
#assert symbol[2] == '-', symbol
axes.scatter(real[iplot], imag[iplot], s=s, color=symbol[0], marker=symbol[1])
axes.grid(True)
axes.set_xlabel(xlabel)
axes.set_ylabel(ylabel)
title = 'Subcase %i' % self.subcase
if png_filename:
title += '\n%s' % png_filename
fig.suptitle(title)
if legend:
axes.legend(**kwargs)
if png_filename:
plt.savefig(png_filename)
if show:
plt.show()
if clear:
fig.clear()
def plot_kfreq_damping(self, modes=None,
fig=None,
xlim=None, ylim=None,
show=True, clear=False, legend=True,
png_filename=None,
**kwargs):
"""
Plots a kfreq vs. damping curve
See ``plot_root_locus`` for arguments
"""
xlabel = 'KFreq'
ylabel = 'Damping'
ix = self.ikfreq
iy = self.idamping
scatter = True
self._plot_x_y(ix, iy, xlabel, ylabel, scatter,
modes=modes, fig=fig, xlim=xlim, ylim=ylim,
show=show,
clear=clear,
legend=legend,
png_filename=png_filename,
**kwargs)
def fix(self):
"""attempts to fix the mode switching"""
print(self.names)
# results[imode, ipoint, iresult]
# 1. NaN all the invalid points
freq = self.results[:, :, self.ifreq]
iplot, jplot = np.where(freq == 0.0)
self.results[iplot, jplot, :] = np.nan
return
#-----------------------------------------------------------------------
# 2. sort the results based on velocity so we're going low to high
nmodes, npoints = self.results.shape[:2]
for imode in range(nmodes):
#print(self.results[imode, :, self.ivelocity])
isort = np.argsort(self.results[imode, :, self.ivelocity])
self.results[imode, :, :] = self.results[imode, isort, :]
#-----------------------------------------------------------------------
# 3. sort the results based on damping, so we're going abs(high) to low
#for ipoint in range(npoints):
#isort = np.argsort(self.results[:, ipoint, self.idamping])
#self.results[:, isort, :] = self.results[:, isort, :]
# 4. find the critical mode
# 5. ???
def _get_modes_imodes(self, modes):
"""gets the index of the modes to plot"""
if modes is None:
modes = self.modes
elif isinstance(modes, slice):
start = modes.start if modes.start is not None else 1
stop = modes.stop if modes.stop is not None else len(self.modes) + 1
step = modes.step if modes.step is not None else 1
modes = np.unique(range(start, stop, step))
elif len(modes) == 0:
raise RuntimeError('modes = %s' % modes)
else:
modes = np.unique(modes)
assert 0 not in modes, modes
if modes.max() > self.modes.max():
imodes = np.where(modes <= self.modes.max())
modes = modes[imodes]
if len(modes) == 0:
raise RuntimeError('No modes to plot...')
imodes = np.searchsorted(self.modes, modes)
return modes, imodes
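# e.g. (illustrative): with self.modes = [1..10],
# _get_modes_imodes(slice(1, 4)) -> (array([1, 2, 3]), array([0, 1, 2]))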
def plot_vg_vf(self, fig=None, modes=None, show=None, png_filename=None,
clear=False, legend=None,
xlim=None, ylim_damping=None, ylim_freq=None):
"""
Make a V-g and V-f plot
Parameters
----------
modes : List[int] / int ndarray; (default=None -> all)
the modes; typically 1 to N
"""
self.fix()
if fig is None:
fig = plt.figure() # figsize=(12,9), self.subcase
gridspeci = gridspec.GridSpec(2, 4)
damp_axes = fig.add_subplot(gridspeci[0, :3])
freq_axes = fig.add_subplot(gridspeci[1, :3], sharex=damp_axes)
#self._set_xy_limits(xlim, ylim)
modes, imodes = self._get_modes_imodes(modes)
symbols = self.symbols
_kvelocity, velocity_units = self._get_unit_factor('velocity')
legend_items = ['Mode %i' % mode for mode in modes]
for i, imode, mode in zip(count(), imodes, modes):
vel = self.results[imode, :, self.ivelocity].ravel()
damping = self.results[imode, :, self.idamping].ravel()
freq = self.results[imode, :, self.ifreq].ravel()
#iplot = np.where(freq > 0.0)
#damp_axes.plot(vel, damping, symbols[i], label='Mode %i' % mode)
#freq_axes.plot(vel, freq, symbols[i])
iplot = np.where(~np.isnan(freq))
damp_axes.plot(vel[iplot], damping[iplot], symbols[i], label='Mode %i' % mode)
freq_axes.plot(vel[iplot], freq[iplot], symbols[i])
damp_axes.set_xlabel('Velocity [%s]' % velocity_units)
damp_axes.set_ylabel('Damping')
damp_axes.grid(True)
if xlim is not None:
damp_axes.set_xlim(xlim)
if ylim_damping is not None:
damp_axes.set_ylim(ylim_damping)
freq_axes.set_xlabel('Velocity [%s]' % velocity_units)
freq_axes.set_ylabel('Frequency [Hz]')
freq_axes.grid(True)
if xlim is not None:
freq_axes.set_xlim(xlim)
if ylim_freq is not None:
freq_axes.set_ylim(ylim_freq)
title = 'Subcase %i' % self.subcase
if png_filename:
title += '\n%s' % png_filename
damp_axes.set_title(title)
#plt.suptitle(title)
if legend:
damp_axes.legend(legend_items, fontsize=10, bbox_to_anchor=(1.125, 1.), loc=2)
#fig.subplots_adjust(hspace=0.25)
#fig.subplots_adjust(hspace=.5)
#plt.legend()
#damp_axes.legend(legend_items, bbox_to_anchor=anchor, ncol=2)
#fig.subplots_adjust(hspace=0.25)
#fig.subplots_adjust(hspace=.5)
if png_filename:
plt.savefig(png_filename)
if show:
plt.show()
if clear:
fig.clear()
def plot_flutter_f06(f06_filename, f06_units=None, out_units=None,
modes=None,
plot_vg=False, plot_vg_vf=False, plot_root_locus=False,
plot_kfreq_damping=False, show=True,
xlim=None, ylim_damping=None, ylim_freq=None):
"""
Plots a flutter (SOL 145) deck
Returns
-------
flutters : dict
key : int
subcase_id
value : FlutterResponse()
Supports:
---------
o single subcase
o single subcase, no subcase marker
o multiple subcases
o PK
o PKNL
o calculation of:
- equivalent airspeed
- dynamic pressure
- altitude
Doesn't support:
----------------
o long tables (use LINE=500000)
o SOL 200
o fixing mode switching problem
o fixing unconverged points
"""
if f06_units is None:
f06_units = {'velocity' : 'in/s', 'density' : 'slinch/in^3'}
if out_units is None:
out_units = {'velocity' : 'in/s', 'density' : 'slug/ft^3',
'altitude' : 'ft', 'dynamic_pressure' : 'psf'}
log = get_logger2(log=None, debug=True, encoding='utf-8')
flutters = {}
iline = 0
modes_to_plot = modes
# 1 is the default subcase number
subcase = 1
results = []
modes = []
configuration = None
xysym = None
xzsym = None
mach = None
density_ratio = None
method = None
log.info('f06_filename = %r' % f06_filename)
with open(f06_filename, 'r') as f06_file:
while 1:
nblank = 0
line = f06_file.readline()
iline += 1
#log.debug('line%ia = %r' % (iline, line))
while 'SUBCASE ' not in line and 'FLUTTER SUMMARY' not in line:
line = f06_file.readline()
iline += 1
if not line:
nblank += 1
if nblank == 100:
print(line.strip())
break
if nblank == 100:
break
#log.debug('line%ib = %r' % (iline, line))
if 'SUBCASE' in line[109:]:
sline = line.strip().split()
isubcase = sline.index('SUBCASE')
new_subcase = int(sline[isubcase + 1])
#print('subcasei = %r' % new_subcase)
if new_subcase > subcase:
print()
log.info('subcase=%s -> new_subcase=%s' % (subcase, new_subcase))
log.info('modes1 = %s' % modes)
flutter = FlutterResponse(subcase, configuration, xysym, xzsym,
mach, density_ratio, method,
modes, results,
f06_units=f06_units, out_units=out_units)
flutters[subcase] = flutter
modes = []
results = []
subcase = new_subcase
#break
continue
#log.debug('line%i_FSa = %r' % (iline, line))
last_line = None
while 'FLUTTER SUMMARY' not in line:
last_line = line
line = f06_file.readline()
#log.debug('i=%s %s' % (iline, line.strip().replace(' ', ' ')))
iline += 1
if not line:
nblank += 1
if nblank == 100:
print(line.strip())
log.warning('breaking on nblank=100 a')
break
if nblank == 100:
log.warning('breaking on nblank=100 b')
break
# pulls the subcase id for the first subcase
if last_line is not None:
#log.debug('line%i_FSb = %r' % (iline, line))
#log.debug('line%i_FSb = %r' % (iline-1, last_line.replace(' ', ' ')))
sline = last_line.strip().split()
isubcase = sline.index('SUBCASE')
subcase = int(sline[isubcase + 1])
log.info('subcase = %s' % subcase)
configuration_sline = f06_file.readline().split()
iline += 1
configuration = configuration_sline[2]
xysym = configuration_sline[5]
xzsym = configuration_sline[8]
#print(configuration, xysym, xzsym)
point_sline = f06_file.readline().split()
iline += 1
mode = int(point_sline[2])
method = point_sline[-1] # 13 for PN, 5 for PK
#log.debug(point_sline)
if method == 'PK':
mach = float(point_sline[6])
density_ratio = float(point_sline[10])
#method = point_sline[13]
if mode == 1:
print('# iline mode mach density_ratio method')
print(iline, mode, mach, density_ratio, method)
elif method == 'PKNL':
mach = None
density_ratio = None
if mode == 1:
print('# iline mode method')
print(iline, mode, method)
f06_file.readline()
iline += 1
else:
raise NotImplementedError(point_sline)
if mode in modes:
log.warning('found existing mode...')
continue
modes.append(mode)
# blanks
f06_file.readline()
f06_file.readline()
iline += 2
lines = []
# KFREQ 1./KFREQ VELOCITY DAMPING FREQUENCY COMPLEX EIGENVALUE - PK
# KFREQ 1./KFREQ DENSITY MACH NO. VELOCITY DAMPING FREQUENCY COMPLEX EIGENVALUE - PKNL
if method == 'PK':
nvalues = 7
elif method == 'PKNL':
nvalues = 9
else:
raise NotImplementedError(method)
sline = [None] * nvalues
while len(sline) == nvalues:
sline = f06_file.readline().split()
iline += 1
if (sline
and 'PAGE' not in sline
and 'INFORMATION' not in sline
and 'EIGENVALUE' not in sline):
#print('sline = %s' % sline)
lines.append(sline)
results.append(lines)
#print('')
#print(len(results))
log.info('modes = %s' % modes)
flutter = FlutterResponse(subcase, configuration, xysym, xzsym,
mach, density_ratio, method,
modes, results,
f06_units=f06_units, out_units=out_units)
flutters[subcase] = flutter
make_flutter_plots(modes_to_plot, flutters, xlim, ylim_damping, ylim_freq,
plot_vg, plot_vg_vf, plot_root_locus, plot_kfreq_damping,
show=show)
return flutters
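# Illustrative use (a sketch; 'model.f06' is a hypothetical file):
# flutters = plot_flutter_f06('model.f06', plot_vg_vf=True, show=True)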
def make_flutter_plots(modes, flutters, xlim, ylim_damping, ylim_freq,
plot_vg, plot_vg_vf, plot_root_locus, plot_kfreq_damping,
show=True):
"""actually makes the flutter plots"""
for subcase, flutter in sorted(iteritems(flutters)):
if plot_vg:
flutter.plot_vg(modes=modes,
show=False,
xlim=xlim, ylim=ylim_damping)
if plot_vg_vf:
flutter.plot_vg_vf(modes=modes,
show=False,
xlim=xlim,
ylim_damping=ylim_damping, ylim_freq=ylim_freq)
if plot_root_locus:
flutter.plot_root_locus(modes=modes, show=False)
if plot_kfreq_damping:
flutter.plot_kfreq_damping(modes=modes, show=False)
if show:
plt.show()
if __name__ == '__main__':
plot_flutter_f06('bah_plane.f06')
|
lgpl-3.0
| -344,234,158,298,845,250
| 34.596603
| 122
| 0.493081
| false
|
jokey2k/pyClanSphere
|
pyClanSphere/plugins/bulletin_board/database.py
|
1
|
3321
|
# -*- coding: utf-8 -*-
"""
pyClanSphere.plugins.bulletin_board.database
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Our needed tables are declared here (now)
:copyright: (c) 2009 - 2010 by the pyClanSphere Team,
see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from datetime import datetime
from pyClanSphere.database import db, metadata
# Mapping these out from the db module to increase readability further down
for var in ['Table', 'Column', 'String', 'Integer', 'Boolean', 'DateTime', 'ForeignKey', 'Text']:
globals()[var] = getattr(db,var)
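# Equivalent to writing out: Table = db.Table; Column = db.Column; etc.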
board_categories = Table('board_categories', metadata,
Column('category_id', Integer, primary_key=True),
Column('name', String(50)),
Column('ordering', Integer)
)
board_forums = Table('board_forums', metadata,
Column('forum_id', Integer, primary_key=True),
Column('category_id', ForeignKey('board_categories.category_id')),
Column('name', String(50)),
Column('description', String(255)),
Column('ordering', Integer),
Column('allow_anonymous', Boolean),
Column('is_public', Boolean),
Column('topiccount', Integer),
Column('postcount', Integer),
Column('modification_date', DateTime),
Column('lasttopic_id', Integer, ForeignKey('board_topics.topic_id', name="forum_lasttopic", use_alter=True)),
Column('lastpost_id', Integer, ForeignKey('board_posts.post_id', name="forum_lastpost", use_alter=True))
)
board_topics = Table('board_topics', metadata,
Column('topic_id', Integer, primary_key=True),
Column('forum_id', ForeignKey('board_forums.forum_id')),
Column('name', String(255)),
Column('date', DateTime, default=datetime.utcnow()),
Column('author_id', ForeignKey('users.user_id')),
Column('author_str', String(40)),
Column('is_sticky', Boolean),
Column('is_locked', Boolean),
Column('is_global', Boolean),
Column('is_solved', Boolean),
Column('is_external', Boolean),
Column('lastpost_id', Integer, ForeignKey('board_posts.post_id', name="topic_lastpost", use_alter=True)),
Column('postcount', Integer),
Column('modification_date', DateTime)
)
board_posts = Table('board_posts', metadata,
Column('post_id', Integer, primary_key=True),
Column('topic_id', ForeignKey('board_topics.topic_id')),
Column('text', Text),
Column('author_id', ForeignKey('users.user_id')),
Column('author_str', String(40)),
Column('date', DateTime, default=datetime.utcnow()),
Column('ip', String(40)),
)
board_global_lastread = Table('board_global_lastread', metadata,
Column('user_id', ForeignKey('users.user_id'), primary_key=True),
Column('date', DateTime, default=datetime.utcnow())
)
board_local_lastread = Table('board_local_lastread', metadata,
Column('user_id', ForeignKey('users.user_id'), primary_key=True),
Column('topic_id', ForeignKey('board_topics.topic_id'), primary_key=True),
Column('date', DateTime, default=datetime.utcnow())
)
def init_database(app):
""" This is for inserting our new table"""
engine = app.database_engine
metadata.create_all(engine)
__all__ = ['board_categories', 'board_forums', 'board_topics', 'board_posts',
'board_local_lastread', 'board_global_lastread']
|
bsd-3-clause
| -5,117,481,362,407,669,000
| 37.616279
| 113
| 0.662752
| false
|
KFGisIT/gsa-bpa-django
|
app/settings.py
|
1
|
2975
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SECRET_KEY = '_*zjhswt9umayc3hl4(a3trs3fz+zgh9l@o^1(bo#%jl@t4jqu'
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
TEST_PROJECT_APPS = (
'app',
)
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Pipeline
'pipeline',
# Bower
'djangobower',
) + TEST_PROJECT_APPS
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'app.urls'
WSGI_APPLICATION = 'app.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.FileSystemFinder',
'djangobower.finders.BowerFinder',
)
# Pipeline settings
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
PIPELINE_JS_COMPRESSOR = 'pipeline.compressors.uglifyjs.UglifyJSCompressor'
# Static
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
# Bower
BOWER_COMPONENTS_ROOT = os.path.join(BASE_DIR, 'app/static')
# Pipeline
PIPELINE_CSS = {
# Libraries
'libraries': {
'source_filenames': (
'bower_components/font-awesome/css/font-awesome.min.css',
'bower_components/bootstrap/dist/css/bootstrap.css',
'bower_components/select2/dist/css/select2.min.css',
),
'output_filename': 'css/libs.min.css',
},
# Base styles
'base': {
'source_filenames': (
'css/app.css',
),
'output_filename': 'css/main.min.css',
},
}
PIPELINE_JS = {
# Libraries
'libraries': {
'source_filenames': (
'bower_components/jquery/dist/jquery.js',
'bower_components/bootstrap/dist/js/bootstrap.min.js',
'bower_components/select2/dist/js/select2.min.js',
'bower_components/bootpag/lib/jquery.bootpag.js',
'js/pubsub.js',
'js/ajax-helpers.js',
'js/app.js',
),
'output_filename': 'js/libs.min.js',
}
}
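# Templates can then reference these bundles via django-pipeline's template
# tags, e.g.: {% load pipeline %} {% stylesheet 'base' %} {% javascript 'libraries' %}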
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.BCryptPasswordHasher',
)
AXES_LOGIN_FAILURE_LIMIT = 10
AXES_USE_USER_AGENT = True
AXES_COOLOFF_TIME = 1
AXES_LOCKOUT_TEMPLATE = '403.html'
|
mit
| 6,532,810,265,487,909,000
| 22.611111
| 75
| 0.647059
| false
|
siemens/django-dingos
|
dingos/migrations/0007_auto__add_userdata__add_unique_userdata_user_data_kind.py
|
1
|
16414
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'UserData'
db.create_table(u'dingos_userdata', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('data_kind', self.gf('django.db.models.fields.SlugField')(max_length=32)),
('identifier', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dingos.Identifier'], null=True)),
))
db.send_create_signal(u'dingos', ['UserData'])
# Adding unique constraint on 'UserData', fields ['user', 'data_kind']
db.create_unique(u'dingos_userdata', ['user_id', 'data_kind'])
def backwards(self, orm):
# Removing unique constraint on 'UserData', fields ['user', 'data_kind']
db.delete_unique(u'dingos_userdata', ['user_id', 'data_kind'])
# Deleting model 'UserData'
db.delete_table(u'dingos_userdata')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'dingos.blobstorage': {
'Meta': {'object_name': 'BlobStorage'},
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sha256': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
u'dingos.datatypenamespace': {
'Meta': {'object_name': 'DataTypeNameSpace'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'uri': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'})
},
u'dingos.fact': {
'Meta': {'object_name': 'Fact'},
'fact_term': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos.FactTerm']"}),
'fact_values': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['dingos.FactValue']", 'null': 'True', 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value_iobject_id': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'value_of_set'", 'null': 'True', 'to': u"orm['dingos.Identifier']"}),
'value_iobject_ts': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
u'dingos.factdatatype': {
'Meta': {'unique_together': "(('name', 'namespace'),)", 'object_name': 'FactDataType'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'namespace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fact_data_type_set'", 'to': u"orm['dingos.DataTypeNameSpace']"})
},
u'dingos.factterm': {
'Meta': {'unique_together': "(('term', 'attribute'),)", 'object_name': 'FactTerm'},
'attribute': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
u'dingos.factterm2type': {
'Meta': {'unique_together': "(('iobject_type', 'fact_term'),)", 'object_name': 'FactTerm2Type'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'fact_data_types': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'fact_term_thru'", 'symmetrical': 'False', 'to': u"orm['dingos.FactDataType']"}),
'fact_term': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_type_thru'", 'to': u"orm['dingos.FactTerm']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iobject_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fact_term_thru'", 'to': u"orm['dingos.InfoObjectType']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'})
},
u'dingos.factvalue': {
'Meta': {'unique_together': "(('value', 'fact_data_type', 'storage_location'),)", 'object_name': 'FactValue'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'fact_data_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fact_value_set'", 'to': u"orm['dingos.FactDataType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'storage_location': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {})
},
u'dingos.identifier': {
'Meta': {'unique_together': "(('uid', 'namespace'),)", 'object_name': 'Identifier'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'latest_of'", 'unique': 'True', 'null': 'True', 'to': u"orm['dingos.InfoObject']"}),
'namespace': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos.IdentifierNameSpace']"}),
'uid': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
},
u'dingos.identifiernamespace': {
'Meta': {'object_name': 'IdentifierNameSpace'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'uri': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'})
},
u'dingos.infoobject': {
'Meta': {'ordering': "['-timestamp']", 'unique_together': "(('identifier', 'timestamp'),)", 'object_name': 'InfoObject'},
'create_timestamp': ('django.db.models.fields.DateTimeField', [], {}),
'facts': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['dingos.Fact']", 'through': u"orm['dingos.InfoObject2Fact']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_set'", 'to': u"orm['dingos.Identifier']"}),
'iobject_family': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_set'", 'to': u"orm['dingos.InfoObjectFamily']"}),
'iobject_family_revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['dingos.Revision']"}),
'iobject_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_set'", 'to': u"orm['dingos.InfoObjectType']"}),
'iobject_type_revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['dingos.Revision']"}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Unnamed'", 'max_length': '255', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {}),
'uri': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
u'dingos.infoobject2fact': {
'Meta': {'ordering': "['node_id__name']", 'object_name': 'InfoObject2Fact'},
'attributed_fact': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attributes'", 'null': 'True', 'to': u"orm['dingos.InfoObject2Fact']"}),
'fact': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_thru'", 'to': u"orm['dingos.Fact']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iobject': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fact_thru'", 'to': u"orm['dingos.InfoObject']"}),
'node_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos.NodeID']"})
},
u'dingos.infoobjectfamily': {
'Meta': {'object_name': 'InfoObjectFamily'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'})
},
u'dingos.infoobjectnaming': {
'Meta': {'ordering': "['position']", 'object_name': 'InfoObjectNaming'},
'format_string': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iobject_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_type_set'", 'to': u"orm['dingos.InfoObjectType']"}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
u'dingos.infoobjecttype': {
'Meta': {'unique_together': "(('name', 'iobject_family', 'namespace'),)", 'object_name': 'InfoObjectType'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iobject_family': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_type_set'", 'to': u"orm['dingos.InfoObjectFamily']"}),
'name': ('django.db.models.fields.SlugField', [], {'max_length': '30'}),
'namespace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_type_set'", 'blank': 'True', 'to': u"orm['dingos.DataTypeNameSpace']"})
},
u'dingos.marking2x': {
'Meta': {'object_name': 'Marking2X'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'marking': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'marked_item_thru'", 'to': u"orm['dingos.InfoObject']"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'dingos.nodeid': {
'Meta': {'object_name': 'NodeID'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'dingos.relation': {
'Meta': {'unique_together': "(('source_id', 'target_id', 'relation_type'),)", 'object_name': 'Relation'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'metadata_id': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': u"orm['dingos.Identifier']"}),
'relation_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos.Fact']"}),
'source_id': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'yields_via'", 'null': 'True', 'to': u"orm['dingos.Identifier']"}),
'target_id': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'yielded_by_via'", 'null': 'True', 'to': u"orm['dingos.Identifier']"})
},
u'dingos.revision': {
'Meta': {'object_name': 'Revision'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
u'dingos.userdata': {
'Meta': {'unique_together': "(('user', 'data_kind'),)", 'object_name': 'UserData'},
'data_kind': ('django.db.models.fields.SlugField', [], {'max_length': '32'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos.Identifier']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
complete_apps = ['dingos']
|
gpl-2.0
| -1,355,547,385,660,828,000
| 75.705607
| 187
| 0.557268
| false
|
jos4uke/getSeqFlankBlatHit
|
lib/python2.7/site-packages/pybedtools/test/test_iter.py
|
1
|
7404
|
import difflib
import itertools
import yaml
import os
import gzip
import pybedtools
# The functools.partial trick to get descriptions to be valid is from:
#
# http://code.google.com/p/python-nose/issues/detail?id=244#c1
from functools import partial
this_dir = os.path.dirname(__file__)
config_fn = os.path.join(this_dir, 'test_cases.yaml')
def gz(x):
"""
Gzips a file to a tempfile, and returns a new BedTool using the gzipped
version.
"""
fin = open(x.fn)
gzfn = pybedtools.BedTool._tmp()
fout = gzip.open(gzfn, 'wb')
fout.writelines(fin)
fout.close()
fin.close()
return pybedtools.BedTool(gzfn)
def fix(x):
"""
Replaces spaces with tabs, removes spurious newlines, and lstrip()s each
line. Makes it really easy to create BED files on the fly for testing and
checking.
"""
s = ""
for i in x.splitlines():
i = i.strip('\n\r')
if len(i) == 0:
continue
# If the expected output contains tabs, then use those to split,
# otherwise space. This allows you to have expected output with blank
# fields (e.g., "\t\t")
if '\t' in i:
i = i.split('\t')
else:
i = i.split()
i = '\t'.join(i)+'\n'
s += i
return s
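# Hedged illustrative sketch (not part of the original test module): shows
# how fix() normalizes a free-form string into tab-delimited BED text. The
# helper below is only defined for documentation and is never called.
def _example_fix_usage():
    raw = """
    chr1 1 100

    chr2 200 300
    """
    # Blank lines are dropped and whitespace-separated fields are
    # re-joined with tabs, one record per line.
    return fix(raw) == "chr1\t1\t100\nchr2\t200\t300\n"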
def parse_yaml(infile):
x = yaml.load(open(infile).read())
for test_case in x:
method = test_case['method']
send_kwargs = test_case['kwargs']
expected = test_case['expected']
yield method, send_kwargs, expected
def run(method, bedtool, expected, **kwargs):
result = getattr(bedtool, method)(**kwargs)
res = str(result)
expected = fix(expected)
try:
assert res == expected
except AssertionError:
print result.fn
print 'Method call:'
args = []
for key, val in kwargs.items():
args.append(('%s=%s' % (key, val)).strip())
args = ', '.join(args)
print 'BedTool.%(method)s(%(args)s)' % locals()
print 'Got:'
print res
print 'Expected:'
print expected
print 'Diff:'
for i in difflib.unified_diff(res.splitlines(1), expected.splitlines(1)):
print i,
# Make tabs and newlines visible
spec_res = res.replace('\t', '\\t').replace('\n', '\\n\n')
spec_expected = expected.replace('\t', '\\t').replace('\n', '\\n\n')
print 'Showing special characters:'
print 'Got:'
print spec_res
print 'Expected:'
print spec_expected
print 'Diff:'
for i in difflib.unified_diff(spec_res.splitlines(1), spec_expected.splitlines(1)):
print i,
raise
# List of methods that *only* take BAM as input
bam_methods = ('bam_to_bed',)
# List of supported BedTool construction from BAM files. Currently only
# file-based.
supported_bam = ('filename', )
converter = {'filename': lambda x: pybedtools.BedTool(x.fn),
'generator': lambda x: pybedtools.BedTool(i for i in x),
'stream': lambda x: pybedtools.BedTool(open(x.fn)),
'gzip': gz,
}
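# Hedged illustrative sketch (added for documentation; not called by the
# tests, and assuming 'a.bed' ships with the pybedtools example data): the
# converter table yields the same intervals through four different BedTool
# representations.
def _example_converter_usage():
    orig = pybedtools.example_bedtool('a.bed')
    # Each call returns a BedTool backed by a filename, a generator, an
    # open stream, or a gzipped tempfile respectively.
    return [converter[kind](orig)
            for kind in ('filename', 'generator', 'stream', 'gzip')]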
def test_a_b_methods():
"""
Generator that yields tests, inserting different versions of `a` and `b` as
needed
"""
for method, send_kwargs, expected in parse_yaml(config_fn):
a_isbam = False
b_isbam = False
if 'abam' in send_kwargs:
send_kwargs['abam'] = pybedtools.example_filename(send_kwargs['abam'])
send_kwargs['a'] = send_kwargs['abam']
a_isbam = True
if not (('a' in send_kwargs) and ('b' in send_kwargs)):
continue
        # If abam, make a BedTool out of it anyway.

orig_a = pybedtools.example_bedtool(send_kwargs['a'])
orig_b = pybedtools.example_bedtool(send_kwargs['b'])
del send_kwargs['a']
del send_kwargs['b']
if orig_a._isbam:
a_isbam = True
if orig_b._isbam:
b_isbam = True
for kind_a, kind_b in itertools.permutations(('filename', 'generator', 'stream', 'gzip'), 2):
if a_isbam and (kind_a not in supported_bam):
continue
if b_isbam and (kind_b not in supported_bam):
continue
# Convert to file/generator/stream
bedtool = converter[kind_a](orig_a)
b = converter[kind_b](orig_b)
kind = 'a=%(kind_a)s, b=%(kind_b)s abam=%(a_isbam)s bbam=%(b_isbam)s' % locals()
send_kwargs['b'] = b
f = partial(run, method, bedtool, expected, **send_kwargs)
# Meaningful description
f.description = '%(method)s, %(kind)s, %(send_kwargs)s' % locals()
yield (f, )
def test_i_methods():
"""
Generator that yields tests, inserting different versions of `i` as needed
"""
for method, send_kwargs, expected in parse_yaml(config_fn):
i_isbam = False
if 'ibam' in send_kwargs:
i_isbam = True
send_kwargs['ibam'] = pybedtools.example_filename(send_kwargs['ibam'])
send_kwargs['i'] = send_kwargs['ibam']
if ('a' in send_kwargs) and ('b' in send_kwargs):
continue
if ('i' not in send_kwargs) and ('ibam' not in send_kwargs):
continue
if 'files' in send_kwargs:
send_kwargs['files'] = [pybedtools.example_filename(i) for i in send_kwargs['files']]
orig_i = pybedtools.example_bedtool(send_kwargs['i'])
if orig_i._isbam:
i_isbam = True
del send_kwargs['i']
for kind_i in ('filename', 'generator', 'stream', 'gzip'):
if i_isbam:
if (kind_i not in supported_bam):
continue
i = converter[kind_i](orig_i)
kind = 'i=%(kind_i)s ibam=%(i_isbam)s' % locals()
f = partial(run, method, i, expected, **send_kwargs)
f.description = '%(method)s, %(kind)s, %(send_kwargs)s' % locals()
yield (f, )
def test_bed_methods():
"""
Generator that yields tests, inserting different versions of `bed` as needed
"""
for method, send_kwargs, expected in parse_yaml(config_fn):
ignore = ['a', 'b','abam','i']
skip_test = False
for i in ignore:
if i in send_kwargs:
skip_test = True
if skip_test:
continue
if 'bed' not in send_kwargs:
continue
if 'files' in send_kwargs:
send_kwargs['files'] = [pybedtools.example_filename(i) for i in send_kwargs['files']]
if 'bams' in send_kwargs:
send_kwargs['bams'] = [pybedtools.example_filename(i) for i in send_kwargs['bams']]
if 'fi' in send_kwargs:
send_kwargs['fi'] = pybedtools.example_filename(send_kwargs['fi'])
orig_bed = pybedtools.example_bedtool(send_kwargs['bed'])
del send_kwargs['bed']
for kind_bed in ('filename', 'generator', 'stream', 'gzip'):
bed = converter[kind_bed](orig_bed)
kind = 'i=%(kind_bed)s' % locals()
f = partial(run, method, bed, expected, **send_kwargs)
f.description = '%(method)s, %(kind)s, %(send_kwargs)s' % locals()
yield (f, )
def teardown():
pybedtools.cleanup(remove_all=True)
|
gpl-2.0
| -7,010,254,041,362,246,000
| 29.979079
| 101
| 0.557131
| false
|
TD22057/T-Home
|
python/tHome/broker/connect.py
|
1
|
1264
|
#===========================================================================
#
# Broker connection
#
#===========================================================================
from . import config
import paho.mqtt.client as mqtt
#===========================================================================
class Client( mqtt.Client ):
"""Logging client
"""
def __init__( self, log=None ):
mqtt.Client.__init__( self )
self._logger = log
# Restore callbacks overwritten by stupid mqtt library
self.on_log = Client.on_log
def on_log( self, userData, level, buf ):
if self._logger:
self._logger.log( level, buf )
#===========================================================================
def connect( configDir, log, client=None ):
cfg = config.parse( configDir )
if client is None:
client = Client( log )
if cfg.user:
client.username_pw_set( cfg.user, cfg.password )
if cfg.ca_certs:
client.tls_set( cfg.ca_certs, cfg.certFile, cfg.keyFile )
log.info( "Connecting to broker at %s:%d" % ( cfg.host, cfg.port ) )
client.connect( cfg.host, cfg.port, cfg.keepAlive )
return client
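# Hedged usage sketch (added for illustration; the config path and logger
# name below are hypothetical): build a client from the broker config and
# start paho's blocking network loop.
def _example_connect(configDir="/etc/tHome"):
    import logging
    log = logging.getLogger("tHome.broker")
    client = connect(configDir, log)
    client.loop_forever()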
#===========================================================================
|
bsd-2-clause
| -1,453,303,299,164,701,700
| 27.727273
| 76
| 0.432753
| false
|
puttarajubr/commcare-hq
|
corehq/apps/reminders/models.py
|
1
|
70624
|
import pytz
from pytz import timezone
from datetime import timedelta, datetime, date, time
import re
from corehq.apps.casegroups.models import CommCareCaseGroup
from corehq.apps.hqcase.dbaccessors import get_case_ids_in_domain
from corehq.apps.reminders.dbaccessors import get_surveys_in_domain
from dimagi.ext.couchdbkit import *
from casexml.apps.case.models import CommCareCase
from corehq.apps.sms.models import CommConnectCase
from corehq.apps.users.cases import get_owner_id, get_wrapped_owner
from corehq.apps.users.models import CouchUser
from corehq.apps.groups.models import Group
from dimagi.utils.parsing import string_to_datetime, json_format_datetime
from dateutil.parser import parse
from corehq.apps.reminders.util import get_form_name, enqueue_reminder_directly
from couchdbkit.exceptions import ResourceConflict
from couchdbkit.resource import ResourceNotFound
from corehq.apps.sms.util import create_task, close_task, update_task
from corehq.apps.smsforms.app import submit_unfinished_form
from dimagi.utils.couch import LockableMixIn, CriticalSection
from dimagi.utils.couch.cache.cache_core import get_redis_client
from dimagi.utils.multithreading import process_fast
from dimagi.utils.logging import notify_exception
from random import randint
from django.conf import settings
from dimagi.utils.couch.database import iter_docs
class IllegalModelStateException(Exception):
pass
METHOD_SMS = "sms"
METHOD_SMS_CALLBACK = "callback"
METHOD_SMS_SURVEY = "survey"
METHOD_IVR_SURVEY = "ivr_survey"
METHOD_EMAIL = "email"
METHOD_STRUCTURED_SMS = "structured_sms"
METHOD_CHOICES = [
METHOD_SMS,
METHOD_SMS_CALLBACK,
METHOD_SMS_SURVEY,
METHOD_IVR_SURVEY,
METHOD_EMAIL,
]
# The Monday - Sunday constants are meant to match the result from
# date.weekday()
DAY_ANY = -1
DAY_MON = 0
DAY_TUE = 1
DAY_WED = 2
DAY_THU = 3
DAY_FRI = 4
DAY_SAT = 5
DAY_SUN = 6
DAY_OF_WEEK_CHOICES = [
DAY_ANY,
DAY_MON,
DAY_TUE,
DAY_WED,
DAY_THU,
DAY_FRI,
DAY_SAT,
DAY_SUN,
]
REPEAT_SCHEDULE_INDEFINITELY = -1
EVENT_AS_SCHEDULE = "SCHEDULE"
EVENT_AS_OFFSET = "OFFSET"
EVENT_INTERPRETATIONS = [EVENT_AS_SCHEDULE, EVENT_AS_OFFSET]
UI_SIMPLE_FIXED = "SIMPLE_FIXED"
UI_COMPLEX = "COMPLEX"
UI_CHOICES = [UI_SIMPLE_FIXED, UI_COMPLEX]
RECIPIENT_SENDER = "SENDER"
RECIPIENT_USER = "USER"
RECIPIENT_OWNER = "OWNER"
RECIPIENT_CASE = "CASE"
RECIPIENT_PARENT_CASE = "PARENT_CASE"
RECIPIENT_ALL_SUBCASES = "ALL_SUBCASES"
RECIPIENT_SUBCASE = "SUBCASE"
RECIPIENT_SURVEY_SAMPLE = "SURVEY_SAMPLE"
RECIPIENT_USER_GROUP = "USER_GROUP"
RECIPIENT_CHOICES = [
RECIPIENT_USER, RECIPIENT_OWNER, RECIPIENT_CASE, RECIPIENT_SURVEY_SAMPLE,
RECIPIENT_PARENT_CASE, RECIPIENT_SUBCASE, RECIPIENT_USER_GROUP,
]
KEYWORD_RECIPIENT_CHOICES = [RECIPIENT_SENDER, RECIPIENT_OWNER, RECIPIENT_USER_GROUP]
KEYWORD_ACTION_CHOICES = [METHOD_SMS, METHOD_SMS_SURVEY, METHOD_STRUCTURED_SMS]
FIRE_TIME_DEFAULT = "DEFAULT"
FIRE_TIME_CASE_PROPERTY = "CASE_PROPERTY"
FIRE_TIME_RANDOM = "RANDOM"
FIRE_TIME_CHOICES = [FIRE_TIME_DEFAULT, FIRE_TIME_CASE_PROPERTY, FIRE_TIME_RANDOM]
MATCH_EXACT = "EXACT"
MATCH_REGEX = "REGEX"
MATCH_ANY_VALUE = "ANY_VALUE"
MATCH_TYPE_CHOICES = [MATCH_EXACT, MATCH_REGEX, MATCH_ANY_VALUE]
CASE_CRITERIA = "CASE_CRITERIA"
ON_DATETIME = "ON_DATETIME"
START_CONDITION_TYPES = [CASE_CRITERIA, ON_DATETIME]
SURVEY_METHOD_LIST = ["SMS","CATI"]
UI_FREQUENCY_ADVANCED = "ADVANCED"
UI_FREQUENCY_CHOICES = [UI_FREQUENCY_ADVANCED]
QUESTION_RETRY_CHOICES = [1, 2, 3, 4, 5]
FORM_TYPE_ONE_BY_ONE = "ONE_BY_ONE" # Answer each question one at a time
FORM_TYPE_ALL_AT_ONCE = "ALL_AT_ONCE" # Complete the entire form with just one sms using the delimiter to separate answers
FORM_TYPE_CHOICES = [FORM_TYPE_ONE_BY_ONE, FORM_TYPE_ALL_AT_ONCE]
REMINDER_TYPE_ONE_TIME = "ONE_TIME"
REMINDER_TYPE_KEYWORD_INITIATED = "KEYWORD_INITIATED"
REMINDER_TYPE_DEFAULT = "DEFAULT"
REMINDER_TYPE_SURVEY_MANAGEMENT = "SURVEY_MANAGEMENT"
REMINDER_TYPE_CHOICES = [REMINDER_TYPE_DEFAULT, REMINDER_TYPE_ONE_TIME,
REMINDER_TYPE_KEYWORD_INITIATED, REMINDER_TYPE_SURVEY_MANAGEMENT]
SEND_NOW = "NOW"
SEND_LATER = "LATER"
# This time is used when the case property used to specify the reminder time isn't a valid time
# TODO: Decide whether to keep this or retire the reminder
DEFAULT_REMINDER_TIME = time(12, 0)
def is_true_value(val):
return val == 'ok' or val == 'OK'
def looks_like_timestamp(value):
try:
        regex = re.compile(r"^\d\d\d\d-\d\d-\d\d.*$")
return (regex.match(value) is not None)
except Exception:
return False
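# Hedged illustrative sketch (added; not used by the module): only strings
# that begin with a YYYY-MM-DD prefix count as timestamps here.
def _example_looks_like_timestamp():
    return (looks_like_timestamp("2014-05-01T12:00:00Z") and
            not looks_like_timestamp("ok"))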
def property_references_parent(case_property):
return isinstance(case_property, basestring) and case_property.startswith("parent/")
def get_case_property(case, case_property):
"""
case the case
case_property the name of the case property (can be 'parent/property' to lookup
on the parent, or 'property' to lookup on the case)
"""
if case_property is None or case is None:
return None
elif property_references_parent(case_property):
parent_case = case.parent
if parent_case is None:
return None
else:
return parent_case.get_case_property(case_property[7:])
else:
return case.get_case_property(case_property)
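# Hedged illustrative sketch (added; the property name 'edd' is
# hypothetical): a plain name reads off the case itself, while a "parent/"
# prefix follows the parent index first.
def _example_get_case_property(case):
    own_value = get_case_property(case, "edd")
    parent_value = get_case_property(case, "parent/edd")
    return own_value, parent_value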
def case_matches_criteria(case, match_type, case_property, value_to_match):
result = False
case_property_value = get_case_property(case, case_property)
if match_type == MATCH_EXACT:
result = (case_property_value == value_to_match) and (value_to_match is not None)
elif match_type == MATCH_ANY_VALUE:
result = case_property_value is not None
elif match_type == MATCH_REGEX:
try:
regex = re.compile(value_to_match)
result = regex.match(str(case_property_value)) is not None
except Exception:
result = False
return result
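# Hedged illustrative sketch (added; property name and value are
# hypothetical): MATCH_EXACT requires the value itself, while
# MATCH_ANY_VALUE only requires the property to be present.
def _example_case_matches_criteria(case):
    exact = case_matches_criteria(case, MATCH_EXACT, "status", "open")
    present = case_matches_criteria(case, MATCH_ANY_VALUE, "status", None)
    return exact, present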
def get_events_scheduling_info(events):
"""
Return a list of events as dictionaries, only with information pertinent to scheduling changes.
"""
result = []
for e in events:
result.append({
"day_num": e.day_num,
"fire_time": e.fire_time,
"fire_time_aux": e.fire_time_aux,
"fire_time_type": e.fire_time_type,
"time_window_length": e.time_window_length,
"callback_timeout_intervals": e.callback_timeout_intervals,
"form_unique_id": e.form_unique_id,
})
return result
class MessageVariable(object):
def __init__(self, variable):
self.variable = variable
def __unicode__(self):
return unicode(self.variable)
@property
def days_until(self):
        try:
            variable = string_to_datetime(self.variable)
        except Exception:
            return "(?)"
else:
# add 12 hours and then floor == round to the nearest day
return (variable - datetime.utcnow() + timedelta(hours=12)).days
def __getattr__(self, item):
try:
return super(MessageVariable, self).__getattribute__(item)
except Exception:
pass
try:
return MessageVariable(getattr(self.variable, item))
except Exception:
pass
try:
return MessageVariable(self.variable[item])
except Exception:
pass
return "(?)"
class Message(object):
def __init__(self, template, **params):
self.template = template
self.params = {}
for key, value in params.items():
self.params[key] = MessageVariable(value)
def __unicode__(self):
return self.template.format(**self.params)
@classmethod
def render(cls, template, **params):
if isinstance(template, str):
template = unicode(template, encoding='utf-8')
return unicode(cls(template, **params))
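# Hedged illustrative sketch (added; _FakeUser is a stand-in for a real
# recipient object): dotted template lookups resolve through
# MessageVariable, and failed lookups render as "(?)" instead of raising.
def _example_message_render():
    class _FakeUser(object):
        full_name = "Joe"
    # Should render as u"Hello, Joe" under Python 2.
    return Message.render("Hello, {user.full_name}", user=_FakeUser())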
class CaseReminderEvent(DocumentSchema):
"""
A CaseReminderEvent is the building block for representing reminder schedules in
a CaseReminderHandler (see CaseReminderHandler.events).
day_num See CaseReminderHandler, depends on event_interpretation.
fire_time See CaseReminderHandler, depends on event_interpretation.
fire_time_aux Usage depends on fire_time_type.
fire_time_type FIRE_TIME_DEFAULT: the event will be scheduled at the time specified by fire_time.
FIRE_TIME_CASE_PROPERTY: the event will be scheduled at the time specified by the
case property named in fire_time_aux.
FIRE_TIME_RANDOM: the event will be scheduled at a random minute on the interval that
starts with fire_time and lasts for time_window_length minutes
time_window_length Used in FIRE_TIME_RANDOM to define a time interval that starts at fire_time and lasts
for this many minutes
message The text to send along with language to send it, represented
as a dictionary: {"en": "Hello, {user.full_name}, you're having issues."}
callback_timeout_intervals For CaseReminderHandlers whose method is "callback", a list of
timeout intervals (in minutes). The message is resent based on
the number of entries in this list until the callback is received,
or the number of timeouts is exhausted.
form_unique_id For CaseReminderHandlers whose method is "survey", this the unique id
of the form to play as a survey.
"""
day_num = IntegerProperty()
fire_time = TimeProperty()
fire_time_aux = StringProperty()
fire_time_type = StringProperty(choices=FIRE_TIME_CHOICES, default=FIRE_TIME_DEFAULT)
time_window_length = IntegerProperty()
message = DictProperty()
callback_timeout_intervals = ListProperty(IntegerProperty)
form_unique_id = StringProperty()
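# Hedged illustrative sketch (added; the message text is hypothetical): an
# event that fires at a random minute inside a 60-minute window opening at
# 09:00 in the recipient's time zone.
def _example_random_time_event():
    return CaseReminderEvent(
        day_num=0,
        fire_time=time(9, 0),
        fire_time_type=FIRE_TIME_RANDOM,
        time_window_length=60,
        message={"en": "Please complete your follow-up form."},
    )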
def run_rule(case_id, handler, schedule_changed, prev_definition):
case = CommCareCase.get(case_id)
try:
handler.case_changed(case, schedule_changed=schedule_changed,
prev_definition=prev_definition)
except ResourceConflict:
# Sometimes the reminder fires in the middle of reprocessing
# the scheduling.
handler.case_changed(case, schedule_changed=schedule_changed,
prev_definition=prev_definition)
try:
client = get_redis_client()
client.incr("reminder-rule-processing-current-%s" % handler._id)
except:
pass
def retire_reminder(reminder_id):
r = CaseReminder.get(reminder_id)
r.retire()
def get_case_ids(domain):
"""
Had to add this because this query kept intermittently raising
"NoMoreData: Can't parse headers" exceptions.
"""
max_tries = 5
for i in range(max_tries):
try:
return get_case_ids_in_domain(domain)
except Exception:
if i == (max_tries - 1):
raise
class CaseReminderHandler(Document):
"""
A CaseReminderHandler defines the rules and schedule which govern how messages
should go out. The "start" and "until" attributes will spawn and deactivate a
CaseReminder for a CommCareCase, respectively, when their conditions are reached.
Below both are described in more detail:
start This defines when the reminder schedule kicks off.
Examples: start="edd"
- The reminder schedule kicks off for a CommCareCase on
the date defined by the CommCareCase's "edd" property.
start="form_started"
- The reminder schedule kicks off for a CommCareCase when
the CommCareCase's "form_started" property equals "ok".
until This defines when the reminders should stop being sent. Once this condition
is reached, the CaseReminder is deactivated.
Examples: until="followup_1_complete"
- The reminders will stop being sent for a CommCareCase when
the CommCareCase's "followup_1_complete" property equals "ok".
Once a CaseReminder is spawned (i.e., when the "start" condition is met for a
CommCareCase), the intervals at which reminders are sent and the messages sent
are defined by the "events" attribute on the CaseReminderHandler.
One complete cycle through all events is considered to be an "iteration", and the attribute
that defines the maximum number of iterations for this schedule is "max_iteration_count".
Reminder messages will continue to be sent until the events cycle has occurred "max_iteration_count"
times, or until the "until" condition is met, whichever comes first. To ignore the "max_iteration_count",
it can be set to REPEAT_SCHEDULE_INDEFINITELY, in which case only the "until" condition
stops the reminder messages.
The events can either be interpreted as offsets from each other and from the original "start"
condition, or as fixed schedule times from the original "start" condition:
Example of "event_interpretation" == EVENT_AS_OFFSET:
start = "form1_completed"
start_offset = 1
events = [
CaseReminderEvent(
day_num = 0
,fire_time = time(hour=1)
,message = {"en": "Form not yet completed."}
)
]
schedule_length = 0
event_interpretation = EVENT_AS_OFFSET
max_iteration_count = REPEAT_SCHEDULE_INDEFINITELY
until = "form2_completed"
This CaseReminderHandler can be used to send an hourly message starting one day (start_offset=1)
after "form1_completed", and will keep sending the message every hour until "form2_completed". So,
if "form1_completed" is reached on January 1, 2012, at 9:46am, the reminders will begin being sent
at January 2, 2012, at 10:46am and every hour subsequently until "form2_completed". Specifically,
when "event_interpretation" is EVENT_AS_OFFSET:
day_num is interpreted to be a number of days after the last fire
fire_time is interpreted to be a number of hours, minutes, and seconds after the last fire
schedule_length is interpreted to be a number of days between the last event and the beginning of a new iteration
Example of "event_interpretation" == EVENT_AS_SCHEDULE:
start = "regimen_started"
start_offset = 1
events = [
CaseReminderEvent(
day_num = 1
,fire_time = time(11,00)
,message = {"en": "Form not yet completed."}
)
,CaseReminderEvent(
day_num = 4
,fire_time = time(11,00)
,message = {"en": "Form not yet completed."}
)
]
schedule_length = 7
event_interpretation = EVENT_AS_SCHEDULE
max_iteration_count = 4
until = "ignore_this_attribute"
This CaseReminderHandler can be used to send reminders at 11:00am on days 2 and 5 of a weekly
schedule (schedule_length=7), for 4 weeks (max_iteration_count=4). "Day 1" of the weekly schedule
is considered to be one day (start_offset=1) after "regimen_started". So, if "regimen_started" is
reached on a Sunday, the days of the week will be Monday=1, Tuesday=2, etc., and the reminders
will be sent on Tuesday and Friday of each week, for 4 weeks. Specifically, when "event_interpretation"
is EVENT_AS_SCHEDULE:
        day_num is interpreted to be the number of days since the current event cycle began
fire_time is interpreted to be the time of day to fire the reminder
schedule_length is interpreted to be the length of the event cycle, in days
Below is a description of the remaining attributes for a CaseReminderHandler:
domain The domain to which this CaseReminderHandler belongs. Only CommCareCases belonging to
this domain will be checked for the "start" and "until" conditions.
case_type Only CommCareCases whose "type" attribute matches this attribute will be checked for
the "start" and "until" conditions.
nickname A simple name used to describe this CaseReminderHandler.
default_lang Default language to use in case no translation is found for the recipient's language.
method Set to "sms" to send simple sms reminders at the proper intervals.
Set to "callback" to send sms reminders and to enable the checked of "callback_timeout_intervals" on each event.
ui_type The type of UI to use for editing this CaseReminderHandler (see UI_CHOICES)
"""
domain = StringProperty()
last_modified = DateTimeProperty()
active = BooleanProperty(default=True)
case_type = StringProperty()
nickname = StringProperty()
default_lang = StringProperty()
method = StringProperty(choices=METHOD_CHOICES, default="sms")
ui_type = StringProperty(choices=UI_CHOICES, default=UI_SIMPLE_FIXED)
recipient = StringProperty(choices=RECIPIENT_CHOICES, default=RECIPIENT_USER)
ui_frequency = StringProperty(choices=UI_FREQUENCY_CHOICES, default=UI_FREQUENCY_ADVANCED) # This will be used to simplify the scheduling process in the ui
sample_id = StringProperty()
user_group_id = StringProperty()
user_id = StringProperty()
case_id = StringProperty()
reminder_type = StringProperty(choices=REMINDER_TYPE_CHOICES, default=REMINDER_TYPE_DEFAULT)
locked = BooleanProperty(default=False)
# Only used when recipient is RECIPIENT_SUBCASE.
# All subcases matching the given criteria will be the recipients.
recipient_case_match_property = StringProperty()
recipient_case_match_type = StringProperty(choices=MATCH_TYPE_CHOICES)
recipient_case_match_value = StringProperty()
# Only applies when method is "survey".
# If this is True, on the last survey timeout, instead of resending the current question,
# it will submit the form for the recipient with whatever is completed up to that point.
submit_partial_forms = BooleanProperty(default=False)
# Only applies when submit_partial_forms is True.
# If this is True, partial form submissions will be allowed to create / update / close cases.
# If this is False, partial form submissions will just submit the form without case create / update / close.
include_case_side_effects = BooleanProperty(default=False)
# Only applies for method = "ivr_survey" right now.
# This is the maximum number of times that it will retry asking a question with an invalid response before hanging
# up. This is meant to prevent long running calls.
max_question_retries = IntegerProperty(choices=QUESTION_RETRY_CHOICES, default=QUESTION_RETRY_CHOICES[-1])
survey_incentive = StringProperty()
# start condition
start_condition_type = StringProperty(choices=START_CONDITION_TYPES, default=CASE_CRITERIA)
# used when start_condition_type == ON_DATETIME
start_datetime = DateTimeProperty()
# used when start_condition_type == CASE_CRITERIA
start_property = StringProperty()
start_value = StringProperty()
start_date = StringProperty()
start_offset = IntegerProperty()
start_match_type = StringProperty(choices=MATCH_TYPE_CHOICES)
start_day_of_week = IntegerProperty(choices=DAY_OF_WEEK_CHOICES,
default=DAY_ANY)
# reminder schedule
events = SchemaListProperty(CaseReminderEvent)
schedule_length = IntegerProperty()
event_interpretation = StringProperty(choices=EVENT_INTERPRETATIONS, default=EVENT_AS_OFFSET)
max_iteration_count = IntegerProperty()
# stop condition
until = StringProperty()
# If present, references an entry in settings.ALLOWED_CUSTOM_CONTENT_HANDLERS, which maps to a function
# that should be called to retrieve the sms content to send in an sms reminder.
# The signature of a custom content handler should be function(reminder, handler, recipient)
custom_content_handler = StringProperty()
# If a subcase triggers an SMS survey, but we're sending it to the parent case,
# we sometimes want the subcase to be the one on which we execute case actions
# during form submission. This option will allow for that.
# Note that this option only makes a difference if a case is filling out the SMS survey,
# and if a case other than that case triggered the reminder.
force_surveys_to_use_triggered_case = BooleanProperty(default=False)
@property
def uses_parent_case_property(self):
events_use_parent_case_property = False
for event in self.events:
if event.fire_time_type == FIRE_TIME_CASE_PROPERTY and property_references_parent(event.fire_time_aux):
events_use_parent_case_property = True
break
return (
events_use_parent_case_property or
property_references_parent(self.recipient_case_match_property) or
property_references_parent(self.start_property) or
property_references_parent(self.start_date) or
property_references_parent(self.until)
)
@property
def uses_time_case_property(self):
for event in self.events:
if event.fire_time_type == FIRE_TIME_CASE_PROPERTY:
return True
return False
@classmethod
def get_now(cls):
try:
# for testing purposes only!
return getattr(cls, 'now')
except Exception:
return datetime.utcnow()
def schedule_has_changed(self, old_definition):
"""
Returns True if the scheduling information in self is different from
the scheduling information in old_definition.
old_definition - the CaseReminderHandler to compare to
"""
return (
get_events_scheduling_info(old_definition.events) !=
get_events_scheduling_info(self.events) or
old_definition.start_offset != self.start_offset or
old_definition.schedule_length != self.schedule_length or
old_definition.max_iteration_count != self.max_iteration_count
)
def get_reminder(self, case):
domain = self.domain
handler_id = self._id
case_id = case._id
return CaseReminder.view('reminders/by_domain_handler_case',
key=[domain, handler_id, case_id],
include_docs=True,
).one()
def get_reminders(self, ids_only=False):
domain = self.domain
handler_id = self._id
include_docs = not ids_only
result = CaseReminder.view('reminders/by_domain_handler_case',
startkey=[domain, handler_id],
endkey=[domain, handler_id, {}],
include_docs=include_docs,
).all()
if ids_only:
return [entry["id"] for entry in result]
else:
return result
def get_day_of_week_offset(self, dt, day_of_week):
offset = 0
while dt.weekday() != day_of_week:
offset += 1
dt = dt + timedelta(days=1)
return offset
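    # Hedged illustration (added comment): starting from a Sunday, e.g.
    #   self.get_day_of_week_offset(date(2014, 5, 4), DAY_TUE)
    # walks forward day by day and returns 2.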
# For use with event_interpretation = EVENT_AS_SCHEDULE
def get_current_reminder_event_timestamp(self, reminder, recipient, case):
event = self.events[reminder.current_event_sequence_num]
additional_minute_offset = 0
if event.fire_time_type == FIRE_TIME_DEFAULT:
fire_time = event.fire_time
elif event.fire_time_type == FIRE_TIME_CASE_PROPERTY:
fire_time = get_case_property(case, event.fire_time_aux)
try:
fire_time = parse(fire_time).time()
except Exception:
fire_time = DEFAULT_REMINDER_TIME
elif event.fire_time_type == FIRE_TIME_RANDOM:
additional_minute_offset = randint(0, event.time_window_length - 1) + (event.fire_time.hour * 60) + event.fire_time.minute
fire_time = time(0, 0)
else:
fire_time = DEFAULT_REMINDER_TIME
day_offset = self.start_offset + (self.schedule_length * (reminder.schedule_iteration_num - 1)) + event.day_num
start_date = reminder.start_date + timedelta(days=day_offset)
day_of_week_offset = 0
if self.start_day_of_week != DAY_ANY:
day_of_week_offset = self.get_day_of_week_offset(start_date,
self.start_day_of_week)
timestamp = (datetime.combine(start_date, fire_time) +
timedelta(days=day_of_week_offset) +
timedelta(minutes=additional_minute_offset))
return CaseReminderHandler.timestamp_to_utc(recipient, timestamp)
def spawn_reminder(self, case, now, recipient=None):
"""
Creates a CaseReminder.
case The CommCareCase for which to create the CaseReminder.
now The date and time to kick off the CaseReminder. This is the date and time from which all
offsets are calculated.
return The CaseReminder
"""
if recipient is None:
if self.recipient == RECIPIENT_USER:
recipient = CouchUser.get_by_user_id(case.user_id)
elif self.recipient == RECIPIENT_CASE:
recipient = CommConnectCase.get(case._id)
elif self.recipient == RECIPIENT_PARENT_CASE:
if case is not None and case.parent is not None:
recipient = CommConnectCase.wrap_as_commconnect_case(case.parent)
local_now = CaseReminderHandler.utc_to_local(recipient, now)
case_id = case._id if case is not None else None
user_id = recipient._id if self.recipient == RECIPIENT_USER and recipient is not None else None
sample_id = recipient._id if self.recipient == RECIPIENT_SURVEY_SAMPLE else None
reminder = CaseReminder(
domain=self.domain,
case_id=case_id,
handler_id=self._id,
user_id=user_id,
method=self.method,
active=True,
start_date=date(now.year, now.month, now.day) if (now.hour == 0 and now.minute == 0 and now.second == 0 and now.microsecond == 0) else date(local_now.year,local_now.month,local_now.day),
schedule_iteration_num=1,
current_event_sequence_num=0,
callback_try_count=0,
skip_remaining_timeouts=False,
sample_id=sample_id,
xforms_session_ids=[],
)
# Set the first fire time appropriately
if self.event_interpretation == EVENT_AS_OFFSET:
# EVENT_AS_OFFSET
day_offset = self.start_offset + self.events[0].day_num
time_offset = self.events[0].fire_time
reminder.next_fire = now + timedelta(days=day_offset, hours=time_offset.hour, minutes=time_offset.minute, seconds=time_offset.second)
else:
# EVENT_AS_SCHEDULE
reminder.next_fire = self.get_current_reminder_event_timestamp(reminder, recipient, case)
return reminder
@classmethod
def utc_to_local(cls, contact, timestamp):
"""
Converts the given naive datetime from UTC to the contact's time zone.
contact The contact whose time zone to use (must be an instance of CommCareMobileContactMixin).
timestamp The naive datetime.
return The converted timestamp, as a naive datetime.
"""
try:
time_zone = timezone(str(contact.get_time_zone()))
utc_datetime = pytz.utc.localize(timestamp)
local_datetime = utc_datetime.astimezone(time_zone)
naive_local_datetime = local_datetime.replace(tzinfo=None)
return naive_local_datetime
except Exception:
return timestamp
@classmethod
def timestamp_to_utc(cls, contact, timestamp):
"""
Converts the given naive datetime from the contact's time zone to UTC.
contact The contact whose time zone to use (must be an instance of CommCareMobileContactMixin).
timestamp The naive datetime.
return The converted timestamp, as a naive datetime.
"""
try:
time_zone = timezone(str(contact.get_time_zone()))
local_datetime = time_zone.localize(timestamp)
utc_datetime = local_datetime.astimezone(pytz.utc)
naive_utc_datetime = utc_datetime.replace(tzinfo=None)
return naive_utc_datetime
except Exception:
return timestamp
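    # Hedged illustration (added comment): for a contact with a valid time
    # zone, the two classmethods above are inverses on naive datetimes:
    #   local = CaseReminderHandler.utc_to_local(contact, datetime.utcnow())
    #   CaseReminderHandler.timestamp_to_utc(contact, local)
    # returns the original naive UTC value.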
def move_to_next_event(self, reminder):
"""
Moves the given CaseReminder to the next event specified by its CaseReminderHandler. If
the CaseReminder is on the last event in the cycle, it moves to the first event in the cycle.
If the CaseReminderHandler's max_iteration_count is not REPEAT_SCHEDULE_INDEFINITELY and
the CaseReminder is on the last event in the event cycle, the CaseReminder is also deactivated.
reminder The CaseReminder to move to the next event.
return void
"""
reminder.current_event_sequence_num += 1
reminder.callback_try_count = 0
reminder.skip_remaining_timeouts = False
reminder.xforms_session_ids = []
reminder.event_initiation_timestamp = None
if reminder.current_event_sequence_num >= len(self.events):
reminder.current_event_sequence_num = 0
reminder.schedule_iteration_num += 1
def set_next_fire(self, reminder, now):
"""
Sets reminder.next_fire to the next allowable date after now by continuously moving the
given CaseReminder to the next event (using move_to_next_event() above) and setting the
CaseReminder's next_fire attribute accordingly until the next_fire > the now parameter.
This is done to skip reminders that were never sent (such as when reminders are deactivated
for a while), instead of sending one reminder every minute until they're all made up for.
reminder The CaseReminder whose next_fire to set.
        now         The date and time that reminder.next_fire must be after by the time this method returns.
return void
"""
case = reminder.case
recipient = reminder.recipient
iteration = 0
reminder.error_retry_count = 0
# Reset next_fire to its last scheduled fire time in case there were any error retries
if reminder.last_scheduled_fire_time is not None:
reminder.next_fire = reminder.last_scheduled_fire_time
while now >= reminder.next_fire and reminder.active:
iteration += 1
# If it is a callback reminder, check the callback_timeout_intervals
if (self.method in [METHOD_SMS_CALLBACK, METHOD_SMS_SURVEY, METHOD_IVR_SURVEY]
and len(reminder.current_event.callback_timeout_intervals) > 0):
if reminder.skip_remaining_timeouts or reminder.callback_try_count >= len(reminder.current_event.callback_timeout_intervals):
if self.method == METHOD_SMS_SURVEY and self.submit_partial_forms and iteration > 1:
# This is to make sure we submit the unfinished forms even when fast-forwarding to the next event after system downtime
for session_id in reminder.xforms_session_ids:
submit_unfinished_form(session_id, self.include_case_side_effects)
else:
reminder.next_fire = reminder.next_fire + timedelta(minutes = reminder.current_event.callback_timeout_intervals[reminder.callback_try_count])
reminder.callback_try_count += 1
continue
# Move to the next event in the cycle
self.move_to_next_event(reminder)
# Set the next fire time
if self.event_interpretation == EVENT_AS_OFFSET:
# EVENT_AS_OFFSET
next_event = reminder.current_event
day_offset = next_event.day_num
if reminder.current_event_sequence_num == 0:
day_offset += self.schedule_length
time_offset = next_event.fire_time
reminder.next_fire += timedelta(days=day_offset, hours=time_offset.hour, minutes=time_offset.minute, seconds=time_offset.second)
else:
# EVENT_AS_SCHEDULE
reminder.next_fire = self.get_current_reminder_event_timestamp(reminder, recipient, case)
# Set whether or not the reminder should still be active
reminder.active = self.get_active(reminder, reminder.next_fire, case)
# Preserve the current next fire time since next_fire can be manipulated for error retries
reminder.last_scheduled_fire_time = reminder.next_fire
def recalculate_schedule(self, reminder, prev_definition=None):
"""
Recalculates which iteration / event number a schedule-based reminder should be on.
Only meant to be called on schedule-based reminders.
"""
if reminder.callback_try_count > 0 and prev_definition is not None and len(prev_definition.events) > reminder.current_event_sequence_num:
preserve_current_session_ids = True
old_form_unique_id = prev_definition.events[reminder.current_event_sequence_num].form_unique_id
old_xforms_session_ids = reminder.xforms_session_ids
else:
preserve_current_session_ids = False
case = reminder.case
reminder.last_fired = None
reminder.error_retry_count = 0
reminder.event_initiation_timestamp = None
reminder.active = True
reminder.schedule_iteration_num = 1
reminder.current_event_sequence_num = 0
reminder.callback_try_count = 0
reminder.skip_remaining_timeouts = False
reminder.last_scheduled_fire_time = None
reminder.xforms_session_ids = []
reminder.next_fire = self.get_current_reminder_event_timestamp(reminder, reminder.recipient, case)
reminder.active = self.get_active(reminder, reminder.next_fire, case)
self.set_next_fire(reminder, self.get_now())
if preserve_current_session_ids:
if reminder.callback_try_count > 0 and self.events[reminder.current_event_sequence_num].form_unique_id == old_form_unique_id and self.method == METHOD_SMS_SURVEY:
reminder.xforms_session_ids = old_xforms_session_ids
elif prev_definition is not None and prev_definition.submit_partial_forms:
for session_id in old_xforms_session_ids:
submit_unfinished_form(session_id, prev_definition.include_case_side_effects)
def get_active(self, reminder, now, case):
schedule_not_finished = not (self.max_iteration_count != REPEAT_SCHEDULE_INDEFINITELY and reminder.schedule_iteration_num > self.max_iteration_count)
if case is not None:
until_not_reached = (not self.condition_reached(case, self.until, now))
return until_not_reached and schedule_not_finished
else:
return schedule_not_finished
def should_fire(self, reminder, now):
return now > reminder.next_fire
def fire(self, reminder):
"""
Sends the message associated with the given CaseReminder's current event.
reminder The CaseReminder which to fire.
return True on success, False on failure
"""
# Prevent circular import
from .event_handlers import EVENT_HANDLER_MAP
if self.deleted():
reminder.retire()
return False
# Retrieve the list of individual recipients
recipient = reminder.recipient
if isinstance(recipient, list) and len(recipient) > 0:
recipients = recipient
elif isinstance(recipient, CouchUser) or isinstance(recipient, CommCareCase):
recipients = [recipient]
elif isinstance(recipient, Group):
recipients = recipient.get_users(is_active=True, only_commcare=False)
elif isinstance(recipient, CommCareCaseGroup):
recipients = [CommConnectCase.get(case_id) for case_id in recipient.cases]
else:
from corehq.apps.reminders.event_handlers import raise_error, ERROR_NO_RECIPIENTS
raise_error(reminder, ERROR_NO_RECIPIENTS)
return False
# Retrieve the corresponding verified number entries for all individual recipients
verified_numbers = {}
for r in recipients:
if hasattr(r, "get_verified_numbers"):
contact_verified_numbers = r.get_verified_numbers(False)
if len(contact_verified_numbers) > 0:
verified_number = sorted(contact_verified_numbers.iteritems())[0][1]
else:
verified_number = None
else:
verified_number = None
verified_numbers[r.get_id] = verified_number
# Set the event initiation timestamp if we're not on any timeouts
if reminder.callback_try_count == 0:
reminder.event_initiation_timestamp = self.get_now()
# Call the appropriate event handler
event_handler = EVENT_HANDLER_MAP.get(self.method)
last_fired = self.get_now() # Store the timestamp right before firing to ensure continuity in the callback lookups
result = event_handler(reminder, self, recipients, verified_numbers)
reminder.last_fired = last_fired
return result
@classmethod
def condition_reached(cls, case, case_property, now):
"""
Checks to see if the condition specified by case_property on case has been reached.
If case[case_property] is a timestamp and it is later than now, then the condition is reached.
If case[case_property] equals "ok", then the condition is reached.
case The CommCareCase to check.
case_property The property on CommCareCase to check.
now The timestamp to use when comparing, if case.case_property is a timestamp.
return True if the condition is reached, False if not.
"""
condition = get_case_property(case, case_property)
if isinstance(condition, datetime):
pass
elif isinstance(condition, date):
condition = datetime.combine(condition, time(0,0))
elif looks_like_timestamp(condition):
try:
condition = parse(condition)
except Exception:
pass
if isinstance(condition, datetime) and getattr(condition, "tzinfo") is not None:
condition = condition.astimezone(pytz.utc)
condition = condition.replace(tzinfo=None)
if (isinstance(condition, datetime) and now > condition) or is_true_value(condition):
return True
else:
return False
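    # Hedged illustration (added comment; the property name is
    # hypothetical): a typical 'until' check looks like
    #   CaseReminderHandler.condition_reached(case, "followup_1_complete",
    #                                         datetime.utcnow())
    # and returns True once the property equals "ok"/"OK" or holds a
    # timestamp earlier than now.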
def case_changed(self, case, now=None, schedule_changed=False, prev_definition=None):
key = "rule-update-definition-%s-case-%s" % (self._id, case._id)
with CriticalSection([key]):
self._case_changed(case, now, schedule_changed, prev_definition)
def _case_changed(self, case, now, schedule_changed, prev_definition):
"""
        This method is used to manage updates to CaseReminderHandlers whose start_condition_type == CASE_CRITERIA.
This method is also called every time a CommCareCase is saved and matches this
CaseReminderHandler's domain and case_type. It's used to check for the
"start" and "until" conditions in order to spawn or deactivate a CaseReminder
for the CommCareCase.
        case             The case that is being updated.
        now              The current date and time to use; if not specified, datetime.utcnow() is used.
        schedule_changed True if the handler's scheduling information has changed since the last save.
        prev_definition  The previous handler definition, used to recalculate the schedule when schedule_changed is True.
        return void
"""
now = now or self.get_now()
reminder = self.get_reminder(case)
if case and case.user_id and (case.user_id != case._id):
try:
user = CouchUser.get_by_user_id(case.user_id)
except KeyError:
user = None
else:
user = None
if (case.closed or case.type != self.case_type or
case.doc_type.endswith("-Deleted") or self.deleted() or
(self.recipient == RECIPIENT_USER and not user)):
if reminder:
reminder.retire()
else:
start_condition_reached = case_matches_criteria(case, self.start_match_type, self.start_property, self.start_value)
start_date = get_case_property(case, self.start_date)
if (not isinstance(start_date, date)) and not (isinstance(start_date, datetime)):
try:
start_date = parse(start_date)
except Exception:
start_date = None
if isinstance(start_date, datetime):
start_condition_datetime = start_date
start = start_date
elif isinstance(start_date, date):
start_condition_datetime = datetime(start_date.year, start_date.month, start_date.day, 0, 0, 0)
start = start_condition_datetime
else:
start_condition_datetime = None
start = now
# Retire the reminder if the start condition is no longer valid
if reminder is not None:
if not start_condition_reached:
# The start condition is no longer valid, so retire the reminder
reminder.retire()
reminder = None
elif reminder.start_condition_datetime != start_condition_datetime:
# The start date has changed, so retire the reminder and it will be spawned again in the next block
reminder.retire()
reminder = None
# Spawn a reminder if need be
just_spawned = False
if reminder is None:
if start_condition_reached:
reminder = self.spawn_reminder(case, start)
reminder.start_condition_datetime = start_condition_datetime
self.set_next_fire(reminder, now) # This will fast-forward to the next event that does not occur in the past
just_spawned = True
# Check to see if the reminder should still be active
if reminder is not None:
if schedule_changed and self.event_interpretation == EVENT_AS_SCHEDULE and not just_spawned:
self.recalculate_schedule(reminder, prev_definition)
else:
active = self.get_active(reminder, reminder.next_fire, case)
if active and not reminder.active:
reminder.active = True
self.set_next_fire(reminder, now) # This will fast-forward to the next event that does not occur in the past
else:
reminder.active = active
reminder.active = self.active and reminder.active
reminder.save()
def datetime_definition_changed(self, send_immediately=False):
"""
        This method is used to manage updates to CaseReminderHandlers whose start_condition_type == ON_DATETIME.
Set send_immediately to True to send the first event right away, regardless of whether it may occur in the past.
"""
reminder = CaseReminder.view('reminders/by_domain_handler_case',
startkey=[self.domain, self._id],
endkey=[self.domain, self._id, {}],
include_docs=True
).one()
now = self.get_now()
if self.recipient == RECIPIENT_SURVEY_SAMPLE:
recipient = CommCareCaseGroup.get(self.sample_id)
elif self.recipient == RECIPIENT_USER_GROUP:
recipient = Group.get(self.user_group_id)
elif self.recipient == RECIPIENT_USER:
recipient = CouchUser.get_by_user_id(self.user_id)
elif self.recipient == RECIPIENT_CASE:
recipient = CommCareCase.get(self.case_id)
else:
recipient = None
if reminder is not None and (reminder.start_condition_datetime != self.start_datetime or not self.active):
reminder.retire()
reminder = None
if reminder is None and self.active:
if self.recipient == RECIPIENT_CASE:
case = recipient
elif self.case_id is not None:
case = CommCareCase.get(self.case_id)
else:
case = None
reminder = self.spawn_reminder(case, self.start_datetime, recipient)
reminder.start_condition_datetime = self.start_datetime
if settings.REMINDERS_QUEUE_ENABLED:
reminder.save()
if send_immediately:
enqueue_reminder_directly(reminder)
else:
sent = False
if send_immediately:
try:
sent = self.fire(reminder)
except Exception:
# An exception could happen here, for example, if
# touchforms is down. So just pass, and let the reminder
# be saved below so that the framework will pick it up
# and try again.
notify_exception(None,
message="Error sending immediately for handler %s" %
self._id)
if sent or not send_immediately:
self.set_next_fire(reminder, now)
reminder.save()
def check_state(self):
"""
Double-checks the model for any inconsistencies and raises an
IllegalModelStateException if any exist.
"""
def check_attr(name, obj=self):
# don't allow None or empty string, but allow 0
if getattr(obj, name) in [None, ""]:
raise IllegalModelStateException("%s is required" % name)
if self.start_condition_type == CASE_CRITERIA:
check_attr("case_type")
check_attr("start_property")
check_attr("start_match_type")
if self.start_match_type != MATCH_ANY_VALUE:
check_attr("start_value")
if self.start_condition_type == ON_DATETIME:
check_attr("start_datetime")
if self.method == METHOD_SMS:
check_attr("default_lang")
check_attr("schedule_length")
check_attr("max_iteration_count")
check_attr("start_offset")
if len(self.events) == 0:
raise IllegalModelStateException("len(events) must be > 0")
last_day = 0
for event in self.events:
check_attr("day_num", obj=event)
if event.day_num < 0:
raise IllegalModelStateException("event.day_num must be "
"non-negative")
if event.fire_time_type in [FIRE_TIME_DEFAULT, FIRE_TIME_RANDOM]:
check_attr("fire_time", obj=event)
if event.fire_time_type == FIRE_TIME_RANDOM:
check_attr("time_window_length", obj=event)
if event.fire_time_type == FIRE_TIME_CASE_PROPERTY:
check_attr("fire_time_aux", obj=event)
if self.method == METHOD_SMS and not self.custom_content_handler:
if not isinstance(event.message, dict):
raise IllegalModelStateException("event.message expected "
"to be a dictionary")
if self.default_lang not in event.message:
raise IllegalModelStateException("default_lang missing "
"from event.message")
if self.method in [METHOD_SMS_SURVEY, METHOD_IVR_SURVEY]:
check_attr("form_unique_id", obj=event)
if not isinstance(event.callback_timeout_intervals, list):
raise IllegalModelStateException("event."
"callback_timeout_intervals expected to be a list")
last_day = event.day_num
if self.event_interpretation == EVENT_AS_SCHEDULE:
if self.schedule_length <= last_day:
raise IllegalModelStateException("schedule_length must be "
"greater than last event's day_num")
else:
if self.schedule_length < 0:
                raise IllegalModelStateException("schedule_length must be "
                    "non-negative")
if self.recipient == RECIPIENT_SUBCASE:
check_attr("recipient_case_match_property")
check_attr("recipient_case_match_type")
if self.recipient_case_match_type != MATCH_ANY_VALUE:
check_attr("recipient_case_match_value")
if (self.custom_content_handler and self.custom_content_handler not in
settings.ALLOWED_CUSTOM_CONTENT_HANDLERS):
raise IllegalModelStateException("unknown custom_content_handler")
self.check_min_tick()
def check_min_tick(self, minutes=60):
"""
For offset-based schedules that are repeated multiple times
intraday, makes sure that the events are separated by at least
the given number of minutes.
"""
if (self.event_interpretation == EVENT_AS_OFFSET and
self.max_iteration_count != 1 and self.schedule_length == 0):
minimum_tick = None
for e in self.events:
this_tick = timedelta(days=e.day_num, hours=e.fire_time.hour,
minutes=e.fire_time.minute)
if minimum_tick is None:
minimum_tick = this_tick
elif this_tick < minimum_tick:
minimum_tick = this_tick
if minimum_tick < timedelta(minutes=minutes):
raise IllegalModelStateException("Minimum tick for a schedule "
"repeated multiple times intraday is %s minutes." % minutes)
def save(self, **params):
from corehq.apps.reminders.tasks import process_reminder_rule
self.check_state()
schedule_changed = params.pop("schedule_changed", False)
prev_definition = params.pop("prev_definition", None)
send_immediately = params.pop("send_immediately", False)
unlock = params.pop("unlock", False)
self.last_modified = datetime.utcnow()
if unlock:
self.locked = False
else:
self.locked = True
super(CaseReminderHandler, self).save(**params)
delay = self.start_condition_type == CASE_CRITERIA
if not unlock:
if delay:
process_reminder_rule.delay(self, schedule_changed,
prev_definition, send_immediately)
else:
process_reminder_rule(self, schedule_changed,
prev_definition, send_immediately)
def process_rule(self, schedule_changed, prev_definition, send_immediately):
if not self.deleted():
if self.start_condition_type == CASE_CRITERIA:
case_ids = get_case_ids(self.domain)
try:
client = get_redis_client()
client.set("reminder-rule-processing-current-%s" % self._id,
0)
client.set("reminder-rule-processing-total-%s" % self._id,
len(case_ids))
except:
pass
process_fast(case_ids, run_rule, item_goal=100, max_threads=5,
args=(self, schedule_changed, prev_definition),
use_critical_section=False, print_stack_interval=60)
elif self.start_condition_type == ON_DATETIME:
self.datetime_definition_changed(send_immediately=send_immediately)
else:
reminder_ids = self.get_reminders(ids_only=True)
process_fast(reminder_ids, retire_reminder, item_goal=100,
max_threads=5, use_critical_section=False,
print_stack_interval=60)
@classmethod
def get_handlers(cls, domain, reminder_type_filter=None):
ids = cls.get_handler_ids(domain,
reminder_type_filter=reminder_type_filter)
return cls.get_handlers_from_ids(ids)
@classmethod
def get_handlers_from_ids(cls, ids):
return [
CaseReminderHandler.wrap(doc)
for doc in iter_docs(cls.get_db(), ids)
]
@classmethod
def get_handler_ids(cls, domain, reminder_type_filter=None):
result = cls.view('reminders/handlers_by_reminder_type',
startkey=[domain],
endkey=[domain, {}],
include_docs=False,
reduce=False,
)
def filter_fcn(reminder_type):
if reminder_type_filter is None:
return True
else:
return ((reminder_type or REMINDER_TYPE_DEFAULT) ==
reminder_type_filter)
return [
row['id'] for row in result
if filter_fcn(row['key'][1])
]
@classmethod
def get_referenced_forms(cls, domain):
handlers = cls.get_handlers(domain)
referenced_forms = [e.form_unique_id for events in [h.events for h in handlers] for e in events]
return filter(None, referenced_forms)
@classmethod
def get_all_reminders(cls, domain=None, due_before=None, ids_only=False):
if due_before:
now_json = json_format_datetime(due_before)
else:
now_json = {}
# domain=None will actually get them all, so this works smoothly
result = CaseReminder.view('reminders/by_next_fire',
startkey=[domain],
endkey=[domain, now_json],
include_docs=(not ids_only),
).all()
if ids_only:
return [entry["id"] for entry in result]
else:
return result
@classmethod
def fire_reminders(cls, now=None):
now = now or cls.get_now()
for reminder in cls.get_all_reminders(due_before=now):
if reminder.acquire_lock(now) and now >= reminder.next_fire:
handler = reminder.handler
if handler.fire(reminder):
handler.set_next_fire(reminder, now)
try:
reminder.save()
except ResourceConflict:
# Submitting a form updates the case, which can update the reminder.
# Grab the latest version of the reminder and set the next fire if it's still in use.
reminder = CaseReminder.get(reminder._id)
if not reminder.retired:
handler.set_next_fire(reminder, now)
reminder.save()
try:
reminder.release_lock()
except ResourceConflict:
# This should go away once we move the locking to Redis
reminder = CaseReminder.get(reminder._id)
reminder.release_lock()
def retire(self):
self.doc_type += "-Deleted"
self.save()
def deleted(self):
return self.doc_type != 'CaseReminderHandler'
class CaseReminder(SafeSaveDocument, LockableMixIn):
"""
Where the CaseReminderHandler is the rule and schedule for sending out reminders,
a CaseReminder is an instance of that rule as it is being applied to a specific
CommCareCase. A CaseReminder only applies to a single CommCareCase/CaseReminderHandler
interaction and is just a representation of the state of the rule in the lifecycle
of the CaseReminderHandler.
"""
domain = StringProperty() # Domain
last_modified = DateTimeProperty()
case_id = StringProperty() # Reference to the CommCareCase
handler_id = StringProperty() # Reference to the CaseReminderHandler
user_id = StringProperty() # Reference to the CouchUser who will receive the SMS messages
method = StringProperty(choices=METHOD_CHOICES) # See CaseReminderHandler.method
next_fire = DateTimeProperty() # The date and time that the next message should go out
last_fired = DateTimeProperty() # The date and time that the last message went out
active = BooleanProperty(default=False) # True if active, False if deactivated
start_date = DateProperty() # For CaseReminderHandlers with event_interpretation=SCHEDULE, this is the date (in the recipient's time zone) from which all event times are calculated
schedule_iteration_num = IntegerProperty() # The current iteration through the cycle of self.handler.events
current_event_sequence_num = IntegerProperty() # The current event number (index to self.handler.events)
callback_try_count = IntegerProperty() # Keeps track of the number of times a callback has timed out
skip_remaining_timeouts = BooleanProperty() # An event handling method can set this to True to skip the remaining timeout intervals for the current event
start_condition_datetime = DateTimeProperty() # The date and time matching the case property specified by the CaseReminderHandler.start_condition
sample_id = StringProperty()
xforms_session_ids = ListProperty(StringProperty)
error_retry_count = IntegerProperty(default=0)
last_scheduled_fire_time = DateTimeProperty()
event_initiation_timestamp = DateTimeProperty() # The date and time that the event was started (which is the same throughout all timeouts)
error = BooleanProperty(default=False)
error_msg = StringProperty()
@property
def handler(self):
return CaseReminderHandler.get(self.handler_id)
@property
def current_event(self):
return self.handler.events[self.current_event_sequence_num]
@property
def case(self):
if self.case_id is not None:
return CommCareCase.get(self.case_id)
else:
return None
@property
def user(self):
if self.handler.recipient == RECIPIENT_USER:
return CouchUser.get_by_user_id(self.user_id)
else:
return None
@property
def recipient(self):
try:
return self._recipient_lookup
except ResourceNotFound:
return None
@property
def _recipient_lookup(self):
handler = self.handler
if handler.recipient == RECIPIENT_USER:
return self.user
elif handler.recipient == RECIPIENT_CASE:
return CommConnectCase.get(self.case_id)
elif handler.recipient == RECIPIENT_SURVEY_SAMPLE:
return CommCareCaseGroup.get(self.sample_id)
elif handler.recipient == RECIPIENT_OWNER:
return get_wrapped_owner(get_owner_id(self.case))
elif handler.recipient == RECIPIENT_PARENT_CASE:
parent_case = None
case = self.case
if case is not None:
parent_case = case.parent
if parent_case is not None:
parent_case = CommConnectCase.wrap_as_commconnect_case(parent_case)
return parent_case
elif handler.recipient == RECIPIENT_SUBCASE:
indices = self.case.reverse_indices
recipients = []
for index in indices:
if index.identifier == "parent":
subcase = CommConnectCase.get(index.referenced_id)
if case_matches_criteria(subcase, handler.recipient_case_match_type, handler.recipient_case_match_property, handler.recipient_case_match_value):
recipients.append(subcase)
return recipients
elif handler.recipient == RECIPIENT_USER_GROUP:
return Group.get(handler.user_group_id)
else:
return None
@property
def retired(self):
return self.doc_type.endswith("-Deleted")
def save(self, *args, **kwargs):
self.last_modified = datetime.utcnow()
super(CaseReminder, self).save(*args, **kwargs)
def retire(self):
self.doc_type += "-Deleted"
self.save()
class SurveyKeywordAction(DocumentSchema):
recipient = StringProperty(choices=KEYWORD_RECIPIENT_CHOICES)
recipient_id = StringProperty()
action = StringProperty(choices=KEYWORD_ACTION_CHOICES)
# Only used for action == METHOD_SMS
message_content = StringProperty()
# Only used for action in [METHOD_SMS_SURVEY, METHOD_STRUCTURED_SMS]
form_unique_id = StringProperty()
# Only used for action == METHOD_STRUCTURED_SMS
use_named_args = BooleanProperty()
named_args = DictProperty() # Dictionary of {argument name in the sms (caps) : form question xpath}
named_args_separator = StringProperty() # Can be None in which case there is no separator (i.e., a100 b200)
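# Illustrative example (hypothetical message format and xpaths, not defined
# here): for an incoming SMS "REG N=JOE A=25", a METHOD_STRUCTURED_SMS action
# with use_named_args=True and named_args={"N": "/data/name", "A": "/data/age"}
# would map JOE and 25 onto those form questions.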
class SurveyKeyword(Document):
domain = StringProperty()
keyword = StringProperty()
description = StringProperty()
actions = SchemaListProperty(SurveyKeywordAction)
delimiter = StringProperty() # Only matters if this is a structured SMS: default is None, in which case the delimiter is any consecutive white space
override_open_sessions = BooleanProperty()
initiator_doc_type_filter = ListProperty(StringProperty) # List of doc types representing the only types of contacts who should be able to invoke this keyword. Empty list means anyone can invoke.
# Properties needed for migration and then can be removed
form_type = StringProperty(choices=FORM_TYPE_CHOICES, default=FORM_TYPE_ONE_BY_ONE)
form_unique_id = StringProperty()
use_named_args = BooleanProperty()
named_args = DictProperty()
named_args_separator = StringProperty()
oct13_migration_timestamp = DateTimeProperty()
def retire(self):
self.doc_type += "-Deleted"
self.save()
@property
def get_id(self):
return self._id
@classmethod
def get_all(cls, domain):
return cls.view("reminders/survey_keywords",
startkey=[domain],
endkey=[domain, {}],
include_docs=True,
reduce=False,
).all()
@classmethod
def get_keyword(cls, domain, keyword):
return cls.view("reminders/survey_keywords",
key = [domain, keyword.upper()],
include_docs=True,
reduce=False,
).one()
@classmethod
def get_by_domain(cls, domain, limit=None, skip=None, include_docs=True):
extra_kwargs = {}
if limit is not None:
extra_kwargs['limit'] = limit
if skip is not None:
extra_kwargs['skip'] = skip
return cls.view(
'reminders/survey_keywords',
startkey=[domain],
endkey=[domain, {}],
include_docs=include_docs,
reduce=False,
**extra_kwargs
).all()
class SurveySample(Document):
domain = StringProperty()
name = StringProperty()
contacts = ListProperty(StringProperty)
time_zone = StringProperty()
def get_time_zone(self):
return self.time_zone
@classmethod
def get_all(cls, domain):
return cls.view('reminders/sample_by_domain',
startkey=[domain],
endkey=[domain, {}],
include_docs=True
).all()
class SurveyWave(DocumentSchema):
date = DateProperty()
time = TimeProperty()
end_date = DateProperty()
form_id = StringProperty()
reminder_definitions = DictProperty() # Dictionary of CommCareCaseGroup._id : CaseReminderHandler._id
delegation_tasks = DictProperty() # Dictionary of {sample id : {contact id : delegation task id, ...}, ...}
def has_started(self, parent_survey_ref):
samples = [CommCareCaseGroup.get(sample["sample_id"]) for sample in parent_survey_ref.samples]
for sample in samples:
if CaseReminderHandler.timestamp_to_utc(sample, datetime.combine(self.date, self.time)) <= datetime.utcnow():
return True
return False
class Survey(Document):
domain = StringProperty()
name = StringProperty()
waves = SchemaListProperty(SurveyWave)
followups = ListProperty(DictProperty)
samples = ListProperty(DictProperty)
send_automatically = BooleanProperty()
send_followup = BooleanProperty()
@classmethod
def get_all(cls, domain):
return get_surveys_in_domain(domain)
def has_started(self):
for wave in self.waves:
if wave.has_started(self):
return True
return False
def update_delegation_tasks(self, submitting_user_id):
utcnow = datetime.utcnow()
# Get info about each CATI sample and the instance of that sample used for this survey
cati_sample_data = {}
for sample_json in self.samples:
if sample_json["method"] == "CATI":
sample_id = sample_json["sample_id"]
cati_sample_data[sample_id] = {
"sample_object": CommCareCaseGroup.get(sample_id),
"incentive" : sample_json["incentive"],
"cati_operator" : sample_json["cati_operator"],
}
for wave in self.waves:
if wave.has_started(self):
continue
# Close any tasks for samples that are no longer used, and for contacts that are no longer in the samples
for sample_id, tasks in wave.delegation_tasks.items():
if sample_id not in cati_sample_data:
for case_id, delegation_case_id in tasks.items():
close_task(self.domain, delegation_case_id, submitting_user_id)
del wave.delegation_tasks[sample_id]
else:
for case_id in list(set(tasks.keys()).difference(
cati_sample_data[sample_id]["sample_object"].cases)):
close_task(self.domain, tasks[case_id], submitting_user_id)
del wave.delegation_tasks[sample_id][case_id]
# Update / Create tasks for existing / new contacts
for sample_id, sample_data in cati_sample_data.items():
task_activation_datetime = CaseReminderHandler.timestamp_to_utc(sample_data["sample_object"], datetime.combine(wave.date, wave.time))
task_deactivation_datetime = CaseReminderHandler.timestamp_to_utc(sample_data["sample_object"], datetime.combine(wave.end_date, wave.time))
if sample_id not in wave.delegation_tasks:
wave.delegation_tasks[sample_id] = {}
for case_id in sample_data["sample_object"].cases:
wave.delegation_tasks[sample_id][case_id] = create_task(
CommCareCase.get(case_id),
submitting_user_id,
sample_data["cati_operator"],
wave.form_id,
task_activation_datetime,
task_deactivation_datetime,
sample_data["incentive"]
)
else:
for case_id in sample_data["sample_object"].cases:
delegation_case_id = wave.delegation_tasks[sample_id].get(case_id, None)
if delegation_case_id is None:
wave.delegation_tasks[sample_id][case_id] = create_task(
CommCareCase.get(case_id),
submitting_user_id,
sample_data["cati_operator"],
wave.form_id,
task_activation_datetime,
task_deactivation_datetime,
sample_data["incentive"]
)
else:
delegation_case = CommCareCase.get(delegation_case_id)
if (delegation_case.owner_id != sample_data["cati_operator"] or
delegation_case.get_case_property("start_date") != task_activation_datetime or
delegation_case.get_case_property("end_date") != task_deactivation_datetime or
delegation_case.get_case_property("form_id") != wave.form_id):
update_task(
self.domain,
delegation_case_id,
submitting_user_id,
sample_data["cati_operator"],
wave.form_id,
task_activation_datetime,
task_deactivation_datetime,
sample_data["incentive"]
)
from .signals import *
|
bsd-3-clause
| -1,628,130,193,549,512,200
| 42.947729
| 204
| 0.614621
| false
|
greggian/TapdIn
|
django/utils/_decimal.py
|
1
|
108522
|
# Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz <aahz at pobox.com>
# and Tim Peters
# This module is currently Py2.3 compatible and should be kept that way
# unless a major compelling advantage arises. IOW, 2.3 compatibility is
# strongly preferred, but not guaranteed.
# Also, this module should be kept in sync with the latest updates of
# the IBM specification as it evolves. Those updates will be treated
# as bug fixes (deviation from the spec is a compatibility, usability
# bug) and will be backported. At this point the spec is stabilizing
# and the updates are becoming fewer, smaller, and less significant.
"""
This is a Py2.3 implementation of decimal floating point arithmetic based on
the General Decimal Arithmetic Specification:
www2.hursley.ibm.com/decimal/decarith.html
and IEEE standard 854-1987:
www.cs.berkeley.edu/~ejr/projects/754/private/drafts/854-1987/dir.html
Decimal floating point has finite precision with arbitrarily large bounds.
The purpose of the module is to support arithmetic using familiar
"schoolhouse" rules and to avoid the some of tricky representation
issues associated with binary floating point. The package is especially
useful for financial applications or for contexts where users have
expectations that are at odds with binary floating point (for instance,
in binary floating point, 1.00 % 0.1 gives 0.09999999999999995 instead
of the expected Decimal("0.00") returned by decimal floating point).
Here are some examples of using the decimal module:
>>> from decimal import *
>>> setcontext(ExtendedContext)
>>> Decimal(0)
Decimal("0")
>>> Decimal("1")
Decimal("1")
>>> Decimal("-.0123")
Decimal("-0.0123")
>>> Decimal(123456)
Decimal("123456")
>>> Decimal("123.45e12345678901234567890")
Decimal("1.2345E+12345678901234567892")
>>> Decimal("1.33") + Decimal("1.27")
Decimal("2.60")
>>> Decimal("12.34") + Decimal("3.87") - Decimal("18.41")
Decimal("-2.20")
>>> dig = Decimal(1)
>>> print dig / Decimal(3)
0.333333333
>>> getcontext().prec = 18
>>> print dig / Decimal(3)
0.333333333333333333
>>> print dig.sqrt()
1
>>> print Decimal(3).sqrt()
1.73205080756887729
>>> print Decimal(3) ** 123
4.85192780976896427E+58
>>> inf = Decimal(1) / Decimal(0)
>>> print inf
Infinity
>>> neginf = Decimal(-1) / Decimal(0)
>>> print neginf
-Infinity
>>> print neginf + inf
NaN
>>> print neginf * inf
-Infinity
>>> print dig / 0
Infinity
>>> getcontext().traps[DivisionByZero] = 1
>>> print dig / 0
Traceback (most recent call last):
...
...
...
DivisionByZero: x / 0
>>> c = Context()
>>> c.traps[InvalidOperation] = 0
>>> print c.flags[InvalidOperation]
0
>>> c.divide(Decimal(0), Decimal(0))
Decimal("NaN")
>>> c.traps[InvalidOperation] = 1
>>> print c.flags[InvalidOperation]
1
>>> c.flags[InvalidOperation] = 0
>>> print c.flags[InvalidOperation]
0
>>> print c.divide(Decimal(0), Decimal(0))
Traceback (most recent call last):
...
...
...
InvalidOperation: 0 / 0
>>> print c.flags[InvalidOperation]
1
>>> c.flags[InvalidOperation] = 0
>>> c.traps[InvalidOperation] = 0
>>> print c.divide(Decimal(0), Decimal(0))
NaN
>>> print c.flags[InvalidOperation]
1
>>>
"""
__all__ = [
# Two major classes
'Decimal', 'Context',
# Contexts
'DefaultContext', 'BasicContext', 'ExtendedContext',
# Exceptions
'DecimalException', 'Clamped', 'InvalidOperation', 'DivisionByZero',
'Inexact', 'Rounded', 'Subnormal', 'Overflow', 'Underflow',
# Constants for use in setting up contexts
'ROUND_DOWN', 'ROUND_HALF_UP', 'ROUND_HALF_EVEN', 'ROUND_CEILING',
'ROUND_FLOOR', 'ROUND_UP', 'ROUND_HALF_DOWN',
# Functions for manipulating contexts
'setcontext', 'getcontext'
]
import copy as _copy
#Rounding
ROUND_DOWN = 'ROUND_DOWN'
ROUND_HALF_UP = 'ROUND_HALF_UP'
ROUND_HALF_EVEN = 'ROUND_HALF_EVEN'
ROUND_CEILING = 'ROUND_CEILING'
ROUND_FLOOR = 'ROUND_FLOOR'
ROUND_UP = 'ROUND_UP'
ROUND_HALF_DOWN = 'ROUND_HALF_DOWN'
#Rounding decision (not part of the public API)
NEVER_ROUND = 'NEVER_ROUND' # Round in division (non-divmod), sqrt ONLY
ALWAYS_ROUND = 'ALWAYS_ROUND' # Every operation rounds at end.
#Errors
class DecimalException(ArithmeticError):
"""Base exception class.
Used exceptions derive from this.
    If an exception derives from another exception besides this one (such as
    Underflow, which derives from Inexact, Rounded, and Subnormal), that
    indicates that it is only called if the others are present. This isn't
    actually used for anything, though.
handle -- Called when context._raise_error is called and the
trap_enabler is set. First argument is self, second is the
context. More arguments can be given, those being after
the explanation in _raise_error (For example,
context._raise_error(NewError, '(-x)!', self._sign) would
call NewError().handle(context, self._sign).)
To define a new exception, it should be sufficient to have it derive
from DecimalException.
"""
def handle(self, context, *args):
pass
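# A minimal sketch of the protocol described above (hypothetical condition,
# not part of this module):
#
#     class NegativeFactorial(DecimalException):
#         def handle(self, context, *args):
#             return NaN
#
# context._raise_error(NegativeFactorial, '(-x)!') would then raise
# NegativeFactorial if its trap is enabled, or return NaN otherwise.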
class Clamped(DecimalException):
"""Exponent of a 0 changed to fit bounds.
This occurs and signals clamped if the exponent of a result has been
altered in order to fit the constraints of a specific concrete
representation. This may occur when the exponent of a zero result would
be outside the bounds of a representation, or when a large normal
number would have an encoded exponent that cannot be represented. In
this latter case, the exponent is reduced to fit and the corresponding
number of zero digits are appended to the coefficient ("fold-down").
"""
class InvalidOperation(DecimalException):
"""An invalid operation was performed.
Various bad things cause this:
Something creates a signaling NaN
-INF + INF
0 * (+-)INF
(+-)INF / (+-)INF
x % 0
(+-)INF % x
x._rescale( non-integer )
sqrt(-x) , x > 0
0 ** 0
x ** (non-integer)
x ** (+-)INF
An operand is invalid
"""
def handle(self, context, *args):
if args:
if args[0] == 1: #sNaN, must drop 's' but keep diagnostics
return Decimal( (args[1]._sign, args[1]._int, 'n') )
return NaN
class ConversionSyntax(InvalidOperation):
"""Trying to convert badly formed string.
    This occurs and signals invalid-operation if a string is being
converted to a number and it does not conform to the numeric string
syntax. The result is [0,qNaN].
"""
def handle(self, context, *args):
return (0, (0,), 'n') #Passed to something which uses a tuple.
class DivisionByZero(DecimalException, ZeroDivisionError):
"""Division by 0.
This occurs and signals division-by-zero if division of a finite number
by zero was attempted (during a divide-integer or divide operation, or a
power operation with negative right-hand operand), and the dividend was
not zero.
The result of the operation is [sign,inf], where sign is the exclusive
or of the signs of the operands for divide, or is 1 for an odd power of
-0, for power.
"""
def handle(self, context, sign, double = None, *args):
if double is not None:
return (Infsign[sign],)*2
return Infsign[sign]
class DivisionImpossible(InvalidOperation):
"""Cannot perform the division adequately.
This occurs and signals invalid-operation if the integer result of a
divide-integer or remainder operation had too many digits (would be
longer than precision). The result is [0,qNaN].
"""
def handle(self, context, *args):
return (NaN, NaN)
class DivisionUndefined(InvalidOperation, ZeroDivisionError):
"""Undefined result of division.
This occurs and signals invalid-operation if division by zero was
attempted (during a divide-integer, divide, or remainder operation), and
the dividend is also zero. The result is [0,qNaN].
"""
def handle(self, context, tup=None, *args):
if tup is not None:
            return (NaN, NaN) # for 0 % 0, 0 // 0
return NaN
class Inexact(DecimalException):
"""Had to round, losing information.
This occurs and signals inexact whenever the result of an operation is
not exact (that is, it needed to be rounded and any discarded digits
were non-zero), or if an overflow or underflow condition occurs. The
result in all cases is unchanged.
The inexact signal may be tested (or trapped) to determine if a given
operation (or sequence of operations) was inexact.
"""
pass
class InvalidContext(InvalidOperation):
"""Invalid context. Unknown rounding, for example.
This occurs and signals invalid-operation if an invalid context was
detected during an operation. This can occur if contexts are not checked
on creation and either the precision exceeds the capability of the
underlying concrete representation or an unknown or unsupported rounding
was specified. These aspects of the context need only be checked when
the values are required to be used. The result is [0,qNaN].
"""
def handle(self, context, *args):
return NaN
class Rounded(DecimalException):
"""Number got rounded (not necessarily changed during rounding).
This occurs and signals rounded whenever the result of an operation is
rounded (that is, some zero or non-zero digits were discarded from the
coefficient), or if an overflow or underflow condition occurs. The
result in all cases is unchanged.
The rounded signal may be tested (or trapped) to determine if a given
operation (or sequence of operations) caused a loss of precision.
"""
pass
class Subnormal(DecimalException):
"""Exponent < Emin before rounding.
This occurs and signals subnormal whenever the result of a conversion or
operation is subnormal (that is, its adjusted exponent is less than
Emin, before any rounding). The result in all cases is unchanged.
    The subnormal signal may be tested (or trapped) to determine if a given
    operation (or sequence of operations) yielded a subnormal result.
"""
pass
class Overflow(Inexact, Rounded):
"""Numerical overflow.
This occurs and signals overflow if the adjusted exponent of a result
(from a conversion or from an operation that is not an attempt to divide
by zero), after rounding, would be greater than the largest value that
can be handled by the implementation (the value Emax).
The result depends on the rounding mode:
For round-half-up and round-half-even (and for round-half-down and
round-up, if implemented), the result of the operation is [sign,inf],
where sign is the sign of the intermediate result. For round-down, the
result is the largest finite number that can be represented in the
current precision, with the sign of the intermediate result. For
round-ceiling, the result is the same as for round-down if the sign of
the intermediate result is 1, or is [0,inf] otherwise. For round-floor,
the result is the same as for round-down if the sign of the intermediate
result is 0, or is [1,inf] otherwise. In all cases, Inexact and Rounded
will also be raised.
"""
def handle(self, context, sign, *args):
if context.rounding in (ROUND_HALF_UP, ROUND_HALF_EVEN,
ROUND_HALF_DOWN, ROUND_UP):
return Infsign[sign]
if sign == 0:
if context.rounding == ROUND_CEILING:
return Infsign[sign]
return Decimal((sign, (9,)*context.prec,
context.Emax-context.prec+1))
if sign == 1:
if context.rounding == ROUND_FLOOR:
return Infsign[sign]
return Decimal( (sign, (9,)*context.prec,
context.Emax-context.prec+1))
class Underflow(Inexact, Rounded, Subnormal):
"""Numerical underflow with result rounded to 0.
This occurs and signals underflow if a result is inexact and the
adjusted exponent of the result would be smaller (more negative) than
the smallest value that can be handled by the implementation (the value
Emin). That is, the result is both inexact and subnormal.
The result after an underflow will be a subnormal number rounded, if
necessary, so that its exponent is not less than Etiny. This may result
in 0 with the sign of the intermediate result and an exponent of Etiny.
In all cases, Inexact, Rounded, and Subnormal will also be raised.
"""
# List of public traps and flags
_signals = [Clamped, DivisionByZero, Inexact, Overflow, Rounded,
Underflow, InvalidOperation, Subnormal]
# Map conditions (per the spec) to signals
_condition_map = {ConversionSyntax:InvalidOperation,
DivisionImpossible:InvalidOperation,
DivisionUndefined:InvalidOperation,
InvalidContext:InvalidOperation}
##### Context Functions #######################################
# The getcontext() and setcontext() functions manage access to a thread-local
# current context. Py2.4 offers direct support for thread locals. If that
# is not available, use threading.currentThread() which is slower but will
# work for older Pythons. If threads are not part of the build, create a
# mock threading object with threading.local() returning the module namespace.
try:
import threading
except ImportError:
# Python was compiled without threads; create a mock object instead
import sys
class MockThreading:
def local(self, sys=sys):
return sys.modules[__name__]
threading = MockThreading()
del sys, MockThreading
try:
threading.local
except AttributeError:
#To fix reloading, force it to create a new context
    #Old contexts have different exceptions in their dicts, causing problems.
if hasattr(threading.currentThread(), '__decimal_context__'):
del threading.currentThread().__decimal_context__
def setcontext(context):
"""Set this thread's context to context."""
if context in (DefaultContext, BasicContext, ExtendedContext):
context = context.copy()
context.clear_flags()
threading.currentThread().__decimal_context__ = context
def getcontext():
"""Returns this thread's context.
If this thread does not yet have a context, returns
a new context and sets this thread's context.
New contexts are copies of DefaultContext.
"""
try:
return threading.currentThread().__decimal_context__
except AttributeError:
context = Context()
threading.currentThread().__decimal_context__ = context
return context
else:
local = threading.local()
if hasattr(local, '__decimal_context__'):
del local.__decimal_context__
def getcontext(_local=local):
"""Returns this thread's context.
If this thread does not yet have a context, returns
a new context and sets this thread's context.
New contexts are copies of DefaultContext.
"""
try:
return _local.__decimal_context__
except AttributeError:
context = Context()
_local.__decimal_context__ = context
return context
def setcontext(context, _local=local):
"""Set this thread's context to context."""
if context in (DefaultContext, BasicContext, ExtendedContext):
context = context.copy()
context.clear_flags()
_local.__decimal_context__ = context
del threading, local # Don't contaminate the namespace
##### Decimal class ###########################################
class Decimal(object):
"""Floating point class for decimal arithmetic."""
__slots__ = ('_exp','_int','_sign', '_is_special')
# Generally, the value of the Decimal instance is given by
# (-1)**_sign * _int * 10**_exp
# Special values are signified by _is_special == True
# We're immutable, so use __new__ not __init__
def __new__(cls, value="0", context=None):
"""Create a decimal point instance.
>>> Decimal('3.14') # string input
Decimal("3.14")
>>> Decimal((0, (3, 1, 4), -2)) # tuple input (sign, digit_tuple, exponent)
Decimal("3.14")
>>> Decimal(314) # int or long
Decimal("314")
>>> Decimal(Decimal(314)) # another decimal instance
Decimal("314")
"""
self = object.__new__(cls)
self._is_special = False
# From an internal working value
if isinstance(value, _WorkRep):
self._sign = value.sign
self._int = tuple(map(int, str(value.int)))
self._exp = int(value.exp)
return self
# From another decimal
if isinstance(value, Decimal):
self._exp = value._exp
self._sign = value._sign
self._int = value._int
self._is_special = value._is_special
return self
# From an integer
if isinstance(value, (int,long)):
if value >= 0:
self._sign = 0
else:
self._sign = 1
self._exp = 0
self._int = tuple(map(int, str(abs(value))))
return self
# tuple/list conversion (possibly from as_tuple())
if isinstance(value, (list,tuple)):
if len(value) != 3:
raise ValueError, 'Invalid arguments'
if value[0] not in (0,1):
raise ValueError, 'Invalid sign'
for digit in value[1]:
if not isinstance(digit, (int,long)) or digit < 0:
raise ValueError, "The second value in the tuple must be composed of non negative integer elements."
self._sign = value[0]
self._int = tuple(value[1])
if value[2] in ('F','n','N'):
self._exp = value[2]
self._is_special = True
else:
self._exp = int(value[2])
return self
if isinstance(value, float):
raise TypeError("Cannot convert float to Decimal. " +
"First convert the float to a string")
# Other argument types may require the context during interpretation
if context is None:
context = getcontext()
# From a string
# REs insist on real strings, so we can too.
if isinstance(value, basestring):
if _isinfinity(value):
self._exp = 'F'
self._int = (0,)
self._is_special = True
if _isinfinity(value) == 1:
self._sign = 0
else:
self._sign = 1
return self
if _isnan(value):
sig, sign, diag = _isnan(value)
self._is_special = True
if len(diag) > context.prec: #Diagnostic info too long
self._sign, self._int, self._exp = \
context._raise_error(ConversionSyntax)
return self
if sig == 1:
self._exp = 'n' #qNaN
else: #sig == 2
self._exp = 'N' #sNaN
self._sign = sign
self._int = tuple(map(int, diag)) #Diagnostic info
return self
try:
self._sign, self._int, self._exp = _string2exact(value)
except ValueError:
self._is_special = True
self._sign, self._int, self._exp = context._raise_error(ConversionSyntax)
return self
raise TypeError("Cannot convert %r to Decimal" % value)
def _isnan(self):
"""Returns whether the number is not actually one.
0 if a number
1 if NaN
2 if sNaN
"""
if self._is_special:
exp = self._exp
if exp == 'n':
return 1
elif exp == 'N':
return 2
return 0
def _isinfinity(self):
"""Returns whether the number is infinite
0 if finite or not a number
1 if +INF
-1 if -INF
"""
if self._exp == 'F':
if self._sign:
return -1
return 1
return 0
def _check_nans(self, other = None, context=None):
"""Returns whether the number is not actually one.
if self, other are sNaN, signal
if self, other are NaN return nan
return 0
Done before operations.
"""
self_is_nan = self._isnan()
if other is None:
other_is_nan = False
else:
other_is_nan = other._isnan()
if self_is_nan or other_is_nan:
if context is None:
context = getcontext()
if self_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
1, self)
if other_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
1, other)
if self_is_nan:
return self
return other
return 0
def __nonzero__(self):
"""Is the number non-zero?
0 if self == 0
1 if self != 0
"""
if self._is_special:
return 1
return sum(self._int) != 0
def __cmp__(self, other, context=None):
other = _convert_other(other)
if other is NotImplemented:
return other
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return 1 # Comparison involving NaN's always reports self > other
# INF = INF
return cmp(self._isinfinity(), other._isinfinity())
if not self and not other:
return 0 #If both 0, sign comparison isn't certain.
#If different signs, neg one is less
if other._sign < self._sign:
return -1
if self._sign < other._sign:
return 1
self_adjusted = self.adjusted()
other_adjusted = other.adjusted()
if self_adjusted == other_adjusted and \
self._int + (0,)*(self._exp - other._exp) == \
other._int + (0,)*(other._exp - self._exp):
return 0 #equal, except in precision. ([0]*(-x) = [])
elif self_adjusted > other_adjusted and self._int[0] != 0:
return (-1)**self._sign
elif self_adjusted < other_adjusted and other._int[0] != 0:
return -((-1)**self._sign)
# Need to round, so make sure we have a valid context
if context is None:
context = getcontext()
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_UP) #round away from 0
flags = context._ignore_all_flags()
res = self.__sub__(other, context=context)
context._regard_flags(*flags)
context.rounding = rounding
if not res:
return 0
elif res._sign:
return -1
return 1
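    # e.g. Decimal("1.0") and Decimal("1.00") compare equal above: their
    # adjusted exponents match and the zero-padded coefficients are identical.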
def __eq__(self, other):
if not isinstance(other, (Decimal, int, long)):
return NotImplemented
return self.__cmp__(other) == 0
def __ne__(self, other):
if not isinstance(other, (Decimal, int, long)):
return NotImplemented
return self.__cmp__(other) != 0
def compare(self, other, context=None):
"""Compares one to another.
-1 => a < b
0 => a = b
1 => a > b
NaN => one is NaN
Like __cmp__, but returns Decimal instances.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
#compare(NaN, NaN) = NaN
if (self._is_special or other and other._is_special):
ans = self._check_nans(other, context)
if ans:
return ans
return Decimal(self.__cmp__(other, context))
def __hash__(self):
"""x.__hash__() <==> hash(x)"""
# Decimal integers must hash the same as the ints
# Non-integer decimals are normalized and hashed as strings
        # Normalization assures that hash(100E-1) == hash(10)
if self._is_special:
if self._isnan():
raise TypeError('Cannot hash a NaN value.')
return hash(str(self))
i = int(self)
if self == Decimal(i):
return hash(i)
assert self.__nonzero__() # '-0' handled by integer case
return hash(str(self.normalize()))
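    # e.g. hash(Decimal(42)) == hash(42) via the integer branch above, while
    # non-integers such as Decimal("1.10") hash via their normalized string.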
def as_tuple(self):
"""Represents the number as a triple tuple.
To show the internals exactly as they are.
"""
return (self._sign, self._int, self._exp)
def __repr__(self):
"""Represents the number as an instance of Decimal."""
# Invariant: eval(repr(d)) == d
return 'Decimal("%s")' % str(self)
def __str__(self, eng = 0, context=None):
"""Return string representation of the number in scientific notation.
Captures all of the information in the underlying representation.
"""
if self._is_special:
if self._isnan():
minus = '-'*self._sign
if self._int == (0,):
info = ''
else:
info = ''.join(map(str, self._int))
if self._isnan() == 2:
return minus + 'sNaN' + info
return minus + 'NaN' + info
if self._isinfinity():
minus = '-'*self._sign
return minus + 'Infinity'
if context is None:
context = getcontext()
tmp = map(str, self._int)
numdigits = len(self._int)
leftdigits = self._exp + numdigits
if eng and not self: #self = 0eX wants 0[.0[0]]eY, not [[0]0]0eY
if self._exp < 0 and self._exp >= -6: #short, no need for e/E
s = '-'*self._sign + '0.' + '0'*(abs(self._exp))
return s
#exp is closest mult. of 3 >= self._exp
exp = ((self._exp - 1)// 3 + 1) * 3
if exp != self._exp:
s = '0.'+'0'*(exp - self._exp)
else:
s = '0'
if exp != 0:
if context.capitals:
s += 'E'
else:
s += 'e'
if exp > 0:
s += '+' #0.0e+3, not 0.0e3
s += str(exp)
s = '-'*self._sign + s
return s
if eng:
dotplace = (leftdigits-1)%3+1
adjexp = leftdigits -1 - (leftdigits-1)%3
else:
adjexp = leftdigits-1
dotplace = 1
if self._exp == 0:
pass
elif self._exp < 0 and adjexp >= 0:
tmp.insert(leftdigits, '.')
elif self._exp < 0 and adjexp >= -6:
tmp[0:0] = ['0'] * int(-leftdigits)
tmp.insert(0, '0.')
else:
if numdigits > dotplace:
tmp.insert(dotplace, '.')
elif numdigits < dotplace:
tmp.extend(['0']*(dotplace-numdigits))
if adjexp:
if not context.capitals:
tmp.append('e')
else:
tmp.append('E')
if adjexp > 0:
tmp.append('+')
tmp.append(str(adjexp))
if eng:
while tmp[0:1] == ['0']:
tmp[0:1] = []
if len(tmp) == 0 or tmp[0] == '.' or tmp[0].lower() == 'e':
tmp[0:0] = ['0']
if self._sign:
tmp.insert(0, '-')
return ''.join(tmp)
def to_eng_string(self, context=None):
"""Convert to engineering-type string.
Engineering notation has an exponent which is a multiple of 3, so there
are up to 3 digits left of the decimal place.
Same rules for when in exponential and when as a value as in __str__.
"""
return self.__str__(eng=1, context=context)
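    # e.g. Decimal("123456").to_eng_string() == "123.456E+3": the exponent is
    # a multiple of 3, with up to three digits before the decimal point.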
def __neg__(self, context=None):
"""Returns a copy with the sign switched.
Rounds, if it has reason.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if not self:
# -Decimal('0') is Decimal('0'), not Decimal('-0')
sign = 0
elif self._sign:
sign = 0
else:
sign = 1
if context is None:
context = getcontext()
if context._rounding_decision == ALWAYS_ROUND:
return Decimal((sign, self._int, self._exp))._fix(context)
return Decimal( (sign, self._int, self._exp))
def __pos__(self, context=None):
"""Returns a copy, unless it is a sNaN.
        Rounds the number (if it has more than precision digits)
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
sign = self._sign
if not self:
# + (-0) = 0
sign = 0
if context is None:
context = getcontext()
if context._rounding_decision == ALWAYS_ROUND:
ans = self._fix(context)
else:
ans = Decimal(self)
ans._sign = sign
return ans
def __abs__(self, round=1, context=None):
"""Returns the absolute value of self.
If the second argument is 0, do not round.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if not round:
if context is None:
context = getcontext()
context = context._shallow_copy()
context._set_rounding_decision(NEVER_ROUND)
if self._sign:
ans = self.__neg__(context=context)
else:
ans = self.__pos__(context=context)
return ans
def __add__(self, other, context=None):
"""Returns self + other.
-INF + INF (or the reverse) cause InvalidOperation errors.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
#If both INF, same sign => same as both, opposite => error.
if self._sign != other._sign and other._isinfinity():
return context._raise_error(InvalidOperation, '-INF + INF')
return Decimal(self)
if other._isinfinity():
return Decimal(other) #Can't both be infinity here
shouldround = context._rounding_decision == ALWAYS_ROUND
exp = min(self._exp, other._exp)
negativezero = 0
if context.rounding == ROUND_FLOOR and self._sign != other._sign:
#If the answer is 0, the sign should be negative, in this case.
negativezero = 1
if not self and not other:
sign = min(self._sign, other._sign)
if negativezero:
sign = 1
return Decimal( (sign, (0,), exp))
if not self:
exp = max(exp, other._exp - context.prec-1)
ans = other._rescale(exp, watchexp=0, context=context)
if shouldround:
ans = ans._fix(context)
return ans
if not other:
exp = max(exp, self._exp - context.prec-1)
ans = self._rescale(exp, watchexp=0, context=context)
if shouldround:
ans = ans._fix(context)
return ans
op1 = _WorkRep(self)
op2 = _WorkRep(other)
op1, op2 = _normalize(op1, op2, shouldround, context.prec)
result = _WorkRep()
if op1.sign != op2.sign:
# Equal and opposite
if op1.int == op2.int:
if exp < context.Etiny():
exp = context.Etiny()
context._raise_error(Clamped)
return Decimal((negativezero, (0,), exp))
if op1.int < op2.int:
op1, op2 = op2, op1
#OK, now abs(op1) > abs(op2)
if op1.sign == 1:
result.sign = 1
op1.sign, op2.sign = op2.sign, op1.sign
else:
result.sign = 0
#So we know the sign, and op1 > 0.
elif op1.sign == 1:
result.sign = 1
op1.sign, op2.sign = (0, 0)
else:
result.sign = 0
#Now, op1 > abs(op2) > 0
if op2.sign == 0:
result.int = op1.int + op2.int
else:
result.int = op1.int - op2.int
result.exp = op1.exp
ans = Decimal(result)
if shouldround:
ans = ans._fix(context)
return ans
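    # e.g. Decimal("1.3") + Decimal("1.27"): _normalize aligns both operands
    # to exponent -2 (coefficients 130 and 127), giving Decimal("2.57").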
__radd__ = __add__
def __sub__(self, other, context=None):
"""Return self + (-other)"""
other = _convert_other(other)
if other is NotImplemented:
return other
if self._is_special or other._is_special:
ans = self._check_nans(other, context=context)
if ans:
return ans
# -Decimal(0) = Decimal(0), which we don't want since
# (-0 - 0 = -0 + (-0) = -0, but -0 + 0 = 0.)
# so we change the sign directly to a copy
tmp = Decimal(other)
tmp._sign = 1-tmp._sign
return self.__add__(tmp, context=context)
def __rsub__(self, other, context=None):
"""Return other + (-self)"""
other = _convert_other(other)
if other is NotImplemented:
return other
tmp = Decimal(self)
tmp._sign = 1 - tmp._sign
return other.__add__(tmp, context=context)
def _increment(self, round=1, context=None):
"""Special case of add, adding 1eExponent
Since it is common, (rounding, for example) this adds
(sign)*one E self._exp to the number more efficiently than add.
For example:
Decimal('5.624e10')._increment() == Decimal('5.625e10')
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
return Decimal(self) # Must be infinite, and incrementing makes no difference
L = list(self._int)
L[-1] += 1
spot = len(L)-1
while L[spot] == 10:
L[spot] = 0
if spot == 0:
L[0:0] = [1]
break
L[spot-1] += 1
spot -= 1
ans = Decimal((self._sign, L, self._exp))
if context is None:
context = getcontext()
if round and context._rounding_decision == ALWAYS_ROUND:
ans = ans._fix(context)
return ans
def __mul__(self, other, context=None):
"""Return self * other.
(+-) INF * 0 (or its reverse) raise InvalidOperation.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
resultsign = self._sign ^ other._sign
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
if not other:
return context._raise_error(InvalidOperation, '(+-)INF * 0')
return Infsign[resultsign]
if other._isinfinity():
if not self:
return context._raise_error(InvalidOperation, '0 * (+-)INF')
return Infsign[resultsign]
resultexp = self._exp + other._exp
shouldround = context._rounding_decision == ALWAYS_ROUND
# Special case for multiplying by zero
if not self or not other:
ans = Decimal((resultsign, (0,), resultexp))
if shouldround:
#Fixing in case the exponent is out of bounds
ans = ans._fix(context)
return ans
# Special case for multiplying by power of 10
if self._int == (1,):
ans = Decimal((resultsign, other._int, resultexp))
if shouldround:
ans = ans._fix(context)
return ans
if other._int == (1,):
ans = Decimal((resultsign, self._int, resultexp))
if shouldround:
ans = ans._fix(context)
return ans
op1 = _WorkRep(self)
op2 = _WorkRep(other)
ans = Decimal( (resultsign, map(int, str(op1.int * op2.int)), resultexp))
if shouldround:
ans = ans._fix(context)
return ans
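    # e.g. Decimal("1.2") * Decimal("0.03"): coefficients 12 * 3 = 36 with
    # exponent -1 + -2 = -3, giving Decimal("0.036") before rounding.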
__rmul__ = __mul__
def __div__(self, other, context=None):
"""Return self / other."""
return self._divide(other, context=context)
__truediv__ = __div__
def _divide(self, other, divmod = 0, context=None):
"""Return a / b, to context.prec precision.
divmod:
0 => true division
1 => (a //b, a%b)
2 => a //b
3 => a%b
Actually, if divmod is 2 or 3 a tuple is returned, but errors for
computing the other value are not raised.
"""
other = _convert_other(other)
if other is NotImplemented:
if divmod in (0, 1):
return NotImplemented
return (NotImplemented, NotImplemented)
if context is None:
context = getcontext()
sign = self._sign ^ other._sign
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
if divmod:
return (ans, ans)
return ans
if self._isinfinity() and other._isinfinity():
if divmod:
return (context._raise_error(InvalidOperation,
'(+-)INF // (+-)INF'),
context._raise_error(InvalidOperation,
'(+-)INF % (+-)INF'))
return context._raise_error(InvalidOperation, '(+-)INF/(+-)INF')
if self._isinfinity():
if divmod == 1:
return (Infsign[sign],
context._raise_error(InvalidOperation, 'INF % x'))
elif divmod == 2:
return (Infsign[sign], NaN)
elif divmod == 3:
return (Infsign[sign],
context._raise_error(InvalidOperation, 'INF % x'))
return Infsign[sign]
if other._isinfinity():
if divmod:
return (Decimal((sign, (0,), 0)), Decimal(self))
context._raise_error(Clamped, 'Division by infinity')
return Decimal((sign, (0,), context.Etiny()))
# Special cases for zeroes
if not self and not other:
if divmod:
return context._raise_error(DivisionUndefined, '0 / 0', 1)
return context._raise_error(DivisionUndefined, '0 / 0')
if not self:
if divmod:
otherside = Decimal(self)
otherside._exp = min(self._exp, other._exp)
return (Decimal((sign, (0,), 0)), otherside)
exp = self._exp - other._exp
if exp < context.Etiny():
exp = context.Etiny()
context._raise_error(Clamped, '0e-x / y')
if exp > context.Emax:
exp = context.Emax
context._raise_error(Clamped, '0e+x / y')
return Decimal( (sign, (0,), exp) )
if not other:
if divmod:
return context._raise_error(DivisionByZero, 'divmod(x,0)',
sign, 1)
return context._raise_error(DivisionByZero, 'x / 0', sign)
#OK, so neither = 0, INF or NaN
shouldround = context._rounding_decision == ALWAYS_ROUND
#If we're dividing into ints, and self < other, stop.
#self.__abs__(0) does not round.
if divmod and (self.__abs__(0, context) < other.__abs__(0, context)):
if divmod == 1 or divmod == 3:
exp = min(self._exp, other._exp)
ans2 = self._rescale(exp, context=context, watchexp=0)
if shouldround:
ans2 = ans2._fix(context)
return (Decimal( (sign, (0,), 0) ),
ans2)
elif divmod == 2:
#Don't round the mod part, if we don't need it.
return (Decimal( (sign, (0,), 0) ), Decimal(self))
op1 = _WorkRep(self)
op2 = _WorkRep(other)
op1, op2, adjust = _adjust_coefficients(op1, op2)
res = _WorkRep( (sign, 0, (op1.exp - op2.exp)) )
if divmod and res.exp > context.prec + 1:
return context._raise_error(DivisionImpossible)
prec_limit = 10 ** context.prec
while 1:
while op2.int <= op1.int:
res.int += 1
op1.int -= op2.int
if res.exp == 0 and divmod:
if res.int >= prec_limit and shouldround:
return context._raise_error(DivisionImpossible)
otherside = Decimal(op1)
frozen = context._ignore_all_flags()
exp = min(self._exp, other._exp)
otherside = otherside._rescale(exp, context=context, watchexp=0)
context._regard_flags(*frozen)
if shouldround:
otherside = otherside._fix(context)
return (Decimal(res), otherside)
if op1.int == 0 and adjust >= 0 and not divmod:
break
if res.int >= prec_limit and shouldround:
if divmod:
return context._raise_error(DivisionImpossible)
shouldround=1
# Really, the answer is a bit higher, so adding a one to
# the end will make sure the rounding is right.
if op1.int != 0:
res.int *= 10
res.int += 1
res.exp -= 1
break
res.int *= 10
res.exp -= 1
adjust += 1
op1.int *= 10
op1.exp -= 1
if res.exp == 0 and divmod and op2.int > op1.int:
#Solves an error in precision. Same as a previous block.
if res.int >= prec_limit and shouldround:
return context._raise_error(DivisionImpossible)
otherside = Decimal(op1)
frozen = context._ignore_all_flags()
exp = min(self._exp, other._exp)
otherside = otherside._rescale(exp, context=context)
context._regard_flags(*frozen)
return (Decimal(res), otherside)
ans = Decimal(res)
if shouldround:
ans = ans._fix(context)
return ans
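    # e.g. Decimal(7) / Decimal(4) == Decimal("1.75"): after
    # _adjust_coefficients scales the operands, the loop above performs
    # schoolbook long division on the integer coefficients.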
def __rdiv__(self, other, context=None):
"""Swaps self/other and returns __div__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__div__(self, context=context)
__rtruediv__ = __rdiv__
def __divmod__(self, other, context=None):
"""
(self // other, self % other)
"""
return self._divide(other, 1, context)
def __rdivmod__(self, other, context=None):
"""Swaps self/other and returns __divmod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__divmod__(self, context=context)
def __mod__(self, other, context=None):
"""
self % other
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self and not other:
return context._raise_error(InvalidOperation, 'x % 0')
return self._divide(other, 3, context)[1]
def __rmod__(self, other, context=None):
"""Swaps self/other and returns __mod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__mod__(self, context=context)
def remainder_near(self, other, context=None):
"""
        Remainder nearest to 0: abs(remainder_near) <= abs(other)/2
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self and not other:
return context._raise_error(InvalidOperation, 'x % 0')
if context is None:
context = getcontext()
# If DivisionImpossible causes an error, do not leave Rounded/Inexact
# ignored in the calling function.
context = context._shallow_copy()
flags = context._ignore_flags(Rounded, Inexact)
#keep DivisionImpossible flags
(side, r) = self.__divmod__(other, context=context)
if r._isnan():
context._regard_flags(*flags)
return r
context = context._shallow_copy()
rounding = context._set_rounding_decision(NEVER_ROUND)
if other._sign:
comparison = other.__div__(Decimal(-2), context=context)
else:
comparison = other.__div__(Decimal(2), context=context)
context._set_rounding_decision(rounding)
context._regard_flags(*flags)
s1, s2 = r._sign, comparison._sign
r._sign, comparison._sign = 0, 0
if r < comparison:
r._sign, comparison._sign = s1, s2
#Get flags now
self.__divmod__(other, context=context)
return r._fix(context)
r._sign, comparison._sign = s1, s2
rounding = context._set_rounding_decision(NEVER_ROUND)
(side, r) = self.__divmod__(other, context=context)
context._set_rounding_decision(rounding)
if r._isnan():
return r
decrease = not side._iseven()
rounding = context._set_rounding_decision(NEVER_ROUND)
side = side.__abs__(context=context)
context._set_rounding_decision(rounding)
s1, s2 = r._sign, comparison._sign
r._sign, comparison._sign = 0, 0
if r > comparison or decrease and r == comparison:
r._sign, comparison._sign = s1, s2
context.prec += 1
if len(side.__add__(Decimal(1), context=context)._int) >= context.prec:
context.prec -= 1
return context._raise_error(DivisionImpossible)[1]
context.prec -= 1
if self._sign == other._sign:
r = r.__sub__(other, context=context)
else:
r = r.__add__(other, context=context)
else:
r._sign, comparison._sign = s1, s2
return r._fix(context)
def __floordiv__(self, other, context=None):
"""self // other"""
return self._divide(other, 2, context)[0]
def __rfloordiv__(self, other, context=None):
"""Swaps self/other and returns __floordiv__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__floordiv__(self, context=context)
def __float__(self):
"""Float representation."""
return float(str(self))
def __int__(self):
"""Converts self to an int, truncating if necessary."""
if self._is_special:
if self._isnan():
context = getcontext()
return context._raise_error(InvalidContext)
elif self._isinfinity():
raise OverflowError, "Cannot convert infinity to long"
if self._exp >= 0:
s = ''.join(map(str, self._int)) + '0'*self._exp
else:
s = ''.join(map(str, self._int))[:self._exp]
if s == '':
s = '0'
sign = '-'*self._sign
return int(sign + s)
def __long__(self):
"""Converts to a long.
Equivalent to long(int(self))
"""
return long(self.__int__())
def _fix(self, context):
"""Round if it is necessary to keep self within prec precision.
Rounds and fixes the exponent. Does not raise on a sNaN.
Arguments:
self - Decimal instance
context - context used.
"""
if self._is_special:
return self
if context is None:
context = getcontext()
prec = context.prec
ans = self._fixexponents(context)
if len(ans._int) > prec:
ans = ans._round(prec, context=context)
ans = ans._fixexponents(context)
return ans
def _fixexponents(self, context):
"""Fix the exponents and return a copy with the exponent in bounds.
Only call if known to not be a special value.
"""
folddown = context._clamp
Emin = context.Emin
ans = self
ans_adjusted = ans.adjusted()
if ans_adjusted < Emin:
Etiny = context.Etiny()
if ans._exp < Etiny:
if not ans:
ans = Decimal(self)
ans._exp = Etiny
context._raise_error(Clamped)
return ans
ans = ans._rescale(Etiny, context=context)
#It isn't zero, and exp < Emin => subnormal
context._raise_error(Subnormal)
if context.flags[Inexact]:
context._raise_error(Underflow)
else:
if ans:
#Only raise subnormal if non-zero.
context._raise_error(Subnormal)
else:
Etop = context.Etop()
if folddown and ans._exp > Etop:
context._raise_error(Clamped)
ans = ans._rescale(Etop, context=context)
else:
Emax = context.Emax
if ans_adjusted > Emax:
if not ans:
ans = Decimal(self)
ans._exp = Emax
context._raise_error(Clamped)
return ans
context._raise_error(Inexact)
context._raise_error(Rounded)
return context._raise_error(Overflow, 'above Emax', ans._sign)
return ans
def _round(self, prec=None, rounding=None, context=None):
"""Returns a rounded version of self.
You can specify the precision or rounding method. Otherwise, the
context determines it.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity():
return Decimal(self)
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
if prec is None:
prec = context.prec
if not self:
if prec <= 0:
dig = (0,)
exp = len(self._int) - prec + self._exp
else:
dig = (0,) * prec
exp = len(self._int) + self._exp - prec
ans = Decimal((self._sign, dig, exp))
context._raise_error(Rounded)
return ans
if prec == 0:
temp = Decimal(self)
temp._int = (0,)+temp._int
prec = 1
elif prec < 0:
exp = self._exp + len(self._int) - prec - 1
temp = Decimal( (self._sign, (0, 1), exp))
prec = 1
else:
temp = Decimal(self)
numdigits = len(temp._int)
if prec == numdigits:
return temp
# See if we need to extend precision
expdiff = prec - numdigits
if expdiff > 0:
tmp = list(temp._int)
tmp.extend([0] * expdiff)
ans = Decimal( (temp._sign, tmp, temp._exp - expdiff))
return ans
#OK, but maybe all the lost digits are 0.
lostdigits = self._int[expdiff:]
if lostdigits == (0,) * len(lostdigits):
ans = Decimal( (temp._sign, temp._int[:prec], temp._exp - expdiff))
#Rounded, but not Inexact
context._raise_error(Rounded)
return ans
# Okay, let's round and lose data
this_function = getattr(temp, self._pick_rounding_function[rounding])
#Now we've got the rounding function
if prec != context.prec:
context = context._shallow_copy()
context.prec = prec
ans = this_function(prec, expdiff, context)
context._raise_error(Rounded)
context._raise_error(Inexact, 'Changed in rounding')
return ans
_pick_rounding_function = {}
def _round_down(self, prec, expdiff, context):
"""Also known as round-towards-0, truncate."""
return Decimal( (self._sign, self._int[:prec], self._exp - expdiff) )
def _round_half_up(self, prec, expdiff, context, tmp = None):
"""Rounds 5 up (away from 0)"""
if tmp is None:
tmp = Decimal( (self._sign,self._int[:prec], self._exp - expdiff))
if self._int[prec] >= 5:
tmp = tmp._increment(round=0, context=context)
if len(tmp._int) > prec:
return Decimal( (tmp._sign, tmp._int[:-1], tmp._exp + 1))
return tmp
def _round_half_even(self, prec, expdiff, context):
"""Round 5 to even, rest to nearest."""
tmp = Decimal( (self._sign, self._int[:prec], self._exp - expdiff))
half = (self._int[prec] == 5)
if half:
for digit in self._int[prec+1:]:
if digit != 0:
half = 0
break
if half:
if self._int[prec-1] & 1 == 0:
return tmp
return self._round_half_up(prec, expdiff, context, tmp)
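    # e.g. rounding Decimal("2.5") to 1 digit yields 2 (the tie goes to the
    # even digit), while Decimal("3.5") yields 4.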
def _round_half_down(self, prec, expdiff, context):
"""Round 5 down"""
tmp = Decimal( (self._sign, self._int[:prec], self._exp - expdiff))
half = (self._int[prec] == 5)
if half:
for digit in self._int[prec+1:]:
if digit != 0:
half = 0
break
if half:
return tmp
return self._round_half_up(prec, expdiff, context, tmp)
def _round_up(self, prec, expdiff, context):
"""Rounds away from 0."""
tmp = Decimal( (self._sign, self._int[:prec], self._exp - expdiff) )
for digit in self._int[prec:]:
if digit != 0:
tmp = tmp._increment(round=1, context=context)
if len(tmp._int) > prec:
return Decimal( (tmp._sign, tmp._int[:-1], tmp._exp + 1))
else:
return tmp
return tmp
def _round_ceiling(self, prec, expdiff, context):
"""Rounds up (not away from 0 if negative.)"""
if self._sign:
return self._round_down(prec, expdiff, context)
else:
return self._round_up(prec, expdiff, context)
def _round_floor(self, prec, expdiff, context):
"""Rounds down (not towards 0 if negative)"""
if not self._sign:
return self._round_down(prec, expdiff, context)
else:
return self._round_up(prec, expdiff, context)
def __pow__(self, n, modulo = None, context=None):
"""Return self ** n (mod modulo)
If modulo is None (default), don't take it mod modulo.
"""
n = _convert_other(n)
if n is NotImplemented:
return n
if context is None:
context = getcontext()
if self._is_special or n._is_special or n.adjusted() > 8:
            #Because the spot << 1 loop below doesn't work with really big exponents
if n._isinfinity() or n.adjusted() > 8:
return context._raise_error(InvalidOperation, 'x ** INF')
ans = self._check_nans(n, context)
if ans:
return ans
if not n._isinteger():
return context._raise_error(InvalidOperation, 'x ** (non-integer)')
if not self and not n:
return context._raise_error(InvalidOperation, '0 ** 0')
if not n:
return Decimal(1)
if self == Decimal(1):
return Decimal(1)
sign = self._sign and not n._iseven()
n = int(n)
if self._isinfinity():
if modulo:
return context._raise_error(InvalidOperation, 'INF % x')
if n > 0:
return Infsign[sign]
return Decimal( (sign, (0,), 0) )
#with ludicrously large exponent, just raise an overflow and return inf.
if not modulo and n > 0 and (self._exp + len(self._int) - 1) * n > context.Emax \
and self:
tmp = Decimal('inf')
tmp._sign = sign
context._raise_error(Rounded)
context._raise_error(Inexact)
context._raise_error(Overflow, 'Big power', sign)
return tmp
elength = len(str(abs(n)))
firstprec = context.prec
if not modulo and firstprec + elength + 1 > DefaultContext.Emax:
return context._raise_error(Overflow, 'Too much precision.', sign)
mul = Decimal(self)
val = Decimal(1)
context = context._shallow_copy()
context.prec = firstprec + elength + 1
if n < 0:
#n is a long now, not Decimal instance
n = -n
mul = Decimal(1).__div__(mul, context=context)
spot = 1
while spot <= n:
spot <<= 1
spot >>= 1
        #Spot is the highest power of 2 less than or equal to n
while spot:
val = val.__mul__(val, context=context)
if val._isinfinity():
val = Infsign[sign]
break
if spot & n:
val = val.__mul__(mul, context=context)
if modulo is not None:
val = val.__mod__(modulo, context=context)
spot >>= 1
context.prec = firstprec
if context._rounding_decision == ALWAYS_ROUND:
return val._fix(context)
return val
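    # e.g. for n = 13 (binary 1101) the loop above is square-and-multiply:
    # spot scans the bits 8, 4, 2, 1, squaring val each step and multiplying
    # by mul on set bits, so only O(log n) multiplications are needed.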
def __rpow__(self, other, context=None):
"""Swaps self/other and returns __pow__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__pow__(self, context=context)
def normalize(self, context=None):
"""Normalize- strip trailing 0s, change anything equal to 0 to 0e0"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
dup = self._fix(context)
if dup._isinfinity():
return dup
if not dup:
return Decimal( (dup._sign, (0,), 0) )
end = len(dup._int)
exp = dup._exp
while dup._int[end-1] == 0:
exp += 1
end -= 1
return Decimal( (dup._sign, dup._int[:end], exp) )
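    # e.g. Decimal("120.00").normalize() == Decimal("1.2E+2"): trailing
    # zeros are stripped and the exponent is raised to compensate.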
def quantize(self, exp, rounding=None, context=None, watchexp=1):
"""Quantize self so its exponent is the same as that of exp.
Similar to self._rescale(exp._exp) but with error checking.
"""
if self._is_special or exp._is_special:
ans = self._check_nans(exp, context)
if ans:
return ans
if exp._isinfinity() or self._isinfinity():
if exp._isinfinity() and self._isinfinity():
return self #if both are inf, it is OK
if context is None:
context = getcontext()
return context._raise_error(InvalidOperation,
'quantize with one INF')
return self._rescale(exp._exp, rounding, context, watchexp)
def same_quantum(self, other):
"""Test whether self and other have the same exponent.
same as self._exp == other._exp, except NaN == sNaN
"""
if self._is_special or other._is_special:
if self._isnan() or other._isnan():
return self._isnan() and other._isnan() and True
if self._isinfinity() or other._isinfinity():
return self._isinfinity() and other._isinfinity() and True
return self._exp == other._exp
def _rescale(self, exp, rounding=None, context=None, watchexp=1):
"""Rescales so that the exponent is exp.
exp = exp to scale to (an integer)
rounding = rounding version
watchexp: if set (default) an error is returned if exp is greater
than Emax or less than Etiny.
"""
if context is None:
context = getcontext()
if self._is_special:
if self._isinfinity():
return context._raise_error(InvalidOperation, 'rescale with an INF')
ans = self._check_nans(context=context)
if ans:
return ans
if watchexp and (context.Emax < exp or context.Etiny() > exp):
return context._raise_error(InvalidOperation, 'rescale(a, INF)')
if not self:
ans = Decimal(self)
ans._int = (0,)
ans._exp = exp
return ans
diff = self._exp - exp
digits = len(self._int) + diff
if watchexp and digits > context.prec:
return context._raise_error(InvalidOperation, 'Rescale > prec')
tmp = Decimal(self)
tmp._int = (0,) + tmp._int
digits += 1
if digits < 0:
tmp._exp = -digits + tmp._exp
tmp._int = (0,1)
digits = 1
tmp = tmp._round(digits, rounding, context=context)
if tmp._int[0] == 0 and len(tmp._int) > 1:
tmp._int = tmp._int[1:]
tmp._exp = exp
tmp_adjusted = tmp.adjusted()
if tmp and tmp_adjusted < context.Emin:
context._raise_error(Subnormal)
elif tmp and tmp_adjusted > context.Emax:
return context._raise_error(InvalidOperation, 'rescale(a, INF)')
return tmp
def to_integral(self, rounding=None, context=None):
"""Rounds to the nearest integer, without raising inexact, rounded."""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if self._exp >= 0:
return self
if context is None:
context = getcontext()
flags = context._ignore_flags(Rounded, Inexact)
ans = self._rescale(0, rounding, context=context)
context._regard_flags(flags)
return ans
def sqrt(self, context=None):
"""Return the square root of self.
Uses a converging algorithm (Xn+1 = 0.5*(Xn + self / Xn))
Should quadratically approach the right answer.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() and self._sign == 0:
return Decimal(self)
if not self:
#exponent = self._exp / 2, using round_down.
#if self._exp < 0:
# exp = (self._exp+1) // 2
#else:
exp = (self._exp) // 2
if self._sign == 1:
#sqrt(-0) = -0
return Decimal( (1, (0,), exp))
else:
return Decimal( (0, (0,), exp))
if context is None:
context = getcontext()
if self._sign == 1:
return context._raise_error(InvalidOperation, 'sqrt(-x), x > 0')
tmp = Decimal(self)
expadd = tmp._exp // 2
if tmp._exp & 1:
tmp._int += (0,)
tmp._exp = 0
else:
tmp._exp = 0
context = context._shallow_copy()
flags = context._ignore_all_flags()
firstprec = context.prec
context.prec = 3
if tmp.adjusted() & 1 == 0:
ans = Decimal( (0, (8,1,9), tmp.adjusted() - 2) )
ans = ans.__add__(tmp.__mul__(Decimal((0, (2,5,9), -2)),
context=context), context=context)
ans._exp -= 1 + tmp.adjusted() // 2
else:
ans = Decimal( (0, (2,5,9), tmp._exp + len(tmp._int)- 3) )
ans = ans.__add__(tmp.__mul__(Decimal((0, (8,1,9), -3)),
context=context), context=context)
ans._exp -= 1 + tmp.adjusted() // 2
#ans is now a linear approximation.
Emax, Emin = context.Emax, context.Emin
context.Emax, context.Emin = DefaultContext.Emax, DefaultContext.Emin
half = Decimal('0.5')
maxp = firstprec + 2
rounding = context._set_rounding(ROUND_HALF_EVEN)
while 1:
context.prec = min(2*context.prec - 2, maxp)
ans = half.__mul__(ans.__add__(tmp.__div__(ans, context=context),
context=context), context=context)
if context.prec == maxp:
break
#round to the answer's precision-- the only error can be 1 ulp.
context.prec = firstprec
prevexp = ans.adjusted()
ans = ans._round(context=context)
#Now, check if the other last digits are better.
context.prec = firstprec + 1
# In case we rounded up another digit and we should actually go lower.
if prevexp != ans.adjusted():
ans._int += (0,)
ans._exp -= 1
lower = ans.__sub__(Decimal((0, (5,), ans._exp-1)), context=context)
context._set_rounding(ROUND_UP)
if lower.__mul__(lower, context=context) > (tmp):
ans = ans.__sub__(Decimal((0, (1,), ans._exp)), context=context)
else:
upper = ans.__add__(Decimal((0, (5,), ans._exp-1)),context=context)
context._set_rounding(ROUND_DOWN)
if upper.__mul__(upper, context=context) < tmp:
ans = ans.__add__(Decimal((0, (1,), ans._exp)),context=context)
ans._exp += expadd
context.prec = firstprec
context.rounding = rounding
ans = ans._fix(context)
rounding = context._set_rounding_decision(NEVER_ROUND)
if not ans.__mul__(ans, context=context) == self:
# Only rounded/inexact if here.
context._regard_flags(flags)
context._raise_error(Rounded)
context._raise_error(Inexact)
else:
#Exact answer, so let's set the exponent right.
#if self._exp < 0:
# exp = (self._exp +1)// 2
#else:
exp = self._exp // 2
context.prec += ans._exp - exp
ans = ans._rescale(exp, context=context)
context.prec = firstprec
context._regard_flags(flags)
context.Emax, context.Emin = Emax, Emin
return ans._fix(context)
def max(self, other, context=None):
"""Returns the larger value.
like max(self, other) except if one is not a number, returns
NaN (and signals if one is sNaN). Also rounds.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if self._is_special or other._is_special:
# if one operand is a quiet NaN and the other is number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn != 2:
return self
if sn == 1 and on != 2:
return other
return self._check_nans(other, context)
ans = self
c = self.__cmp__(other)
if c == 0:
# if both operands are finite and equal in numerical value
# then an ordering is applied:
#
# if the signs differ then max returns the operand with the
# positive sign and min returns the operand with the negative sign
#
# if the signs are the same then the exponent is used to select
# the result.
if self._sign != other._sign:
if self._sign:
ans = other
elif self._exp < other._exp and not self._sign:
ans = other
elif self._exp > other._exp and self._sign:
ans = other
elif c == -1:
ans = other
if context is None:
context = getcontext()
if context._rounding_decision == ALWAYS_ROUND:
return ans._fix(context)
return ans
def min(self, other, context=None):
"""Returns the smaller value.
like min(self, other) except if one is not a number, returns
NaN (and signals if one is sNaN). Also rounds.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if self._is_special or other._is_special:
# if one operand is a quiet NaN and the other is number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn != 2:
return self
if sn == 1 and on != 2:
return other
return self._check_nans(other, context)
ans = self
c = self.__cmp__(other)
if c == 0:
# if both operands are finite and equal in numerical value
# then an ordering is applied:
#
# if the signs differ then max returns the operand with the
# positive sign and min returns the operand with the negative sign
#
# if the signs are the same then the exponent is used to select
# the result.
if self._sign != other._sign:
if other._sign:
ans = other
elif self._exp > other._exp and not self._sign:
ans = other
elif self._exp < other._exp and self._sign:
ans = other
elif c == 1:
ans = other
if context is None:
context = getcontext()
if context._rounding_decision == ALWAYS_ROUND:
return ans._fix(context)
return ans
def _isinteger(self):
"""Returns whether self is an integer"""
if self._exp >= 0:
return True
rest = self._int[self._exp:]
return rest == (0,)*len(rest)
def _iseven(self):
"""Returns 1 if self is even. Assumes self is an integer."""
if self._exp > 0:
return 1
return self._int[-1+self._exp] & 1 == 0
def adjusted(self):
"""Return the adjusted exponent of self"""
try:
return self._exp + len(self._int) - 1
#If NaN or Infinity, self._exp is string
except TypeError:
return 0
# support for pickling, copy, and deepcopy
def __reduce__(self):
return (self.__class__, (str(self),))
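    # Sketch (assumes the standard pickle module): since __reduce__
    # round-trips through the string form,
    # pickle.loads(pickle.dumps(Decimal('1.5'))) reconstructs an equal
    # Decimal('1.5').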
def __copy__(self):
if type(self) == Decimal:
return self # I'm immutable; therefore I am my own clone
return self.__class__(str(self))
def __deepcopy__(self, memo):
if type(self) == Decimal:
return self # My components are also immutable
return self.__class__(str(self))
##### Context class ###########################################
# get rounding method function:
rounding_functions = [name for name in Decimal.__dict__.keys() if name.startswith('_round_')]
for name in rounding_functions:
#name is like _round_half_even, goes to the global ROUND_HALF_EVEN value.
globalname = name[1:].upper()
val = globals()[globalname]
Decimal._pick_rounding_function[val] = name
del name, val, globalname, rounding_functions
class Context(object):
"""Contains the context for a Decimal instance.
Contains:
prec - precision (for use in rounding, division, square roots..)
rounding - rounding type. (how you round)
_rounding_decision - ALWAYS_ROUND, NEVER_ROUND -- do you round?
traps - If traps[exception] = 1, then the exception is
raised when it is caused. Otherwise, a value is
substituted in.
flags - When an exception is caused, flags[exception] is incremented.
(Whether or not the trap_enabler is set)
Should be reset by user of Decimal instance.
Emin - Minimum exponent
Emax - Maximum exponent
capitals - If 1, 1*10^1 is printed as 1E+1.
If 0, printed as 1e1
_clamp - If 1, change exponents if too high (Default 0)
"""
def __init__(self, prec=None, rounding=None,
traps=None, flags=None,
_rounding_decision=None,
Emin=None, Emax=None,
capitals=None, _clamp=0,
_ignored_flags=None):
if flags is None:
flags = []
if _ignored_flags is None:
_ignored_flags = []
if not isinstance(flags, dict):
flags = dict([(s,s in flags) for s in _signals])
del s
if traps is not None and not isinstance(traps, dict):
traps = dict([(s,s in traps) for s in _signals])
del s
for name, val in locals().items():
if val is None:
setattr(self, name, _copy.copy(getattr(DefaultContext, name)))
else:
setattr(self, name, val)
del self.self
def __repr__(self):
"""Show the current context."""
s = []
s.append('Context(prec=%(prec)d, rounding=%(rounding)s, Emin=%(Emin)d, Emax=%(Emax)d, capitals=%(capitals)d' % vars(self))
s.append('flags=[' + ', '.join([f.__name__ for f, v in self.flags.items() if v]) + ']')
s.append('traps=[' + ', '.join([t.__name__ for t, v in self.traps.items() if v]) + ']')
return ', '.join(s) + ')'
def clear_flags(self):
"""Reset all flags to zero"""
for flag in self.flags:
self.flags[flag] = 0
def _shallow_copy(self):
"""Returns a shallow copy from self."""
nc = Context(self.prec, self.rounding, self.traps, self.flags,
self._rounding_decision, self.Emin, self.Emax,
self.capitals, self._clamp, self._ignored_flags)
return nc
def copy(self):
"""Returns a deep copy from self."""
nc = Context(self.prec, self.rounding, self.traps.copy(), self.flags.copy(),
self._rounding_decision, self.Emin, self.Emax,
self.capitals, self._clamp, self._ignored_flags)
return nc
__copy__ = copy
def _raise_error(self, condition, explanation = None, *args):
"""Handles an error
If the flag is in _ignored_flags, returns the default response.
Otherwise, it increments the flag, then, if the corresponding
        trap_enabler is set, it raises the exception. Otherwise, it returns
the default value after incrementing the flag.
"""
error = _condition_map.get(condition, condition)
if error in self._ignored_flags:
#Don't touch the flag
return error().handle(self, *args)
self.flags[error] += 1
if not self.traps[error]:
#The errors define how to handle themselves.
return condition().handle(self, *args)
# Errors should only be risked on copies of the context
#self._ignored_flags = []
raise error, explanation
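    # Sketch of the trap semantics above (assumed setup): with
    # traps[DivisionByZero] clear, Decimal(1) / Decimal(0) merely sets
    # the flag and returns a signed Infinity; with the trap set, the
    # same division raises DivisionByZero.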
def _ignore_all_flags(self):
"""Ignore all flags, if they are raised"""
return self._ignore_flags(*_signals)
def _ignore_flags(self, *flags):
"""Ignore the flags, if they are raised"""
# Do not mutate-- This way, copies of a context leave the original
# alone.
self._ignored_flags = (self._ignored_flags + list(flags))
return list(flags)
def _regard_flags(self, *flags):
"""Stop ignoring the flags, if they are raised"""
if flags and isinstance(flags[0], (tuple,list)):
flags = flags[0]
for flag in flags:
self._ignored_flags.remove(flag)
def __hash__(self):
"""A Context cannot be hashed."""
# We inherit object.__hash__, so we must deny this explicitly
raise TypeError, "Cannot hash a Context."
def Etiny(self):
"""Returns Etiny (= Emin - prec + 1)"""
return int(self.Emin - self.prec + 1)
def Etop(self):
"""Returns maximum exponent (= Emax - prec + 1)"""
return int(self.Emax - self.prec + 1)
def _set_rounding_decision(self, type):
"""Sets the rounding decision.
Sets the rounding decision, and returns the current (previous)
rounding decision. Often used like:
context = context._shallow_copy()
# That so you don't change the calling context
# if an error occurs in the middle (say DivisionImpossible is raised).
rounding = context._set_rounding_decision(NEVER_ROUND)
instance = instance / Decimal(2)
context._set_rounding_decision(rounding)
This will make it not round for that operation.
"""
rounding = self._rounding_decision
self._rounding_decision = type
return rounding
def _set_rounding(self, type):
"""Sets the rounding type.
Sets the rounding type, and returns the current (previous)
rounding type. Often used like:
context = context.copy()
# so you don't change the calling context
# if an error occurs in the middle.
rounding = context._set_rounding(ROUND_UP)
val = self.__sub__(other, context=context)
context._set_rounding(rounding)
This will make it round up for that operation.
"""
rounding = self.rounding
self.rounding= type
return rounding
def create_decimal(self, num='0'):
"""Creates a new Decimal instance but using self as context."""
d = Decimal(num, context=self)
return d._fix(self)
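    # e.g. (sketch): Context(prec=3).create_decimal('1.2345') rounds to
    # the context precision, giving Decimal("1.23") under the default
    # ROUND_HALF_EVEN rounding.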
#Methods
def abs(self, a):
"""Returns the absolute value of the operand.
If the operand is negative, the result is the same as using the minus
operation on the operand. Otherwise, the result is the same as using
the plus operation on the operand.
>>> ExtendedContext.abs(Decimal('2.1'))
Decimal("2.1")
>>> ExtendedContext.abs(Decimal('-100'))
Decimal("100")
>>> ExtendedContext.abs(Decimal('101.5'))
Decimal("101.5")
>>> ExtendedContext.abs(Decimal('-101.5'))
Decimal("101.5")
"""
return a.__abs__(context=self)
def add(self, a, b):
"""Return the sum of the two operands.
>>> ExtendedContext.add(Decimal('12'), Decimal('7.00'))
Decimal("19.00")
>>> ExtendedContext.add(Decimal('1E+2'), Decimal('1.01E+4'))
Decimal("1.02E+4")
"""
return a.__add__(b, context=self)
def _apply(self, a):
return str(a._fix(self))
def compare(self, a, b):
"""Compares values numerically.
If the signs of the operands differ, a value representing each operand
('-1' if the operand is less than zero, '0' if the operand is zero or
negative zero, or '1' if the operand is greater than zero) is used in
place of that operand for the comparison instead of the actual
operand.
The comparison is then effected by subtracting the second operand from
the first and then returning a value according to the result of the
subtraction: '-1' if the result is less than zero, '0' if the result is
zero or negative zero, or '1' if the result is greater than zero.
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('3'))
Decimal("-1")
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.1'))
Decimal("0")
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.10'))
Decimal("0")
>>> ExtendedContext.compare(Decimal('3'), Decimal('2.1'))
Decimal("1")
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('-3'))
Decimal("1")
>>> ExtendedContext.compare(Decimal('-3'), Decimal('2.1'))
Decimal("-1")
"""
return a.compare(b, context=self)
def divide(self, a, b):
"""Decimal division in a specified context.
>>> ExtendedContext.divide(Decimal('1'), Decimal('3'))
Decimal("0.333333333")
>>> ExtendedContext.divide(Decimal('2'), Decimal('3'))
Decimal("0.666666667")
>>> ExtendedContext.divide(Decimal('5'), Decimal('2'))
Decimal("2.5")
>>> ExtendedContext.divide(Decimal('1'), Decimal('10'))
Decimal("0.1")
>>> ExtendedContext.divide(Decimal('12'), Decimal('12'))
Decimal("1")
>>> ExtendedContext.divide(Decimal('8.00'), Decimal('2'))
Decimal("4.00")
>>> ExtendedContext.divide(Decimal('2.400'), Decimal('2.0'))
Decimal("1.20")
>>> ExtendedContext.divide(Decimal('1000'), Decimal('100'))
Decimal("10")
>>> ExtendedContext.divide(Decimal('1000'), Decimal('1'))
Decimal("1000")
>>> ExtendedContext.divide(Decimal('2.40E+6'), Decimal('2'))
Decimal("1.20E+6")
"""
return a.__div__(b, context=self)
def divide_int(self, a, b):
"""Divides two numbers and returns the integer part of the result.
>>> ExtendedContext.divide_int(Decimal('2'), Decimal('3'))
Decimal("0")
>>> ExtendedContext.divide_int(Decimal('10'), Decimal('3'))
Decimal("3")
>>> ExtendedContext.divide_int(Decimal('1'), Decimal('0.3'))
Decimal("3")
"""
return a.__floordiv__(b, context=self)
def divmod(self, a, b):
return a.__divmod__(b, context=self)
def max(self, a,b):
"""max compares two values numerically and returns the maximum.
If either operand is a NaN then the general rules apply.
        Otherwise, the operands are compared as though by the compare
operation. If they are numerically equal then the left-hand operand
is chosen as the result. Otherwise the maximum (closer to positive
infinity) of the two operands is chosen as the result.
>>> ExtendedContext.max(Decimal('3'), Decimal('2'))
Decimal("3")
>>> ExtendedContext.max(Decimal('-10'), Decimal('3'))
Decimal("3")
>>> ExtendedContext.max(Decimal('1.0'), Decimal('1'))
Decimal("1")
>>> ExtendedContext.max(Decimal('7'), Decimal('NaN'))
Decimal("7")
"""
return a.max(b, context=self)
def min(self, a,b):
"""min compares two values numerically and returns the minimum.
If either operand is a NaN then the general rules apply.
        Otherwise, the operands are compared as though by the compare
operation. If they are numerically equal then the left-hand operand
is chosen as the result. Otherwise the minimum (closer to negative
infinity) of the two operands is chosen as the result.
>>> ExtendedContext.min(Decimal('3'), Decimal('2'))
Decimal("2")
>>> ExtendedContext.min(Decimal('-10'), Decimal('3'))
Decimal("-10")
>>> ExtendedContext.min(Decimal('1.0'), Decimal('1'))
Decimal("1.0")
>>> ExtendedContext.min(Decimal('7'), Decimal('NaN'))
Decimal("7")
"""
return a.min(b, context=self)
def minus(self, a):
"""Minus corresponds to unary prefix minus in Python.
The operation is evaluated using the same rules as subtract; the
operation minus(a) is calculated as subtract('0', a) where the '0'
has the same exponent as the operand.
>>> ExtendedContext.minus(Decimal('1.3'))
Decimal("-1.3")
>>> ExtendedContext.minus(Decimal('-1.3'))
Decimal("1.3")
"""
return a.__neg__(context=self)
def multiply(self, a, b):
"""multiply multiplies two operands.
If either operand is a special value then the general rules apply.
Otherwise, the operands are multiplied together ('long multiplication'),
resulting in a number which may be as long as the sum of the lengths
of the two operands.
>>> ExtendedContext.multiply(Decimal('1.20'), Decimal('3'))
Decimal("3.60")
>>> ExtendedContext.multiply(Decimal('7'), Decimal('3'))
Decimal("21")
>>> ExtendedContext.multiply(Decimal('0.9'), Decimal('0.8'))
Decimal("0.72")
>>> ExtendedContext.multiply(Decimal('0.9'), Decimal('-0'))
Decimal("-0.0")
>>> ExtendedContext.multiply(Decimal('654321'), Decimal('654321'))
Decimal("4.28135971E+11")
"""
return a.__mul__(b, context=self)
def normalize(self, a):
"""normalize reduces an operand to its simplest form.
Essentially a plus operation with all trailing zeros removed from the
result.
>>> ExtendedContext.normalize(Decimal('2.1'))
Decimal("2.1")
>>> ExtendedContext.normalize(Decimal('-2.0'))
Decimal("-2")
>>> ExtendedContext.normalize(Decimal('1.200'))
Decimal("1.2")
>>> ExtendedContext.normalize(Decimal('-120'))
Decimal("-1.2E+2")
>>> ExtendedContext.normalize(Decimal('120.00'))
Decimal("1.2E+2")
>>> ExtendedContext.normalize(Decimal('0.00'))
Decimal("0")
"""
return a.normalize(context=self)
def plus(self, a):
"""Plus corresponds to unary prefix plus in Python.
The operation is evaluated using the same rules as add; the
operation plus(a) is calculated as add('0', a) where the '0'
has the same exponent as the operand.
>>> ExtendedContext.plus(Decimal('1.3'))
Decimal("1.3")
>>> ExtendedContext.plus(Decimal('-1.3'))
Decimal("-1.3")
"""
return a.__pos__(context=self)
def power(self, a, b, modulo=None):
"""Raises a to the power of b, to modulo if given.
The right-hand operand must be a whole number whose integer part (after
any exponent has been applied) has no more than 9 digits and whose
fractional part (if any) is all zeros before any rounding. The operand
may be positive, negative, or zero; if negative, the absolute value of
the power is used, and the left-hand operand is inverted (divided into
1) before use.
If the increased precision needed for the intermediate calculations
exceeds the capabilities of the implementation then an Invalid operation
condition is raised.
If, when raising to a negative power, an underflow occurs during the
division into 1, the operation is not halted at that point but
continues.
>>> ExtendedContext.power(Decimal('2'), Decimal('3'))
Decimal("8")
>>> ExtendedContext.power(Decimal('2'), Decimal('-3'))
Decimal("0.125")
>>> ExtendedContext.power(Decimal('1.7'), Decimal('8'))
Decimal("69.7575744")
>>> ExtendedContext.power(Decimal('Infinity'), Decimal('-2'))
Decimal("0")
>>> ExtendedContext.power(Decimal('Infinity'), Decimal('-1'))
Decimal("0")
>>> ExtendedContext.power(Decimal('Infinity'), Decimal('0'))
Decimal("1")
>>> ExtendedContext.power(Decimal('Infinity'), Decimal('1'))
Decimal("Infinity")
>>> ExtendedContext.power(Decimal('Infinity'), Decimal('2'))
Decimal("Infinity")
>>> ExtendedContext.power(Decimal('-Infinity'), Decimal('-2'))
Decimal("0")
>>> ExtendedContext.power(Decimal('-Infinity'), Decimal('-1'))
Decimal("-0")
>>> ExtendedContext.power(Decimal('-Infinity'), Decimal('0'))
Decimal("1")
>>> ExtendedContext.power(Decimal('-Infinity'), Decimal('1'))
Decimal("-Infinity")
>>> ExtendedContext.power(Decimal('-Infinity'), Decimal('2'))
Decimal("Infinity")
>>> ExtendedContext.power(Decimal('0'), Decimal('0'))
Decimal("NaN")
"""
return a.__pow__(b, modulo, context=self)
def quantize(self, a, b):
"""Returns a value equal to 'a' (rounded) and having the exponent of 'b'.
The coefficient of the result is derived from that of the left-hand
operand. It may be rounded using the current rounding setting (if the
exponent is being increased), multiplied by a positive power of ten (if
the exponent is being decreased), or is unchanged (if the exponent is
already equal to that of the right-hand operand).
Unlike other operations, if the length of the coefficient after the
quantize operation would be greater than precision then an Invalid
operation condition is raised. This guarantees that, unless there is an
error condition, the exponent of the result of a quantize is always
equal to that of the right-hand operand.
Also unlike other operations, quantize will never raise Underflow, even
if the result is subnormal and inexact.
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.001'))
Decimal("2.170")
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.01'))
Decimal("2.17")
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.1'))
Decimal("2.2")
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+0'))
Decimal("2")
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+1'))
Decimal("0E+1")
>>> ExtendedContext.quantize(Decimal('-Inf'), Decimal('Infinity'))
Decimal("-Infinity")
>>> ExtendedContext.quantize(Decimal('2'), Decimal('Infinity'))
Decimal("NaN")
>>> ExtendedContext.quantize(Decimal('-0.1'), Decimal('1'))
Decimal("-0")
>>> ExtendedContext.quantize(Decimal('-0'), Decimal('1e+5'))
Decimal("-0E+5")
>>> ExtendedContext.quantize(Decimal('+35236450.6'), Decimal('1e-2'))
Decimal("NaN")
>>> ExtendedContext.quantize(Decimal('-35236450.6'), Decimal('1e-2'))
Decimal("NaN")
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-1'))
Decimal("217.0")
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-0'))
Decimal("217")
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+1'))
Decimal("2.2E+2")
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+2'))
Decimal("2E+2")
"""
return a.quantize(b, context=self)
def remainder(self, a, b):
"""Returns the remainder from integer division.
The result is the residue of the dividend after the operation of
calculating integer division as described for divide-integer, rounded to
precision digits if necessary. The sign of the result, if non-zero, is
the same as that of the original dividend.
This operation will fail under the same conditions as integer division
(that is, if integer division on the same two operands would fail, the
remainder cannot be calculated).
>>> ExtendedContext.remainder(Decimal('2.1'), Decimal('3'))
Decimal("2.1")
>>> ExtendedContext.remainder(Decimal('10'), Decimal('3'))
Decimal("1")
>>> ExtendedContext.remainder(Decimal('-10'), Decimal('3'))
Decimal("-1")
>>> ExtendedContext.remainder(Decimal('10.2'), Decimal('1'))
Decimal("0.2")
>>> ExtendedContext.remainder(Decimal('10'), Decimal('0.3'))
Decimal("0.1")
>>> ExtendedContext.remainder(Decimal('3.6'), Decimal('1.3'))
Decimal("1.0")
"""
return a.__mod__(b, context=self)
def remainder_near(self, a, b):
"""Returns to be "a - b * n", where n is the integer nearest the exact
value of "x / b" (if two integers are equally near then the even one
is chosen). If the result is equal to 0 then its sign will be the
sign of a.
This operation will fail under the same conditions as integer division
(that is, if integer division on the same two operands would fail, the
remainder cannot be calculated).
>>> ExtendedContext.remainder_near(Decimal('2.1'), Decimal('3'))
Decimal("-0.9")
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('6'))
Decimal("-2")
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('3'))
Decimal("1")
>>> ExtendedContext.remainder_near(Decimal('-10'), Decimal('3'))
Decimal("-1")
>>> ExtendedContext.remainder_near(Decimal('10.2'), Decimal('1'))
Decimal("0.2")
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('0.3'))
Decimal("0.1")
>>> ExtendedContext.remainder_near(Decimal('3.6'), Decimal('1.3'))
Decimal("-0.3")
"""
return a.remainder_near(b, context=self)
def same_quantum(self, a, b):
"""Returns True if the two operands have the same exponent.
The result is never affected by either the sign or the coefficient of
either operand.
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.001'))
False
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.01'))
True
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('1'))
False
>>> ExtendedContext.same_quantum(Decimal('Inf'), Decimal('-Inf'))
True
"""
return a.same_quantum(b)
def sqrt(self, a):
"""Returns the square root of a non-negative number to context precision.
If the result must be inexact, it is rounded using the round-half-even
algorithm.
>>> ExtendedContext.sqrt(Decimal('0'))
Decimal("0")
>>> ExtendedContext.sqrt(Decimal('-0'))
Decimal("-0")
>>> ExtendedContext.sqrt(Decimal('0.39'))
Decimal("0.624499800")
>>> ExtendedContext.sqrt(Decimal('100'))
Decimal("10")
>>> ExtendedContext.sqrt(Decimal('1'))
Decimal("1")
>>> ExtendedContext.sqrt(Decimal('1.0'))
Decimal("1.0")
>>> ExtendedContext.sqrt(Decimal('1.00'))
Decimal("1.0")
>>> ExtendedContext.sqrt(Decimal('7'))
Decimal("2.64575131")
>>> ExtendedContext.sqrt(Decimal('10'))
Decimal("3.16227766")
>>> ExtendedContext.prec
9
"""
return a.sqrt(context=self)
def subtract(self, a, b):
"""Return the difference between the two operands.
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.07'))
Decimal("0.23")
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.30'))
Decimal("0.00")
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('2.07'))
Decimal("-0.77")
"""
return a.__sub__(b, context=self)
def to_eng_string(self, a):
"""Converts a number to a string, using scientific notation.
The operation is not affected by the context.
"""
return a.to_eng_string(context=self)
def to_sci_string(self, a):
"""Converts a number to a string, using scientific notation.
The operation is not affected by the context.
"""
return a.__str__(context=self)
def to_integral(self, a):
"""Rounds to an integer.
When the operand has a negative exponent, the result is the same
as using the quantize() operation using the given operand as the
left-hand-operand, 1E+0 as the right-hand-operand, and the precision
of the operand as the precision setting, except that no flags will
be set. The rounding mode is taken from the context.
>>> ExtendedContext.to_integral(Decimal('2.1'))
Decimal("2")
>>> ExtendedContext.to_integral(Decimal('100'))
Decimal("100")
>>> ExtendedContext.to_integral(Decimal('100.0'))
Decimal("100")
>>> ExtendedContext.to_integral(Decimal('101.5'))
Decimal("102")
>>> ExtendedContext.to_integral(Decimal('-101.5'))
Decimal("-102")
>>> ExtendedContext.to_integral(Decimal('10E+5'))
Decimal("1.0E+6")
>>> ExtendedContext.to_integral(Decimal('7.89E+77'))
Decimal("7.89E+77")
>>> ExtendedContext.to_integral(Decimal('-Inf'))
Decimal("-Infinity")
"""
return a.to_integral(context=self)
class _WorkRep(object):
__slots__ = ('sign','int','exp')
# sign: 0 or 1
# int: int or long
# exp: None, int, or string
def __init__(self, value=None):
if value is None:
self.sign = None
self.int = 0
self.exp = None
elif isinstance(value, Decimal):
self.sign = value._sign
cum = 0
for digit in value._int:
cum = cum * 10 + digit
self.int = cum
self.exp = value._exp
else:
# assert isinstance(value, tuple)
self.sign = value[0]
self.int = value[1]
self.exp = value[2]
def __repr__(self):
return "(%r, %r, %r)" % (self.sign, self.int, self.exp)
__str__ = __repr__
def _normalize(op1, op2, shouldround = 0, prec = 0):
"""Normalizes op1, op2 to have the same exp and length of coefficient.
Done during addition.
"""
# Yes, the exponent is a long, but the difference between exponents
# must be an int-- otherwise you'd get a big memory problem.
numdigits = int(op1.exp - op2.exp)
if numdigits < 0:
numdigits = -numdigits
tmp = op2
other = op1
else:
tmp = op1
other = op2
if shouldround and numdigits > prec + 1:
# Big difference in exponents - check the adjusted exponents
tmp_len = len(str(tmp.int))
other_len = len(str(other.int))
if numdigits > (other_len + prec + 1 - tmp_len):
# If the difference in adjusted exps is > prec+1, we know
# other is insignificant, so might as well put a 1 after the precision.
# (since this is only for addition.) Also stops use of massive longs.
extend = prec + 2 - tmp_len
if extend <= 0:
extend = 1
tmp.int *= 10 ** extend
tmp.exp -= extend
other.int = 1
other.exp = tmp.exp
return op1, op2
tmp.int *= 10 ** numdigits
tmp.exp -= numdigits
return op1, op2
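# Illustrative sketch (assumed _WorkRep values): normalizing 123E+1 and
# 456E-2 for addition scales the operand with the larger exponent so both
# share exp == -2: op1 becomes int=123000, exp=-2 while op2 is unchanged.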
def _adjust_coefficients(op1, op2):
"""Adjust op1, op2 so that op2.int * 10 > op1.int >= op2.int.
Returns the adjusted op1, op2 as well as the change in op1.exp-op2.exp.
Used on _WorkRep instances during division.
"""
adjust = 0
#If op1 is smaller, make it larger
while op2.int > op1.int:
op1.int *= 10
op1.exp -= 1
adjust += 1
#If op2 is too small, make it larger
while op1.int >= (10 * op2.int):
op2.int *= 10
op2.exp -= 1
adjust -= 1
return op1, op2, adjust
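# Illustrative sketch (assumed _WorkRep values): with op1.int=5 and
# op2.int=300, the first loop scales op1 to int=500 (exp reduced by 2)
# and returns adjust=2, satisfying 300 * 10 > 500 >= 300.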
##### Helper Functions ########################################
def _convert_other(other):
"""Convert other to Decimal.
Verifies that it's ok to use in an implicit construction.
"""
if isinstance(other, Decimal):
return other
if isinstance(other, (int, long)):
return Decimal(other)
return NotImplemented
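# e.g. (sketch): _convert_other(10) yields Decimal("10"), while
# _convert_other(1.5) yields NotImplemented -- floats are deliberately
# rejected for implicit construction.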
_infinity_map = {
'inf' : 1,
'infinity' : 1,
'+inf' : 1,
'+infinity' : 1,
'-inf' : -1,
'-infinity' : -1
}
def _isinfinity(num):
"""Determines whether a string or float is infinity.
+1 for negative infinity; 0 for finite ; +1 for positive infinity
"""
num = str(num).lower()
return _infinity_map.get(num, 0)
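# e.g. (sketch): _isinfinity('-Infinity') == -1, _isinfinity('inf') == 1,
# and _isinfinity('1.5') == 0.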
def _isnan(num):
"""Determines whether a string or float is NaN
(1, sign, diagnostic info as string) => NaN
(2, sign, diagnostic info as string) => sNaN
0 => not a NaN
"""
num = str(num).lower()
if not num:
return 0
#get the sign, get rid of trailing [+-]
sign = 0
if num[0] == '+':
num = num[1:]
elif num[0] == '-': #elif avoids '+-nan'
num = num[1:]
sign = 1
if num.startswith('nan'):
if len(num) > 3 and not num[3:].isdigit(): #diagnostic info
return 0
return (1, sign, num[3:].lstrip('0'))
if num.startswith('snan'):
if len(num) > 4 and not num[4:].isdigit():
return 0
return (2, sign, num[4:].lstrip('0'))
return 0
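# e.g. (sketch): _isnan('NaN') == (1, 0, ''), _isnan('-sNaN123') ==
# (2, 1, '123'), and _isnan('1.5') == 0.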
##### Setup Specific Contexts ################################
# The default context prototype used by Context()
# Is mutable, so that new contexts can have different default values
DefaultContext = Context(
prec=28, rounding=ROUND_HALF_EVEN,
traps=[DivisionByZero, Overflow, InvalidOperation],
flags=[],
_rounding_decision=ALWAYS_ROUND,
Emax=999999999,
Emin=-999999999,
capitals=1
)
# Pre-made alternate contexts offered by the specification
# Don't change these; the user should be able to select these
# contexts and be able to reproduce results from other implementations
# of the spec.
BasicContext = Context(
prec=9, rounding=ROUND_HALF_UP,
traps=[DivisionByZero, Overflow, InvalidOperation, Clamped, Underflow],
flags=[],
)
ExtendedContext = Context(
prec=9, rounding=ROUND_HALF_EVEN,
traps=[],
flags=[],
)
##### Useful Constants (internal use only) ####################
#Reusable defaults
Inf = Decimal('Inf')
negInf = Decimal('-Inf')
#Infsign[sign] is infinity w/ that sign
Infsign = (Inf, negInf)
NaN = Decimal('NaN')
##### crud for parsing strings #################################
import re
# There's an optional sign at the start, and an optional exponent
# at the end. The exponent has an optional sign and at least one
# digit. In between, must have either at least one digit followed
# by an optional fraction, or a decimal point followed by at least
# one digit. Yuck.
_parser = re.compile(r"""
# \s*
(?P<sign>[-+])?
(
(?P<int>\d+) (\. (?P<frac>\d*))?
|
\. (?P<onlyfrac>\d+)
)
([eE](?P<exp>[-+]? \d+))?
# \s*
$
""", re.VERBOSE).match #Uncomment the \s* to allow leading or trailing spaces.
del re
# return sign, n, p s.t. the string's numeric value == (-1)**sign * n * 10**p exactly
def _string2exact(s):
m = _parser(s)
if m is None:
raise ValueError("invalid literal for Decimal: %r" % s)
if m.group('sign') == "-":
sign = 1
else:
sign = 0
exp = m.group('exp')
if exp is None:
exp = 0
else:
exp = int(exp)
intpart = m.group('int')
if intpart is None:
intpart = ""
fracpart = m.group('onlyfrac')
else:
fracpart = m.group('frac')
if fracpart is None:
fracpart = ""
exp -= len(fracpart)
mantissa = intpart + fracpart
tmp = map(int, mantissa)
    # Note: backup aliases tmp (it is not a copy), so stripping the
    # leading zeros below empties both lists; an all-zero mantissa
    # therefore always falls through to the (sign, (0,), exp) return.
    backup = tmp
while tmp and tmp[0] == 0:
del tmp[0]
# It's a zero
if not tmp:
if backup:
return (sign, tuple(backup), exp)
return (sign, (0,), exp)
mantissa = tuple(tmp)
return (sign, mantissa, exp)
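# e.g. (sketch): _string2exact('-1.50E+3') == (1, (1, 5, 0), 1), since
# the two fractional digits lower the written exponent of +3 by 2.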
if __name__ == '__main__':
import doctest, sys
doctest.testmod(sys.modules[__name__])
|
apache-2.0
| 4,002,812,287,032,486,400
| 33.245859
| 130
| 0.536214
| false
|
garyd203/flying-circus
|
src/flyingcircus/_raw/budgets.py
|
1
|
1096
|
"""Raw representations of every data type in the AWS Budgets service.
See Also:
`AWS developer guide for Budgets
<https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/>`_
This file is automatically generated, and should not be directly edited.
"""
from attr import attrib
from attr import attrs
from ..core import ATTRSCONFIG
from ..core import Resource
from ..core import ResourceProperties
from ..core import create_object_converter
__all__ = ["Budget", "BudgetProperties"]
@attrs(**ATTRSCONFIG)
class BudgetProperties(ResourceProperties):
Budget = attrib(default=None)
NotificationsWithSubscribers = attrib(default=None)
@attrs(**ATTRSCONFIG)
class Budget(Resource):
"""A Budget for Budgets.
See Also:
`AWS Cloud Formation documentation for Budget
<http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-budgets-budget.html>`_
"""
RESOURCE_TYPE = "AWS::Budgets::Budget"
Properties: BudgetProperties = attrib(
factory=BudgetProperties, converter=create_object_converter(BudgetProperties)
)
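# Hypothetical usage sketch (property values assumed, not verified
# against the CloudFormation schema):
#   budget = Budget(Properties=BudgetProperties(
#       Budget={"BudgetType": "COST", "TimeUnit": "MONTHLY"}))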
|
lgpl-3.0
| -4,699,627,189,435,388,000
| 26.4
| 106
| 0.737226
| false
|
acmxrds/summer-2016
|
helloworld/MNIST2Conv_Stat_Collect.py
|
1
|
11236
|
# coding=utf-8
__author__ = 'Abhineet Saxena'
"""
The code for the ACM XRDS Hello World! column collects summary statistics for the CNN architecture
detailed in the Google TensorFlow MNIST Expert tutorial:
https://www.tensorflow.org/versions/r0.7/tutorials/mnist/pros/index.html
Note:
The summary collection ops for most of the layers (Conv. Layer 1, Conv. Layer 2 and the Softmax layer) have
been commented out because collecting summaries for all the layers at once places a significant load on the
CPU and can cripplingly slow down the machine while the script is running.
If you have a much better computing architecture than the one I use, you can certainly try running all ops at once:
My Configuration:
~~~~~~~~~~~~~~~~~~~~~~~~~~~
> Model Name: Intel(R) Core(TM) i5-4210U CPU @ 1.70GHz
> No. of Processors: 3
> No. of CPU cores: 2
> Cache Size: 3072 KB
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Instructions For Running TensorFlow:
For running the Tensorboard program and visualizing the statistics, anyone of the following commands needs to
be entered at the terminal and run:
>> tensorboard --logdir='/path/to/mnist_logs folder'
or
>> python tensorflow/tensorboard/tensorboard.py --logdir='path/to/mnist_logs folder'
(Replace the string after the ‘=’ sign above with the actual path to the folder, without the single quotes.)
Thereafter, the TensorBoard panel can then be accessed by visiting the following URL in any of your browsers.
http://0.0.0.0:6006/
"""
# The Imports
import tensorflow as tf
# We make use of the script provided by the TensorFlow team for reading-in and processing the data.
import input_data as inpt_d
# ##Function Declarations
def weight_variable(shape, arg_name=None):
"""
A method that returns a tf.Variable initialised with values drawn from a normal distribution.
:param shape: The shape of the desired output.
:return: tf.Variable
"""
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial, name=arg_name)
def bias_variable(shape, arg_name=None):
"""
A method that creates a constant Tensor with the specified shape and a constant value of 0.1.
The bias value must be slightly positive to prevent neurons from becoming unresponsive or dead.
:param shape: The shape of the desired output.
:return: tf.Variable
"""
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial, name=arg_name)
def conv2d(xvar, Wt_var, stride_arg=(1, 1, 1, 1), pad_arg='SAME'):
"""
Returns the Activation Map obtained by convolving the Weight matrix with the input matrix.
:param xvar: The Neural Input Matrix.
:param Wt_var: The Weight Matrix.
:param stride_arg: The Stride value, specified as a tuple.
:param pad_arg: The Padding Value. Can either be 'VALID' (padding disabled) or 'SAME' (padding-enabled).
:return: The Activation Map or the Output Volume.
"""
return tf.nn.conv2d(xvar, Wt_var, strides=[sval for sval in stride_arg], padding=pad_arg)
def max_pool_2x2(xvar):
"""
    Performs the max-pooling operation. Here, a default window size of 2x2 and stride values of (2, 2) are assumed.
    :param xvar: The Input Volume to be max-pooled.
    :return: The max-pooled output.
"""
return tf.nn.max_pool(xvar, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
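# Shape sketch (assumed 28x28x1 inputs): conv2d with a [5, 5, 1, 32]
# kernel and SAME padding keeps the 28x28 spatial dimensions, and
# max_pool_2x2 then halves them to 14x14 with 32 channels.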
# Creating a Graph
new_graph = tf.Graph()
# Setting the Graph as the default Graph.
with new_graph.as_default():
# Instantiating an Interactive Session.
sess = tf.InteractiveSession()
# Placeholder for the Input image data.
xvar = tf.placeholder("float", shape=[None, 784], name="Input_Image")
# Placeholder for the Input image label.
y_var = tf.placeholder("float", shape=[None, 10], name="Input_Image_Label")
# Setting up the variable that receives the processed MNIST dataset.
mnist_data = inpt_d.read_data_sets('MNIST_data', one_hot=True)
# ######The First Convolutional Layer #######
# The Weight Matrix for the First Conv. Layer [28x28x32]. R=5, S=1, K=32 and P=2, The Input Channels: 1.
# It has been named for use in collecting stats.
Wt_mat_layer1 = weight_variable([5, 5, 1, 32], arg_name="Weights_Conv_Layer_1")
# The Bias vector for the first Conv. Layer instantiated.
bias_vec_layer1 = bias_variable([32], arg_name="Bias_Conv_Layer_1")
# Reshapes the Image_Input into it's 28x28 matrix form. -1 implies flattening the image along the first dimension.
x_image = tf.reshape(xvar, [-1, 28, 28, 1])
# Convolution operation performed with scope as Conv_Layer_1 to aid visualization.
with tf.name_scope("Conv_Layer_1") as scope_cv1:
output_conv1 = tf.nn.relu(conv2d(x_image, Wt_mat_layer1) + bias_vec_layer1)
pool_out_conv1 = max_pool_2x2(output_conv1)
# Setting up the summary ops to collect the Weights, Bias and pool activation outputs.
# Uncomment the following 3 lines for logging the outputs to summary op.
# Wt_Cv1_summ = tf.histogram_summary("Conv1_Weights", Wt_mat_layer1)
# Bs_Cv1_summ = tf.histogram_summary("Conv1_Bias", bias_vec_layer1)
    # Amap_Cv1_summ = tf.histogram_summary("Activation_Max-Pooled_Output_Conv1", pool_out_conv1)
# ######The Second Convolutional Layer #######
# Instantiates the Weight Matrix defined per neuron for the second Conv. Layer [14x14x64]. R=5, K=64, S=1, P=2.
# The Input channels: 32.
Wt_mat_layer2 = weight_variable([5, 5, 32, 64], arg_name="Weights_Conv_Layer_2")
bias_vec_layer2 = bias_variable([64], arg_name="Bias_Conv_Layer_2")
# Operation of the second Conv. layer. Input has been padded (default).
with tf.name_scope("Conv_Layer_2") as scope_cv2:
output_conv2 = tf.nn.relu(conv2d(pool_out_conv1, Wt_mat_layer2) + bias_vec_layer2)
pool_out_conv2 = max_pool_2x2(output_conv2)
# Setting up the summary ops to collect the Weights, Bias and pool activation outputs.
# Uncomment the following 3 lines for logging the outputs to summary op.
# Wt_Cv2_summ = tf.histogram_summary("Conv2_Weights", Wt_mat_layer2)
# Bs_Cv2_summ = tf.histogram_summary("Conv2_Bias", bias_vec_layer2)
    # Amap_Cv2_summ = tf.histogram_summary("Activation_Max-Pooled_Output_Conv2", pool_out_conv2)
# ######The First Fully Connected Layer #######
# Weights initialised for the first fully connected layer. The FC layer has 1024 neurons.
# The Output Volume from the previous layer has the structure 7x7x64.
Wt_fc_layer1 = weight_variable([7 * 7 * 64, 1024], arg_name="Weights_FC_Layer")
# Bias vector for the fully connected layer.
bias_fc1 = bias_variable([1024], arg_name="Bias_FC_Layer")
# The output matrix from 2nd Conv. layer reshaped to make it conducive to matrix multiply.
# -1 implies flattening the Tensor matrix along the first dimension.
pool_out_conv2_flat = tf.reshape(pool_out_conv2, [-1, 7*7*64])
with tf.name_scope("FC_Layer") as scope_fc:
output_fc1 = tf.nn.relu(tf.matmul(pool_out_conv2_flat, Wt_fc_layer1) + bias_fc1)
# Setting up the summary ops to collect the Weights, Bias and pool activation outputs.
Wt_FC_summ = tf.histogram_summary("FC_Weights", Wt_fc_layer1)
Bs_FC_summ = tf.histogram_summary("FC_Bias", bias_fc1)
    Amap_FC_summ = tf.histogram_summary("Activations_FC", output_fc1)
# ##### Dropout #######
# Placeholder for the Dropout probability.
keep_prob = tf.placeholder("float", name="Dropout_Probability")
# Performs the dropout op, where certain neurons are randomly disconnected and their outputs not considered.
with tf.name_scope("CNN_Dropout_Op") as scope_dropout:
h_fc1_drop = tf.nn.dropout(output_fc1, keep_prob)
# ##### SoftMax-Regression #######
W_fc2 = weight_variable([1024, 10], arg_name="Softmax_Reg_Weights")
b_fc2 = bias_variable([10], arg_name="Softmax_Reg_Bias")
# Performs the Softmax Regression op, computes the softmax probabilities assigned to each class.
with tf.name_scope("Softmax_Regression") as scope_softmax:
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
# Setting up the summary ops to collect the Weights, Bias and pool activation outputs.
# Uncomment the following 3 lines for logging the outputs to summary op.
# Wt_softmax_summ = tf.histogram_summary("Sfmax_Weights", Wt_mat_layer2)
# Bs_softmax_summ = tf.histogram_summary("Sfmax_Bias", bias_vec_layer2)
    # Amap_softmax_summ = tf.histogram_summary("Activations_Sfmax", y_conv)
# Cross-Entropy calculated.
with tf.name_scope("X_Entropy") as scope_xentrop:
cross_entropy = -tf.reduce_sum(y_var*tf.log(y_conv))
# Adding the scalar summary operation for capturing the cross-entropy.
ce_summ = tf.scalar_summary("Cross_Entropy", cross_entropy)
# Adam Optimizer gives the best performance among Gradient Descent Optimizers.
with tf.name_scope("Train") as scope_train:
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# Calculating the Correct Prediction value.
with tf.name_scope("Test") as scope_test:
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_var, 1))
    # The Bool tensor is type-cast into a float representation (1s and 0s) and the mean of all the
    # values is calculated.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
# Adding the scalar summary operation for capturing the Accuracy.
acc_summ = tf.scalar_summary("Accuracy", accuracy)
    # Merge all the summaries and write them out to ./mnist_logs
    merged = tf.merge_all_summaries()
    summ_writer = tf.train.SummaryWriter("./mnist_logs", sess.graph_def)
    # Adds the ops to the Graph that perform Variable initializations and runs them.
    sess.run(tf.initialize_all_variables())
# Training for 2000 iterations or Epochs.
for i in range(2000):
if i % 100 == 0:
# Feeds the feed_dict dictionary with values from the test set.
feed = {xvar: mnist_data.test.images, y_var: mnist_data.test.labels, keep_prob: 1.0}
# The run method executes both the ops, i.e. 'merged' for merging the summaries and writing them
# and the 'accuracy' op. for calculating the accuracy over the test set. Both are executed every
# 100th iteration.
result = sess.run([merged, accuracy], feed_dict=feed)
# Summary string output obtained after the execution of 'merged' op.
summary_str = result[0]
# Accuracy value output obtained after the execution of 'accuracy' op.
acc = result[1]
# Adding the summary string and writing the output to the log-directory.
summ_writer.add_summary(summary_str, i)
print("Accuracy at step %s: %s" % (i, acc))
else:
# Returns the next 50 images and their labels from the training set.
batch = mnist_data.train.next_batch(50)
# Train the CNN with the dropout probability of neurons being 0.5 for every iteration.
train_step.run(feed_dict={xvar: batch[0], y_var: batch[1], keep_prob: 0.5})
|
gpl-3.0
| 4,570,019,302,829,004,000
| 48.048035
| 118
| 0.689548
| false
|
pyfa-org/eos
|
tests/integration/stats/slot/test_launcher.py
|
1
|
5726
|
# ==============================================================================
# Copyright (C) 2011 Diego Duclos
# Copyright (C) 2011-2018 Anton Vorobyov
#
# This file is part of Eos.
#
# Eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Eos. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
from eos import EffectMode
from eos import ModuleHigh
from eos import Ship
from eos.const.eos import ModAffecteeFilter
from eos.const.eos import ModDomain
from eos.const.eos import ModOperator
from eos.const.eve import AttrId
from eos.const.eve import EffectCategoryId
from eos.const.eve import EffectId
from tests.integration.stats.testcase import StatsTestCase
class TestLauncherSlot(StatsTestCase):
def setUp(self):
StatsTestCase.setUp(self)
self.mkattr(attr_id=AttrId.launcher_slots_left)
self.effect = self.mkeffect(
effect_id=EffectId.launcher_fitted,
category_id=EffectCategoryId.passive)
def test_output(self):
# Check that modified attribute of ship is used
src_attr = self.mkattr()
modifier = self.mkmod(
affectee_filter=ModAffecteeFilter.item,
affectee_domain=ModDomain.self,
affectee_attr_id=AttrId.launcher_slots_left,
operator=ModOperator.post_mul,
affector_attr_id=src_attr.id)
mod_effect = self.mkeffect(
category_id=EffectCategoryId.passive,
modifiers=[modifier])
self.fit.ship = Ship(self.mktype(
attrs={AttrId.launcher_slots_left: 3, src_attr.id: 2},
effects=[mod_effect]).id)
# Verification
self.assertEqual(self.fit.stats.launcher_slots.total, 6)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_output_ship_absent(self):
# Verification
self.assertEqual(self.fit.stats.launcher_slots.total, 0)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_output_ship_attr_absent(self):
self.fit.ship = Ship(self.mktype().id)
# Verification
self.assertEqual(self.fit.stats.launcher_slots.total, 0)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_output_ship_not_loaded(self):
self.fit.ship = Ship(self.allocate_type_id())
# Verification
self.assertEqual(self.fit.stats.launcher_slots.total, 0)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_use_multiple(self):
self.fit.modules.high.append(
ModuleHigh(self.mktype(effects=[self.effect]).id))
self.fit.modules.high.append(
ModuleHigh(self.mktype(effects=[self.effect]).id))
# Verification
self.assertEqual(self.fit.stats.launcher_slots.used, 2)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_use_multiple_with_none(self):
self.fit.modules.high.place(
1, ModuleHigh(self.mktype(effects=[self.effect]).id))
self.fit.modules.high.place(
3, ModuleHigh(self.mktype(effects=[self.effect]).id))
# Verification
# Positions do not matter
self.assertEqual(self.fit.stats.launcher_slots.used, 2)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_use_item_effect_absent(self):
item1 = ModuleHigh(self.mktype(effects=[self.effect]).id)
item2 = ModuleHigh(self.mktype().id)
self.fit.modules.high.append(item1)
self.fit.modules.high.append(item2)
# Verification
self.assertEqual(self.fit.stats.launcher_slots.used, 1)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_use_item_effect_disabled(self):
item1 = ModuleHigh(self.mktype(effects=[self.effect]).id)
item2 = ModuleHigh(self.mktype(effects=[self.effect]).id)
item2.set_effect_mode(self.effect.id, EffectMode.force_stop)
self.fit.modules.high.append(item1)
self.fit.modules.high.append(item2)
# Verification
self.assertEqual(self.fit.stats.launcher_slots.used, 1)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_use_item_absent(self):
# Verification
self.assertEqual(self.fit.stats.launcher_slots.used, 0)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_use_item_not_loaded(self):
self.fit.modules.high.append(ModuleHigh(self.allocate_type_id()))
# Verification
self.assertEqual(self.fit.stats.launcher_slots.used, 0)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
|
lgpl-3.0
| -3,366,892,227,183,581,000
| 38.219178
| 80
| 0.649668
| false
|
ednapiranha/refrig
|
posts/templatetags/render_post.py
|
1
|
3168
|
from django.http import *
from django import template
from django.template.defaultfilters import stringfilter
from django.template import RequestContext
from mongoengine import *
from profile.models import Profile
from posts.models import Post, ImagePost, TextPost, LinkPost, VideoPost, AudioPost
from profile.views import *
import tweepy
from urlparse import urlparse
register = template.Library()
@register.filter
def generate_post(value, post):
# determine which output to generate based on the post type
if isinstance(post, ImagePost):
media = '<img src="'+str(post.description)+'" alt="'+str(post.description)+'" />'
elif isinstance(post, LinkPost):
# if there is text in the link, try to grab what looks like the link
link = str(post.description)
for text_item in post.description.split(' '):
if 'http' in text_item:
link = text_item
media = '<a href="'+link+'" target="_blank">'+post.description+'</a>'
elif isinstance(post, VideoPost):
url = urlparse(post.description)
if post.description.lower().find('vimeo') > -1:
media = '<iframe src="http://player.vimeo.com/video/'+str(url.path.strip('/'))+'?wmode=transparent" width="70%" height="300"></iframe>'
elif post.description.lower().find('youtube') > -1:
media = '<iframe class="youtube-player" type="text/html" width="70%" height="300" src="http://youtube.com/embed/'+str(url.query.split('v=')[1].split('&')[0])+'"></iframe>'
elif isinstance(post, AudioPost):
if post.description.endswith('mp3'):
audio_type = 'audio/mpeg'
else:
audio_type = 'audio/ogg'
media = '<audio controls="controls" preload="auto"><source src="'+post.description+'" type="'+audio_type+'" /></audio><p><a href="'+post.description+'">'+post.description+'</a></p>'
else:
media = '<p>'+post.description+'</p>'
return media
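# Hypothetical template usage sketch (template variable name assumed):
#   {% load render_post %}
#   {{ ""|generate_post:post }}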
@register.filter
def generate_tags(value, post):
# generate tags output from list
tag_list = post.tags
tags = ''
for tag in tag_list:
if len(tag) > 0:
tags += '<a href="/tagged/'+tag+'">'+tag+'</a> '
return tags
@register.filter
def generate_meta_response(value, post):
# output the original author if it exists
result = ''
if post.original_author:
repost_count = str(Post.objects(original_id=post.original_id,original_author=post.original_author).count())
result += '<span class="repost_count">'+repost_count+'</span> <span class="repost_info">Originally posted by <a href="/user/'+str(post.original_author.id)+'">'+post.original_author.full_name+'</a></span>'
return result
@register.filter
def post_by_your_tag(user, tag):
# has the user tagged with this tag?
post = Post.objects(tags=tag.name, author=user).first()
if post:
return "you tagged a post with this"
return ""
@register.filter
def post_by_follower_tag(user, tag):
# has the user tagged with this tag?
post = Post.objects(tags=tag.name, author__in=user.follows).first()
if post:
return "someone you follow tagged a post with this"
return ""
|
bsd-3-clause
| 7,500,112,006,488,823,000
| 39.628205
| 212
| 0.645518
| false
|
montoyjh/pymatgen
|
pymatgen/io/feff/inputs.py
|
1
|
32751
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import re
import warnings
from operator import itemgetter
from tabulate import tabulate
import numpy as np
from monty.io import zopen
from monty.json import MSONable
from pymatgen import Structure, Lattice, Element, Molecule
from pymatgen.io.cif import CifParser
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.io_utils import clean_lines
from pymatgen.util.string import str_delimited
"""
This module defines classes for reading/manipulating/writing the main sections
of FEFF input file(feff.inp), namely HEADER, ATOMS, POTENTIAL and the program
control tags.
XANES and EXAFS input files, are available, for non-spin case at this time.
"""
__author__ = "Alan Dozier, Kiran Mathew"
__credits__ = "Anubhav Jain, Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0.3"
__maintainer__ = "Alan Dozier"
__email__ = "adozier@uky.edu"
__status__ = "Beta"
__date__ = "April 7, 2013"
# **Non-exhaustive** list of valid Feff.inp tags
VALID_FEFF_TAGS = ("CONTROL", "PRINT", "ATOMS", "POTENTIALS", "RECIPROCAL",
"REAL", "MARKER", "LATTICE", "TITLE", "RMULTIPLIER",
"SGROUP", "COORDINATES", "EQUIVALENCE", "CIF", "CGRID",
"CFAVERAGE", "OVERLAP", "EXAFS", "XANES", "ELNES", "EXELFS",
"LDOS", "ELLIPTICITY", "MULTIPOLE", "POLARIZATION",
"RHOZZP", "DANES", "FPRIME", "NRIXS", "XES", "XNCD",
"XMCD", "XNCDCONTROL", "END", "KMESH", "PRINT", "EGRID",
"DIMS", "AFOLP", "EDGE", "COMPTON", "DANES",
"FPRIME" "MDFF", "HOLE", "COREHOLE", "S02", "CHBROAD",
"EXCHANGE", "FOLP", "NOHOLE", "RGRID", "SCF",
"UNFREEZEF", "CHSHIFT", "DEBYE",
"INTERSTITIAL", "CHWIDTH", "EGAP", "EPS0", "EXTPOT",
"ION", "JUMPRM", "EXPOT", "SPIN", "LJMAX", "LDEC", "MPSE",
"PLASMON", "RPHASES", "RSIGMA", "PMBSE", "TDLDA", "FMS",
"DEBYA", "OPCONS", "PREP", "RESTART", "SCREEN", "SETE",
"STRFACTORS", "BANDSTRUCTURE", "RPATH", "NLEG", "PCRITERIA",
"SYMMETRY", "SS", "CRITERIA", "IORDER", "NSTAR", "ABSOLUTE",
"CORRECTIONS", "SIG2", "SIG3", "MBCONV", "SFCONV", "RCONV",
"SELF", "SFSE", "MAGIC", "TARGET", "STRFAC")
class Header(MSONable):
"""
Creates Header for the FEFF input file.
Has the following format::
* This feff.inp file generated by pymatgen, www.materialsproject.org
TITLE comment:
TITLE Source: CoO19128.cif
TITLE Structure Summary: (Co2 O2)
TITLE Reduced formula: CoO
TITLE space group: P1, space number: 1
TITLE abc: 3.297078 3.297078 5.254213
TITLE angles: 90.0 90.0 120.0
TITLE sites: 4
* 1 Co 0.666666 0.333332 0.496324
* 2 Co 0.333333 0.666667 0.996324
* 3 O 0.666666 0.333332 0.878676
* 4 O 0.333333 0.666667 0.378675
Args:
struct: Structure object, See pymatgen.core.structure.Structure.
source: User supplied identifier, i.e. for Materials Project this
would be the material ID number
comment: Comment for first header line
"""
def __init__(self, struct, source='', comment=''):
if struct.is_ordered:
self.struct = struct
self.source = source
sym = SpacegroupAnalyzer(struct)
data = sym.get_symmetry_dataset()
self.space_number = data["number"]
self.space_group = data["international"]
self.comment = comment or "None given"
else:
raise ValueError("Structure with partial occupancies cannot be "
"converted into atomic coordinates!")
@staticmethod
def from_cif_file(cif_file, source='', comment=''):
"""
Static method to create Header object from cif_file
Args:
cif_file: cif_file path and name
source: User supplied identifier, i.e. for Materials Project this
would be the material ID number
comment: User comment that goes in header
Returns:
Header Object
"""
r = CifParser(cif_file)
structure = r.get_structures()[0]
return Header(structure, source, comment)
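# A minimal usage sketch (hedged: the cif file name is the example from the
# class docstring, not a file shipped with this module):
#   header = Header.from_cif_file("CoO19128.cif", source="mp-id", comment="test")
#   print(header)  # prints the TITLE block shown in the class docstring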
@property
def structure_symmetry(self):
"""
Returns space number and space group
Returns:
Space number and space group list
"""
return self.space_group, self.space_number
@property
def formula(self):
"""
Formula of structure
"""
return self.struct.composition.formula
@staticmethod
def from_file(filename):
"""
Returns Header object from file
"""
hs = Header.header_string_from_file(filename)
return Header.from_string(hs)
@staticmethod
def header_string_from_file(filename='feff.inp'):
"""
Reads Header string from either a HEADER file or feff.inp file
Will also read a header from a non-pymatgen generated feff.inp file
Args:
filename: File name containing the Header data.
Returns:
Header string.
"""
with zopen(filename, "r") as fobject:
f = fobject.readlines()
feff_header_str = []
ln = 0
# Checks to see if generated by pymatgen
try:
feffpmg = f[0].find("pymatgen")
except IndexError:
feffpmg = False
# Reads pymatgen generated header or feff.inp file
if feffpmg:
nsites = int(f[8].split()[2])
for line in f:
ln += 1
if ln <= nsites + 9:
feff_header_str.append(line)
else:
# Reads header from header from feff.inp file from unknown
# source
end = 0
for line in f:
if (line[0] == "*" or line[0] == "T") and end == 0:
feff_header_str.append(line.replace("\r", ""))
else:
end = 1
return ''.join(feff_header_str)
@staticmethod
def from_string(header_str):
"""
Reads Header string and returns Header object if header was
generated by pymatgen.
Note: checks whether the header was generated by pymatgen; if not, a
Structure object cannot be reconstructed, so no Header object can be
created and an error string is returned instead.
Args:
header_str: pymatgen generated feff.inp header
Returns:
Header object.
"""
lines = tuple(clean_lines(header_str.split("\n"), False))
comment1 = lines[0]
feffpmg = comment1.find("pymatgen")
if feffpmg:
comment2 = ' '.join(lines[1].split()[2:])
source = ' '.join(lines[2].split()[2:])
basis_vec = lines[6].split(":")[-1].split()
# a, b, c
a = float(basis_vec[0])
b = float(basis_vec[1])
c = float(basis_vec[2])
lengths = [a, b, c]
# alpha, beta, gamma
basis_ang = lines[7].split(":")[-1].split()
alpha = float(basis_ang[0])
beta = float(basis_ang[1])
gamma = float(basis_ang[2])
angles = [alpha, beta, gamma]
lattice = Lattice.from_lengths_and_angles(lengths, angles)
natoms = int(lines[8].split(":")[-1].split()[0])
atomic_symbols = []
for i in range(9, 9 + natoms):
atomic_symbols.append(lines[i].split()[2])
# read the atomic coordinates
coords = []
for i in range(natoms):
toks = lines[i + 9].split()
coords.append([float(s) for s in toks[3:]])
struct = Structure(lattice, atomic_symbols, coords, False,
False, False)
h = Header(struct, source, comment2)
return h
else:
return "Header not generated by pymatgen, cannot return header object"
def __str__(self):
"""
String representation of Header.
"""
to_s = lambda x: "%0.6f" % x
output = ["* This FEFF.inp file generated by pymatgen",
''.join(["TITLE comment: ", self.comment]),
''.join(["TITLE Source: ", self.source]),
"TITLE Structure Summary: {}"
.format(self.struct.composition.formula),
"TITLE Reduced formula: {}"
.format(self.struct.composition.reduced_formula),
"TITLE space group: ({}), space number: ({})"
.format(self.space_group, self.space_number),
"TITLE abc:{}".format(" ".join(
[to_s(i).rjust(10) for i in self.struct.lattice.abc])),
"TITLE angles:{}".format(" ".join(
[to_s(i).rjust(10) for i in self.struct.lattice.angles])),
"TITLE sites: {}".format(self.struct.num_sites)]
for i, site in enumerate(self.struct):
output.append(" ".join(["*", str(i + 1), site.species_string,
" ".join([to_s(j).rjust(12)
for j in site.frac_coords])]))
return "\n".join(output)
def write_file(self, filename='HEADER'):
"""
Writes Header into filename on disk.
Args:
filename: Filename and path for file to be written to disk
"""
with open(filename, "w") as f:
f.write(str(self) + "\n")
class Atoms(MSONable):
"""
Atomic cluster centered around the absorbing atom.
"""
def __init__(self, struct, absorbing_atom, radius):
"""
Args:
struct (Structure): input structure
absorbing_atom (str/int): Symbol for absorbing atom or site index
radius (float): radius of the atom cluster in Angstroms.
"""
if struct.is_ordered:
self.struct = struct
self.pot_dict = get_atom_map(struct)
else:
raise ValueError("Structure with partial occupancies cannot be "
"converted into atomic coordinates!")
self.absorbing_atom, self.center_index = \
get_absorbing_atom_symbol_index(absorbing_atom, struct)
self.radius = radius
self._cluster = self._set_cluster()
def _set_cluster(self):
"""
Compute and set the cluster of atoms as a Molecule object. The site
coordinates are translated such that the absorbing atom (aka central
atom) is at the origin.
Returns:
Molecule
"""
center = self.struct[self.center_index].coords
sphere = self.struct.get_neighbors(self.struct[self.center_index], self.radius)
symbols = [self.absorbing_atom]
coords = [[0, 0, 0]]
for i, site_dist in enumerate(sphere):
site_symbol = re.sub(r"[^aA-zZ]+", "", site_dist[0].species_string)
symbols.append(site_symbol)
coords.append(site_dist[0].coords - center)
return Molecule(symbols, coords)
@property
def cluster(self):
"""
Returns the atomic cluster as a Molecule object.
"""
return self._cluster
@staticmethod
def atoms_string_from_file(filename):
"""
Reads atomic shells from file such as feff.inp or ATOMS file
The lines are arranged as follows:
x y z ipot Atom Symbol Distance Number
with distance being the shell radius and ipot an integer identifying
the potential used.
Args:
filename: File name containing atomic coord data.
Returns:
Atoms string.
"""
with zopen(filename, "rt") as fobject:
f = fobject.readlines()
coords = 0
atoms_str = []
for line in f:
if coords == 0:
find_atoms = line.find("ATOMS")
if find_atoms >= 0:
coords = 1
if coords == 1 and not ("END" in line):
atoms_str.append(line.replace("\r", ""))
return ''.join(atoms_str)
@staticmethod
def cluster_from_file(filename):
"""
Parse the feff input file and return the atomic cluster as a Molecule
object.
Args:
filename (str): path the feff input file
Returns:
Molecule: the atomic cluster as Molecule object. The absorbing atom
is the one at the origin.
"""
atoms_string = Atoms.atoms_string_from_file(filename)
line_list = [l.split() for l in atoms_string.splitlines()[3:]]
coords = []
symbols = []
for l in line_list:
if l:
coords.append([float(i) for i in l[:3]])
symbols.append(l[4])
return Molecule(symbols, coords)
def get_lines(self):
"""
Returns a list of string representations of the atomic configuration
information(x, y, z, ipot, atom_symbol, distance, id).
Returns:
list: list of strings, sorted by the distance from the absorbing
atom.
"""
lines = [["{:f}".format(self._cluster[0].x),
"{:f}".format(self._cluster[0].y),
"{:f}".format(self._cluster[0].z),
0, self.absorbing_atom, "0.0", 0]]
for i, site in enumerate(self._cluster[1:]):
site_symbol = re.sub(r"[^aA-zZ]+", "", site.species_string)
ipot = self.pot_dict[site_symbol]
lines.append(["{:f}".format(site.x), "{:f}".format(site.y),
"{:f}".format(site.z), ipot, site_symbol,
"{:f}".format(self._cluster.get_distance(0, i + 1)), i + 1])
return sorted(lines, key=itemgetter(5))
def __str__(self):
"""
String representation of Atoms file.
"""
lines_sorted = self.get_lines()
# TODO: remove the formatting and update the unittests
lines_formatted = str(tabulate(lines_sorted,
headers=["* x", "y", "z", "ipot",
"Atom", "Distance", "Number"]))
atom_list = lines_formatted.replace("--", "**")
return ''.join(["ATOMS\n", atom_list, "\nEND\n"])
def write_file(self, filename='ATOMS'):
"""
Write Atoms list to file.
Args:
filename: path for file to be written
"""
with zopen(filename, "wt") as f:
f.write(str(self) + "\n")
class Tags(dict):
"""
FEFF control parameters.
"""
def __init__(self, params=None):
"""
Args:
params: A set of input parameters as a dictionary.
"""
super(Tags, self).__init__()
if params:
self.update(params)
def __setitem__(self, key, val):
"""
Add parameter-val pair. Warns if parameter is not in list of valid
Feff tags. Also cleans the parameter and val by stripping leading and
trailing white spaces.
Arg:
key: dict key value
value: value associated with key in dictionary
"""
if key.strip().upper() not in VALID_FEFF_TAGS:
warnings.warn(key.strip() + " not in VALID_FEFF_TAGS list")
super(Tags, self).__setitem__(key.strip(),
Tags.proc_val(key.strip(), val.strip())
if isinstance(val, str) else val)
def as_dict(self):
"""
Dict representation.
Returns:
Dictionary of parameters from fefftags object
"""
tags_dict = dict(self)
tags_dict['@module'] = self.__class__.__module__
tags_dict['@class'] = self.__class__.__name__
return tags_dict
@staticmethod
def from_dict(d):
"""
Creates Tags object from a dictionary.
Args:
d: Dict of feff parameters and values.
Returns:
Tags object
"""
i = Tags()
for k, v in d.items():
if k not in ("@module", "@class"):
i[k] = v
return i
def get_string(self, sort_keys=False, pretty=False):
"""
Returns a string representation of the Tags. The reason why this
method is different from the __str__ method is to provide options
for pretty printing.
Args:
sort_keys: Set to True to sort the Feff parameters alphabetically.
Defaults to False.
pretty: Set to True for pretty aligned output. Defaults to False.
Returns:
String representation of Tags.
"""
keys = self.keys()
if sort_keys:
keys = sorted(keys)
lines = []
for k in keys:
if isinstance(self[k], dict):
if k in ["ELNES", "EXELFS"]:
lines.append([k, self._stringify_val(self[k]["ENERGY"])])
beam_energy = self._stringify_val(self[k]["BEAM_ENERGY"])
beam_energy_list = beam_energy.split()
if int(beam_energy_list[1]) == 0: # aver=0, specific beam direction
lines.append([beam_energy])
lines.append([self._stringify_val(self[k]["BEAM_DIRECTION"])])
else:
# no cross terms for orientation averaged spectrum
beam_energy_list[2] = str(0)
lines.append([self._stringify_val(beam_energy_list)])
lines.append([self._stringify_val(self[k]["ANGLES"])])
lines.append([self._stringify_val(self[k]["MESH"])])
lines.append([self._stringify_val(self[k]["POSITION"])])
else:
lines.append([k, self._stringify_val(self[k])])
if pretty:
return tabulate(lines)
else:
return str_delimited(lines, None, " ")
@staticmethod
def _stringify_val(val):
"""
Convert the given value to string.
"""
if isinstance(val, list):
return " ".join([str(i) for i in val])
else:
return str(val)
def __str__(self):
return self.get_string()
def write_file(self, filename='PARAMETERS'):
"""
Write Tags to a Feff parameter tag file.
Args:
filename: filename and path to write to.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__() + "\n")
@staticmethod
def from_file(filename="feff.inp"):
"""
Creates a Feff_tag dictionary from a PARAMETER or feff.inp file.
Args:
filename: Filename for either PARAMETER or feff.inp file
Returns:
Feff_tag object
"""
with zopen(filename, "rt") as f:
lines = list(clean_lines(f.readlines()))
params = {}
eels_params = []
ieels = -1
ieels_max = -1
for i, line in enumerate(lines):
m = re.match(r"([A-Z]+\d*\d*)\s*(.*)", line)
if m:
key = m.group(1).strip()
val = m.group(2).strip()
val = Tags.proc_val(key, val)
if key not in ("ATOMS", "POTENTIALS", "END", "TITLE"):
if key in ["ELNES", "EXELFS"]:
ieels = i
ieels_max = ieels + 5
else:
params[key] = val
if ieels >= 0:
if i >= ieels and i <= ieels_max:
if i == ieels + 1:
if int(line.split()[1]) == 1:
ieels_max -= 1
eels_params.append(line)
if eels_params:
if len(eels_params) == 6:
eels_keys = ['BEAM_ENERGY', 'BEAM_DIRECTION', 'ANGLES', 'MESH', 'POSITION']
else:
eels_keys = ['BEAM_ENERGY', 'ANGLES', 'MESH', 'POSITION']
eels_dict = {"ENERGY": Tags._stringify_val(eels_params[0].split()[1:])}
for k, v in zip(eels_keys, eels_params[1:]):
eels_dict[k] = str(v)
params[str(eels_params[0].split()[0])] = eels_dict
return Tags(params)
@staticmethod
def proc_val(key, val):
"""
Static helper method to convert Feff parameters to proper types, e.g.
integers, floats, lists, etc.
Args:
key: Feff parameter key
val: Actual value of Feff parameter.
"""
list_type_keys = list(VALID_FEFF_TAGS)
del list_type_keys[list_type_keys.index("ELNES")]
del list_type_keys[list_type_keys.index("EXELFS")]
boolean_type_keys = ()
float_type_keys = ("S02", "EXAFS", "RPATH")
def smart_int_or_float(numstr):
if numstr.find(".") != -1 or numstr.lower().find("e") != -1:
return float(numstr)
else:
return int(numstr)
try:
if key.lower() == 'cif':
m = re.search(r"\w+.cif", val)
return m.group(0)
if key in list_type_keys:
output = list()
toks = re.split(r"\s+", val)
for tok in toks:
m = re.match(r"(\d+)\*([\d\.\-\+]+)", tok)
if m:
output.extend([smart_int_or_float(m.group(2))] *
int(m.group(1)))
else:
output.append(smart_int_or_float(tok))
return output
if key in boolean_type_keys:
m = re.search(r"^\W+([TtFf])", val)
if m:
if m.group(1) == "T" or m.group(1) == "t":
return True
else:
return False
raise ValueError(key + " should be a boolean type!")
if key in float_type_keys:
return float(val)
except ValueError:
return val.capitalize()
return val.capitalize()
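# Illustrative proc_val conversions (my own examples, not from the docstring):
#   Tags.proc_val("S02", "0.95") -> 0.95 (float-typed tag)
#   Tags.proc_val("CONTROL", "1 1 1 1 1 1") -> [1, 1, 1, 1, 1, 1]
#   Tags.proc_val("PRINT", "3*0") -> [0, 0, 0] (the n*m shorthand is expanded)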
def diff(self, other):
"""
Diff function. Compares two PARAMETER files and indicates which
parameters are the same and which are not. Useful for checking whether
two runs were done using the same parameters.
Args:
other: The other PARAMETER dictionary to compare to.
Returns:
Dict of the format {"Same" : parameters_that_are_the_same,
"Different": parameters_that_are_different} Note that the
parameters are return as full dictionaries of values.
"""
similar_param = {}
different_param = {}
for k1, v1 in self.items():
if k1 not in other:
different_param[k1] = {"FEFF_TAGS1": v1,
"FEFF_TAGS2": "Default"}
elif v1 != other[k1]:
different_param[k1] = {"FEFF_TAGS1": v1,
"FEFF_TAGS2": other[k1]}
else:
similar_param[k1] = v1
for k2, v2 in other.items():
if k2 not in similar_param and k2 not in different_param:
if k2 not in self:
different_param[k2] = {"FEFF_TAGS1": "Default",
"FEFF_TAGS2": v2}
return {"Same": similar_param, "Different": different_param}
def __add__(self, other):
"""
Add all the values of another Tags object to this object
Facilitates the use of "standard" Tags
"""
params = dict(self)
for k, v in other.items():
if k in self and v != self[k]:
raise ValueError("Tags have conflicting values!")
else:
params[k] = v
return Tags(params)
class Potential(MSONable):
"""
FEFF atomic potential.
"""
def __init__(self, struct, absorbing_atom):
"""
Args:
struct (Structure): Structure object.
absorbing_atom (str/int): Absorbing atom symbol or site index
"""
if struct.is_ordered:
self.struct = struct
self.pot_dict = get_atom_map(struct)
else:
raise ValueError("Structure with partial occupancies cannot be "
"converted into atomic coordinates!")
self.absorbing_atom, _ = \
get_absorbing_atom_symbol_index(absorbing_atom, struct)
@staticmethod
def pot_string_from_file(filename='feff.inp'):
"""
Reads Potential parameters from a feff.inp or FEFFPOT file.
The lines are arranged as follows:
ipot Z element lmax1 lmax2 stoichiometry spinph
Args:
filename: file name containing potential data.
Returns:
FEFFPOT string.
"""
with zopen(filename, "rt") as f_object:
f = f_object.readlines()
ln = -1
pot_str = ["POTENTIALS\n"]
pot_tag = -1
pot_data = 0
pot_data_over = 1
sep_line_pattern = [re.compile('ipot.*Z.*tag.*lmax1.*lmax2.*spinph'),
re.compile('^[*]+.*[*]+$')]
for line in f:
if pot_data_over == 1:
ln += 1
if pot_tag == -1:
pot_tag = line.find("POTENTIALS")
ln = 0
if pot_tag >= 0 and ln > 0 and pot_data_over > 0:
try:
if len(sep_line_pattern[0].findall(line)) > 0 or \
len(sep_line_pattern[1].findall(line)) > 0:
pot_str.append(line)
elif int(line.split()[0]) == pot_data:
pot_data += 1
pot_str.append(line.replace("\r", ""))
except (ValueError, IndexError):
if pot_data > 0:
pot_data_over = 0
return ''.join(pot_str).rstrip('\n')
@staticmethod
def pot_dict_from_string(pot_data):
"""
Creates atomic symbol/potential number dictionary
forward and reverse
Arg:
pot_data: potential data in string format
Returns:
forward and reverse atom symbol and potential number dictionaries.
"""
pot_dict = {}
pot_dict_reverse = {}
begin = 0
ln = -1
for line in pot_data.split("\n"):
try:
if begin == 0 and line.split()[0] == "0":
begin += 1
ln = 0
if begin == 1:
ln += 1
if ln > 0:
atom = line.split()[2]
index = int(line.split()[0])
pot_dict[atom] = index
pot_dict_reverse[index] = atom
except (ValueError, IndexError):
pass
return pot_dict, pot_dict_reverse
def __str__(self):
"""
Returns a string representation of potential parameters to be used in
the feff.inp file,
determined from structure object.
The lines are arranged as follows:
ipot Z element lmax1 lmax2 stoichiometry spinph
Returns:
String representation of Atomic Coordinate Shells.
"""
central_element = Element(self.absorbing_atom)
ipotrow = [[0, central_element.Z, central_element.symbol, -1, -1, .0001, 0]]
for el, amt in self.struct.composition.items():
ipot = self.pot_dict[el.symbol]
ipotrow.append([ipot, el.Z, el.symbol, -1, -1, amt, 0])
ipot_sorted = sorted(ipotrow, key=itemgetter(0))
ipotrow = str(tabulate(ipot_sorted,
headers=["*ipot", "Z", "tag", "lmax1",
"lmax2", "xnatph(stoichometry)",
"spinph"]))
ipotlist = ipotrow.replace("--", "**")
ipotlist = ''.join(["POTENTIALS\n", ipotlist])
return ipotlist
def write_file(self, filename='POTENTIALS'):
"""
Write to file.
Args:
filename: filename and path to write potential file to.
"""
with zopen(filename, "wt") as f:
f.write(str(self) + "\n")
class Paths(MSONable):
"""
Set FEFF scattering paths('paths.dat' file used by the 'genfmt' module).
"""
def __init__(self, atoms, paths, degeneracies=None):
"""
Args:
atoms (Atoms): Atoms object
paths (list(list)): list of paths. Each path is a list of atom indices in the atomic
cluster(the molecular cluster created by Atoms class).
e.g. [[0, 1, 2], [5, 9, 4, 1]] -> 2 paths: one with 3 legs and the other with 4 legs.
degeneracies (list): list of degeneracies, one for each path. Set to 1 if not specified.
"""
self.atoms = atoms
self.paths = paths
self.degeneracies = degeneracies or [1] * len(paths)
assert len(self.degeneracies) == len(self.paths)
def __str__(self):
lines = ["PATH", "---------------"]
# max possible, to avoid name collision count down from max value.
path_index = 9999
for i, legs in enumerate(self.paths):
lines.append("{} {} {}".format(path_index, len(legs), self.degeneracies[i]))
lines.append("x y z ipot label")
for l in legs:
coords = self.atoms.cluster[l].coords.tolist()
tmp = "{:.6f} {:.6f} {:.6f}".format(*tuple(coords))
element = str(self.atoms.cluster[l].specie.name)
# the potential index for the absorbing atom(the one at the cluster origin) is 0
potential = 0 if np.linalg.norm(coords) <= 1e-6 else self.atoms.pot_dict[element]
tmp = "{} {} {}".format(tmp, potential, element)
lines.append(tmp)
path_index -= 1
return "\n".join(lines)
def write_file(self, filename="paths.dat"):
"""
Write paths.dat.
"""
with zopen(filename, "wt") as f:
f.write(str(self) + "\n")
class FeffParserError(Exception):
"""
Exception class for Structure.
Raised when the structure has problems, e.g., atoms that are too close.
"""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return "FeffParserError : " + self.msg
def get_atom_map(structure):
"""
Returns a dict that maps each atomic symbol to a unique integer starting
from 1.
Args:
structure (Structure)
Returns:
dict
"""
syms = [site.specie.symbol for site in structure]
unique_pot_atoms = []
for sym in syms:
if sym not in unique_pot_atoms:
unique_pot_atoms.append(sym)
atom_map = {}
for i, atom in enumerate(unique_pot_atoms):
atom_map[atom] = i + 1
return atom_map
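# Illustrative example (using the CoO structure from the Header docstring):
#   get_atom_map(struct) -> {"Co": 1, "O": 2}, i.e. one potential index per
#   atomic symbol, numbered in order of first appearance.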
def get_absorbing_atom_symbol_index(absorbing_atom, structure):
"""
Return the absorbing atom symbol and site index in the given structure.
Args:
absorbing_atom (str/int): symbol or site index
structure (Structure)
Returns:
str, int: symbol and site index
"""
if isinstance(absorbing_atom, str):
return absorbing_atom, structure.indices_from_symbol(absorbing_atom)[0]
elif isinstance(absorbing_atom, int):
return str(structure[absorbing_atom].specie), absorbing_atom
else:
raise ValueError("absorbing_atom must be either specie symbol or site index")
|
mit
| -3,235,196,625,060,623,000
| 33.804463
| 101
| 0.511618
| false
|
ctgk/BayesianNetwork
|
bayesnet/image/max_pooling2d.py
|
1
|
3157
|
import numpy as np
from bayesnet.tensor.tensor import Tensor
from bayesnet.function import Function
from bayesnet.image.util import img2patch, patch2img
class MaxPooling2d(Function):
def __init__(self, pool_size, stride, pad):
"""
construct 2 dimensional max-pooling function
Parameters
----------
pool_size : int or tuple of ints
pooling size
stride : int or tuple of ints
stride of kernel application
pad : int or tuple of ints
padding image
"""
self.pool_size = self._check_tuple(pool_size, "pool_size")
self.stride = self._check_tuple(stride, "stride")
self.pad = self._check_tuple(pad, "pad")
self.pad = (0,) + self.pad + (0,)
def _check_tuple(self, tup, name):
if isinstance(tup, int):
tup = (tup,) * 2
if not isinstance(tup, tuple):
raise TypeError(
"Unsupported type for {}: {}".format(name, type(tup))
)
if len(tup) != 2:
raise ValueError(
"the length of {} must be 2, not {}".format(name, len(tup))
)
if not all([isinstance(n, int) for n in tup]):
raise TypeError(
"Unsuported type for {}".format(name)
)
if not all([n >= 0 for n in tup]):
raise ValueError(
"{} must be non-negative values".format(name)
)
return tup
def forward(self, x):
x = self._convert2tensor(x)
self._equal_ndim(x, 4)
self.x = x
img = np.pad(x.value, [(p,) for p in self.pad], "constant")
patch = img2patch(img, self.pool_size, self.stride)
n_batch, xlen_out, ylen_out, _, _, in_channels = patch.shape
patch = patch.reshape(n_batch, xlen_out, ylen_out, -1, in_channels)
self.shape = img.shape
self.index = patch.argmax(axis=3)
return Tensor(patch.max(axis=3), function=self)
def backward(self, delta):
delta_patch = np.zeros(delta.shape + (np.prod(self.pool_size),))
index = np.where(delta == delta) + (self.index.ravel(),)
delta_patch[index] = delta.ravel()
delta_patch = np.reshape(delta_patch, delta.shape + self.pool_size)
delta_patch = delta_patch.transpose(0, 1, 2, 4, 5, 3)
dx = patch2img(delta_patch, self.stride, self.shape)
slices = tuple(slice(p, len_ - p) for p, len_ in zip(self.pad, self.shape))
dx = dx[slices]  # index with a tuple; list-of-slices indexing is deprecated in NumPy
self.x.backward(dx)
def max_pooling2d(x, pool_size, stride=1, pad=0):
"""
spatial max pooling
Parameters
----------
x : (n_batch, xlen, ylen, in_channel) Tensor
input tensor
pool_size : int or tuple of ints (kx, ky)
pooling size
stride : int or tuple of ints (sx, sy)
stride of pooling application
pad : int or tuple of ints (px, py)
padding input
Returns
-------
output : (n_batch, xlen', ylen', out_channel) Tensor
max pooled image
len' = (len + p - k) // s + 1
"""
return MaxPooling2d(pool_size, stride, pad).forward(x)
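# A minimal usage sketch (hedged: shapes are illustrative, not from this
# repo's tests; the input array is converted to a Tensor internally):
#   x = np.random.randn(1, 4, 4, 3)             # (n_batch, xlen, ylen, in_channel)
#   y = max_pooling2d(x, pool_size=2, stride=2)
#   y.value.shape == (1, 2, 2, 3)               # (4 + 0 - 2) // 2 + 1 == 2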
|
mit
| -6,440,447,094,784,967,000
| 32.946237
| 78
| 0.555591
| false
|
huajiahen/hotspot
|
backend/Busy/models.py
|
1
|
1154
|
# -*- coding:utf-8 -*-
from django.db.models import *
class Event(Model):
content = CharField(u'content',max_length = 200)
starttime = IntegerField(u'start time')
endtime = IntegerField(u'end time')
#longitude = DecimalField(u'longitude',max_digits = 18,decimal_places = 14)
#latitude = DecimalField(u'latitude',max_digits = 18,decimal_places = 14)
longitude = FloatField(u'longitude')
latitude = FloatField(u'latitude')
address = CharField(u'address',max_length = 100)
hit = IntegerField(u'want-to-go count',default = 0)
class Emergency(Model):
content = CharField(u'content',max_length = 100)
#longitude = DecimalField(u'longitude',max_digits = 18,decimal_places = 14)
#latitude = DecimalField(u'latitude',max_digits = 18,decimal_places = 14)
longitude = FloatField(u'longitude')
latitude = FloatField(u'latitude')
class Man(Model):
user_id = CharField(u'user ID',max_length = 200)
longitude = DecimalField(u'longitude',max_digits = 18,decimal_places = 14)
latitude = DecimalField(u'latitude',max_digits = 18,decimal_places = 14)
hadevent = BooleanField(u'participates in event',default = False)
|
mit
| -7,604,758,913,650,735,000
| 38.407407
| 72
| 0.662594
| false
|
adfernandes/pcp
|
src/python/distutils-setup.py
|
6
|
2709
|
""" Build script for the PCP python package """
#
# Copyright (C) 2012-2019 Red Hat.
# Copyright (C) 2009-2012 Michael T. Werner
#
# This file is part of the "pcp" module, the python interfaces for the
# Performance Co-Pilot toolkit.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# Old way, only used by platforms without setuptools module
from distutils.core import setup, Extension
setup(name = 'pcp',
version = '5.0',
description = 'Performance Co-Pilot collector, monitor and instrumentation APIs',
license = 'GPLv2+',
author = 'Performance Co-Pilot Development Team',
author_email = 'pcp@groups.io',
url = 'https://pcp.io',
packages = ['pcp'],
ext_modules = [
Extension('cpmapi', ['pmapi.c'], libraries = ['pcp']),
Extension('cpmda', ['pmda.c'], libraries = ['pcp_pmda', 'pcp']),
Extension('cpmgui', ['pmgui.c'], libraries = ['pcp_gui']),
Extension('cpmi', ['pmi.c'], libraries = ['pcp_import']),
Extension('cmmv', ['mmv.c'], libraries = ['pcp_mmv']),
],
platforms = [ 'Windows', 'Linux', 'FreeBSD', 'NetBSD', 'OpenBSD', 'Solaris', 'Mac OS X', 'AIX' ],
long_description =
'PCP provides services to support system-level performance monitoring',
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: POSIX :: AIX',
'Operating System :: POSIX :: Linux',
'Operating System :: POSIX :: BSD :: NetBSD',
'Operating System :: POSIX :: BSD :: OpenBSD',
'Operating System :: POSIX :: BSD :: FreeBSD',
'Operating System :: POSIX :: SunOS/Solaris',
'Operating System :: Unix',
'Topic :: System :: Logging',
'Topic :: System :: Monitoring',
'Topic :: System :: Networking :: Monitoring',
'Topic :: Software Development :: Libraries',
],
)
|
lgpl-2.1
| -693,427,241,142,172,500
| 42
| 101
| 0.632706
| false
|
p-hofmann/ConfigParserWrapper
|
configparserwrapper.py
|
1
|
10557
|
__author__ = 'Peter Hofmann'
__version__ = '0.1.3'
import os
import sys
if sys.version_info < (3, 3):
from collections import Iterable
else:
from collections.abc import Iterable
from io import StringIO
if sys.version_info < (3,):
from ConfigParser import SafeConfigParser as ConfigParser
else:
from configparser import ConfigParser
from scripts.loggingwrapper import DefaultLogging
class ConfigParserWrapper(DefaultLogging):
"""
@type _config: ConfigParser
"""
_boolean_states = {
'yes': True, 'true': True, 'on': True,
'no': False, 'false': False, 'off': False,
'y': True, 't': True, 'n': False, 'f': False}
def __init__(self, logfile=None, verbose=True):
"""
Wrapper for the SafeConfigParser class for easy use.
@param logfile: file handler or file path to a log file
@type logfile: file | FileIO | StringIO | None
@param verbose: No stdout or stderr messages. Warnings and errors will be only logged to a file, if one is given
@type verbose: bool
@return: None
@rtype: None
"""
super(ConfigParserWrapper, self).__init__(
label="ConfigParserWrapper", logfile=logfile, verbose=verbose)
self._config = ConfigParser()
self._config_file_path = None
def read(self, config_file):
"""
Read a configuration file in ini format
@attention: config_file argument may be file path or stream.
@param config_file: file handler or file path to a config file
@type config_file: file | FileIO | StringIO
@rtype: None
"""
assert isinstance(config_file, str) or self.is_stream(config_file), "Invalid config file path: {}".format(config_file)
if isinstance(config_file, str) and not os.path.isfile(config_file):
self._logger.error("Config file does not exist: '{}'".format(config_file))
raise Exception("File does not exist")
if isinstance(config_file, str):
self._config.read(config_file)
self._config_file_path = config_file
elif self.is_stream(config_file):
if sys.version_info < (3,):
self._config.readfp(config_file)
else:
self._config.read_file(config_file)
self._config_file_path = config_file.name
else:
self._logger.error("Invalid config file argument '{}'".format(config_file))
raise Exception("Unknown argument")
def write(self, file_path):
"""
Write config file
@param file_path: Output file path
@type file_path: str
@rtype: None
"""
with open(file_path, "w") as write_handler:
self._config.write(write_handler)
def set_value(self, option, value, section=None):
"""
@param option: name of the option to set
@type option: str
@param value: value to assign to the option
@type value: any
@param section: name of section (created if missing)
@type section: str
@rtype: None
"""
if not self._config.has_section(section):
self._config.add_section(section)
self._config.set(section, option, value)
def validate_sections(self, list_sections):
"""
Validate a list of section names for availability.
@param list_sections: list of section names
@type list_sections: list of str
@return: None if all valid, otherwise list of invalid sections
@rtype: None | list[str]
"""
assert isinstance(list_sections, Iterable), "Invalid, not a list: '{}'".format(list_sections)
invalid_sections = []
for section in list_sections:
if not self._config.has_section(section):
invalid_sections.append(section)
if len(invalid_sections) > 0:
return invalid_sections
return None
def log_invalid_sections(self, list_sections):
"""
print out a list of invalid section names to log.
@param list_sections: list of section names
@type list_sections: list[str]
@return: None
@rtype: None
"""
assert isinstance(list_sections, Iterable), "Invalid, not a list: '{}'".format(list_sections)
for section in list_sections:
self._logger.warning("Invalid section '{}'".format(section))
def get_value(self, option, section=None, is_digit=False, is_boolean=False, is_path=False, silent=False):
"""
get a value of an option in a specific section of the config file.
@attention: Set silent to True if a missing section or option should not be treated as an error.
@param option: name of option in a section
@type option: str
@param section: name of section
@type section: str
@param is_digit: value is a number and will be returned as such
@type is_digit: bool
@param is_boolean: value is bool and will be returned as True or False
@type is_boolean: bool
@param is_path: value is a path and will be returned as absolute path
@type is_path: bool
@param silent: if True, suppress error logging for missing sections/options
@type silent: bool
@return: None if not available or ''. Else: depends on given arguments
@rtype: None | str | int | float | bool
"""
assert section is None or isinstance(section, str), "Invalid section: '{}'".format(section)
assert isinstance(option, str), "Invalid option: '{}'".format(option)
assert isinstance(is_digit, bool), "Invalid argument, 'is_digit' must be boolean, but got: '{}'".format(type(is_digit))
assert isinstance(is_boolean, bool), "Invalid argument, 'is_boolean' must be boolean, but got: '{}'".format(type(is_boolean))
assert isinstance(silent, bool), "Invalid argument, 'silent' must be boolean, but got: '{}'".format(type(silent))
assert isinstance(is_path, bool), "Invalid argument, 'is_path' must be boolean, but got: '{}'".format(type(is_path))
if section is None:
section = self._get_section_of_option(option)
if not self._config.has_section(section):
if not silent:
if section is None:
self._logger.error("Missing option '{}'".format(option))
else:
self._logger.error("Missing section '{}'".format(section))
return None
if not self._config.has_option(section, option):
if not silent:
self._logger.error("Missing option '{}' in section '{}'".format(option, section))
return None
value = self._config.get(section, option)
if value == '':
if not silent:
self._logger.warning("Empty value in '{}': '{}'".format(section, option))
return None
if is_digit:
return self._string_to_digit(value)
if is_boolean:
return self._is_true(value)
if is_path:
return self._get_full_path(value)
return value
def _get_section_of_option(self, option):
"""
get the first section containing the given option
@param option: name of option in a section
@type option: str
@return: Section name. None if not available
@rtype: None | str
"""
assert isinstance(option, str), "Invalid argument, 'option' must be string, but got: '{}'".format(type(option))
for section in self._config.sections():
if self._config.has_option(section, option):
return section
return None
def search_sections_of(self, option):
"""
get all sections that contain a given option
@param option: name of option in a section
@type option: str
@return: set of section names (empty if the option is not found)
@rtype: set[str]
"""
assert isinstance(option, str), "Invalid argument, 'option' must be string, but got: '{}'".format(type(option))
result = set()
for section in self._config.sections():
if self._config.has_option(section, option):
result.add(section)
return result
def _string_to_digit(self, value):
"""
parse string to an int or float.
@param value: some string to be converted
@type value: str
@return: None if invalid, otherwise int or float
@rtype: None | int | float
"""
assert isinstance(value, str), "Invalid argument, 'value' must be string, but got: '{}'".format(type(value))
try:
if '.' in value:
return float(value)
return int(value)
except ValueError:
self._logger.error("Invalid digit value '{}'".format(value))
return None
def _is_true(self, value):
"""
parse string to True or False.
@param value: some string to be converted
@type value: str
@return: None if invalid, otherwise True or False
@rtype: None | bool
"""
assert isinstance(value, str), "Invalid argument, 'value' must be string, but got: '{}'".format(type(value))
if value.lower() not in ConfigParserWrapper._boolean_states:
self._logger.error("Invalid bool value '{}'".format(value))
return None
return ConfigParserWrapper._boolean_states[value.lower()]
@staticmethod
def _get_full_path(value):
"""
convert string to absolute normpath.
@param value: some string to be converted
@type value: str
@return: absolute normpath
@rtype: str
"""
assert isinstance(value, str), "Invalid argument, 'value' must be string, but got: '{}'".format(type(value))
parent_directory, filename = os.path.split(value)
if not parent_directory and not os.path.isfile(value):
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, filename)
if os.path.isfile(exe_file):
value = exe_file
break
value = os.path.expanduser(value)
value = os.path.normpath(value)
value = os.path.abspath(value)
return value
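# A minimal usage sketch (hedged: the file path, section and option names
# below are hypothetical, not part of this module):
#   wrapper = ConfigParserWrapper(verbose=False)
#   wrapper.read("settings.ini")
#   size = wrapper.get_value("max_size", section="Main", is_digit=True)
#   debug = wrapper.get_value("debug", section="Main", is_boolean=True)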
|
gpl-2.0
| -7,490,994,053,260,148,000
| 35.912587
| 133
| 0.575637
| false
|
edunham/toys
|
utilities/packingblocks.py
|
1
|
2505
|
#! /usr/bin/env python
# From IRC:
#
# "I was thinking about a toy idea for my kid to teach multiplication through
# area representation. 2x3 is a two-inch-by-three-inch slab of something with
# lines on it, etc. I'd need 45 pieces (since AxB = BxA, you can drop almost
# half) but if I wanted to put it away in almost equal 9x9 layers, how many
# layers would be required?"
# Let's draw a picture. We have a times table, a square from 1 to 9 each side,
# but a bunch of blocks are duplicates so I will X them out because we don't
# need to make them:
# 123456789
# 1 XXXXXXXX
# 2 XXXXXXX
# 3 XXXXXX
# 4 XXXXX
# 5 XXXX
# 6 XXX
# 7 XX
# 8 X
# 9
# First off I wanted to know if there's any hope of packing with no gaps. So I
# find the volume of units that it'll all take up. The function row() tells me
# the total area of the pieces in each row -- for row 3, I have a 3x1 piece, a
# 3x2 piece, and a 3x3 piece, so the total area is 18 units.
def row(end):
sum = 0
for i in range(1,end+1):
sum += end * i
return sum
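# Quick sanity check (my arithmetic, matching the comment above):
# row(3) == 3*1 + 3*2 + 3*3 == 18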
# So to get the total volume of a set of times-table blocks going up to n (n has
# been 9 so far) I'll express which rows I have -- range(1,n+1) -- and sum up
# all their areas. Note that area of them all spread out, and volume, are
# synonymous here since I'm assuming they're 1 unit thick. This may come in
# handy later so I can put the blocks away making the best use of the 3d box,
# like if some go in vertically while others are horizontal. Again, here I'm
# just looking for a set size and box size that have a **chance** of packing
# into a box with a square footprint.
def math_toy_volume(n):
return sum(map(row, range(1,n+1)))
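# Worked example (my arithmetic): math_toy_volume(2) == row(1) + row(2)
# == 1 + 6 == 7 square units for the set going up to 2.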
# I happen to know from the original problem that the set had 45 pieces. If I
# try other set sizes, though, I would also like to know how many pieces they
# have. Easy, but easier to name it.
def math_toy_pieces(n):
return sum(range(1,n+1))
# Anyways I want the ones that have any hope of packing into a square box so I
# need to get the factors of the area and then find dups in the list of factors.
# From https://stackoverflow.com/questions/6800193/what-is-the-most-efficient-way-of-finding-all-the-factors-of-a-number-in-python
# I get:
def factors(n):
return set(reduce(list.__add__,
([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))
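# Illustrative check (my example): factors(36) == {1, 2, 3, 4, 6, 9, 12, 18, 36};
# 6 pairs with itself, so an area of 36 could pack a perfectly square 6x6 layer.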
for i in range(1,21):
n = math_toy_volume(i)
print str(n) + "\t" + str(sorted(factors(n)))
|
mit
| 3,056,071,601,932,460,500
| 35.838235
| 130
| 0.678643
| false
|
SylvainDe/DidYouMean-Python
|
didyoumean/didyoumean_internal.py
|
1
|
38565
|
# -*- coding: utf-8
"""Logic to add suggestions to exceptions."""
import keyword
import difflib
import didyoumean_re as re
import itertools
import inspect
import errno
import os
import sys
from collections import namedtuple
#: Standard modules we'll consider while searching for symbols, for instance:
# - NameError and the name is an attribute of a std (imported or not) module
# - NameError and the name is the name of a standard (non imported) module
# - ImportError and the name looks like a standard (imported or not) module
# - TODO: AttributeError and the attribute is the one of a module
# Note that in the first case, the modules must be considered safe to import
# (no side-effects) but in some other cases, we only care about the names
# of the module and a more extended list could be used.
# The list is to be completed
# Potential candidates :
# - sys.builtin_module_names
# https://docs.python.org/2/library/sys.html#sys.builtin_module_names
# - sys.modules
# https://docs.python.org/2/library/sys.html#sys.modules
# - pkgutil.iter_modules
# https://docs.python.org/2/library/pkgutil.html#pkgutil.iter_modules
STAND_MODULES = set(['string', 'os', 'sys', 're', 'math', 'random',
'datetime', 'timeit', 'unittest', 'itertools',
'functools', 'collections', '__future__'])
#: Near-synonym methods that can be confused from one type to another
# To be completed
SYNONYMS_SETS = [
set(['add', 'append', 'push']),
set(['extend', 'update']),
set(['remove', 'discard', '__delitem__'])
]
#: Maximum number of files suggested
MAX_NB_FILES = 4
#: Message to suggest not using recursion
AVOID_REC_MSG = \
"to avoid recursion (cf " \
"http://neopythonic.blogspot.fr/2009/04/tail-recursion-elimination.html)"
#: Messages for functions removed from one version to another
APPLY_REMOVED_MSG = "to call the function directly (`apply` is deprecated " \
"since Python 2.3, removed since Python 3)"
BUFFER_REMOVED_MSG = '"memoryview" (`buffer` has been removed " \
"since Python 3)'
CMP_REMOVED_MSG = "to use comparison operators (`cmp` is removed since " \
"Python 3 but you can define `def cmp(a, b): return (a > b) - (a < b)` " \
"if needed)"
CMP_ARG_REMOVED_MSG = 'to use "key" (`cmp` has been replaced by `key` ' \
"since Python 3 - `functools.cmp_to_key` provides a convenient way " \
"to convert cmp function to key function)"
EXC_ATTR_REMOVED_MSG = 'to use "sys.exc_info()" returning a tuple ' \
'of the form (type, value, traceback) ("exc_type", "exc_value" and ' \
'"exc_traceback" are removed from sys since Python 3)'
LONG_REMOVED_MSG = 'to use "int" (since Python 3, there is only one ' \
'integer type: `int`)'
MEMVIEW_ADDED_MSG = '"buffer" (`memoryview` is added in Python 2.7 and " \
"completely replaces `buffer` since Python 3)'
RELOAD_REMOVED_MSG = '"importlib.reload" or "imp.reload" (`reload` is " \
"removed since Python 3)'
STDERR_REMOVED_MSG = '"Exception" (`StandardError` has been removed since " \
"Python 3)'
BREAKPOINT_ADDED_MSG = 'to use "import pdb; pdb.set_trace()" (`breakpoint` " \
"is added in Python 3.7)'
NO_KEYWORD_ARG_MSG = "use positional arguments (functions written in C \
do not accept keyword arguments, only positional arguments)"
#: Message to suggest using comma instead of period
COMMA_INSTEAD_OF_PERIOD_MSG = "to use a comma instead of a period"
# Helper function for string manipulation
def quote(string):
"""Surround string with single quotes."""
return "'{0}'".format(string)
def get_close_matches(word, possibilities):
"""
Return a list of the best "good enough" matches.
Wrapper around difflib.get_close_matches() to be able to
change default values or implementation details easily.
"""
return [w
for w in difflib.get_close_matches(word, possibilities, 3, 0.7)
if w != word]
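# Illustrative example (my own, using the defaults above):
#   get_close_matches('maths', STAND_MODULES) -> ['math']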
def get_suggestion_string(sugg):
"""Return the suggestion list as a string."""
sugg = list(sugg)
return ". Did you mean " + ", ".join(sugg) + "?" if sugg else ""
# Helper functions for code introspection
def subclasses_wrapper(klass):
"""Wrapper around __subclass__ as it is not as easy as it should."""
method = getattr(klass, '__subclasses__', None)
if method is None:
return []
try:
return method()
except TypeError:
try:
return method(klass)
except TypeError:
return []
def get_subclasses(klass):
"""Get the subclasses of a class.
Get the set of direct/indirect subclasses of a class including itself.
"""
subclasses = set(subclasses_wrapper(klass))
for derived in set(subclasses):
subclasses.update(get_subclasses(derived))
subclasses.add(klass)
return subclasses
def get_types_for_str_using_inheritance(name):
"""Get types corresponding to a string name.
This goes through all defined classes. Therefore, it :
- does not include old style classes on Python 2.x
- is to be called as late as possible to ensure wanted type is defined.
"""
return set(c for c in get_subclasses(object) if c.__name__ == name)
def get_types_for_str_using_names(name, frame):
"""Get types corresponding to a string name using names in frame.
This does not find everything as builtin types for instance may not
be in the names.
"""
return set(obj
for obj, _ in get_objects_in_frame(frame).get(name, [])
if inspect.isclass(obj) and obj.__name__ == name)
def get_types_for_str(tp_name, frame):
"""Get a list of candidate types from a string.
String corresponds to the tp_name as described in :
https://docs.python.org/2/c-api/typeobj.html#c.PyTypeObject.tp_name
as it is the name used in exception messages. It may include full path
with module, subpackage, package but this is just removed in current
implementation to search only based on the type name.
Lookup uses both class hierarchy and name lookup as the first may miss
old style classes on Python 2 and second does find them.
Just like get_types_for_str_using_inheritance, this needs to be called
as late as possible but because it requires a frame, there is not much
choice anyway.
"""
name = tp_name.split('.')[-1]
res = set.union(
get_types_for_str_using_inheritance(name),
get_types_for_str_using_names(name, frame))
assert all(inspect.isclass(t) and t.__name__ == name for t in res)
return res
def merge_dict(*dicts):
"""Merge dicts and return a dictionary mapping key to list of values.
Order of the values corresponds to the order of the original dicts.
"""
ret = dict()
for dict_ in dicts:
for key, val in dict_.items():
ret.setdefault(key, []).append(val)
return ret
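# Illustrative example (my own):
#   merge_dict({'a': 1}, {'a': 2, 'b': 3}) == {'a': [1, 2], 'b': [3]}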
ScopedObj = namedtuple('ScopedObj', 'obj scope')
def add_scope_to_dict(dict_, scope):
"""Convert name:obj dict to name:ScopedObj(obj,scope) dict."""
return dict((k, ScopedObj(v, scope)) for k, v in dict_.items())
def get_objects_in_frame(frame):
"""Get objects defined in a given frame.
This includes variable, types, builtins, etc.
The function returns a dictionary mapping names to a (non empty)
list of ScopedObj objects in the order following the LEGB Rule.
"""
# https://www.python.org/dev/peps/pep-0227/ PEP227 Statically Nested Scopes
# "Under this proposal, it will not be possible to gain dictionary-style
# access to all visible scopes."
# https://www.python.org/dev/peps/pep-3104/ PEP 3104 Access to Names in
# Outer Scopes
# LEGB Rule : missing E (enclosing) at the moment.
# I'm not sure if it can be fixed but if it can, suggestions
# tagged TODO_ENCLOSING could be implemented (and tested).
return merge_dict(
add_scope_to_dict(frame.f_locals, 'local'),
add_scope_to_dict(frame.f_globals, 'global'),
add_scope_to_dict(frame.f_builtins, 'builtin'),
)
def import_from_frame(module_name, frame):
"""Wrapper around import to use information from frame."""
if frame is None:
return None
return __import__(
module_name,
frame.f_globals,
frame.f_locals)
# To be used in `get_suggestions_for_exception`.
SUGGESTION_FUNCTIONS = dict()
def register_suggestion_for(error_type, regex):
"""Decorator to register a function to be called to get suggestions.
Parameters correspond to the fact that the registration is done for a
specific error type and if the error message matches a given regex
(if the regex is None, the error message is assumed to match before being
retrieved).
The decorated function is expected to yield any number (0 included) of
suggestions (as string).
The parameters are: (value, frame, groups):
- value: Exception object
- frame: Last frame of the traceback (may be None when the traceback is
None which happens only in edge cases)
- groups: groups captured from the error message by the regex.
"""
def internal_decorator(func):
def registered_function(value, frame):
if regex is None:
return func(value, frame, [])
error_msg = value.args[0]
match = re.match(regex, error_msg)
if match:
return func(value, frame, match.groups())
return []
SUGGESTION_FUNCTIONS.setdefault(error_type, []) \
.append(registered_function)
return func # return original function
return internal_decorator
# Functions related to NameError
@register_suggestion_for(NameError, re.VARREFBEFOREASSIGN_RE)
@register_suggestion_for(NameError, re.NAMENOTDEFINED_RE)
def suggest_name_not_defined(value, frame, groups):
"""Get the suggestions for name in case of NameError."""
del value # unused param
name, = groups
objs = get_objects_in_frame(frame)
return itertools.chain(
suggest_name_as_attribute(name, objs),
suggest_name_as_standard_module(name),
suggest_name_as_name_typo(name, objs),
suggest_name_as_keyword_typo(name),
suggest_name_as_missing_import(name, objs, frame),
suggest_name_as_special_case(name))
def suggest_name_as_attribute(name, objdict):
"""Suggest that name could be an attribute of an object.
Example: 'do_stuff()' -> 'self.do_stuff()'.
"""
for nameobj, objs in objdict.items():
prev_scope = None
for obj, scope in objs:
if hasattr(obj, name):
yield quote(nameobj + '.' + name) + \
('' if prev_scope is None else
' ({0} hidden by {1})'.format(scope, prev_scope))
break
prev_scope = scope
def suggest_name_as_missing_import(name, objdict, frame):
"""Suggest that name could come from missing import.
Example: 'foo' -> 'import mod, mod.foo'.
"""
for mod in STAND_MODULES:
if mod not in objdict and name in dir(import_from_frame(mod, frame)):
yield "'{0}' from {1} (not imported)".format(name, mod)
def suggest_name_as_standard_module(name):
"""Suggest that name could be a non-imported standard module.
Example: 'os.whatever' -> 'import os' and then 'os.whatever'.
"""
if name in STAND_MODULES:
yield 'to import {0} first'.format(name)
def suggest_name_as_name_typo(name, objdict):
"""Suggest that name could be a typo (misspelled existing name).
Example: 'foobaf' -> 'foobar'.
"""
for name in get_close_matches(name, objdict.keys()):
yield quote(name) + ' (' + objdict[name][0].scope + ')'
def suggest_name_as_keyword_typo(name):
"""Suggest that name could be a typo (misspelled keyword).
Example: 'yieldd' -> 'yield'.
"""
for name in get_close_matches(name, keyword.kwlist):
yield quote(name) + " (keyword)"
def suggest_name_as_special_case(name):
"""Suggest that name could be handled in a special way."""
special_cases = {
# Imaginary unit is '1j' in Python
'i': quote('1j') + " (imaginary unit)",
'j': quote('1j') + " (imaginary unit)",
# Shell commands entered in interpreter
'pwd': quote('os.getcwd()'),
'ls': quote('os.listdir(os.getcwd())'),
'cd': quote('os.chdir(path)'),
'rm': "'os.remove(filename)', 'shutil.rmtree(dir)' for recursive",
# Function removed from Python
'apply': APPLY_REMOVED_MSG,
'buffer': BUFFER_REMOVED_MSG,
'cmp': CMP_REMOVED_MSG,
'long': LONG_REMOVED_MSG,
'memoryview': MEMVIEW_ADDED_MSG,
'reload': RELOAD_REMOVED_MSG,
'StandardError': STDERR_REMOVED_MSG,
'breakpoint': BREAKPOINT_ADDED_MSG,
}
result = special_cases.get(name)
if result is not None:
yield result
# Functions related to AttributeError
@register_suggestion_for(AttributeError, re.ATTRIBUTEERROR_RE)
@register_suggestion_for(TypeError, re.ATTRIBUTEERROR_RE)
def suggest_attribute_error(value, frame, groups):
"""Get suggestions in case of ATTRIBUTEERROR."""
del value # unused param
type_str, attr = groups
return get_attribute_suggestions(type_str, attr, frame)
@register_suggestion_for(AttributeError, re.MODULEHASNOATTRIBUTE_RE)
def suggest_module_has_no_attr(value, frame, groups):
"""Get suggestions in case of MODULEHASNOATTRIBUTE."""
del value # unused param
_, attr = groups # name ignored for the time being
return get_attribute_suggestions('module', attr, frame)
def get_attribute_suggestions(type_str, attribute, frame):
"""Get the suggestions closest to the attribute name for a given type."""
types = get_types_for_str(type_str, frame)
attributes = set(a for t in types for a in dir(t))
if type_str == 'module':
# For module, we manage to get the corresponding 'module' type
# but the type doesn't bring much information about its content.
# A hacky way to do so is to assume that the exception was something
# like 'module_name.attribute' so that we can actually find the module
# based on the name. Eventually, we check that the found object is a
# module indeed. This is not failproof but it brings a whole lot of
# interesting suggestions and the (minimal) risk is to have invalid
# suggestions.
module_name = frame.f_code.co_names[0]
objs = get_objects_in_frame(frame)
mod = objs[module_name][0].obj
if inspect.ismodule(mod):
attributes = set(dir(mod))
return itertools.chain(
suggest_attribute_is_other_obj(attribute, type_str, frame),
suggest_attribute_alternative(attribute, type_str, attributes),
suggest_attribute_as_typo(attribute, attributes),
suggest_attribute_as_special_case(attribute))
def suggest_attribute_is_other_obj(attribute, type_str, frame):
"""Suggest that attribute correspond to another object.
This can happen in two cases:
- A misused builtin function
* Examples: 'lst.len()' -> 'len(lst)', 'gen.next()' -> 'next(gen)'
- A typo on the '.' which should have been a ','
* Example: a, b = 1, 2 then: 'min(a. b)' -> 'min(a, b)'
"""
for obj, scope in get_objects_in_frame(frame).get(attribute, []):
if attribute in frame.f_code.co_names:
if scope == 'builtin' and '__call__' in dir(obj):
yield quote(attribute + '(' + type_str + ')')
else:
yield COMMA_INSTEAD_OF_PERIOD_MSG
def suggest_attribute_alternative(attribute, type_str, attributes):
"""Suggest alternative to the non-found attribute."""
for s in suggest_attribute_synonyms(attribute, attributes):
yield s
is_iterable = '__iter__' in attributes or \
('__getitem__' in attributes and '__len__' in attributes)
if attribute == 'has_key' and '__contains__' in attributes:
yield quote('key in ' + type_str) + ' (has_key is removed)'
elif attribute == 'get' and '__getitem__' in attributes:
yield quote('obj[key]') + \
' with a len() check or try: except: KeyError or IndexError'
elif attribute in ('__setitem__', '__delitem__'):
if is_iterable:
msg = 'convert to list to edit the list'
if 'join' in attributes:
msg += ' and use "join()" on the list'
yield msg
elif attribute == '__getitem__':
if '__call__' in attributes:
yield quote(type_str + '(value)')
if is_iterable:
yield 'convert to list first or use the iterator protocol to ' \
'get the different elements'
elif attribute == '__call__':
if '__getitem__' in attributes:
yield quote(type_str + '[value]')
elif attribute == '__len__':
if is_iterable:
yield quote('len(list(' + type_str + '))')
elif attribute == 'join':
if is_iterable:
yield quote('my_string.join(' + type_str + ')')
elif attribute == '__or__':
if '__pow__' in attributes:
yield quote('val1 ** val2')
elif attribute == '__index__':
if '__len__' in attributes:
yield quote('len(' + type_str + ')')
if type_str in ('str', 'float'):
yield quote('int(' + type_str + ')')
if type_str == 'float' and sys.version_info >= (3, 0):
# These methods return 'float' before Python 3
yield quote('math.floor(' + type_str + ')')
yield quote('math.ceil(' + type_str + ')')
def suggest_attribute_synonyms(attribute, attributes):
"""Suggest that a method with a similar meaning was used.
Example: 'lst.add(e)' -> 'lst.append(e)'.
"""
for set_sub in SYNONYMS_SETS:
if attribute in set_sub:
for syn in sorted(set_sub & attributes):
yield quote(syn)
def suggest_attribute_as_typo(attribute, attributes):
"""Suggest the attribute could be a typo.
Example: 'a.do_baf()' -> 'a.do_bar()'.
"""
for name in get_close_matches(attribute, attributes):
# Handle Private name mangling
if name.startswith('_') and '__' in name and not name.endswith('__'):
yield quote(name) + ' (but it is supposed to be private)'
else:
yield quote(name)
def suggest_attribute_as_special_case(attribute):
"""Suggest that attribute could be handled in a specific way."""
special_cases = {
'exc_type': EXC_ATTR_REMOVED_MSG,
'exc_value': EXC_ATTR_REMOVED_MSG,
'exc_traceback': EXC_ATTR_REMOVED_MSG,
}
result = special_cases.get(attribute)
if result is not None:
yield result
# Functions related to ImportError
@register_suggestion_for(ImportError, re.NOMODULE_RE)
def suggest_no_module(value, frame, groups):
"""Get the suggestions closest to the failing module import.
Example: 'import maths' -> 'import math'.
"""
del value, frame # unused param
module_str, = groups
for name in get_close_matches(module_str, STAND_MODULES):
yield quote(name)
@register_suggestion_for(ImportError, re.CANNOTIMPORT_RE)
def suggest_cannot_import(value, frame, groups):
"""Get the suggestions closest to the failing import."""
del value # unused param
imported_name, = groups
module_name = frame.f_code.co_names[0]
return itertools.chain(
suggest_imported_name_as_typo(imported_name, module_name, frame),
suggest_import_from_module(imported_name, frame))
def suggest_imported_name_as_typo(imported_name, module_name, frame):
"""Suggest that imported name could be a typo from actual name in module.
Example: 'from math import pie' -> 'from math import pi'.
"""
dir_mod = dir(import_from_frame(module_name, frame))
for name in get_close_matches(imported_name, dir_mod):
yield quote(name)
def suggest_import_from_module(imported_name, frame):
"""Suggest than name could be found in a standard module.
Example: 'from itertools import pi' -> 'from math import pi'.
"""
for mod in STAND_MODULES:
if imported_name in dir(import_from_frame(mod, frame)):
yield quote('from {0} import {1}'.format(mod, imported_name))
# Functions related to TypeError
def suggest_feature_not_supported(attr, type_str, frame):
"""Get suggestion for unsupported feature."""
# 'Object does not support <feature>' exceptions
# can be somehow seen as attribute errors for magic
# methods except for the fact that we do not want to
# have any fuzzy logic on the magic method name.
# Also, we want to suggest the implementation of the
# missing method (if is it not on a builtin object).
types = get_types_for_str(type_str, frame)
attributes = set(a for t in types for a in dir(t))
for s in suggest_attribute_alternative(attr, type_str, attributes):
yield s
if type_str not in frame.f_builtins and \
type_str not in ('function', 'generator'):
yield 'implement "' + attr + '" on ' + type_str
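# Illustration (hedged): for "'set' object is not subscriptable" this is
# called with attr='__getitem__' and type_str='set'. dir(set) contains
# '__iter__', so the alternatives above include converting to a list first;
# and since 'set' is a builtin, no "implement __getitem__" hint is added.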
@register_suggestion_for(TypeError, re.UNSUBSCRIPTABLE_RE)
def suggest_unsubscriptable(value, frame, groups):
"""Get suggestions in case of UNSUBSCRIPTABLE error."""
del value # unused param
type_str, = groups
return suggest_feature_not_supported('__getitem__', type_str, frame)
@register_suggestion_for(TypeError, re.NOT_CALLABLE_RE)
def suggest_not_callable(value, frame, groups):
"""Get suggestions in case of NOT_CALLABLE error."""
del value # unused param
type_str, = groups
return suggest_feature_not_supported('__call__', type_str, frame)
@register_suggestion_for(TypeError, re.OBJ_DOES_NOT_SUPPORT_RE)
def suggest_obj_does_not_support(value, frame, groups):
"""Get suggestions in case of OBJ DOES NOT SUPPORT error."""
del value # unused param
type_str, feature = groups
FEATURES = {
'indexing': '__getitem__',
'item assignment': '__setitem__',
'item deletion': '__delitem__',
}
attr = FEATURES.get(feature)
if attr is None:
return []
return suggest_feature_not_supported(attr, type_str, frame)
@register_suggestion_for(TypeError, re.OBJECT_HAS_NO_FUNC_RE)
def suggest_obj_has_no(value, frame, groups):
"""Get suggestions in case of OBJECT_HAS_NO_FUNC."""
del value # unused param
type_str, feature = groups
if feature in ('length', 'len'):
return suggest_feature_not_supported('__len__', type_str, frame)
return []
@register_suggestion_for(TypeError, re.BAD_OPERAND_UNARY_RE)
def suggest_bad_operand_for_unary(value, frame, groups):
"""Get suggestions for BAD_OPERAND_UNARY."""
del value # unused param
unary, type_str = groups
UNARY_OPS = {
'+': '__pos__',
'pos': '__pos__',
'-': '__neg__',
'neg': '__neg__',
'~': '__invert__',
'abs()': '__abs__',
'abs': '__abs__',
}
attr = UNARY_OPS.get(unary)
if attr is None:
return []
return suggest_feature_not_supported(attr, type_str, frame)
@register_suggestion_for(TypeError, re.UNSUPPORTED_OP_RE)
@register_suggestion_for(TypeError, re.UNSUPPORTED_OP_SUGG_RE)
def suggest_unsupported_op(value, frame, groups):
"""Get suggestions for UNSUPPORTED_OP_RE/UNSUPPORTED_OP_SUGG_RE."""
del value # unused param
binary, type1, type2 = groups[:3]
sugg = "" if len(groups) < 3 + 1 else groups[3]
# Special case for print being used without parenthesis (Python 2 style)
if type1 in ('builtin_function_or_method', 'builtin_function') and \
'print' in frame.f_code.co_names and \
not sugg.startswith('print('):
if binary == '>>':
yield '"print(<message>, file=<output_stream>)"'\
.format(binary, type2)
else:
yield '"print({0}<{1}>)"'.format(binary, type2)
BINARY_OPS = {
'^': '__or__',
}
attr = BINARY_OPS.get(binary)
# Suggestion is based on first type which may not be the best
if attr is not None:
for s in suggest_feature_not_supported(attr, type1, frame):
yield s
@register_suggestion_for(TypeError, re.CANNOT_BE_INTERPRETED_INT_RE)
@register_suggestion_for(TypeError, re.INTEGER_EXPECTED_GOT_RE)
@register_suggestion_for(TypeError, re.INDICES_MUST_BE_INT_RE)
def suggest_integer_type_expected(value, frame, groups):
"""Get suggestions when an int is wanted."""
del value # unused param
type_str, = groups
return suggest_feature_not_supported('__index__', type_str, frame)
def get_func_by_name(func_name, frame):
"""Get the function with the given name in the frame."""
# TODO: Handle qualified names such as dict.get
# Dirty workaround is to remove everything before the last '.'
func_name = func_name.split('.')[-1]
objs = get_objects_in_frame(frame)
# Trying to fetch reachable objects: getting objects and attributes
# for objects. We would go deeper (with a fixed point algorithm) but
# it doesn't seem to be worth it. In any case, we'll be missing a few
# possible functions.
objects = [o.obj for lst in objs.values() for o in lst]
for obj in list(objects):
for a in dir(obj):
attr = getattr(obj, a, None)
if attr is not None:
objects.append(attr)
    # Then, we filter for functions with the correct name (the name being
    # the one on the function object, which is not always the same as the
    # name used in the namespace).
return [func
for func in objects
if getattr(func, '__name__', None) == func_name]
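# Rough illustration of the one-level attribute walk above (hypothetical
# names): if the frame contains `d = {}`, the initial objects list holds
# `d`, and the loop then appends `d.get`, `d.pop`, etc., so
# get_func_by_name('get', frame) can locate `d.get` even though only `d`
# itself is bound in the namespace.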
def suggest_unexpected_keywordarg_for_func(kw_arg, func_name, frame):
"""Get suggestions in case of unexpected keyword argument."""
functions = get_func_by_name(func_name, frame)
func_codes = [f.__code__ for f in functions if hasattr(f, '__code__')]
args = set([var for func in func_codes for var in func.co_varnames])
for arg_name in get_close_matches(kw_arg, args):
yield quote(arg_name)
if kw_arg == 'cmp' and \
(('key' in args) or (len(functions) > len(func_codes))):
yield CMP_ARG_REMOVED_MSG
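# Example of the introspection used above (minimal sketch):
#     >>> def func(abcdef=1):
#     ...     pass
#     >>> func.__code__.co_varnames
#     ('abcdef',)
# so a call such as func(abcdf=2) can be matched back to 'abcdef'.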
@register_suggestion_for(TypeError, re.UNEXPECTED_KEYWORDARG_RE)
def suggest_unexpected_keywordarg(value, frame, groups):
"""Get suggestions in case of UNEXPECTED_KEYWORDARG error."""
del value # unused param
func_name, kw_arg = groups
return suggest_unexpected_keywordarg_for_func(kw_arg, func_name, frame)
@register_suggestion_for(TypeError, re.UNEXPECTED_KEYWORDARG4_RE)
def suggest_unexpected_keywordarg4(value, frame, groups):
"""Get suggestions in case of UNEXPECTED_KEYWORDARG4 error."""
del value # unused param
kw_arg, func_name = groups
return suggest_unexpected_keywordarg_for_func(kw_arg, func_name, frame)
@register_suggestion_for(TypeError, re.UNEXPECTED_KEYWORDARG2_RE)
def suggest_unexpected_keywordarg2(value, frame, groups):
"""Get suggestions in case of UNEXPECTED_KEYWORDARG2 error."""
del value, frame # unused param
kw_arg, = groups
if kw_arg == 'cmp':
yield CMP_ARG_REMOVED_MSG
@register_suggestion_for(TypeError, re.UNEXPECTED_KEYWORDARG3_RE)
def suggest_unexpected_keywordarg3(value, frame, groups):
"""Get suggestions in case of UNEXPECTED_KEYWORDARG2 error."""
del value, frame # unused param
func_name, = groups
del func_name # unused value
return [] # no implementation so far
@register_suggestion_for(TypeError, re.NB_ARG_RE)
def suggest_nb_arg(value, frame, groups):
"""Get suggestions in case of NB ARGUMENT error."""
del value # unused param
func_name, expected, given = groups
given_nb = int(given)
beg, to, end = expected.partition(' to ')
if to:
# Find closest value
beg, end = int(beg), int(end)
if given_nb < beg < end:
expect_nb = beg
elif beg < end < given_nb:
expect_nb = end
else:
# Should not happen
return
elif expected == 'no':
expect_nb = 0
else:
expect_nb = int(expected)
objs = get_objects_in_frame(frame)
del expect_nb, given_nb, objs, func_name # for later
return
yield
@register_suggestion_for(TypeError, re.FUNC_TAKES_NO_KEYWORDARG_RE)
def suggest_func_no_kw_arg(value, frame, groups):
"""Get suggestions for FUNC_TAKES_NO_KEYWORDARG_RE."""
# C-Level functions don't have actual names for their arguments.
# Therefore, trying to use them with keyword arguments leads to
# errors but using them with positional arguments just work fine.
    # This behavior definitely deserves some suggestion.
# More reading:
# http://stackoverflow.com/questions/24463202/typeerror-get-takes-no-keyword-arguments
# https://www.python.org/dev/peps/pep-0457/
# https://www.python.org/dev/peps/pep-0436/#functions-with-positional-only-parameters
# Note: a proper implementation of this function would:
# - retrieve the function object using the function name
# - check that the function does accept arguments but does not
# accept keyword arguments before yielding the suggestion.
# Unfortunately, introspection of builtin function is not possible as per
# http://bugs.python.org/issue1748064 . Thus, the only thing we can look
# for is if a function has no __code__ attribute.
del value # unused param
func_name, = groups
functions = get_func_by_name(func_name, frame)
if any([not hasattr(f, '__code__') for f in functions]):
yield NO_KEYWORD_ARG_MSG
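# Quick check of the heuristic above (hedged): builtin callables usually
# lack a __code__ attribute while pure-Python functions have one:
#     >>> hasattr(dict.get, '__code__')
#     False
#     >>> def f(): pass
#     >>> hasattr(f, '__code__')
#     True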
# Functions related to ValueError
@register_suggestion_for(ValueError, re.ZERO_LEN_FIELD_RE)
def suggest_zero_len_field(value, frame, groups):
"""Get suggestions in case of ZERO_LEN_FIELD."""
del value, frame, groups # unused param
yield '{0}'
@register_suggestion_for(ValueError, re.TIME_DATA_DOES_NOT_MATCH_FORMAT_RE)
def suggest_time_data_is_wrong(value, frame, groups):
"""Get suggestions in case of TIME_DATA_DOES_NOT_MATCH_FORMAT_RE."""
del value, frame # unused param
timedata, timeformat = groups
if timedata.count('%') > timeformat.count('%%'):
yield "to swap value and format parameters"
# Functions related to SyntaxError
@register_suggestion_for(SyntaxError, re.OUTSIDE_FUNCTION_RE)
def suggest_outside_func_error(value, frame, groups):
"""Get suggestions in case of OUTSIDE_FUNCTION error."""
del value, frame # unused param
yield "to indent it"
word, = groups
if word == 'return':
yield "'sys.exit([arg])'"
@register_suggestion_for(SyntaxError, re.FUTURE_FEATURE_NOT_DEF_RE)
def suggest_future_feature(value, frame, groups):
"""Get suggestions in case of FUTURE_FEATURE_NOT_DEF error."""
del value # unused param
feature, = groups
return suggest_imported_name_as_typo(feature, '__future__', frame)
@register_suggestion_for(SyntaxError, re.INVALID_COMP_RE)
def suggest_invalid_comp(value, frame, groups):
"""Get suggestions in case of INVALID_COMP error."""
del value, frame, groups # unused param
yield quote('!=')
@register_suggestion_for(SyntaxError, re.NO_BINDING_NONLOCAL_RE)
def suggest_no_binding_for_nonlocal(value, frame, groups):
"""Get suggestions in case of NO BINDING FOR NONLOCAL."""
del value # unused param
name, = groups
objs = get_objects_in_frame(frame).get(name, [])
for _, scope in objs:
if scope == 'global':
# TODO_ENCLOSING: suggest close matches for enclosing
yield quote('global ' + name)
@register_suggestion_for(SyntaxError, re.INVALID_SYNTAX_RE)
def suggest_invalid_syntax(value, frame, groups):
"""Get suggestions in case of INVALID_SYNTAX error."""
del frame, groups # unused param
alternatives = {
'<>': '!=',
'&&': 'and',
'||': 'or',
}
    if value.offset is not None:
for shift in (0, 1):
offset = value.offset + shift
two_last = value.text[offset - 2:offset]
alt = alternatives.get(two_last)
if alt is not None:
yield quote(alt)
break
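# e.g. `1 <> 2` is a SyntaxError on Python 3; value.offset points just
# past the offending token, so reading the two characters before the
# offset (retrying with a one-character shift) recovers '<>' and the
# loop above suggests '!=' instead.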
# Functions related to MemoryError
@register_suggestion_for(MemoryError, None)
def get_memory_error_sugg(value, frame, groups):
"""Get suggestions for MemoryError exception."""
del value, groups # unused param
objs = get_objects_in_frame(frame)
return itertools.chain.from_iterable(
suggest_memory_friendly_equi(name, objs)
for name in frame.f_code.co_names)
# Functions related to OverflowError
@register_suggestion_for(OverflowError, re.RESULT_TOO_MANY_ITEMS_RE)
def suggest_too_many_items(value, frame, groups):
"""Suggest for TOO_MANY_ITEMS error."""
del value # unused param
func, = groups
objs = get_objects_in_frame(frame)
return suggest_memory_friendly_equi(func, objs)
def suggest_memory_friendly_equi(name, objs):
"""Suggest name of a memory friendly equivalent for a function."""
suggs = {'range': ['xrange']}
return [quote(s) for s in suggs.get(name, []) if s in objs]
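# e.g. on Python 2, a MemoryError raised while building range(10**9)
# leads to suggesting the lazy 'xrange' equivalent, but only if 'xrange'
# is actually reachable from the frame (it is not on Python 3, so nothing
# is suggested there).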
# Functions related to RuntimeError
@register_suggestion_for(RuntimeError, re.MAX_RECURSION_DEPTH_RE)
def suggest_max_recursion_depth(value, frame, groups):
"""Suggest for MAX_RECURSION_DEPTH error."""
# this is the real solution, make it the first suggestion
del value, frame, groups # unused param
yield AVOID_REC_MSG
yield "increase the limit with " \
"`sys.setrecursionlimit(limit)` (current value" \
" is %d)" % sys.getrecursionlimit()
# Functions related to IOError/OSError
@register_suggestion_for((IOError, OSError), None)
def get_io_os_error_sugg(value, frame, groups):
"""Get suggestions for IOError/OSError exception."""
# https://www.python.org/dev/peps/pep-3151/
del frame, groups # unused param
err, _ = value.args
errnos = {
errno.ENOENT: suggest_if_file_does_not_exist,
errno.ENOTDIR: suggest_if_file_is_not_dir,
errno.EISDIR: suggest_if_file_is_dir,
}
return errnos.get(err, lambda x: [])(value)
def suggest_if_file_does_not_exist(value):
"""Get suggestions when a file does not exist."""
# TODO: Add fuzzy match
filename = value.filename
for func, name in (
(os.path.expanduser, 'os.path.expanduser'),
(os.path.expandvars, 'os.path.expandvars')):
expanded = func(filename)
if os.path.exists(expanded) and filename != expanded:
yield quote(expanded) + " (calling " + name + ")"
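# Typical case (hedged sketch): open('~/foo.txt') fails with ENOENT
# because the shell tilde is not expanded by the OS, whereas
#     >>> import os.path
#     >>> os.path.expanduser('~/foo.txt')   # result depends on $HOME
#     '/home/user/foo.txt'
# points at an existing file, hence the suggestion above.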
def suggest_if_file_is_not_dir(value):
"""Get suggestions when a file should have been a dir and is not."""
filename = value.filename
yield quote(os.path.dirname(filename)) + " (calling os.path.dirname)"
def suggest_if_file_is_dir(value):
"""Get suggestions when a file is a dir and should not."""
filename = value.filename
listdir = sorted(os.listdir(filename))
if listdir:
trunc_l = listdir[:MAX_NB_FILES]
truncated = listdir != trunc_l
filelist = [quote(f) for f in trunc_l] + (["etc"] if truncated else [])
yield "any of the {0} files in directory ({1})".format(
len(listdir), ", ".join(filelist))
else:
yield "to add content to {0} first".format(filename)
def get_suggestions_for_exception(value, traceback):
"""Get suggestions for an exception."""
frame = get_last_frame(traceback)
return itertools.chain.from_iterable(
func(value, frame)
for error_type, functions in SUGGESTION_FUNCTIONS.items()
if isinstance(value, error_type)
for func in functions)
def add_string_to_exception(value, string):
"""Add string to the exception parameter."""
# The point is to have the string visible when the exception is printed
# or converted to string - may it be via `str()`, `repr()` or when the
# exception is uncaught and displayed (which seems to use `str()`).
# In an ideal world, one just needs to update `args` but apparently it
# is not enough for SyntaxError, IOError, etc where other
# attributes (`msg`, `strerror`, `reason`, etc) are to be updated too
# (for `str()`, not for `repr()`).
    # Also, elements in args might not be strings or args might be empty
# so we add to the first string and add the element otherwise.
assert type(value.args) == tuple
if string:
lst_args = list(value.args)
for i, arg in enumerate(lst_args):
if isinstance(arg, str):
lst_args[i] = arg + string
break
else:
# if no string arg, add the string anyway
lst_args.append(string)
value.args = tuple(lst_args)
for attr in ['msg', 'strerror', 'reason']:
attrval = getattr(value, attr, None)
if attrval is not None:
setattr(value, attr, attrval + string)
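# Minimal illustration of the behaviour above:
#     >>> e = ValueError('bad value')
#     >>> add_string_to_exception(e, '. Did you mean ...?')
#     >>> str(e)
#     'bad value. Did you mean ...?'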
def get_last_frame(traceback):
"""Extract last frame from a traceback."""
# In some rare case, the given traceback might be None
if traceback is None:
return None
while traceback.tb_next:
traceback = traceback.tb_next
return traceback.tb_frame
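# Usage sketch: inside an except block,
#     etype, value, tb = sys.exc_info()
#     frame = get_last_frame(tb)
# returns the frame in which the exception was actually raised, i.e. the
# deepest frame of the traceback chain.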
def print_frame_information(frame):
"""Print information about a frame and the data one can get from it."""
# For debug purposes
print("-----")
print("Frame", frame)
# print(dir(frame))
print("-----")
code = frame.f_code
print("Frame.code", code)
# print(dir(code))
cellvars = code.co_cellvars
print("Frame.code.cellvars", cellvars)
# print(dir(cellvars))
cocode = code.co_code
print("Frame.code.cocode", cocode)
coname = code.co_name
print("Frame.code.coname", coname)
conames = code.co_names
print("Frame.code.conames", conames)
print("-----")
lasti = frame.f_lasti
print("Frame.lasti", lasti)
# print(dir(lasti))
print("-----")
def add_suggestions_to_exception(type_, value, traceback):
"""Add suggestion to an exception.
Arguments are such as provided by sys.exc_info().
"""
assert isinstance(value, type_)
add_string_to_exception(
value,
get_suggestion_string(
get_suggestions_for_exception(
value,
traceback)))
|
mit
| 5,478,245,891,963,958,000
| 36.55112
| 90
| 0.645871
| false
|
usc-isi/horizon-old
|
horizon/horizon/dashboards/syspanel/flavors/forms.py
|
1
|
2657
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django import shortcuts
from django.contrib import messages
from django.utils.translation import ugettext as _
from openstackx.api import exceptions as api_exceptions
from horizon import api
from horizon import forms
LOG = logging.getLogger(__name__)
class CreateFlavor(forms.SelfHandlingForm):
flavorid = forms.IntegerField(label=_("Flavor ID"))
name = forms.CharField(max_length="25", label=_("Name"))
vcpus = forms.CharField(max_length="5", label=_("VCPUs"))
memory_mb = forms.CharField(max_length="5", label=_("Memory MB"))
disk_gb = forms.CharField(max_length="5", label=_("Disk GB"))
def handle(self, request, data):
api.flavor_create(request,
data['name'],
int(data['memory_mb']),
int(data['vcpus']),
int(data['disk_gb']),
int(data['flavorid']))
msg = _('%s was successfully added to flavors.') % data['name']
LOG.info(msg)
messages.success(request, msg)
return shortcuts.redirect('horizon:syspanel:flavors:index')
class DeleteFlavor(forms.SelfHandlingForm):
flavorid = forms.CharField(required=True)
def handle(self, request, data):
try:
flavor_id = data['flavorid']
flavor = api.flavor_get(request, flavor_id)
LOG.info('Deleting flavor with id "%s"' % flavor_id)
api.flavor_delete(request, flavor_id, False)
messages.info(request, _('Successfully deleted flavor: %s') %
flavor.name)
except api_exceptions.ApiException, e:
messages.error(request, _('Unable to delete flavor: %s') %
e.message)
return shortcuts.redirect(request.build_absolute_uri())
|
apache-2.0
| 6,767,016,554,147,480,000
| 37.507246
| 78
| 0.641325
| false
|
clifforloff/opmservice
|
opmapp/views.py
|
1
|
93730
|
# From python
import datetime
import pytz
from decimal import Decimal, InvalidOperation
import re
# From Django
from django.core.mail import EmailMessage
from django.core.mail import get_connection
from django.utils import timezone
from django import forms
from django.core.urlresolvers import reverse, reverse_lazy
from django.http import HttpResponse
from django.db.models import Q, DecimalField
from django.shortcuts import redirect, render_to_response, get_object_or_404
from django.contrib.auth.decorators import login_required, permission_required
from django.views.defaults import permission_denied, page_not_found
from django.template import RequestContext
from django.utils import simplejson
from django.core import serializers
from django.core.files.base import ContentFile
from django.forms.models import model_to_dict
import django.template
# From opmarchive
import opmarchive.models
# From utilapp
from utilapp.util import render_to_pdf, render_to_print, render_to_view, write_to_pdf, model_to_list
from utilapp.templatetags.util_templatetags import to_dollar
# Models
from opmapp.models import Unit, Tenant, CreditMemo, DebitMemo, OfficeMemo, MaintenanceMemo,\
PropertyPermission, LastProperty, Property, UnitMemo, Prospect
from django.contrib.auth.models import User
# Forms
from opmapp.forms import UnitInfoAddForm, UnitInfoForm, LeaseInfoForm, ChargeInfoForm, TenantForm,\
CreditMemoForm, DebitMemoForm, OfficeMemoForm, MaintenanceMemoForm,\
CreditMemoSearchForm, DebitMemoSearchForm, OfficeMemoSearchForm, MaintenanceMemoSearchForm,\
UnitSearchForm, TenantSearchForm, UnitMemoForm, ProspectForm, UnitMemoSearchForm, ProspectSearchForm
def basic_data(view):
def out_view(request, *args, **kwargs):
properties = None
if request.user.is_authenticated():
# Update properties
            # Properties always need to be re-checked because the user could
            # delete a property from the admin panel
properties = [pp.property for pp in PropertyPermission.objects.filter(user=request.user)]
request.session['properties'] = properties
# Update property
if request.session.get('property', None) is None:
try:
lprop = LastProperty.objects.get(user=request.user)
except LastProperty.DoesNotExist:
lprop = None
if lprop:
# Activate the timezone according to the property
prop = Property.objects.get(id=lprop.property.id)
if prop.timezone:
request.session['django_timezone'] = pytz.timezone(prop.timezone)
request.session['property'] = lprop.property.id
else:
if len(properties)>0:
request.session['property'] = properties[0].id
else:
return permission_denied(request)
else:
                # Check if the property belongs to the list of properties
try:
Property.objects.get(id=request.session.get('property', None))
except Property.DoesNotExist:
return permission_denied(request)
return view(request, *args, **kwargs)
return out_view
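# Usage sketch: the views below are stacked as
#     @login_required
#     @basic_data
#     def some_view(request): ...
# so by the time a view body runs, request.session['properties'] and
# request.session['property'] are guaranteed to be populated (or a
# permission_denied response has already been returned).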
@login_required
def select_property(request, prop_id):
# Check if prop already exists
prop = None
try:
prop = Property.objects.get(id=prop_id)
except Property.DoesNotExist:
return permission_denied(request)
# Activate the timezone according to the property
if prop.timezone:
request.session['django_timezone'] = pytz.timezone(prop.timezone)
# Check if the user has the permission to use that property
try:
PropertyPermission.objects.get(user=request.user, property=prop)
except PropertyPermission.DoesNotExist:
return permission_denied(request)
request.session['property'] = prop_id
# Store the last property into the database
prop = Property.objects.get(id=prop_id)
try:
lprop = LastProperty.objects.get(user=request.user)
except LastProperty.DoesNotExist:
lprop = None
if lprop:
lprop.property = prop
lprop.save()
else:
lp = LastProperty(user=request.user, property=prop)
lp.save()
return redirect('opmapp.views.units')
def updated_by(model, username):
user = User.objects.get(username=username)
model.updated_by = user
model.save()
def created_by(model, username):
user = User.objects.get(username=username)
model.created_by = user
model.updated_by = user
model.save()
@basic_data
def index(request):
return render_to_response("index.html", context_instance=RequestContext(request))
@login_required
@basic_data
def get_tenants(request):
unit_code = request.GET.get('unit_code', None)
if request.is_ajax() and unit_code is not None:
ret = serializers.serialize('json', Tenant.objects.filter(unit=unit_code))
return HttpResponse(ret)
else:
return page_not_found(request)
@login_required
@basic_data
def get_tenant_phone(request):
tenant_id = request.GET.get('tenant_id', None)
if request.is_ajax() and tenant_id is not None:
if tenant_id == "":
ret = ''
else:
ret = serializers.serialize('json', Tenant.objects.filter(id=tenant_id))
return HttpResponse(ret)
else:
return page_not_found(request)
@login_required
@basic_data
def get_units(request):
tenant_id = request.GET.get('tenant_id', None)
if request.is_ajax() and tenant_id is not None:
if tenant_id == "":
ret = ''
else:
t = Tenant.objects.get(id=tenant_id)
if t.unit:
uid = t.unit.code
else:
uid = ''
units = Unit.objects.filter(property=request.session['property'])
units_list = []
for u in units:
units_list.append({'code':u.code})
ret = simplejson.dumps({'code':uid, 'units':units_list})
return HttpResponse(ret)
else:
return page_not_found(request)
def belong_to_property(obj, prop_id):
    # As a precondition, we assume that the property is already one of
    # the properties for which the user has permission
if not obj:
return True
if not obj.property:
return False
if obj.property.id != prop_id:
return False
return True
def convert_in_label(fieldname):
n = fieldname.replace('_', ' ')
return n
def action_delete(Class, back_view):
def out_view(view_func):
def _wrapper_view(request, oid, *args, **kwargs):
try:
o = Class.objects.get(pk=oid)
except Class.DoesNotExist:
return page_not_found(request)
view_func(request, oid, *args, **kwargs)
if belong_to_property(o, request.session['property']):
o.delete()
else:
return permission_denied(request)
return redirect(back_view)
return _wrapper_view
return out_view
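# Usage sketch (mirrors tenant_delete further below):
#     @action_delete(Tenant, 'opmapp.views.tenants')
#     def tenant_delete(request, obj): ...
# The wrapper looks the object up by primary key, runs the view body,
# verifies property ownership and only then deletes and redirects.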
# TODO click the row in a table wherever in the line
# TODO set all times so that they are GMT adjusted by property time zone
# TODO for list views reduce font size 12 to 10 & reduce row height
# TODO make a unique template for view on screen, pdf, print
# TODO take off action from post but use get like detail_view
# TODO create an abstract model that include property,date_created,date_update,... and a special save method to populate these fields
@login_required
@basic_data
def tenants(request):
request.session['entity'] = 'tenants'
prop = request.session.get('property',None)
tenants = Tenant.objects.filter(property=prop)
units = Unit.objects.filter(property=prop)
tenants = tenants.filter(unit__in=units)
units_id = [(u.code,u.code) for u in Unit.objects.filter(property=prop)]
units_id.insert(0, ('', '-----'))
if request.method == 'POST':
search_form = TenantSearchForm(request.POST)
search_form.fields['unit'].choices = units_id
if search_form.is_valid():
last_name_filter = request.POST.get('last_name',None)
unit_filter = request.POST.get('unit',None)
order_by = search_form.cleaned_data.get('order_by', None)
if last_name_filter!='' and last_name_filter!=None:
tenants = tenants.filter(last_name__istartswith=last_name_filter)
if unit_filter!='' and unit_filter!=None:
tenants = tenants.filter(unit__code=unit_filter)
if order_by=='last_name' or order_by=='unit__sort_order':
tenants = tenants.order_by(order_by)
else:
search_form = TenantSearchForm()
search_form.fields['unit'].choices = units_id
can_add = request.user.has_perm('opmapp.add_tenant')
action = search_form.data.get('action', None)
return render_to_view('tenants/tenants.html',\
{'property':Property.objects.get(id=prop), 'title':'Tenants', 'action':action, 'tenants':tenants, 'search_form':search_form,\
'view_add':reverse('opmapp.views.tenant_add'), 'can_add':can_add,\
'url_reset':reverse('opmapp.views.tenants'),},\
RequestContext(request), action, 'tenants.pdf')
@login_required
@basic_data
def tenant_view(request, oid):
request.session['entity'] = 'tenants'
o = get_object_or_404(Tenant, pk=oid)
action = request.GET.get('action', None)
return render_to_view('generic_detail_view.html', {'property':Property.objects.get(id=request.session.get('property',None)),\
'model': model_to_list(o),'title':'Tenant', 'action':action},\
RequestContext(request), action, 'tenant_'+oid+'.pdf')
@login_required
@permission_required('opmapp.change_tenant', raise_exception=True)
@basic_data
def tenant(request, tid):
request.session['entity'] = 'tenants'
try:
t = Tenant.objects.get(id=tid)
except Tenant.DoesNotExist:
return page_not_found(request)
if request.method == 'POST':
form = TenantForm(request.POST, instance=t)
if request.user.has_perm('opmapp.change_tenant') and\
belong_to_property(form.instance, request.session['property']) and\
belong_to_property(form.instance.unit, request.session['property']):
if form.is_valid():
form.save()
updated_by(form.instance,request.user.username)
return redirect('opmapp.views.tenants')
else:
return permission_denied(request)
else:
form = TenantForm(instance=t)
units = Unit.objects.filter(property=request.session['property'])
form.fields['unit'].queryset=units
return render_to_response("tenants/tenant.html", {'form':form},
context_instance=RequestContext(request))
@login_required
@permission_required('opmapp.add_tenant', raise_exception=True)
@basic_data
def tenant_add(request):
request.session['entity'] = 'tenants'
if request.method == 'POST':
form = TenantForm(request.POST)
if form.is_valid():
            # Ensure that the tenant will be added to the current property
if belong_to_property(form.instance, request.session['property']) and\
belong_to_property(form.instance.unit, request.session['property']):
form.save()
created_by(form.instance,request.user.username)
return redirect('opmapp.views.tenants')
else:
return permission_denied(request)
else:
now = timezone.now().astimezone(timezone.get_current_timezone())
form = TenantForm(initial={'property':request.session.get('property', None),\
'start_date':now.strftime('%Y-%m-%d'),\
'end_date':now.strftime('%Y-%m-%d')})
units = Unit.objects.filter(property=request.session['property'])
form.fields['unit'].queryset=units
return render_to_response("tenants/tenant.html", {'form':form},
context_instance=RequestContext(request))
@login_required
@permission_required('opmapp.delete_tenant', raise_exception=True)
@basic_data
@action_delete(Tenant, 'opmapp.views.tenants')
def tenant_delete(request, obj):
request.session['entity'] = 'tenants'
def _move_tenant_to_archive(request, t):
filename = 'tenant_'+t.first_name+'_'+t.last_name+'_'+str(t.pk)+'.pdf'
    # Be sure that the pdf was generated before deleting the tenant
st, result = write_to_pdf('generic_detail_view.html', {'property':Property.objects.get(id=request.session.get('property',None)),\
'model': model_to_list(t),'title':'Tenant',\
'base':'base_print.html', 'action':'pdf'}, RequestContext(request), filename)
if st:
t_arch = opmarchive.models.Tenant()
t_arch.first_name = t.first_name
t_arch.last_name = t.last_name
t_arch.start_date = t.start_date
t_arch.end_date = t.end_date
t_arch.tenant_pdf.save(filename, ContentFile(result.getvalue()))
t_arch.property = t.property.pk
t_arch.unit = t.unit.pk
t_arch.created_by = request.user.username
t_arch.save()
t.delete()
return st
@login_required
@permission_required('opmapp.move_out_tenant', raise_exception=True)
@basic_data
def tenant_move_out(request, tid):
request.session['entity'] = 'tenants'
try:
t = Tenant.objects.get(id=tid)
except Tenant.DoesNotExist:
return page_not_found(request)
if not belong_to_property(t, request.session['property']) or\
not belong_to_property(t.unit, request.session['property']):
return permission_denied(request)
    # Create the tenant in the archive and then delete the tenant from
# the real database
res = _move_tenant_to_archive(request, t)
if res:
return redirect('opmapp.views.tenants')
else:
return page_not_found(request)
@login_required
@permission_required('opmapp.do_tenant_send_mail', raise_exception=True)
@basic_data
def tenant_send_mail(request, tenant_id):
selected_tenant = None
if tenant_id:
selected_tenant = get_object_or_404(Tenant, pk=tenant_id)
request.session['entity'] = 'tenants'
prop = request.session.get('property',None)
tenants = Tenant.objects.filter(property=prop)
units = Unit.objects.filter(property=prop)
tenants = tenants.filter(unit__in=units)
tenants = tenants.order_by('unit', 'last_name')
if request.method == 'POST':
select = request.POST.getlist('select', None)
subject = request.POST.get('subject', None)
text = request.POST.get('text', None)
attach = request.FILES.get('attach', None)
selected_tenants = []
for t in select:
selected_tenants.append(get_object_or_404(Tenant, pk=t))
emails = [t.email for t in selected_tenants if t.email]
proper = get_object_or_404(Property, pk=prop)
        # Check whether the property email uses the gmail provider;
        # if not, use the default email account
if "@gmail.com" in proper.email:
            connection = get_connection(host='smtp.gmail.com',
                                        port=587,
                                        username=proper.email,
                                        password=proper.email_password,
                                        use_tls=True)
else:
connection=None
message = EmailMessage(subject, text, '', [], emails, connection=connection)
if attach is not None:
message.attach(attach.name, attach.read())
message.send()
return redirect('opmapp.views.tenants')
return render_to_response('tenants/send_mail.html',
{
'property':Property.objects.get(id=prop),
'tenants':tenants,
'selected_tenant':selected_tenant
},
context_instance= RequestContext(request)
)
@login_required
@permission_required('opmapp.move_out_lease_unit', raise_exception=True)
@basic_data
def unit_lease_move_out(request, uid):
request.session['entity'] = 'units'
# Check if uid really exists and belong to the current property
try:
u = Unit.objects.get(pk=uid)
except Unit.DoesNotExist:
return page_not_found(request)
if not belong_to_property(u, request.session['property']):
return permission_denied(request)
# Create tenant in archive and delete from default db
tenants = Tenant.objects.filter(unit=u)
    # Before moving out the tenant we need to create the header field
template_txt = """
<div align="center">
<h2>{{property.name}}
<br/>
{{property.address1}} {{property.address2}}
<br/>
{{property.city}}, {{property.state}} {{property.zipcode}}
<br/>
{{property.phone}}
<br/>
{{title}}
</h2>
</div>
<div align="center">
<h2>{{unit}}
{%if not tenants|length_is:"0"%}-
({%for t in tenants%}
{{t.last_name}} {{t.first_name}}{%if not forloop.last%},{%endif%}
{%endfor%})
{%endif%}
</h2>
</div>
"""
template = django.template.Template(template_txt)
header = template.render(django.template.Context({'property':Property.objects.get(id=request.session.get('property',None)),\
'title':'Closing Statement Report', 'tenants':tenants, 'unit':u.code}))
for t in tenants:
if not belong_to_property(t, request.session['property']):
return permission_denied(request)
        # Create the tenant in the archive and then delete the tenant from
# the real database
res = _move_tenant_to_archive(request, t)
if not res:
            return page_not_found(request)
# Create a UnitLeaseHistory
unit_filename = 'unit_'+u.code+'_'+str(u.start_date)+'_'+str(u.end_date)+'.pdf'
    # Be sure that the pdf was generated before deleting the data
st, unit_result = write_to_pdf('units/unit_view.html', {'property':Property.objects.get(id=request.session.get('property',None)),\
'unit': u}, RequestContext(request), unit_filename)
if not st:
return page_not_found(request)
unit_memos = UnitMemo.objects.filter(unit=u)
unit_memos_filename = 'unit_memos_'+u.code+'_'+str(u.start_date)+'_'+str(u.end_date)+'.pdf'
    # Be sure that the pdf was generated before deleting the data
st, unit_memos_result = write_to_pdf('unit_memos/unit_memos_view.html', {'property':Property.objects.get(id=request.session.get('property',None)),\
'unit_memos': unit_memos}, RequestContext(request), unit_memos_filename)
if not st:
return page_not_found(request)
ledgers = _get_ledger([u])
ledger_filename = 'ledger_'+u.code+'_'+str(u.start_date)+'_'+str(u.end_date)+'.pdf'
st, ledger_result = write_to_pdf("units/ledger.html", {'ledgers':ledgers,\
'property':Property.objects.get(id=request.session.get('property',None)),\
'title':'Ledger'}, RequestContext(request), ledger_filename)
if not st:
return page_not_found(request)
unit_lease = opmarchive.models.UnitLeaseHistory()
unit_lease.unit = uid
unit_lease.property = u.property.pk
unit_lease.start_date = u.start_date
unit_lease.end_date = u.end_date
unit_lease.balance_due = u.balance_due
dep1 = u.deposit1
if not dep1:
dep1 = Decimal(0)
dep2 = u.deposit2
if not dep2:
dep2 = Decimal(0)
unit_lease.header = header
unit_lease.tot_security_deposit = dep1 + dep2
unit_lease.unit_pdf.save(unit_filename, ContentFile(unit_result.getvalue()))
unit_lease.unit_memos_pdf.save(unit_memos_filename, ContentFile(unit_memos_result.getvalue()))
unit_lease.ledger_pdf.save(ledger_filename, ContentFile(ledger_result.getvalue()))
unit_lease.created_by = request.user.username
unit_lease.save()
# Initialize the values of Unit
u.start_date = timezone.now()
u.end_date = timezone.now()
u.deposit1 = Decimal(0)
u.deposit2 = Decimal(0)
u.move_in_date = timezone.now()
u.rubs = Decimal(0)
u.pet_status_memo = None
u.balance_due = Decimal(0)
u.status = 'VRRN'
u.recurring_charge_amount1 = Decimal(0)
u.recurring_charge_amount2 = Decimal(0)
u.recurring_charge_amount3 = Decimal(0)
u.recurring_charge_amount4 = Decimal(0)
u.recurring_charge_amount5 = Decimal(0)
u.non_recurring_charge_amount1 = Decimal(0)
u.non_recurring_charge_amount2 = Decimal(0)
u.non_recurring_charge_amount3 = Decimal(0)
u.non_recurring_charge_amount4 = Decimal(0)
u.non_recurring_charge_amount5 = Decimal(0)
u.save()
credit_memos = CreditMemo.objects.filter(unit=u)
debit_memos = DebitMemo.objects.filter(unit=u)
unit_memos.delete()
credit_memos.delete()
debit_memos.delete()
return redirect('opmapp.views.units')
@login_required
@basic_data
def units(request):
request.session['entity'] = 'units'
prop = request.session.get('property',None)
us = Unit.objects.filter(property=prop)
status_code = (('', '-----'),) + Unit.STATUS_CHOICES + (('ALLV', 'All Vacants'),)
if request.method == 'POST':
search_form = UnitSearchForm(request.POST)
search_form.fields['status_code'].choices = status_code
if search_form.is_valid():
code_filter = request.POST.get('code',None)
status_code_filter = request.POST.get('status_code',None)
tenant_last_name_filter = request.POST.get('tenant_last_name',None)
order_by = search_form.cleaned_data.get('order_by', None)
us2 = Unit.objects.filter(property=prop)
if tenant_last_name_filter != '' and tenant_last_name_filter!=None:
for u in us2:
ts = Tenant.objects.filter(unit=u)
ts = ts.filter(last_name__istartswith=tenant_last_name_filter)
if len(ts)==0:
us = us.exclude(code=u.code)
if code_filter!='' and code_filter!=None:
us = us.filter(code__istartswith=code_filter)
if status_code_filter!='' and status_code_filter!=None:
if status_code_filter == 'ALLV':
us = us.filter(Q(status='VACL')|Q(status='VACH')|Q(status='VACD')|Q(status='VRRY')|Q(status='VRRN'))
else:
us = us.filter(status=status_code_filter)
if order_by=='end_date' or order_by=='balance_due':
if order_by == 'end_date':
# Just use the default order
order_by = order_by #'-'+order_by
us = us.order_by(order_by)
elif order_by=='code':
                # if ordering by code, do not do anything
pass
else:
search_form = UnitSearchForm()
search_form.fields['status_code'].choices = status_code
units = []
# Get the max number of tenants
max_tenants = 0
overall_deposit1 = Decimal(0.0)
overall_deposit2 = Decimal(0.0)
overall_recurring_charge_amount = Decimal(0.0)
overall_balance_due = Decimal(0.0)
for u in us:
ts = Tenant.objects.filter(unit=u)
if len(ts)>max_tenants:
max_tenants = len(ts)
for u in us:
if u.deposit1:
overall_deposit1 = overall_deposit1 + u.deposit1
if u.deposit2:
overall_deposit2 = overall_deposit2 + u.deposit2
if u.balance_due:
overall_balance_due = overall_balance_due + u.balance_due
ts = Tenant.objects.filter(unit=u)
tenants = []
for t in ts:
tenants.append((t.id, t.last_name))
for _ in range(max_tenants-len(ts)):
tenants.append("")
        # Sum all recurring charges
tot_recurring_charge = Decimal(0.0)
if u.recurring_charge_amount1:
tot_recurring_charge = tot_recurring_charge + u.recurring_charge_amount1
if u.recurring_charge_amount2:
tot_recurring_charge = tot_recurring_charge + u.recurring_charge_amount2
if u.recurring_charge_amount3:
tot_recurring_charge = tot_recurring_charge + u.recurring_charge_amount3
if u.recurring_charge_amount4:
tot_recurring_charge = tot_recurring_charge + u.recurring_charge_amount4
if u.recurring_charge_amount5:
tot_recurring_charge = tot_recurring_charge + u.recurring_charge_amount5
overall_recurring_charge_amount = overall_recurring_charge_amount + tot_recurring_charge
units.append({'code':u.code, 'deposit1':u.deposit1, 'deposit2':u.deposit2, 'balance_due':u.balance_due,\
'tot_recurring_charge_amount':tot_recurring_charge, 'status':u.get_status_display(),\
'url_reset':reverse('opmapp.views.units'),
'tenants':tenants, 'start_date':u.start_date, 'end_date':u.end_date})
# Sort the units
def comp(o1, o2):
o1_isdigit =o1['code'].isdigit()
o2_isdigit =o2['code'].isdigit()
if o1_isdigit and o2_isdigit:
o1_code = int(o1['code'])
o2_code = int(o2['code'])
else:
o1_code = o1['code']
o2_code = o2['code']
if o1_code<o2_code:
return -1
elif o1_code==o2_code:
return 0
else:
return 1
# units = sorted(units, cmp=comp)
can_add = request.user.has_perm('opmapp.add_unit')
action = search_form.data.get('action', None)
return render_to_view('units/units.html',\
{'property':Property.objects.get(id=prop), 'title':'Units', 'action':action, 'units':units, 'max_tenants':max_tenants, 'search_form':search_form,\
'overall_deposit1':overall_deposit1, 'overall_deposit2':overall_deposit2, \
'overall_recurring_charge_amount':overall_recurring_charge_amount,\
'overall_balance_due':overall_balance_due,\
'view_add':reverse('opmapp.views.unit_add'), 'can_add':can_add},\
RequestContext(request), action, 'units.pdf')
@login_required
@basic_data
def unit_view(request, oid):
request.session['entity'] = 'units'
o = get_object_or_404(Unit, pk=oid)
action = request.GET.get('action', None)
return render_to_view('generic_detail_view.html', {'property':Property.objects.get(id=request.session.get('property',None)),\
'model': model_to_list(o),'title':'Unit', 'action':action},\
RequestContext(request), action, 'unit_'+oid+'.pdf')
@login_required
@permission_required('opmapp.add_unit', raise_exception=True)
@basic_data
def unit_add(request):
request.session['entity'] = 'units'
if request.method == 'POST':
form = UnitInfoForm(request.POST, request.FILES)
if form.is_valid():
            # Ensure that the unit will be added to the current property
if belong_to_property(form.instance, request.session['property']):
form.save()
created_by(form.instance,request.user.username)
return redirect('opmapp.views.units')
else:
return permission_denied(request)
else:
form = UnitInfoForm(initial={'property':request.session.get('property', None)})
return render_to_response("units/unit.html", {'form':form, 'is_add': True},
context_instance=RequestContext(request))
@login_required
@permission_required('opmapp.delete_unit', raise_exception=True)
@basic_data
@action_delete(Unit, 'opmapp.views.units')
def unit_delete(request, obj):
request.session['entity'] = 'units'
@login_required
@permission_required('opmapp.change_unit', raise_exception=True)
@basic_data
def unit(request, unit_code):
request.session['entity'] = 'units'
try:
u = Unit.objects.get(code=unit_code)
except Unit.DoesNotExist:
return page_not_found(request)
if request.method == 'POST':
form = UnitInfoForm(request.POST, request.FILES, instance=u)
if belong_to_property(form.instance, request.session['property']):
if form.is_valid():
form.save()
updated_by(form.instance,request.user.username)
return redirect('opmapp.views.units')
else:
return permission_denied(request)
else:
form = UnitInfoForm(instance=u)
form.fields['code'].widget = forms.HiddenInput()
return render_to_response("units/unit.html", {'form':form, 'unit':u, 'is_add':False},
context_instance=RequestContext(request))
def _get_ledger(units_ledger):
ledgers = {}
for unit in units_ledger:
credit_memos = CreditMemo.objects.filter(unit=unit)
debit_memos = DebitMemo.objects.filter(unit=unit)
credit_memos = credit_memos.order_by('date')
debit_memos = debit_memos.order_by('date')
account_memos = []
tot_bal=Decimal(0)
tot_cred=Decimal(0)
tot_deb=Decimal(0)
# Merge algorithm
i, j = 0, 0
n = len(credit_memos)
m = len(debit_memos)
for _ in range(n+m):
if i == n or j == m:
break
            # Handle credit and debit memo dates and amounts that may be None
credit_date = credit_memos[i].date
if not credit_date:
credit_date = datetime.date(1900,1,1)
debit_date = debit_memos[j].date
if not debit_date:
debit_date = datetime.date(1900,1,1)
credit_amount = credit_memos[i].amount
if not credit_amount:
credit_amount = Decimal(0)
debit_amount = debit_memos[j].amount
if not debit_amount:
debit_amount = Decimal(0)
if credit_date < debit_date:
tot_bal -= credit_amount
tot_cred += credit_amount
account_memos.append( (credit_memos[i], tot_bal, ) )
i+=1
else:
tot_bal += debit_amount
tot_deb += debit_amount
account_memos.append( (debit_memos[j], tot_bal,))
j+=1
for i in range(i, n):
credit_amount = credit_memos[i].amount
if not credit_amount:
credit_amount = Decimal(0)
tot_bal -= credit_amount
tot_cred += credit_amount
account_memos.append( (credit_memos[i], tot_bal, ))
for j in range(j, m):
debit_amount = debit_memos[j].amount
if not debit_amount:
debit_amount = Decimal(0)
tot_bal += debit_amount
tot_deb += debit_amount
account_memos.append( (debit_memos[j], tot_bal, ))
# The last row of account memos is the overall result
account_memos.append( (tot_deb, tot_cred, tot_bal,))
tenants = Tenant.objects.filter(unit=unit)
ledgers[unit.code] = (tenants, account_memos,)
return ledgers
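# The loop above is the classic two-pointer merge of two date-sorted lists.
# A stripped-down sketch of the same idea (hypothetical data): with
#     debits  = [Jan 01 +500, Feb 01 +500]
#     credits = [Jan 05 -100]
# always consuming the older entry yields the chronological ledger
# +500, -100, +500 with running balances 500, 400, 900.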
@login_required
@permission_required('opmapp.print_ledger_unit', raise_exception=True)
@basic_data
def unit_ledger(request, oid):
request.session['entity'] = 'units'
# If oid is not specified or is ''
    # that means the ledger applies to all units
units_ledger = []
if oid:
unit = get_object_or_404(Unit, pk=oid)
units_ledger.append(unit)
else:
prop = request.session.get('property',None)
units_ledger = Unit.objects.filter(property=prop)
ledgers = _get_ledger(units_ledger)
if len(ledgers)==1:
filename = 'ledger_'+oid+'.pdf'
else:
filename = 'ledgers.pdf'
return render_to_pdf("units/ledger.html", {'ledgers':ledgers,\
'property':Property.objects.get(id=request.session.get('property',None)),\
'title':'Ledger'}, RequestContext(request), filename)
@login_required
@permission_required('opmapp.do_monthly_stmt_unit', raise_exception=True)
@basic_data
def unit_monthly_stmt(request, oid):
request.session['entity'] = 'units'
# If oid is not specified or is ''
    # that means the ledger applies to all units
units_ledger = []
if oid:
unit = get_object_or_404(Unit, pk=oid)
units_ledger.append(unit)
else:
prop = request.session.get('property',None)
units_ledger = Unit.objects.filter(property=prop)
ledgers = _get_ledger(units_ledger)
new_ledgers = {}
now = timezone.now()
    first_day_of_month = datetime.date(now.year, now.month, 1)
for u, tupl in ledgers.items():
tenants = tupl[0]
account_memos = tupl[1]
balance_due_end_month = Decimal(0)
new_account_memos = []
for t in account_memos:
if len(t)==2: # Take the date from the tuple
d = t[0].date
if not d:
d = datetime.date(1900,1,1)
                if d >= first_day_of_month:
# Take it!
new_account_memos.append( (t[0],t[1],) )
else:
# Update the balance_due_end_month
balance_due_end_month = t[1]
elif len(t)==3:
tot_bal = t[2]
# new_account_memos.append( (t[0],t[1],t[2],) )
new_ledgers[u] = (tenants, new_account_memos, balance_due_end_month, tot_bal,)
if len(ledgers)==1:
filename = 'monthly_stmt_'+oid+'.pdf'
else:
filename = 'monthly_stmts.pdf'
return render_to_pdf("units/monthly_stmt.html", {'ledgers':new_ledgers,\
'property':Property.objects.get(id=request.session.get('property',None)),\
'title':'Monthly Statement Report'}, RequestContext(request), filename)
@login_required
@permission_required('opmapp.do_balance_due_errors_unit', raise_exception=True)
@basic_data
def unit_balance_due_error(request):
request.session['entity'] = 'units'
prop = request.session.get('property',None)
units = Unit.objects.filter(property=prop)
ledgers =_get_ledger(units)
list_units = []
for u in units:
account_memos = ledgers[u.code][1]
        # Take the last element of account_memos, which holds the total balance due
tot_bal = account_memos[len(account_memos)-1][2]
balance_due = Decimal(0)
if u.balance_due:
balance_due = u.balance_due
list_units.append( (u.code, balance_due,tot_bal, balance_due-tot_bal,) )
return render_to_pdf("units/balance_due_errors.html", {'units':list_units,\
'property':Property.objects.get(id=request.session.get('property',None)),\
'title':'Balance Due Discrepancy Report'}, RequestContext(request), 'balance_due_errors.pdf')
@login_required
@permission_required('opmapp.do_security_deposit_errors_unit', raise_exception=True)
@basic_data
def unit_security_deposit_error(request):
request.session['entity'] = 'units'
prop = request.session.get('property',None)
units = Unit.objects.filter(property=prop)
list_units = []
# Totals of each column:
tdep1 = 0
tdep2 = 0
tt_deps = 0
tt_cm = 0
tt_sec = 0
for u in units:
cm = CreditMemo.objects.filter(unit=u, type='SD')
tot_cm = sum([c.amount for c in cm if c.amount])
dep1 = Decimal(0)
if u.deposit1:
dep1 = u.deposit1
dep2 = Decimal(0)
if u.deposit2:
dep2 = u.deposit2
tot_deps = dep1+dep2
tdep1 += dep1
tdep2 += dep2
tt_deps += tot_deps
tt_cm += tot_cm
tt_sec += tot_deps-tot_cm
list_units.append( (u.code, dep1, dep2, tot_deps,\
tot_cm, tot_deps-tot_cm,) )
# Add last row with all totals for each column
list_units.append( ("Total:", tdep1, tdep2, tt_deps, tt_cm, tt_sec) )
return render_to_pdf("units/security_deposit_errors.html", {'units':list_units,\
'property':Property.objects.get(id=request.session.get('property',None)),\
'title':'Security Deposit Discrepancy Report'}, RequestContext(request), 'security_deposit_errors.pdf')
@login_required
@permission_required('opmapp.add_debitmemo', raise_exception=True)
@permission_required('opmapp.do_monthly_billing_unit', raise_exception=True)
@basic_data
def unit_rent_billing(request):
request.session['entity'] = 'units'
prop = request.session.get('property',None)
proper = Property.objects.get(pk=prop)
units = Unit.objects.filter(property=prop)
for unit in units:
# Converts recurring charges into debit memos
if unit.recurring_charge_amount1:
create_debit_memo(unit, proper, unit.recurring_charge_memo1, unit.recurring_charge_amount1, 'RC', request.user.username)
if unit.recurring_charge_amount2:
create_debit_memo(unit, proper, unit.recurring_charge_memo2, unit.recurring_charge_amount2, 'RC', request.user.username)
if unit.recurring_charge_amount3:
create_debit_memo(unit, proper, unit.recurring_charge_memo3, unit.recurring_charge_amount3, 'RC', request.user.username)
if unit.recurring_charge_amount4:
create_debit_memo(unit, proper, unit.recurring_charge_memo4, unit.recurring_charge_amount4, 'RC', request.user.username)
if unit.recurring_charge_amount5:
create_debit_memo(unit, proper, unit.recurring_charge_memo5, unit.recurring_charge_amount5, 'RC', request.user.username)
# Converts one-time charge into debit memos
if unit.non_recurring_charge_amount1:
create_debit_memo(unit, proper, unit.non_recurring_charge_memo1, unit.non_recurring_charge_amount1, 'OC', request.user.username)
unit.non_recurring_charge_memo1 = None
unit.non_recurring_charge_amount1 = Decimal(0)
unit.save()
if unit.non_recurring_charge_amount2:
create_debit_memo(unit, proper, unit.non_recurring_charge_memo2, unit.non_recurring_charge_amount2, 'OC', request.user.username)
unit.non_recurring_charge_memo2 = None
unit.non_recurring_charge_amount2 = Decimal(0)
unit.save()
if unit.non_recurring_charge_amount3:
create_debit_memo(unit, proper, unit.non_recurring_charge_memo3, unit.non_recurring_charge_amount3, 'OC', request.user.username)
unit.non_recurring_charge_memo3 = None
unit.non_recurring_charge_amount3 = Decimal(0)
unit.save()
if unit.non_recurring_charge_amount4:
create_debit_memo(unit, proper, unit.non_recurring_charge_memo4, unit.non_recurring_charge_amount4, 'OC', request.user.username)
unit.non_recurring_charge_memo4 = None
unit.non_recurring_charge_amount4 = Decimal(0)
unit.save()
if unit.non_recurring_charge_amount5:
create_debit_memo(unit, proper, unit.non_recurring_charge_memo5, unit.non_recurring_charge_amount5, 'OC', request.user.username)
unit.non_recurring_charge_memo5 = None
unit.non_recurring_charge_amount5 = Decimal(0)
unit.save()
return redirect('opmapp.views.debit_memos')
@login_required
@permission_required('opmapp.add_debitmemo', raise_exception=True)
@permission_required('opmapp.do_monthly_billing_v2_unit', raise_exception=True)
@basic_data
def unit_rent_billing2(request):
request.session['entity'] = 'units'
prop = request.session.get('property',None)
proper = Property.objects.get(pk=prop)
units = Unit.objects.filter(property=prop)
today = timezone.now().astimezone(timezone.get_current_timezone())
if request.method == 'POST':
select = request.POST.getlist('select', None)
for sel in select:
            m = re.match(r'(.+)_(reccharge\d|onecharge\d)', sel)
if not m:
continue
c = m.groups()[0]
t = m.groups()[1]
unit = get_object_or_404(Unit, pk=c)
date = request.POST.get('date', None)
if not date:
date = today
# Converts recurring charges into debit memos
if t == 'reccharge1' and unit.recurring_charge_amount1:
create_debit_memo(unit, proper, unit.recurring_charge_memo1, unit.recurring_charge_amount1, 'RC', request.user.username, date)
if t == 'reccharge2' and unit.recurring_charge_amount2:
create_debit_memo(unit, proper, unit.recurring_charge_memo2, unit.recurring_charge_amount2, 'RC', request.user.username, date)
if t == 'reccharge3' and unit.recurring_charge_amount3:
create_debit_memo(unit, proper, unit.recurring_charge_memo3, unit.recurring_charge_amount3, 'RC', request.user.username, date)
if t == 'reccharge4' and unit.recurring_charge_amount4:
create_debit_memo(unit, proper, unit.recurring_charge_memo4, unit.recurring_charge_amount4, 'RC', request.user.username, date)
if t == 'reccharge5' and unit.recurring_charge_amount5:
create_debit_memo(unit, proper, unit.recurring_charge_memo5, unit.recurring_charge_amount5, 'RC', request.user.username, date)
# Converts one-time charge into debit memos
if t == 'onecharge1' and unit.non_recurring_charge_amount1:
create_debit_memo(unit, proper, unit.non_recurring_charge_memo1, unit.non_recurring_charge_amount1, 'OC', request.user.username, date)
unit.non_recurring_charge_memo1 = None
unit.non_recurring_charge_amount1 = Decimal(0)
unit.save()
if t == 'onecharge2' and unit.non_recurring_charge_amount2:
create_debit_memo(unit, proper, unit.non_recurring_charge_memo2, unit.non_recurring_charge_amount2, 'OC', request.user.username, date)
unit.non_recurring_charge_memo2 = None
unit.non_recurring_charge_amount2 = Decimal(0)
unit.save()
if t == 'onecharge3' and unit.non_recurring_charge_amount3:
create_debit_memo(unit, proper, unit.non_recurring_charge_memo3, unit.non_recurring_charge_amount3, 'OC', request.user.username, date)
unit.non_recurring_charge_memo3 = None
unit.non_recurring_charge_amount3 = Decimal(0)
unit.save()
if t == 'onecharge4' and unit.non_recurring_charge_amount4:
create_debit_memo(unit, proper, unit.non_recurring_charge_memo4, unit.non_recurring_charge_amount4, 'OC', request.user.username, date)
unit.non_recurring_charge_memo4 = None
unit.non_recurring_charge_amount4 = Decimal(0)
unit.save()
if t == 'onecharge5' and unit.non_recurring_charge_amount5:
create_debit_memo(unit, proper, unit.non_recurring_charge_memo5, unit.non_recurring_charge_amount5, 'OC', request.user.username, date)
unit.non_recurring_charge_memo5 = None
unit.non_recurring_charge_amount5 = Decimal(0)
unit.save()
return redirect('opmapp.views.units')
return render_to_response("units/rent_billing.html", {'units':units, 'today':today.strftime('%Y-%m-%d')},
context_instance=RequestContext(request))
@login_required
@permission_required('opmapp.add_debitmemo', raise_exception=True)
@permission_required('opmapp.do_post_late_fees_unit', raise_exception=True)
@basic_data
def unit_post_late_fee(request):
request.session['entity'] = 'units'
prop = request.session.get('property',None)
proper = Property.objects.get(pk=prop)
units = Unit.objects.filter(property=prop)
late_fee = proper.late_fee
if request.method == 'POST':
action = request.POST.get('action', None)
if not action:
return page_not_found(request)
if action == 'update_cutoff':
try:
co = request.POST.get('cutoff', 0)
if not co:
co = 0
cutoff = Decimal(co)
except InvalidOperation:
return page_not_found(request)
elif action == 'post':
select = request.POST.getlist('select', None)
# Converts recurring charges into debit memos
for u in select:
unit = get_object_or_404(Unit, pk=u)
create_debit_memo(unit, proper, 'Late fee', late_fee, 'OC', request.user.username)
            return redirect('opmapp.views.units')
        else:
            # Unknown action: fall back to the same handling as above so
            # that 'cutoff' is never referenced before assignment
            return page_not_found(request)
else:
cutoff=Decimal(0)
return render_to_response("units/post_late_fee.html", {'units':units, 'late_fee':late_fee, 'cutoff':cutoff},
context_instance=RequestContext(request))
@login_required
@permission_required('opmapp.add_debitmemo', raise_exception=True)
@permission_required('opmapp.do_utility_billing_unit', raise_exception=True)
@basic_data
def unit_utility_billing(request):
request.session['entity'] = 'units'
prop = request.session.get('property',None)
proper = Property.objects.get(pk=prop)
units = Unit.objects.filter(property=prop).filter(code__startswith='V').exclude(Q(status='VACL')|Q(status='VACH')|Q(status='VACD')|Q(status='VRRY')|Q(status='VRRN'))
memo_inp = ''
tuc = Decimal(0)
billing = [(u,None,) for u in units]
if request.method == 'POST':
action = request.GET.get('action', None)
if action != 'post' and action != 'post_print' and action != 'calculate':
            return permission_denied(request)
select = request.POST.getlist('select', None)
tuc_inp = request.POST.get('tuc', None)
tuc = Decimal(0)
try:
tuc = Decimal(tuc_inp)
except InvalidOperation:
return page_not_found(request)
memo_inp = request.POST.get('memo', '')
bills = {}
# Calculate the sum of rubs for the units selected
sum_rubs = Decimal(0)
for u in select:
unit = get_object_or_404(Unit, pk=u)
if unit.rubs:
sum_rubs += unit.rubs
debit_memos = []
for u in select:
unit = get_object_or_404(Unit, pk=u)
rubs = Decimal(0)
if unit.rubs:
rubs = unit.rubs
# Handle the case when sum_rubs is zero
if sum_rubs != Decimal(0):
amount = (rubs*tuc)/sum_rubs
else:
amount = tuc/len(select)
if action == 'post' or action == 'post_print':
debit_memos.append(create_debit_memo(unit, proper,\
'Utility - GAS '+memo_inp, amount, 'RC', request.user.username))
else:
bills[u] = amount
if action == 'post':
return redirect('opmapp.views.units')
elif action == 'post_print':
return render_to_pdf("units/utility_billing_print.html",\
{'debit_memos':debit_memos,\
'property':Property.objects.get(id=request.session.get('property',None)),\
'title':'Utility Bill'}, RequestContext(request), 'utility_billing.pdf')
else:
billing = [(u, bills.get(u.code, None)) for u in units]
return render_to_response("units/utility_billing.html", {'tuc':tuc, 'billing':billing, 'memo':memo_inp},
context_instance=RequestContext(request))
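# Worked allocation example (illustrative, not from the original source): with
# tuc = 300 and three selected units whose rubs factors are 1, 2 and 3
# (sum_rubs = 6), the view above charges them 50, 100 and 150 respectively;
# when every rubs factor is zero, the total is split evenly instead.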
@login_required
@permission_required('opmapp.add_creditmemo', raise_exception=True)
@permission_required('opmapp.do_batch_payment_input', raise_exception=True)
@basic_data
def unit_batch_payment_input(request):
request.session['entity'] = 'units'
batch_payment = request.session.get('batch_payment', [])
now = timezone.now().astimezone(timezone.get_current_timezone())
if request.method == 'POST':
action = request.GET.get('action', None)
if action != 'add' and action != 'post' and action != 'delete':
return permission_denied(request)
form = CreditMemoForm(request.POST)
if action == 'add':
if form.is_valid():
                # Ensure the credit memo and its unit belong to the current property
if belong_to_property(form.instance, request.session['property']) and\
belong_to_property(form.instance.unit, request.session['property']):
bp = dict(form.cleaned_data)
bp['unit'] = bp['unit'].pk
bp['property'] = bp['property'].pk
batch_payment.append(bp)
request.session['batch_payment'] = batch_payment
form = CreditMemoForm(initial={'property':request.session.get('property', None),\
'type':'PA', 'date':now.strftime('%Y-%m-%d')})
else:
return permission_denied(request)
elif action == 'delete':
cid = request.GET.get('id', None)
if (not cid) or (not cid.isdigit()):
return permission_denied(request)
cid = int(cid)
if cid<0 or cid>=len(batch_payment):
return permission_denied(request)
del batch_payment[cid]
request.session['batch_payment'] = batch_payment
form = CreditMemoForm(initial={'property':request.session.get('property', None),\
'type':'PA', 'date':now.strftime('%Y-%m-%d')})
elif action == 'post':
for bp in batch_payment:
create_credit_memo(Unit.objects.get(pk=bp['unit']),\
Property.objects.get(pk=bp['property']),\
bp['memo'], bp['amount'], bp['type'],\
bp['check'], request.user.username, bp['date'])
return redirect('opmapp.views.units')
else:
# Empty the credit memos in the session
if 'batch_payment' in request.session:
del request.session['batch_payment']
batch_payment = []
form = CreditMemoForm(initial={'property':request.session.get('property', None),\
'type':'PA',\
'date':now.strftime('%Y-%m-%d')})
    # Hide the type, description and tenant fields
form.fields['type'].widget = forms.HiddenInput()
form.fields['description'].widget = forms.HiddenInput()
form.fields['tenant'].widget = forms.HiddenInput()
units = Unit.objects.filter(property=request.session['property'])
tenants = Tenant.objects.filter(property=request.session['property'])
form.fields['unit'].queryset=units
form.fields['tenant'].queryset = tenants
# Get the total amount
tot_amount = Decimal(0)
for bp in batch_payment:
tot_amount+=bp['amount']
return render_to_response("units/batch_payment_input.html", {'form':form, 'batch_payment':batch_payment, 'total_amount':tot_amount},
context_instance=RequestContext(request))
@login_required
@basic_data
def unit_memos(request):
request.session['entity'] = 'unit_memos'
prop = request.session.get('property',None)
unit_memos = UnitMemo.objects.filter(property=prop)
units_id = [(u.code,u.code) for u in Unit.objects.filter(property=prop)]
units_id.insert(0, ('', '-----'))
if request.method == 'POST':
search_form = UnitMemoSearchForm(request.POST)
search_form.fields['unit'].choices = units_id
if search_form.is_valid():
date_from_filter = search_form.cleaned_data.get('date_from',None)
date_to_filter = search_form.cleaned_data.get('date_to',None)
unit_filter = search_form.cleaned_data.get('unit',None)
order_by = search_form.cleaned_data.get('order_by', None)
if date_from_filter!='' and date_from_filter!=None and date_to_filter!='' and date_to_filter!=None:
unit_memos = unit_memos.filter(date__range=[date_from_filter, date_to_filter])
elif date_from_filter!='' and date_from_filter!=None:
unit_memos = unit_memos.filter(date__range=[date_from_filter, '2100-12-31'])
elif date_to_filter!='' and date_to_filter!=None:
unit_memos = unit_memos.filter(date__range=['1900-01-01', date_to_filter])
if unit_filter!='' and unit_filter!=None:
unit_memos = unit_memos.filter(unit=unit_filter)
if order_by=='date' or order_by=='unit__sort_order':
if order_by == 'date':
order_by = '-'+order_by
unit_memos = unit_memos.order_by(order_by)
else:
search_form = UnitMemoSearchForm()
search_form.fields['unit'].choices = units_id
can_add = request.user.has_perm('opmapp.add_unitmemo')
action = search_form.data.get('action', None)
return render_to_view('unit_memos/unit_memos.html',\
{'property':Property.objects.get(id=prop), 'title':'Unit Memos', 'action':action, 'unit_memos':unit_memos, 'search_form':search_form,\
'view_add':reverse('opmapp.views.unit_memo_add', args=['']), 'can_add':can_add,\
'url_reset':reverse('opmapp.views.unit_memos'),},\
RequestContext(request), action, 'unit_memos.pdf')
@login_required
@basic_data
def unit_memo_view(request, oid):
request.session['entity'] = 'unit_memos'
o = get_object_or_404(UnitMemo, pk=oid)
action = request.GET.get('action', None)
return render_to_view('generic_detail_view.html', {'property':Property.objects.get(id=request.session.get('property',None)),\
'model': model_to_list(o),'title':'Unit Memo', 'action':action},\
RequestContext(request), action, 'unit_memo_'+oid+'.pdf')
@login_required
@permission_required('opmapp.change_unitmemo', raise_exception=True)
@basic_data
def unit_memo(request, cid):
request.session['entity'] = 'unit_memos'
try:
c = UnitMemo.objects.get(id=cid)
except UnitMemo.DoesNotExist:
return page_not_found(request)
if request.method == 'POST':
form = UnitMemoForm(request.POST, instance=c)
if request.user.has_perm('opmapp.change_unitmemo') and\
belong_to_property(form.instance, request.session['property']) and\
belong_to_property(form.instance.unit, request.session['property']):
if form.is_valid():
form.save()
updated_by(form.instance,request.user.username)
return redirect('opmapp.views.unit_memos')
else:
return permission_denied(request)
else:
form = UnitMemoForm(instance=c)
units = Unit.objects.filter(property=request.session['property'])
form.fields['unit'].queryset=units
return render_to_response("unit_memos/unit_memo.html", {'form':form},
context_instance=RequestContext(request))
@login_required
@permission_required('opmapp.add_unitmemo', raise_exception=True)
@basic_data
def unit_memo_add(request, unit_id):
request.session['entity'] = 'unit_memos'
    # Only verify that the referenced unit actually exists
if unit_id:
try:
Unit.objects.get(code=unit_id)
except Unit.DoesNotExist:
return page_not_found(request)
if request.method == 'POST':
form = UnitMemoForm(request.POST)
if form.is_valid():
            # Ensure the memo and its unit belong to the current property
if belong_to_property(form.instance, request.session['property']) and\
belong_to_property(form.instance.unit, request.session['property']):
form.save()
created_by(form.instance, request.user.username)
return redirect('opmapp.views.unit_memos')
else:
return permission_denied(request)
else:
now = timezone.now().astimezone(timezone.get_current_timezone())
form = UnitMemoForm(initial={'property':request.session.get('property', None),\
'unit':unit_id,\
'date':now.strftime('%Y-%m-%d')})
units = Unit.objects.filter(property=request.session['property'])
form.fields['unit'].queryset=units
return render_to_response("unit_memos/unit_memo.html", {'form':form},
context_instance=RequestContext(request))
@login_required
@permission_required('opmapp.delete_unitmemo', raise_exception=True)
@basic_data
@action_delete(UnitMemo, 'opmapp.views.unit_memos')
def unit_memo_delete(request, cid):
request.session['entity'] = 'unit_memos'
@login_required
@basic_data
def prospects(request):
request.session['entity'] = 'prospects'
prop = request.session.get('property',None)
prospects = Prospect.objects.filter(property=prop)
if request.method == 'POST':
search_form = ProspectSearchForm(request.POST)
if search_form.is_valid():
last_name_filter = search_form.cleaned_data.get('last_name',None)
hold_unit_filter = search_form.cleaned_data.get('hold_unit',None)
order_by = search_form.cleaned_data.get('order_by', None)
if last_name_filter!='' and last_name_filter!=None:
prospects = prospects.filter(last_name__istartswith=last_name_filter)
if hold_unit_filter!='' and hold_unit_filter!=None:
prospects = prospects.filter(hold_unit__istartswith=hold_unit_filter)
if order_by=='last_name' or order_by=='hold_unit' or order_by=='move_in_date_desired' or\
order_by=='status' or order_by=='date_created':
if order_by == 'date_created':
order_by = '-'+order_by
prospects = prospects.order_by(order_by)
else:
search_form = ProspectSearchForm()
can_add = request.user.has_perm('opmapp.add_prospect')
action = search_form.data.get('action', None)
return render_to_view('prospects/prospects.html',\
{'property':Property.objects.get(id=prop), 'title':'Prospects', 'action':action, 'prospects':prospects, 'search_form':search_form,\
'view_add':reverse('opmapp.views.prospect_add'), 'can_add':can_add,\
'url_reset':reverse('opmapp.views.prospects'),},\
RequestContext(request), action, 'prospects.pdf')
@login_required
@basic_data
def prospect_view(request, oid):
request.session['entity'] = 'prospects'
o = get_object_or_404(Prospect, pk=oid)
action = request.GET.get('action', None)
return render_to_view('generic_detail_view.html', {'property':Property.objects.get(id=request.session.get('property',None)),\
'model': model_to_list(o),'title':'Prospect', 'action':action},\
RequestContext(request), action, 'prospect_'+oid+'.pdf')
@login_required
@permission_required('opmapp.change_prospect', raise_exception=True)
@basic_data
def prospect(request, cid):
request.session['entity'] = 'prospects'
try:
c = Prospect.objects.get(id=cid)
except Prospect.DoesNotExist:
return page_not_found(request)
if request.method == 'POST':
form = ProspectForm(request.POST, instance=c)
if request.user.has_perm('opmapp.change_prospect') and\
belong_to_property(form.instance, request.session['property']):
if not request.user.has_perm('opmapp.change_amount_prospect'):
if request.POST.get('amount', None) is not None:
return permission_denied(request)
if form.is_valid():
form.save()
updated_by(form.instance,request.user.username)
return redirect('opmapp.views.prospects')
else:
return permission_denied(request)
else:
form = ProspectForm(instance=c)
units_id = [(u.code,u.code) for u in Unit.objects.filter(property=request.session['property'])]
units_id.insert(0, ('', '-----'))
form.fields['hold_unit'].choices=units_id
return render_to_response("prospects/prospect.html", {'form':form},
context_instance=RequestContext(request))
@login_required
@permission_required('opmapp.add_prospect', raise_exception=True)
@basic_data
def prospect_add(request):
request.session['entity'] = 'prospects'
if request.method == 'POST':
form = ProspectForm(request.POST)
if form.is_valid():
            # Ensure the prospect belongs to the current property
if belong_to_property(form.instance, request.session['property']):
form.save()
created_by(form.instance, request.user.username)
return redirect('opmapp.views.prospects')
else:
return permission_denied(request)
else:
form = ProspectForm(initial={'property':request.session.get('property', None)})
units_id = [(u.code,u.code) for u in Unit.objects.filter(property=request.session['property'])]
units_id.insert(0, ('', '-----'))
form.fields['hold_unit'].choices=units_id
return render_to_response("prospects/prospect.html", {'form':form},
context_instance=RequestContext(request))
@login_required
@permission_required('opmapp.delete_prospect', raise_exception=True)
@basic_data
@action_delete(Prospect, 'opmapp.views.prospects')
def prospect_delete(request, cid):
request.session['entity'] = 'prospects'
@login_required
@permission_required('opmapp.convert_prospect', raise_exception=True)
@permission_required('opmapp.add_tenant', raise_exception=True)
@permission_required('opmapp.delete_prospect', raise_exception=True)
@basic_data
def prospect_convert(request, cid):
request.session['entity'] = 'prospects'
try:
c = Prospect.objects.get(id=cid)
except Prospect.DoesNotExist:
return page_not_found(request)
if not belong_to_property(c, request.session['property']):
return permission_denied(request)
t = Tenant()
t.first_name = c.first_name
t.last_name = c.last_name
t.property = c.property
t.permanent_address1 = c.address
t.permanent_city = c.city
t.permanent_state = c.state
t.permanent_zip_code = c.zipcode
t.phone1 = c.phone
t.email = c.email
t.comments = c.comments
t.unit = Unit.objects.get(code=c.hold_unit)
created_by(t, request.user.username)
t.save()
c.delete()
return redirect('opmapp.views.prospects')
@login_required
@basic_data
def credit_memos(request):
request.session['entity'] = 'credit_memos'
# order_by = request.GET.get('order_by', None)
prop = request.session.get('property',None)
credit_memos = CreditMemo.objects.filter(property=prop)
units_id = [(u.code,u.code) for u in Unit.objects.filter(property=prop)]
units_id.insert(0, ('', '-----'))
if request.method == 'POST':
search_form = CreditMemoSearchForm(request.POST)
search_form.fields['unit'].choices = units_id
if search_form.is_valid():
id_filter = search_form.cleaned_data.get('id',None)
unit_filter = search_form.cleaned_data.get('unit',None)
date_from_filter = search_form.cleaned_data.get('date_from',None)
date_to_filter = search_form.cleaned_data.get('date_to',None)
check_filter = search_form.cleaned_data.get('check',None)
order_by = search_form.cleaned_data.get('order_by',None)
if id_filter!='' and id_filter!=None:
credit_memos = credit_memos.filter(id__istartswith=id_filter)
if unit_filter!='' and unit_filter!=None:
credit_memos = credit_memos.filter(unit=unit_filter)
if date_from_filter!='' and date_from_filter!=None and date_to_filter!='' and date_to_filter!=None:
credit_memos = credit_memos.filter(date__range=[date_from_filter, date_to_filter])
elif date_from_filter!='' and date_from_filter!=None:
credit_memos = credit_memos.filter(date__range=[date_from_filter, '2100-12-31'])
elif date_to_filter!='' and date_to_filter!=None:
credit_memos = credit_memos.filter(date__range=['1900-01-01', date_to_filter])
if check_filter!='' and check_filter!=None:
credit_memos = credit_memos.filter(check__istartswith=check_filter)
if order_by=='date' or order_by=='unit__sort_order' or order_by=='id':
if order_by == 'date':
order_by = '-'+order_by
credit_memos = credit_memos.order_by(order_by)
else:
search_form = CreditMemoSearchForm()
search_form.fields['unit'].choices = units_id
credit_memos = []
can_add = request.user.has_perm('opmapp.add_creditmemo')
action = search_form.data.get('action', None)
return render_to_view('credit_memos/credit_memos.html',\
{'property':Property.objects.get(id=prop), 'title':'Credit Memos', 'action':action, 'credit_memos':credit_memos, 'search_form':search_form,\
'view_add':reverse('opmapp.views.credit_memo_add'), 'can_add':can_add,\
'url_reset':reverse('opmapp.views.credit_memos'),},\
RequestContext(request), action, 'credit_memos.pdf')
@login_required
@permission_required('opmapp.add_creditmemo', raise_exception=True)
@basic_data
def credit_memo_add(request):
request.session['entity'] = 'credit_memos'
nxt = request.POST.get('next', '')
if nxt=='':
nxt = '/credit_memos/'
if request.method == 'POST':
form = CreditMemoForm(request.POST)
if form.is_valid():
            # Ensure the credit memo and its unit belong to the current property
if belong_to_property(form.instance, request.session['property']) and\
belong_to_property(form.instance.unit, request.session['property']):
if not form.instance.amount:
form.instance.amount = Decimal(0.0)
# Manage the balance due of the unit
u = form.instance.unit
if u:
if not u.balance_due:
u.balance_due = Decimal(0.0)
u.balance_due = u.balance_due - form.instance.amount
u.save()
updated_by(u, request.user.username)
                # Append the tenant name to the memo field
t = form.cleaned_data['tenant']
if t and t.first_name and t.last_name:
form.instance.memo = form.instance.memo + ' - ' + t.first_name + ' ' +\
t.last_name
form.save()
created_by(form.instance, request.user.username)
return redirect(nxt)
else:
return permission_denied(request)
else:
now = timezone.now().astimezone(timezone.get_current_timezone())
form = CreditMemoForm(initial={'property':request.session.get('property', None),\
'type':'PA',\
'date':now.strftime('%Y-%m-%d')})
units = Unit.objects.filter(property=request.session['property'])
tenants = Tenant.objects.filter(property=request.session['property'])
form.fields['unit'].queryset=units
form.fields['tenant'].queryset = tenants
return render_to_response("credit_memos/credit_memo.html", {'form':form, 'action':'add'},
context_instance=RequestContext(request))
@login_required
@permission_required('opmapp.delete_creditmemo', raise_exception=True)
@basic_data
@action_delete(CreditMemo, 'opmapp.views.credit_memos')
def credit_memo_delete(request, cid):
request.session['entity'] = 'credit_memos'
@login_required
@basic_data
def credit_memo_view(request, oid):
request.session['entity'] = 'credit_memos'
o = get_object_or_404(CreditMemo, pk=oid)
action = request.GET.get('action', None)
return render_to_view('generic_detail_view.html', {'property':Property.objects.get(id=request.session.get('property',None)),\
'model': model_to_list(o),'title':'Credit Memo', 'action':action},\
RequestContext(request), action, 'credit_memo_'+oid+'.pdf')
def create_credit_memo(unit, proper, memo, amount, typ, check, username, date=None):
cm = CreditMemo()
cm.amount = amount
cm.memo = memo
cm.property = proper
cm.unit = unit
cm.type = typ
if not date:
date = timezone.now()
cm.date = date
cm.check = check
created_by(cm, username)
cm.save()
# Update the balance due of unit
if unit:
if not unit.balance_due:
unit.balance_due = Decimal(0.0)
if not amount:
amount = Decimal(0)
unit.balance_due = unit.balance_due - amount
unit.save()
updated_by(unit, username)
return cm
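# Illustrative usage sketch (assumed values, not from the original source):
# posting a $100.00 rent payment against a unit decreases its balance_due by
# the same amount; create_debit_memo below is the mirror image with a plus.
#
#   cm = create_credit_memo(unit, proper, 'Rent payment', Decimal('100.00'),
#                           'PA', '1042', request.user.username)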
@login_required
@basic_data
def debit_memos(request):
request.session['entity'] = 'debit_memos'
prop = request.session.get('property',None)
debit_memos = DebitMemo.objects.filter(property=prop)
units_id = [(u.code,u.code) for u in Unit.objects.filter(property=prop)]
units_id.insert(0, ('', '-----'))
if request.method == 'POST':
search_form = DebitMemoSearchForm(request.POST)
search_form.fields['unit'].choices = units_id
if search_form.is_valid():
id_filter = search_form.cleaned_data.get('id',None)
unit_filter = search_form.cleaned_data.get('unit',None)
date_from_filter = search_form.cleaned_data.get('date_from',None)
date_to_filter = search_form.cleaned_data.get('date_to',None)
order_by = search_form.cleaned_data.get('order_by', None)
if id_filter!='' and id_filter!=None:
debit_memos = debit_memos.filter(id__istartswith=id_filter)
if unit_filter!='' and unit_filter!=None:
debit_memos = debit_memos.filter(unit=unit_filter)
if date_from_filter!='' and date_from_filter!=None and date_to_filter!='' and date_to_filter!=None:
debit_memos = debit_memos.filter(date__range=[date_from_filter, date_to_filter])
elif date_from_filter!='' and date_from_filter!=None:
debit_memos = debit_memos.filter(date__range=[date_from_filter, '2100-12-31'])
elif date_to_filter!='' and date_to_filter!=None:
debit_memos = debit_memos.filter(date__range=['1900-01-01', date_to_filter])
if order_by=='date' or order_by=='unit__sort_order' or order_by=='id':
if order_by == 'date':
order_by = '-'+order_by
debit_memos = debit_memos.order_by(order_by)
else:
search_form = DebitMemoSearchForm()
search_form.fields['unit'].choices = units_id
debit_memos = []
can_add = request.user.has_perm('opmapp.add_debitmemo')
action = search_form.data.get('action', None)
return render_to_view('debit_memos/debit_memos.html',\
{'property':Property.objects.get(id=prop), 'title':'Debit Memos', 'action':action, 'debit_memos':debit_memos, 'search_form':search_form,\
'view_add':reverse('opmapp.views.debit_memo_add'), 'can_add':can_add,\
'url_reset':reverse('opmapp.views.debit_memos'),},\
RequestContext(request), action, 'debit_memos.pdf')
@login_required
@basic_data
def debit_memo_view(request, oid):
request.session['entity'] = 'debit_memos'
o = get_object_or_404(DebitMemo, pk=oid)
action = request.GET.get('action', None)
return render_to_view('generic_detail_view.html', {'property':Property.objects.get(id=request.session.get('property',None)),\
'model': model_to_list(o),'title':'Debit Memo', 'action':action},\
RequestContext(request), action, 'debit_memo_'+oid+'.pdf')
def create_debit_memo(unit, proper, memo, amount, typ, username, date=None):
dm = DebitMemo()
dm.amount = amount
dm.memo = memo
dm.property = proper
dm.unit = unit
dm.type = typ
if not date:
date = timezone.now()
dm.date = date
created_by(dm, username)
dm.save()
# Update the balance due of unit
if unit:
if not unit.balance_due:
unit.balance_due = Decimal(0.0)
if not amount:
amount = Decimal(0)
unit.balance_due = unit.balance_due + amount
unit.save()
updated_by(unit, username)
return dm
@login_required
@permission_required('opmapp.add_debitmemo', raise_exception=True)
@basic_data
def debit_memo_add(request):
request.session['entity'] = 'debit_memos'
nxt = request.POST.get('next', '')
if nxt=='':
nxt = '/debit_memos/'
if request.method == 'POST':
form = DebitMemoForm(request.POST)
if form.is_valid():
            # Ensure the debit memo and its unit belong to the current property
if belong_to_property(form.instance, request.session['property']) and\
belong_to_property(form.instance.unit, request.session['property']):
if not form.instance.amount:
form.instance.amount = Decimal(0.0)
u = form.instance.unit
if u:
if not u.balance_due:
u.balance_due = Decimal(0.0)
u.balance_due = u.balance_due + form.instance.amount
u.save()
updated_by(u, request.user.username)
form.save()
created_by(form.instance, request.user.username)
return redirect(nxt)
else:
return permission_denied(request)
else:
now = timezone.now().astimezone(timezone.get_current_timezone())
form = DebitMemoForm(initial={'property':request.session.get('property', None),\
'date':now.strftime('%Y-%m-%d'), 'type':'OC'})
units = Unit.objects.filter(property=request.session['property'])
form.fields['unit'].queryset=units
return render_to_response("debit_memos/debit_memo.html", {'form':form, 'action':'add'},
context_instance=RequestContext(request))
@login_required
@permission_required('opmapp.delete_debitmemo', raise_exception=True)
@basic_data
@action_delete(DebitMemo, 'opmapp.views.debit_memos')
def debit_memo_delete(request, tid):
request.session['entity'] = 'debit_memos'
@login_required
@basic_data
def office_memos(request):
request.session['entity'] = 'office_memos'
prop = request.session.get('property',None)
office_memos = OfficeMemo.objects.filter(property=prop)
if request.method == 'POST':
search_form = OfficeMemoSearchForm(request.POST)
if search_form.is_valid():
date_from_filter = search_form.cleaned_data.get('date_from',None)
date_to_filter = search_form.cleaned_data.get('date_to',None)
order_by = request.POST.get('order_by', None)
if date_from_filter!='' and date_from_filter!=None and date_to_filter!='' and date_to_filter!=None:
office_memos = office_memos.filter(date__range=[date_from_filter, date_to_filter])
elif date_from_filter!='' and date_from_filter!=None:
office_memos = office_memos.filter(date__range=[date_from_filter, '2100-12-31'])
elif date_to_filter!='' and date_to_filter!=None:
office_memos = office_memos.filter(date__range=['1900-01-01', date_to_filter])
if order_by=='date' or order_by=='followup_code':
if order_by == 'date':
order_by = '-'+order_by
office_memos = office_memos.order_by(order_by)
else:
search_form = OfficeMemoSearchForm()
can_add = request.user.has_perm('opmapp.add_officememo')
action = search_form.data.get('action', None)
return render_to_view('office_memos/office_memos.html',\
{'property':Property.objects.get(id=prop), 'title':'Office Memos', 'action':action, 'office_memos':office_memos, 'search_form':search_form,\
'view_add':reverse('opmapp.views.office_memo_add'), 'can_add':can_add,\
'url_reset':reverse('opmapp.views.office_memos'),},\
RequestContext(request), action, 'office_memos.pdf')
@login_required
@basic_data
def office_memo_view(request, oid):
request.session['entity'] = 'office_memos'
o = get_object_or_404(OfficeMemo, pk=oid)
action = request.GET.get('action', None)
return render_to_view('generic_detail_view.html', {'property':Property.objects.get(id=request.session.get('property',None)),\
'model': model_to_list(o),'title':'Office Memo', 'action':action},\
RequestContext(request), action, 'office_memo_'+oid+'.pdf')
@login_required
@permission_required('opmapp.change_officememo', raise_exception=True)
@basic_data
def office_memo(request, oid):
request.session['entity'] = 'office_memos'
nxt = request.POST.get('next', '')
if nxt=='':
nxt = '/office_memos/'
try:
o = OfficeMemo.objects.get(id=oid)
except OfficeMemo.DoesNotExist:
return page_not_found(request)
form = OfficeMemoForm(request.POST, instance=o)
if request.user.has_perm('opmapp.change_officememo') and\
belong_to_property(form.instance, request.session['property']):
if form.is_valid():
form.save()
updated_by(form.instance,request.user.username)
return redirect(nxt)
else:
return permission_denied(request)
else:
form = OfficeMemoForm(instance=o)
return render_to_response("office_memos/office_memo.html", {'form':form},
context_instance=RequestContext(request))
@login_required
@permission_required('opmapp.add_officememo', raise_exception=True)
@basic_data
def office_memo_add(request):
request.session['entity'] = 'office_memos'
nxt = request.POST.get('next', '')
if nxt=='':
nxt = '/office_memos/'
if request.method == 'POST':
form = OfficeMemoForm(request.POST)
if form.is_valid():
            # Ensure the office memo belongs to the current property
if belong_to_property(form.instance, request.session['property']):
form.save()
created_by(form.instance, request.user.username)
return redirect(nxt)
else:
return permission_denied(request)
else:
now = timezone.now().astimezone(timezone.get_current_timezone())
form = OfficeMemoForm(initial={'property':request.session.get('property', None),\
'date':now.strftime('%Y-%m-%d')})
return render_to_response("office_memos/office_memo.html", {'form':form},
context_instance=RequestContext(request))
@login_required
@permission_required('opmapp.delete_officememo', raise_exception=True)
@basic_data
@action_delete(OfficeMemo, 'opmapp.views.office_memos')
def office_memo_delete(request, oid):
request.session['entity'] = 'office_memos'
@login_required
@basic_data
def maintenance_memos(request):
request.session['entity'] = 'maintenance_memos'
prop = request.session.get('property',None)
maintenance_memos = MaintenanceMemo.objects.filter(property=prop)
units_id = [(u.code,u.code) for u in Unit.objects.filter(property=prop)]
units_id.insert(0, ('', '-----'))
if request.method == 'POST':
search_form = MaintenanceMemoSearchForm(request.POST)
search_form.fields['unit'].choices = units_id
search_form.fields['status_code'].choices = (('','-----'),) + MaintenanceMemo.STATUS_CHOICES
if search_form.is_valid():
date_from_created_filter = search_form.cleaned_data.get('date_from_created',None)
date_to_created_filter = search_form.cleaned_data.get('date_to_created',None)
if date_to_created_filter:
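                # date_created is a datetime field, so extend the user's end
                # date by one day to make the range cover that whole day.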
date_to_created_filter = date_to_created_filter + datetime.timedelta(days=1)
unit_filter = search_form.cleaned_data.get('unit',None)
status_code_filter = search_form.cleaned_data.get('status_code',None)
order_by = search_form.cleaned_data.get('order_by', None)
if date_from_created_filter!='' and date_from_created_filter!=None and date_to_created_filter!='' and date_to_created_filter!=None:
maintenance_memos = maintenance_memos.filter(date_created__range=[date_from_created_filter, date_to_created_filter])
elif date_from_created_filter!='' and date_from_created_filter!=None:
maintenance_memos = maintenance_memos.filter(date_created__range=[date_from_created_filter, '2100-12-31 23:59'])
elif date_to_created_filter!='' and date_to_created_filter!=None:
maintenance_memos = maintenance_memos.filter(date_created__range=['1900-01-01 00:00', date_to_created_filter])
if unit_filter!='' and unit_filter!=None:
maintenance_memos = maintenance_memos.filter(unit__code=unit_filter)
if status_code_filter!='' and status_code_filter!=None:
maintenance_memos = maintenance_memos.filter(status_code=status_code_filter)
if order_by=='date_created' or order_by=='unit':
if order_by == 'date_created':
order_by = '-'+order_by
maintenance_memos = maintenance_memos.order_by(order_by)
else:
search_form = MaintenanceMemoSearchForm()
search_form.fields['unit'].choices = units_id
search_form.fields['status_code'].choices = (('','-----'),) + MaintenanceMemo.STATUS_CHOICES
    # By default, list only maintenance memos that are not yet completed
maintenance_memos = maintenance_memos.exclude(status_code='CM')
maintenance_memos_list = []
# Get the max number of tenants
max_tenants = 0
for mm in maintenance_memos:
if mm.unit:
ts = Tenant.objects.filter(unit=mm.unit)
if len(ts)>max_tenants:
max_tenants = len(ts)
for mm in maintenance_memos:
tenants = []
if mm.unit:
ts = Tenant.objects.filter(unit=mm.unit)
for t in ts:
tenants.append((t.id, t.last_name, t.phone1))
else:
ts=[]
for _ in range(max_tenants-len(ts)):
tenants.append("")
# only up to 2 tenants
tenants = tenants[:2]
maintenance_memos_list.append({'id':mm.id, 'date_created':mm.date_created, 'created_by':mm.created_by, 'unit':mm.unit,\
'tenants':tenants, 'enter_code':mm.get_enter_code_display(), 'work_requested_memo':mm.work_requested_memo,\
'status_code':mm.get_status_code_display()})
max_tenants = min(max_tenants, 2)
can_add = request.user.has_perm('opmapp.add_maintenancememo')
action = search_form.data.get('action', None)
return render_to_view('maintenance_memos/maintenance_memos.html',\
{'property':Property.objects.get(id=prop), 'title':'Maintenance Memos', 'action':action, 'maintenance_memos':maintenance_memos_list,\
'max_tenants':max_tenants, 'search_form':search_form,\
'view_add':reverse('opmapp.views.maintenance_memo_add'), 'can_add':can_add,\
'url_reset':reverse('opmapp.views.maintenance_memos'),},\
RequestContext(request), action, 'maintenance_memos.pdf')
@login_required
@permission_required('opmapp.change_maintenancememo', raise_exception=True)
@basic_data
def maintenance_memo(request, mid):
request.session['entity'] = 'maintenance_memos'
nxt = request.POST.get('next', '')
if nxt=='':
nxt = '/maintenance_memos/'
try:
m = MaintenanceMemo.objects.get(id=mid)
except MaintenanceMemo.DoesNotExist:
return page_not_found(request)
if request.method == 'POST':
form = MaintenanceMemoForm(request.POST, instance=m)
if request.user.has_perm('opmapp.change_maintenancememo') and\
belong_to_property(form.instance, request.session['property']) and\
belong_to_property(form.instance.unit, request.session['property']):
if form.is_valid():
form.save()
updated_by(form.instance,request.user.username)
return redirect(nxt)
else:
return permission_denied(request)
else:
form = MaintenanceMemoForm(instance=m)
units = Unit.objects.filter(property=request.session['property'])
form.fields['unit'].queryset=units
t = Tenant.objects.filter(property=request.session['property'])
form.fields['requested_by'].queryset= t
return render_to_response("maintenance_memos/maintenance_memo.html", {'form':form},
context_instance=RequestContext(request))
@login_required
@permission_required('opmapp.add_maintenancememo', raise_exception=True)
@basic_data
def maintenance_memo_add(request):
request.session['entity'] = 'maintenance_memos'
nxt = request.POST.get('next', '')
if nxt=='':
nxt = '/maintenance_memos/'
if request.method == 'POST':
form = MaintenanceMemoForm(request.POST)
if form.is_valid():
            # Ensure the maintenance memo and its unit belong to the current property
if belong_to_property(form.instance, request.session['property']) and\
belong_to_property(form.instance.unit, request.session['property']):
form.save()
created_by(form.instance, request.user.username)
return redirect(nxt)
else:
return permission_denied(request)
else:
now = timezone.now().astimezone(timezone.get_current_timezone())
form = MaintenanceMemoForm(initial={'requested_by':'office', 'property':request.session.get('property', None),\
'completion_date':now.strftime('%Y-%m-%d'),\
'date_assigned':now.strftime('%Y-%m-%d')})
units = Unit.objects.filter(property=request.session['property'])
form.fields['unit'].queryset=units
tenants = Tenant.objects.filter(property=request.session['property'])
form.fields['requested_by'].queryset=tenants
return render_to_response("maintenance_memos/maintenance_memo.html", {'form':form},
context_instance=RequestContext(request))
@login_required
@permission_required('opmapp.delete_maintenancememo', raise_exception=True)
@basic_data
@action_delete(MaintenanceMemo, 'opmapp.views.maintenance_memos')
def maintenance_memo_delete(request, mid):
request.session['entity'] = 'maintenance_memos'
@login_required
@basic_data
def maintenance_memo_view(request, oid):
request.session['entity'] = 'maintenance_memos'
o = get_object_or_404(MaintenanceMemo, pk=oid)
requested_by_phone = None
if o.requested_by:
requested_by_phone = o.requested_by.phone1
action = request.GET.get('action', None)
return render_to_view('maintenance_memos/maintenance_memo_view.html', {'property':Property.objects.get(id=request.session.get('property',None)),\
'model': o,'title':'Maintenance Memo', 'requested_by_phone':requested_by_phone,\
'action':action},\
RequestContext(request), action, 'maintenance_memo_'+oid+'.pdf')
|
gpl-2.0
| -7,706,189,923,218,938,000
| 36.477009
| 172
| 0.578865
| false
|
ricardog/raster-project
|
projections/r2py/rparser.py
|
1
|
5180
|
from pyparsing import *
import re
ParserElement.enablePackrat()
from .tree import Node, Operator
import pdb
def rparser():
expr = Forward()
lparen = Literal("(").suppress()
rparen = Literal(")").suppress()
double = Word(nums + ".").setParseAction(lambda t:float(t[0]))
integer = pyparsing_common.signed_integer
number = pyparsing_common.number
ident = Word(initChars = alphas + "_", bodyChars = alphanums + "_" + ".")
string = dblQuotedString
funccall = Group(ident + lparen + Group(Optional(delimitedList(expr))) +
rparen + Optional(integer)).setResultsName("funccall")
operand = number | string | funccall | ident
expop = Literal('^')
multop = oneOf('* /')
plusop = oneOf('+ -')
introp = oneOf('| :')
expr << infixNotation(operand,
[(expop, 2, opAssoc.RIGHT),
(introp, 2, opAssoc.LEFT),
(multop, 2, opAssoc.LEFT),
(plusop, 2, opAssoc.LEFT),]).setResultsName('expr')
return expr
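# Illustrative parse (assuming the grammar above; not from the original
# source): a function call keeps an optional trailing integer, so
#   PARSER.parseString('poly(log(cropland + 1), 3)2', parseAll=True)
# yields the nested form ['poly', [['log', [['cropland', '+', 1]]], 3], 2],
# which walk() inside parse() below folds into Node/Operator trees.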
PARSER = rparser()
def parse(text):
def walk(l):
## ['log', [['cropland', '+', 1]]]
## ['poly', [['log', [['cropland', '+', 1]]], 3], 3]
## [[['factor', ['unSub'], 21], ':', ['poly', [['log', [['cropland', '+', 1]]], 3], 3], ':', ['poly', [['log', [['hpd', '+', 1]]], 3], 2]]]
if type(l) in (int, float):
return l
if isinstance(l, str):
if l == 'Intercept' or l == '"Intercept"':
return 1
elif l[0] == '"' and l[-1] == '"':
return l[1:-1]
else:
return l
if len(l) == 1 and type(l[0]) in (int, str, float, ParseResults):
return walk(l[0])
if l[0] == 'factor':
assert len(l) == 3, "unexpected number of arguments to factor"
assert len(l[1]) == 1, "argument to factor is an expression"
assert type(l[2]) == int, "second argument to factor is not an int"
return Node(Operator('=='), (Node(Operator('in'),
(l[1][0], 'float32[:]')), l[2]))
if l[0] == 'poly':
assert len(l) in (2, 3), "unexpected number of arguments to poly"
assert isinstance(l[1][1], int), "degree argument to poly is not an int"
inner = walk(l[1][0])
degree = l[1][1]
if len(l) == 2:
pwr = 1
else:
assert type(l[2]) == int, "power argument to poly is not an int"
pwr = l[2]
return Node(Operator('sel'), (Node(Operator('poly'), (inner, degree)),
pwr))
if l[0] == 'log':
assert len(l) == 2, "unexpected number of arguments to log"
args = walk(l[1])
return Node(Operator('log'), [args])
if l[0] == 'scale':
assert len(l[1]) in (3, 5), "unexpected number of arguments to scale"
args = walk(l[1][0])
return Node(Operator('scale'), [args] + l[1][1:])
if l[0] == 'I':
assert len(l) == 2, "unexpected number of arguments to I"
args = walk(l[1])
return Node(Operator('I'), [args])
# Only used for testing
if l[0] in ('sin', 'tan'):
assert len(l) == 2, "unexpected number of arguments to %s" % l[0]
args = walk(l[1])
return Node(Operator(l[0]), [args])
if l[0] in ('max', 'min', 'pow'):
assert len(l) == 2, "unexpected number of arguments to %s" % l[0]
assert len(l[1]) == 2, "unexpected number of arguments to %s" % l[0]
left = walk(l[1][0])
right = walk(l[1][1])
return Node(Operator(l[0]), (left, right))
if l[0] == 'exp':
assert len(l) == 2, "unexpected number of arguments to exp"
args = walk(l[1])
return Node(Operator('exp'), [args])
if l[0] == 'clip':
assert len(l) == 2, "unexpected number of arguments to %s" % l[0]
assert len(l[1]) == 3, "unexpected number of arguments to %s" % l[0]
left = walk(l[1][0])
low = walk(l[1][1])
high = walk(l[1][2])
return Node(Operator(l[0]), (left, low, high))
if l[0] == 'inv_logit':
assert len(l) == 2, "unexpected number of arguments to inv_logit"
args = walk(l[1])
return Node(Operator('inv_logit'), [args])
## Only binary operators left
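    ## Debugging trap left for an unexpected single-element operand list;
    ## the asserts just below would fail immediately afterwards anyway.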
if len(l) == 1:
pdb.set_trace()
pass
assert len(l) % 2 == 1, "unexpected number of arguments for binary operator"
assert len(l) != 1, "unexpected number of arguments for binary operator"
## FIXME: this only works for associative operators. Need to either
## special-case division or include an attribute that specifies
## whether the op is associative.
left = walk(l.pop(0))
op = l.pop(0)
right = walk(l)
if type(right) != Node:
return Node(Operator(op), (left, right))
elif right.type.type == op:
return Node(Operator(op), (left, ) + right.args)
return Node(Operator(op), (left, right))
### FIXME: hack
if not isinstance(text, str):
text = str(text)
  new_text = re.sub(r'newrange = c\((\d), (\d+)\)', r'\1, \2', text)
new_text = new_text.replace('rescale(', 'scale(')
nodes = PARSER.parseString(new_text, parseAll=True)
tree = walk(nodes)
if isinstance(tree, (str, int, float)):
tree = Node(Operator('I'), [tree])
return tree
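# Minimal round-trip sketch (illustrative): parse("log(cropland + 1)") returns
# Node(Operator('log'), [Node(Operator('+'), ('cropland', 1))]), and a bare
# identifier such as parse("hpd") is wrapped as Node(Operator('I'), ['hpd']).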
|
apache-2.0
| 8,386,587,708,140,975,000
| 36.266187
| 143
| 0.547104
| false
|
rustychris/stomel
|
src/equilateral_paver.py
|
1
|
5386
|
# Make a grid with all equilateral triangles
# Currently only supports a rectangular domain, constant density,
# and either vertical or horizontal orientation
import trigrid
import numpy as np
class EquilateralPaver(trigrid.TriGrid):
def __init__(self,L,W,dens,orientation='horizontal',**kwargs):
super(EquilateralPaver,self).__init__(**kwargs)
self.L = L # x dimension
self.W = W # y dimension
self.dens = dens
self.orientation = orientation
if self.orientation == 'vertical':
self.L,self.W = self.W,self.L
self.create_grid()
if self.orientation == 'vertical':
self.L,self.W = self.W,self.L
self.points = self.points[:,::-1]
self.cells = self.cells[:,::-1]
self.renumber()
def create_grid(self):
# first, how many rows - here we assume orientation is horizontal,
# so the left and right sides are ragged.
cos30 = np.cos(30*np.pi/180.)
n_rows = self.W / (cos30 * self.dens)
# to make sure that the first and last points line up, we need an
# even number of rows of cells:
n_rows = 2 * int( (n_rows+1.0)/ 2 )
self.n_rows = n_rows
# Let the length L be fudge-able - as in we prefer perfectly equilateral triangles
# over a perfectly L-length grid. the width W can still be exact.
dens = self.W / (n_rows * cos30)
print "That will make n_rows=%d and adjusted edge length %f"%(n_rows,dens)
# this is the number of cells...
n_cols = int(self.L / dens)
self.n_cols = n_cols
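        # Worked example (illustrative): W=5000 with a requested edge of 500
        # gives n_rows = 5000/(500*cos30) ~= 11.55, rounded to the even value
        # 12, so the edge length is adjusted to 5000/(12*cos30) ~= 481.1.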
# Stack them up
for r in range(n_rows+1):
y = self.W * float(r)/n_rows
odd = r%2
x_off = odd * 0.5*dens
for c in range(n_cols+1):
x = x_off + dens*float(c)
n = self.add_node( np.array([x,y]) )
if c > 0:
if r==0:
self.add_edge(n-1,n,cright=-1,marker=1)
elif r==n_rows:
self.add_edge(n-1,n,cleft=-1,marker=1)
else:
self.add_edge(n,n-1)
# HERE: need to finish adding in the markers and closed boundary code.
if r>0:
cright=-2
cleft=-2
marker = 0
if odd:
if c==0:
cleft=-1
marker=1
elif c==n_cols:
cright=-1
marker=1
self.add_edge(n-(n_cols+1),n,marker=marker,cleft=cleft,cright=cright)
if c<n_cols:
self.add_edge(n,n-n_cols)
else:
if c==0:
cleft=-1
marker=1
elif c==n_cols:
cright=-1
marker=1
self.add_edge(n-(n_cols+1),n,cleft=cleft,cright=cright,marker=marker)
if c>0:
self.add_edge(n,n-(n_cols+1)-1)
class RotatedEquilateralPaver(EquilateralPaver):
""" Create a ragged-edged grid where the triangles are rotated the given
angle, in radians, CCW from parallel to the x-axis.
"""
def __init__(self,L,W,dens,angle=0,**kwargs):
self.final_L = L
self.final_W = W
# find the L and W needed to still be big enough after we've rotated -
# adding a bit of extra to avoid funny edge effects:
Lprime = L*np.cos(angle) + W*np.sin(angle) + 4*dens
Wprime = W*np.cos(angle) + L*np.sin(angle) + 4*dens
super(RotatedEquilateralPaver,self).__init__(L=Lprime, W=Wprime, dens=dens, **kwargs)
self.rotate_grid(angle)
self.trim_grid()
self.renumber()
def rotate_grid(self,angle):
""" rotates the oversized grid and translates to get the origin in the right place.
"""
# translate to get centered on the extra bit we asked for:
self.points[:] -= 2*self.dens
# rotate
self.points[:] = trigrid.rot(angle,self.points)
# and get our origin to a nice place
self.points[:,0] += self.final_L * np.sin(angle)**2
self.points[:,1] -= self.final_L * np.sin(angle)*np.cos(angle)
def trim_grid(self):
""" with the oversized grid created, and the origin correctly placed, remove points
and associated edges/cells that fall outside the actual footprint
"""
to_delete = (self.points[:,0] < 0) | (self.points[:,0]>self.final_L) | \
(self.points[:,1] < 0) | (self.points[:,1]>self.final_W)
for n in np.nonzero(to_delete)[0]:
self.delete_node(n,remove_edges=True)
if __name__ == '__main__':
    #ep = EquilateralPaver(10000.,5000.,500.,orientation='horizontal')
    #ep.plot()
    ep = RotatedEquilateralPaver(10000.,5000.,510.,angle=15*np.pi/180.)
    cla()  # assumes an interactive pylab session providing cla()/plot()
    ep.plot()
|
gpl-2.0
| -8,226,103,744,813,507,000
| 35.890411
| 93
| 0.492573
| false
|
dmanev/ArchExtractor
|
ArchExtractor/umlgen/Specific/STK/StkParser/StkJilFileCriteria/StkJilDataCriteria.py
|
1
|
2829
|
import re
import PortInterface.ProvidedPort
import PortInterface.RequiredPort
import PortInterface.SenderReceiverInterface
import Datatype.ArrayDataType
import PortInterface.DataElement
import StkParser.StkPortCriteria
import Components.IComponent
import Parser.IPortCriteria
class StkJilDataCriteria(StkParser.StkPortCriteria.StkPortCriteria):
"""STK JIL file data access criteria"""
def execute(self, inpTextContent, inoutIComponent):
## Bouml preserved body begin 0003536F
if re.search(r'Data\s+(\w+)', inpTextContent):
nextItemIsDataConfig = 0
dataName = None
for item in re.split(r'(Data\s+\w+)', inpTextContent):
nameMatchObj = re.search(r'Data\s+(\w+)', item)
if nameMatchObj:
nextItemIsDataConfig = 1
dataName = nameMatchObj.group(1)
elif nextItemIsDataConfig:
nextItemIsDataConfig = 0
dataProps = self.extractLevelOneBlock(item)
if dataProps:
dataType, hasArray = re.findall(r'\s*Type\s*=\s*([US]\d+)(Array|)',
dataProps, re.I)[0]
dtf = self.getDataTypeFactory()
DT = dtf.getDataType(dataType)
DE = PortInterface.DataElement.DataElement()
DE.setName(dataName)
if hasArray:
arrayProps = self.extractLevelOneBlock(dataProps)
arraySize = re.findall(r'\s*Size\s*=\s*(\d+)',
arrayProps, re.I)[0]
arrayDT = dtf.getArrayDataType('Arr'+arraySize+dataType)
arrayDT.itsDataType = DT
arrayDT.setMaxNumberOfElements(arraySize)
DE.itsDataType = arrayDT
else:
DE.itsDataType = DT
pif = self.getPortInterfaceFactory()
sendRecvIf = pif.getSenderReceiverIf(dataName, [DE])
provPortSetter = PortInterface.ProvidedPort.ProvidedPort(sendRecvIf)
provPortSetter.setName("set"+dataName)
provPortGetter = PortInterface.ProvidedPort.ProvidedPort(sendRecvIf)
provPortGetter.setName("get"+dataName)
inoutIComponent.addPort(provPortSetter)
inoutIComponent.addPort(provPortGetter)
return inoutIComponent
## Bouml preserved body end 0003536F
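    # Illustrative JIL input (assumed syntax, inferred from the regexes in
    # execute() above; not from the original source):
    #   Data Speed { Type = U16 }                   -> ports getSpeed/setSpeed
    #   Data Buffer { Type = U8Array { Size = 8 } } -> array data type Arr8U8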
def __init__(self):
super(StkJilDataCriteria, self).__init__()
pass
|
gpl-3.0
| 6,982,030,154,229,971,000
| 47.775862
| 92
| 0.537292
| false
|
sio2project/oioioi
|
oioioi/programs/migrations/0001_initial.py
|
1
|
12159
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
import oioioi.base.fields
import oioioi.contests.fields
import oioioi.filetracker.fields
import oioioi.problems.models
import oioioi.programs.models
class Migration(migrations.Migration):
dependencies = [
('contests', '0001_initial'),
('problems', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='CompilationReport',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', oioioi.base.fields.EnumField(max_length=64, choices=[(b'?', 'Pending'), (b'OK', 'OK'), (b'ERR', 'Error'), (b'CE', 'Compilation failed'), (b'RE', 'Runtime error'), (b'WA', 'Wrong answer'), (b'TLE', 'Time limit exceeded'), (b'MLE', 'Memory limit exceeded'), (b'OLE', 'Output limit exceeded'), (b'SE', 'System error'), (b'RV', 'Rule violation'), (b'INI_OK', 'Initial tests: OK'), (b'INI_ERR', 'Initial tests: failed'), (b'TESTRUN_OK', 'No error'), (b'MSE', 'Outgoing message size limit exceeded'), (b'MCE', 'Outgoing message count limit exceeded'), (b'IGN', 'Ignored')])),
('compiler_output', models.TextField()),
('submission_report', models.ForeignKey(to='contests.SubmissionReport', on_delete=models.CASCADE)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='GroupReport',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('group', models.CharField(max_length=30)),
('score', oioioi.contests.fields.ScoreField(max_length=255, null=True, blank=True)),
('max_score', oioioi.contests.fields.ScoreField(max_length=255, null=True, blank=True)),
('status', oioioi.base.fields.EnumField(max_length=64, choices=[(b'?', 'Pending'), (b'OK', 'OK'), (b'ERR', 'Error'), (b'CE', 'Compilation failed'), (b'RE', 'Runtime error'), (b'WA', 'Wrong answer'), (b'TLE', 'Time limit exceeded'), (b'MLE', 'Memory limit exceeded'), (b'OLE', 'Output limit exceeded'), (b'SE', 'System error'), (b'RV', 'Rule violation'), (b'INI_OK', 'Initial tests: OK'), (b'INI_ERR', 'Initial tests: failed'), (b'TESTRUN_OK', 'No error'), (b'MSE', 'Outgoing message size limit exceeded'), (b'MCE', 'Outgoing message count limit exceeded'), (b'IGN', 'Ignored')])),
('submission_report', models.ForeignKey(to='contests.SubmissionReport', on_delete=models.CASCADE)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='LibraryProblemData',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('libname', models.CharField(help_text='Filename library should be given during compilation', max_length=30, verbose_name='libname')),
],
options={
'verbose_name': 'library problem data',
'verbose_name_plural': 'library problem data',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ModelSolution',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=30, verbose_name='name')),
('source_file', oioioi.filetracker.fields.FileField(upload_to=oioioi.problems.models.make_problem_filename, verbose_name='source')),
('kind', oioioi.base.fields.EnumField(max_length=64, verbose_name='kind', choices=[(b'NORMAL', 'Model solution'), (b'SLOW', 'Slow solution'), (b'INCORRECT', 'Incorrect solution')])),
('order_key', models.IntegerField(default=0)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='OutputChecker',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('exe_file', oioioi.filetracker.fields.FileField(upload_to=oioioi.problems.models.make_problem_filename, null=True, verbose_name='checker executable file', blank=True)),
],
options={
'verbose_name': 'output checker',
'verbose_name_plural': 'output checkers',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ProgramSubmission',
fields=[
('submission_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='contests.Submission', on_delete=models.CASCADE)),
('source_file', oioioi.filetracker.fields.FileField(upload_to=oioioi.programs.models.make_submission_filename)),
('source_length', models.IntegerField(null=True, verbose_name='Source code length', blank=True)),
],
options={
},
bases=('contests.submission',),
),
migrations.CreateModel(
name='ModelProgramSubmission',
fields=[
('programsubmission_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='programs.ProgramSubmission', on_delete=models.CASCADE)),
],
options={
},
bases=('programs.programsubmission',),
),
migrations.CreateModel(
name='ReportActionsConfig',
fields=[
('problem', models.OneToOneField(related_name='report_actions_config', primary_key=True, serialize=False, to='problems.Problem', verbose_name='problem instance', on_delete=models.CASCADE)),
('can_user_generate_outs', models.BooleanField(default=False, verbose_name='Allow users to generate their outs on tests from visible reports.')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Test',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=30, verbose_name='name')),
('input_file', oioioi.filetracker.fields.FileField(upload_to=oioioi.problems.models.make_problem_filename, null=True, verbose_name='input', blank=True)),
('output_file', oioioi.filetracker.fields.FileField(upload_to=oioioi.problems.models.make_problem_filename, null=True, verbose_name='output/hint', blank=True)),
('kind', oioioi.base.fields.EnumField(max_length=64, verbose_name='kind', choices=[(b'NORMAL', 'Normal test'), (b'EXAMPLE', 'Example test')])),
('group', models.CharField(max_length=30, verbose_name='group')),
('time_limit', models.IntegerField(null=True, verbose_name='time limit (ms)', validators=[oioioi.programs.models.validate_time_limit])),
('memory_limit', models.IntegerField(null=True, verbose_name='memory limit (KiB)', blank=True)),
('max_score', models.IntegerField(default=10, verbose_name='score')),
('order', models.IntegerField(default=0)),
('problem', models.ForeignKey(to='problems.Problem', on_delete=models.CASCADE)),
],
options={
'ordering': ['order'],
'verbose_name': 'test',
'verbose_name_plural': 'tests',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TestReport',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', oioioi.base.fields.EnumField(max_length=64, choices=[(b'?', 'Pending'), (b'OK', 'OK'), (b'ERR', 'Error'), (b'CE', 'Compilation failed'), (b'RE', 'Runtime error'), (b'WA', 'Wrong answer'), (b'TLE', 'Time limit exceeded'), (b'MLE', 'Memory limit exceeded'), (b'OLE', 'Output limit exceeded'), (b'SE', 'System error'), (b'RV', 'Rule violation'), (b'INI_OK', 'Initial tests: OK'), (b'INI_ERR', 'Initial tests: failed'), (b'TESTRUN_OK', 'No error'), (b'MSE', 'Outgoing message size limit exceeded'), (b'MCE', 'Outgoing message count limit exceeded'), (b'IGN', 'Ignored')])),
('comment', models.CharField(max_length=255, blank=True)),
('score', oioioi.contests.fields.ScoreField(max_length=255, null=True, blank=True)),
('time_used', models.IntegerField(blank=True)),
('output_file', oioioi.filetracker.fields.FileField(null=True, upload_to=oioioi.programs.models.make_output_filename, blank=True)),
('test_name', models.CharField(max_length=30)),
('test_group', models.CharField(max_length=30)),
('test_time_limit', models.IntegerField(null=True, blank=True)),
('test_max_score', models.IntegerField(null=True, blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='UserOutGenStatus',
fields=[
('testreport', models.OneToOneField(related_name='userout_status', primary_key=True, serialize=False, to='programs.TestReport', on_delete=models.CASCADE)),
('status', oioioi.base.fields.EnumField(default=b'?', max_length=64, choices=[(b'?', 'Pending'), (b'OK', 'OK'), (b'ERR', 'Error'), (b'CE', 'Compilation failed'), (b'RE', 'Runtime error'), (b'WA', 'Wrong answer'), (b'TLE', 'Time limit exceeded'), (b'MLE', 'Memory limit exceeded'), (b'OLE', 'Output limit exceeded'), (b'SE', 'System error'), (b'RV', 'Rule violation'), (b'INI_OK', 'Initial tests: OK'), (b'INI_ERR', 'Initial tests: failed'), (b'TESTRUN_OK', 'No error'), (b'MSE', 'Outgoing message size limit exceeded'), (b'MCE', 'Outgoing message count limit exceeded'), (b'IGN', 'Ignored')])),
('visible_for_user', models.BooleanField(default=True)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='testreport',
name='submission_report',
field=models.ForeignKey(to='contests.SubmissionReport', on_delete=models.CASCADE),
preserve_default=True,
),
migrations.AddField(
model_name='testreport',
name='test',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='programs.Test', null=True),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='test',
unique_together=set([('problem', 'name')]),
),
migrations.AddField(
model_name='outputchecker',
name='problem',
field=models.OneToOneField(to='problems.Problem', on_delete=models.CASCADE),
preserve_default=True,
),
migrations.AddField(
model_name='modelsolution',
name='problem',
field=models.ForeignKey(to='problems.Problem', on_delete=models.CASCADE),
preserve_default=True,
),
migrations.AddField(
model_name='modelprogramsubmission',
name='model_solution',
field=models.ForeignKey(to='programs.ModelSolution', on_delete=models.CASCADE),
preserve_default=True,
),
migrations.AddField(
model_name='libraryproblemdata',
name='problem',
field=models.OneToOneField(to='problems.Problem', on_delete=models.CASCADE),
preserve_default=True,
),
]
|
gpl-3.0
| 440,767,678,400,920,770
| 58.024272
| 610
| 0.582531
| false
|
adamchainz/pymemcache
|
docs/conf.py
|
1
|
10901
|
# -*- coding: utf-8 -*-
#
# pymemcache documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 3 11:15:43 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import subprocess
sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = '1.4.5'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.ifconfig',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pymemcache'
copyright = u'2016, Pinterest'
author = u'Charles Gordon, Nicholas Charriere'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.3'
# The full version, including alpha/beta/rc tags.
release = u'1.3.6'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'pymemcache v1.3.6'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pymemcachedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pymemcache.tex', u'pymemcache Documentation',
u'Charles Gordon, Nicholas Charriere', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pymemcache', u'pymemcache Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pymemcache', u'pymemcache Documentation',
author, 'pymemcache', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# Automate building apidoc when building with readthedocs
# https://github.com/rtfd/readthedocs.org/issues/1139
def run_apidoc(_):
module = 'pymemcache'
cur_dir = os.path.abspath(os.path.dirname(__file__))
output_path = os.path.join(cur_dir, 'apidoc')
module_path = os.path.join(cur_dir, '..', module)
cmd_path = 'sphinx-apidoc'
if hasattr(sys, 'real_prefix'): # Check to see if we are in a virtualenv
# If we are, assemble the path manually
cmd_path = os.path.abspath(os.path.join(sys.prefix,
'bin', 'sphinx-apidoc'))
subprocess.check_call([cmd_path, '-e', '-o',
output_path, module_path, '--force'])
def setup(app):
app.connect('builder-inited', run_apidoc)
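# --- Editor's note (illustrative, not part of the original configuration) ---
# For a checkout at /path/to/pymemcache, the hook above effectively runs:
#
#   sphinx-apidoc -e -o docs/apidoc /path/to/pymemcache/pymemcache --force
#
# so the .rst API stubs are regenerated before every Read the Docs build.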
|
apache-2.0
| 6,717,243,652,813,745,000
| 28.542005
| 80
| 0.687185
| false
|
MuckRock/muckrock
|
muckrock/jurisdiction/filters.py
|
1
|
1513
|
"""
Filters for jurisdiction Views
"""
# Third Party
import django_filters
from dal import forward
# MuckRock
from muckrock.core import autocomplete
from muckrock.jurisdiction.models import Exemption, Jurisdiction
LEVELS = (("", "All"), ("f", "Federal"), ("s", "State"), ("l", "Local"))
class JurisdictionFilterSet(django_filters.FilterSet):
"""Allows jurisdiction to be filtered by level of government and state."""
level = django_filters.ChoiceFilter(choices=LEVELS)
parent = django_filters.ModelChoiceFilter(
label="State",
queryset=Jurisdiction.objects.filter(level="s", hidden=False),
widget=autocomplete.ModelSelect2(
url="jurisdiction-autocomplete",
attrs={"data-placeholder": "Search for state"},
forward=(forward.Const(["s"], "levels"),),
),
)
class Meta:
model = Jurisdiction
fields = ["level", "parent"]
class ExemptionFilterSet(django_filters.FilterSet):
"""Allows exemptions to be filtered by jurisdiction"""
jurisdiction = django_filters.ModelChoiceFilter(
label="Jurisdiction",
queryset=Jurisdiction.objects.filter(level__in=("s", "f"), hidden=False),
widget=autocomplete.ModelSelect2(
url="jurisdiction-autocomplete",
attrs={"data-placeholder": "Search for jurisdiction"},
forward=(forward.Const(["s", "f"], "levels"),),
),
)
class Meta:
model = Exemption
fields = ["jurisdiction"]
|
agpl-3.0
| 2,079,680,591,371,576,800
| 29.26
| 81
| 0.643093
| false
|
erzel/vitess
|
test/base_sharding.py
|
1
|
16103
|
#!/usr/bin/env python
#
# Copyright 2013, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""This module contains a base class and utility functions for sharding tests.
"""
import struct
import logging
from vtdb import keyrange_constants
import utils
keyspace_id_type = keyrange_constants.KIT_UINT64
pack_keyspace_id = struct.Struct('!Q').pack
# fixed_parent_id is used as fixed value for the "parent_id" column in all rows.
# All tests assume a multi-column primary key (parent_id, id) but only adjust
# the "id" column and use this fixed value for "parent_id".
# Since parent_id is fixed, not all test code has to include parent_id in a
# WHERE clause (at the price of a full table scan).
fixed_parent_id = 86
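# --- Editor's illustration (assumption, not taken from this repository) ---
# The comments above imply a test-table schema roughly like the following;
# the table name is hypothetical, the column names come from the test code:
#
#   CREATE TABLE resharding1 (
#     parent_id BIGINT NOT NULL,
#     id BIGINT NOT NULL,
#     msg VARCHAR(64),
#     custom_ksid_col BIGINT UNSIGNED,
#     PRIMARY KEY (parent_id, id)
#   )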
class BaseShardingTest(object):
"""This base class uses unittest.TestCase methods to check various things.
All sharding tests should inherit from this base class, and use the
methods as needed.
"""
# _insert_value inserts a value in the MySQL database along with the comments
# required for routing.
# NOTE: We assume that the column name for the keyspace_id is called
# 'custom_ksid_col'. This is a regression test which tests for
# places which previously hardcoded the column name to 'keyspace_id'.
def _insert_value(self, tablet_obj, table, mid, msg, keyspace_id):
k = utils.uint64_to_hex(keyspace_id)
tablet_obj.mquery(
'vt_test_keyspace',
['begin',
'insert into %s(parent_id, id, msg, custom_ksid_col) '
'values(%d, %d, "%s", 0x%x) /* vtgate:: keyspace_id:%s */ '
'/* id:%d */' %
(table, fixed_parent_id, mid, msg, keyspace_id, k, mid),
'commit'],
write=True)
def _get_value(self, tablet_obj, table, mid):
"""Returns the row(s) from the table for the provided id, using MySQL.
Args:
tablet_obj: the tablet to get data from.
table: the table to query.
mid: id field of the table.
Returns:
A tuple of results.
"""
return tablet_obj.mquery(
'vt_test_keyspace',
'select parent_id, id, msg, custom_ksid_col from %s '
'where parent_id=%d and id=%d' %
(table, fixed_parent_id, mid))
def _check_value(self, tablet_obj, table, mid, msg, keyspace_id,
should_be_here=True):
result = self._get_value(tablet_obj, table, mid)
if keyspace_id_type == keyrange_constants.KIT_BYTES:
fmt = '%s'
keyspace_id = pack_keyspace_id(keyspace_id)
else:
fmt = '%x'
if should_be_here:
self.assertEqual(result, ((fixed_parent_id, mid, msg, keyspace_id),),
('Bad row in tablet %s for id=%d, custom_ksid_col=' +
fmt + ', row=%s') % (tablet_obj.tablet_alias, mid,
keyspace_id, str(result)))
else:
self.assertEqual(
len(result), 0,
('Extra row in tablet %s for id=%d, custom_ksid_col=' +
fmt + ': %s') % (tablet_obj.tablet_alias, mid, keyspace_id,
str(result)))
def _is_value_present_and_correct(
self, tablet_obj, table, mid, msg, keyspace_id):
"""_is_value_present_and_correct tries to read a value.
Args:
tablet_obj: the tablet to get data from.
table: the table to query.
mid: the id of the row to query.
msg: expected value of the msg column in the row.
keyspace_id: expected value of the keyspace_id column in the row.
Returns:
True if the value (row) is there and correct.
False if the value is not there.
If the value is not correct, the method will call self.fail.
"""
result = self._get_value(tablet_obj, table, mid)
if not result:
return False
if keyspace_id_type == keyrange_constants.KIT_BYTES:
fmt = '%s'
keyspace_id = pack_keyspace_id(keyspace_id)
else:
fmt = '%x'
self.assertEqual(result, ((fixed_parent_id, mid, msg, keyspace_id),),
('Bad row in tablet %s for id=%d, '
'custom_ksid_col=' + fmt) % (
tablet_obj.tablet_alias, mid, keyspace_id))
return True
def check_binlog_player_vars(self, tablet_obj, source_shards,
seconds_behind_master_max=0):
"""Checks the binlog player variables are correctly exported.
Args:
tablet_obj: the tablet to check.
source_shards: the shards to check we are replicating from.
seconds_behind_master_max: if non-zero, the lag should be smaller than
this value.
"""
v = utils.get_vars(tablet_obj.port)
self.assertIn('BinlogPlayerMapSize', v)
self.assertEquals(v['BinlogPlayerMapSize'], len(source_shards))
self.assertIn('BinlogPlayerSecondsBehindMaster', v)
self.assertIn('BinlogPlayerSecondsBehindMasterMap', v)
self.assertIn('BinlogPlayerSourceShardNameMap', v)
shards = v['BinlogPlayerSourceShardNameMap'].values()
self.assertEquals(sorted(shards), sorted(source_shards))
self.assertIn('BinlogPlayerSourceTabletAliasMap', v)
for i in xrange(len(source_shards)):
self.assertIn('%d' % i, v['BinlogPlayerSourceTabletAliasMap'])
if seconds_behind_master_max != 0:
self.assertTrue(
v['BinlogPlayerSecondsBehindMaster'] <
seconds_behind_master_max,
'BinlogPlayerSecondsBehindMaster is too high: %d > %d' % (
v['BinlogPlayerSecondsBehindMaster'],
seconds_behind_master_max))
for i in xrange(len(source_shards)):
self.assertTrue(
v['BinlogPlayerSecondsBehindMasterMap']['%d' % i] <
seconds_behind_master_max,
'BinlogPlayerSecondsBehindMasterMap is too high: %d > %d' % (
v['BinlogPlayerSecondsBehindMasterMap']['%d' % i],
seconds_behind_master_max))
def check_binlog_server_vars(self, tablet_obj, horizontal=True,
min_statements=0, min_transactions=0):
"""Checks the binlog server variables are correctly exported.
Args:
tablet_obj: the tablet to check.
horizontal: true if horizontal split, false for vertical split.
min_statements: check the statement count is greater or equal to this.
min_transactions: check the transaction count is greater or equal to this.
"""
v = utils.get_vars(tablet_obj.port)
if horizontal:
skey = 'UpdateStreamKeyRangeStatements'
tkey = 'UpdateStreamKeyRangeTransactions'
else:
skey = 'UpdateStreamTablesStatements'
tkey = 'UpdateStreamTablesTransactions'
self.assertIn(skey, v)
self.assertIn(tkey, v)
if min_statements > 0:
self.assertTrue(v[skey] >= min_statements,
'only got %d < %d statements' % (v[skey], min_statements))
if min_transactions > 0:
self.assertTrue(v[tkey] >= min_transactions,
'only got %d < %d transactions' % (v[tkey],
min_transactions))
def check_stream_health_equals_binlog_player_vars(self, tablet_obj, count):
"""Checks the variables exported by streaming health check match vars.
Args:
tablet_obj: the tablet to check.
count: number of binlog players to expect.
"""
blp_stats = utils.get_vars(tablet_obj.port)
self.assertEqual(blp_stats['BinlogPlayerMapSize'], count)
# Enforce health check because it's not running by default as
# tablets may not be started with it, or may not run it in time.
utils.run_vtctl(['RunHealthCheck', tablet_obj.tablet_alias])
stream_health = utils.run_vtctl_json(['VtTabletStreamHealth',
'-count', '1',
tablet_obj.tablet_alias])
logging.debug('Got health: %s', str(stream_health))
self.assertNotIn('serving', stream_health)
self.assertIn('realtime_stats', stream_health)
self.assertNotIn('health_error', stream_health['realtime_stats'])
self.assertIn('binlog_players_count', stream_health['realtime_stats'])
self.assertEqual(blp_stats['BinlogPlayerMapSize'],
stream_health['realtime_stats']['binlog_players_count'])
self.assertEqual(blp_stats['BinlogPlayerSecondsBehindMaster'],
stream_health['realtime_stats'].get(
'seconds_behind_master_filtered_replication', 0))
def check_destination_master(self, tablet_obj, source_shards):
"""Performs multiple checks on a destination master.
Combines the following:
- wait_for_binlog_player_count
- check_binlog_player_vars
- check_stream_health_equals_binlog_player_vars
Args:
tablet_obj: the tablet to check.
source_shards: the shards to check we are replicating from.
"""
tablet_obj.wait_for_binlog_player_count(len(source_shards))
self.check_binlog_player_vars(tablet_obj, source_shards)
self.check_stream_health_equals_binlog_player_vars(tablet_obj,
len(source_shards))
def check_running_binlog_player(self, tablet_obj, query, transaction,
extra_text=None):
"""Checks binlog player is running and showing in status.
Args:
tablet_obj: the tablet to check.
query: number of expected queries.
transaction: number of expected transactions.
extra_text: if present, look for it in status too.
"""
status = tablet_obj.get_status()
self.assertIn('Binlog player state: Running', status)
self.assertIn(
'<td><b>All</b>: %d<br><b>Query</b>: %d<br>'
'<b>Transaction</b>: %d<br></td>' % (query+transaction, query,
transaction), status)
self.assertIn('</html>', status)
if extra_text:
self.assertIn(extra_text, status)
def check_no_binlog_player(self, tablet_obj):
"""Checks no binlog player is running.
Also checks the tablet is not showing any binlog player in its status page.
Args:
tablet_obj: the tablet to check.
"""
tablet_obj.wait_for_binlog_player_count(0)
status = tablet_obj.get_status()
self.assertIn('No binlog player is running', status)
self.assertIn('</html>', status)
def check_throttler_service(self, throttler_server, names, rate):
"""Checks that the throttler responds to RPC requests.
We assume it was enabled by SplitClone with the flag --max_tps 9999.
Args:
throttler_server: vtworker or vttablet RPC endpoint. Format: host:port
names: Names of the throttlers e.g. BinlogPlayer/0 or <keyspace>/<shard>.
rate: Expected initial rate the throttler was started with.
"""
self.check_throttler_service_maxrates(throttler_server, names, rate)
self.check_throttler_service_configuration(throttler_server, names)
def check_throttler_service_maxrates(self, throttler_server, names, rate):
"""Checks the vtctl ThrottlerMaxRates and ThrottlerSetRate commands."""
# Avoid flakes by waiting for all throttlers. (Necessary because filtered
# replication on vttablet will register the throttler asynchronously.)
timeout_s = 10
while True:
stdout, _ = utils.run_vtctl(['ThrottlerMaxRates', '--server',
throttler_server], auto_log=True,
trap_output=True)
if '%d active throttler(s)' % len(names) in stdout:
break
timeout_s = utils.wait_step('all throttlers registered', timeout_s)
for name in names:
self.assertIn('| %s | %d |' % (name, rate), stdout)
self.assertIn('%d active throttler(s)' % len(names), stdout)
# Check that it's possible to change the max rate on the throttler.
new_rate = 'unlimited'
stdout, _ = utils.run_vtctl(['ThrottlerSetMaxRate', '--server',
throttler_server, new_rate],
auto_log=True, trap_output=True)
self.assertIn('%d active throttler(s)' % len(names), stdout)
stdout, _ = utils.run_vtctl(['ThrottlerMaxRates', '--server',
throttler_server], auto_log=True,
trap_output=True)
for name in names:
self.assertIn('| %s | %s |' % (name, new_rate), stdout)
self.assertIn('%d active throttler(s)' % len(names), stdout)
def check_throttler_service_configuration(self, throttler_server, names):
"""Checks the vtctl (Get|Update|Reset)ThrottlerConfiguration commands."""
# Verify updating the throttler configuration.
stdout, _ = utils.run_vtctl(['UpdateThrottlerConfiguration',
'--server', throttler_server,
'--copy_zero_values',
'target_replication_lag_sec:12345 '
'max_replication_lag_sec:65789 '
'initial_rate:3 '
'max_increase:0.4 '
'emergency_decrease:0.5 '
'min_duration_between_changes_sec:6 '
'max_duration_between_increases_sec:7 '
'ignore_n_slowest_replicas:0 '
'age_bad_rate_after_sec:9 '
'bad_rate_increase:0.10 '],
auto_log=True, trap_output=True)
self.assertIn('%d active throttler(s)' % len(names), stdout)
# Check the updated configuration.
stdout, _ = utils.run_vtctl(['GetThrottlerConfiguration',
'--server', throttler_server],
auto_log=True, trap_output=True)
for name in names:
# The max should be set and have a non-zero value.
      # We test only the first field 'target_replication_lag_sec'.
self.assertIn('| %s | target_replication_lag_sec:12345 ' % (name), stdout)
# protobuf omits fields with a zero value in the text output.
self.assertNotIn('ignore_n_slowest_replicas', stdout)
self.assertIn('%d active throttler(s)' % len(names), stdout)
# Reset clears our configuration values.
stdout, _ = utils.run_vtctl(['ResetThrottlerConfiguration',
'--server', throttler_server],
auto_log=True, trap_output=True)
self.assertIn('%d active throttler(s)' % len(names), stdout)
# Check that the reset configuration no longer has our values.
stdout, _ = utils.run_vtctl(['GetThrottlerConfiguration',
'--server', throttler_server],
auto_log=True, trap_output=True)
for name in names:
# Target lag value should no longer be 12345 and be back to the default.
self.assertNotIn('target_replication_lag_sec:12345', stdout)
self.assertIn('%d active throttler(s)' % len(names), stdout)
def verify_reconciliation_counters(self, worker_port, online_or_offline,
table, inserts, updates, deletes, equal):
"""Checks that the reconciliation Counters have the expected values."""
worker_vars = utils.get_vars(worker_port)
i = worker_vars['Worker' + online_or_offline + 'InsertsCounters']
if inserts == 0:
self.assertNotIn(table, i)
else:
self.assertEqual(i[table], inserts)
u = worker_vars['Worker' + online_or_offline + 'UpdatesCounters']
if updates == 0:
self.assertNotIn(table, u)
else:
self.assertEqual(u[table], updates)
d = worker_vars['Worker' + online_or_offline + 'DeletesCounters']
if deletes == 0:
self.assertNotIn(table, d)
else:
self.assertEqual(d[table], deletes)
e = worker_vars['Worker' + online_or_offline + 'EqualRowsCounters']
if equal == 0:
self.assertNotIn(table, e)
else:
self.assertEqual(e[table], equal)
|
bsd-3-clause
| 6,068,879,863,299,270,000
| 41.827128
| 80
| 0.611501
| false
|
Phixyn/ZoeyBot
|
modules/utils.py
|
1
|
1119
|
"""
utils.py - Utilities module
ZoeyBot - Python IRC Bot
Copyright 2012-2014 (c) Phixyn
This file is part of ZoeyBot.
ZoeyBot is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ZoeyBot is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ZoeyBot. If not, see <http://www.gnu.org/licenses/>.
"""
import os, subprocess
from datetime import datetime as dt
def timestamp():
    """ Return the current local time formatted as "(HH:MM:SS)". """
    return dt.strftime(dt.now(), "(%H:%M:%S)")
def clear_screen():
    """ Clear the terminal screen on Windows (nt) and Unix-like (posix)
    systems, falling back to an ANSI escape sequence elsewhere. """
    # TODO try...except block here maybe?
    if (os.name == 'nt'):
        subprocess.call('cls', shell=True)
    elif (os.name == 'posix'):
        subprocess.call('clear')
    else:
        print(chr(27) + "[2J")
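# --- Editor's usage sketch (not part of the original module) ---
# >>> timestamp()      # e.g. "(14:03:27)"
# >>> clear_screen()   # clears the terminal on nt/posix, ANSI elsewhere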
|
gpl-3.0
| 1,913,154,595,719,887,000
| 25.023256
| 68
| 0.726542
| false
|
dahiro/shotgun-replica
|
shotgun_replica/python/tests/shotgun_replica_tests/utilities/test_entityNaming.py
|
1
|
3045
|
# -*- coding: utf-8 -*-
'''
Created on 21.05.2012
@author: bach
'''
import unittest
from shotgun_replica.utilities import entityNaming
class Test( unittest.TestCase ):
def setUp( self ):
pass
def tearDown( self ):
pass
def testUnderScoreReplacement( self ):
testPairs = [
( "shoot_days", "ShootDays", True ),
( "_shoot_days", "ShootDays", False ),
]
for ( underscored, capitalized, needsInverse ) in testPairs:
replacedCapitalized = entityNaming.replaceUnderscoresWithCapitals( underscored )
self.assertEqual( replacedCapitalized, capitalized )
if needsInverse:
replacedUnderscored = entityNaming.replaceCapitalsWithUnderscores( capitalized )
self.assertEqual( replacedUnderscored, underscored )
def testConnectionEntityName( self ):
testPairs = [
( "Asset", "assets", "AssetAssetConnection" ),
( "Asset", "sg_linked_assets", "Asset_sg_linked_assets_Connection" ),
( "Asset", "sg_linked_shots", "Asset_sg_linked_shots_Connection" ),
( "Asset", "shoot_days", "AssetShootDayConnection" )
]
for ( entityType, attrName, connectionEntityName ) in testPairs:
connEntityNameTesting = entityNaming.getConnectionEntityName( entityType, attrName )
self.assertEqual( connEntityNameTesting, connectionEntityName )
def testConnectionAttrNames( self ):
testPairs = [
( "Asset", "Asset", "AssetAssetConnection", "asset", "parent" ),
( "Asset", "Shot", "AssetShotConnection", "asset", "shot" ),
( "CustomEntity07", "CustomEntity05", "CustomEntity07_sg_sources_Connection", "custom_entity07", "custom_entity05" ),
( "Revision", "Revision", "RevisionRevisionConnection", "source_revision", "dest_revision"),
]
for ( baseEntityType, linkedEntityType, connEntityName, srcAttrName, destAttrName ) in testPairs:
( srcAttrNameTest, destAttrNameTest ) = entityNaming.getConnectionEntityAttrName( baseEntityType,
linkedEntityType,
connEntityName )
self.assertEqual( srcAttrNameTest, srcAttrName )
self.assertEqual( destAttrNameTest, destAttrName )
def testRetAttributeNames( self ):
testPairs = [
( "Asset", "sg_linked_assets", "asset_sg_linked_assets_assets" ),
( "CustomEntity02", "sg_sink_tasks", "custom_entity02_sg_sink_tasks_custom_entity02s" ),
]
for ( entityType, attrName, retAttrName ) in testPairs:
retAttrNameTest = entityNaming.getReverseAttributeName( entityType, attrName )
self.assertEqual( retAttrNameTest, retAttrName )
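# --- Editor's sketch (assumption): a minimal reference implementation of
# the two conversions exercised above; the real entityNaming module may
# handle more edge cases.
import re as _re
def _underscores_to_capitals(underscored):
    # "shoot_days" -> "ShootDays"
    return "".join(part.capitalize() for part in underscored.split("_"))
def _capitals_to_underscores(capitalized):
    # "ShootDays" -> "shoot_days"
    return _re.sub(r"(?<!^)([A-Z])", r"_\1", capitalized).lower()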
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
bsd-3-clause
| 7,776,669,194,021,604,000
| 41.887324
| 129
| 0.598686
| false
|
jastination/software-engineering-excercise-repository
|
seer_python/interviewstreet/LuckyNumber.py
|
1
|
1265
|
'''
Created on May 25, 2012
@author: jjhuang
'''
def getAllPrimeNumber(N):
    """Return all primes in [2, N] using trial division."""
    ret = []
    for n in range(2, N + 1):
        isPrime = True
        for i in range(2, n//2 + 1):
            if(n % i == 0):
                isPrime = False
                break
        if(isPrime):
            ret.append(n)
    return ret
def buildCache(N):
    """Precompute, for every x in [0, N), the digit sum (table1) and the
    sum of squared digits (table2)."""
    table1 = []
    table2 = []
    for x in range(N):
        a = 0
        b = 0
        while(x > 0):
            m = x % 10
            a += m
            b += m * m
            x //= 10
        table1.append(a)
        table2.append(b)
    return table1, table2
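# Editor's illustration (not in the original): with N = 10000 the tables
# answer 4-digit chunks at once, e.g. table1[1234] == 10 (1+2+3+4) and
# table2[1234] == 30 (1+4+9+16).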
if __name__ == '__main__':
    #T = int(input())
    primeTable = set(getAllPrimeNumber(1500))
    # Chunk size for table lookups; processes numbers 4 digits at a time.
    MOD = 10000
    table1, table2 = buildCache(MOD)
    # for t in range(T):
    #     A, B = [int(x) for x in input().split(" ")]
    A, B = 1, 1000000000
    # Brute force over [A, B] using the cached tables; correct but far too
    # slow for B = 10**9, so it stays disabled (the full range needs a
    # digit DP on top of the same tables):
    # cnt = 0
    # n = A
    # while n <= B:
    #     a = 0
    #     b = 0
    #     nn = n
    #     while nn > 0:
    #         d = nn % MOD
    #         a += table1[d]
    #         b += table2[d]
    #         nn //= MOD
    #     if a in primeTable and b in primeTable:
    #         cnt += 1
    #     n += 1
    # print(cnt)
|
mit
| 6,946,488,043,172,524,000
| 16.071429
| 49
| 0.355731
| false
|
plaes/numpy
|
numpy/core/code_generators/genapi.py
|
1
|
15403
|
"""
Get API information encoded in C files.
See ``find_function`` for how functions should be formatted, and
``read_order`` for how the order of the functions should be
specified.
"""
import sys, os, re
try:
import hashlib
md5new = hashlib.md5
except ImportError:
import md5
md5new = md5.new
if sys.version_info[:2] < (2, 6):
from sets import Set as set
import textwrap
from os.path import join
__docformat__ = 'restructuredtext'
# The files under src/ that are scanned for API functions
API_FILES = [join('multiarray', 'methods.c'),
join('multiarray', 'arrayobject.c'),
join('multiarray', 'flagsobject.c'),
join('multiarray', 'descriptor.c'),
join('multiarray', 'iterators.c'),
join('multiarray', 'getset.c'),
join('multiarray', 'number.c'),
join('multiarray', 'sequence.c'),
join('multiarray', 'ctors.c'),
join('multiarray', 'convert.c'),
join('multiarray', 'shape.c'),
join('multiarray', 'item_selection.c'),
join('multiarray', 'convert_datatype.c'),
join('multiarray', 'arraytypes.c.src'),
join('multiarray', 'multiarraymodule.c'),
join('multiarray', 'scalartypes.c.src'),
join('multiarray', 'scalarapi.c'),
join('multiarray', 'calculation.c'),
join('multiarray', 'usertypes.c'),
join('multiarray', 'refcount.c'),
join('multiarray', 'conversion_utils.c'),
join('multiarray', 'buffer.c'),
join('multiarray', 'datetime.c'),
join('umath', 'ufunc_object.c'),
join('umath', 'loops.c.src'),
]
THIS_DIR = os.path.dirname(__file__)
API_FILES = [os.path.join(THIS_DIR, '..', 'src', a) for a in API_FILES]
def file_in_this_dir(filename):
return os.path.join(THIS_DIR, filename)
def remove_whitespace(s):
return ''.join(s.split())
def _repl(str):
return str.replace('intp', 'npy_intp').replace('Bool','npy_bool')
class Function(object):
def __init__(self, name, return_type, args, doc=''):
self.name = name
self.return_type = _repl(return_type)
self.args = args
self.doc = doc
def _format_arg(self, (typename, name)):
if typename.endswith('*'):
return typename + name
else:
return typename + ' ' + name
def __str__(self):
argstr = ', '.join([self._format_arg(a) for a in self.args])
if self.doc:
doccomment = '/* %s */\n' % self.doc
else:
doccomment = ''
return '%s%s %s(%s)' % (doccomment, self.return_type, self.name, argstr)
def to_ReST(self):
lines = ['::', '', ' ' + self.return_type]
argstr = ',\000'.join([self._format_arg(a) for a in self.args])
name = ' %s' % (self.name,)
s = textwrap.wrap('(%s)' % (argstr,), width=72,
initial_indent=name,
subsequent_indent=' ' * (len(name)+1),
break_long_words=False)
for l in s:
lines.append(l.replace('\000', ' ').rstrip())
lines.append('')
if self.doc:
lines.append(textwrap.dedent(self.doc))
return '\n'.join(lines)
def api_hash(self):
m = md5new()
m.update(remove_whitespace(self.return_type))
m.update('\000')
m.update(self.name)
m.update('\000')
for typename, name in self.args:
m.update(remove_whitespace(typename))
m.update('\000')
return m.hexdigest()[:8]
class ParseError(Exception):
def __init__(self, filename, lineno, msg):
self.filename = filename
self.lineno = lineno
self.msg = msg
def __str__(self):
return '%s:%s:%s' % (self.filename, self.lineno, self.msg)
def skip_brackets(s, lbrac, rbrac):
count = 0
for i, c in enumerate(s):
if c == lbrac:
count += 1
elif c == rbrac:
count -= 1
if count == 0:
return i
raise ValueError("no match '%s' for '%s' (%r)" % (lbrac, rbrac, s))
def split_arguments(argstr):
arguments = []
bracket_counts = {'(': 0, '[': 0}
current_argument = []
state = 0
i = 0
def finish_arg():
if current_argument:
argstr = ''.join(current_argument).strip()
m = re.match(r'(.*(\s+|[*]))(\w+)$', argstr)
if m:
typename = m.group(1).strip()
name = m.group(3)
else:
typename = argstr
name = ''
arguments.append((typename, name))
del current_argument[:]
while i < len(argstr):
c = argstr[i]
if c == ',':
finish_arg()
elif c == '(':
p = skip_brackets(argstr[i:], '(', ')')
current_argument += argstr[i:i+p]
i += p-1
else:
current_argument += c
i += 1
finish_arg()
return arguments
def find_functions(filename, tag='API'):
"""
Scan the file, looking for tagged functions.
Assuming ``tag=='API'``, a tagged function looks like::
/*API*/
static returntype*
function_name(argtype1 arg1, argtype2 arg2)
{
}
where the return type must be on a separate line, the function
name must start the line, and the opening ``{`` must start the line.
An optional documentation comment in ReST format may follow the tag,
as in::
/*API
This function does foo...
*/
"""
fo = open(filename, 'r')
functions = []
return_type = None
function_name = None
function_args = []
doclist = []
SCANNING, STATE_DOC, STATE_RETTYPE, STATE_NAME, STATE_ARGS = range(5)
state = SCANNING
tagcomment = '/*' + tag
for lineno, line in enumerate(fo):
try:
line = line.strip()
if state == SCANNING:
if line.startswith(tagcomment):
if line.endswith('*/'):
state = STATE_RETTYPE
else:
state = STATE_DOC
elif state == STATE_DOC:
if line.startswith('*/'):
state = STATE_RETTYPE
else:
line = line.lstrip(' *')
doclist.append(line)
elif state == STATE_RETTYPE:
# first line of declaration with return type
m = re.match(r'NPY_NO_EXPORT\s+(.*)$', line)
if m:
line = m.group(1)
return_type = line
state = STATE_NAME
elif state == STATE_NAME:
# second line, with function name
m = re.match(r'(\w+)\s*\(', line)
if m:
function_name = m.group(1)
else:
raise ParseError(filename, lineno+1,
'could not find function name')
function_args.append(line[m.end():])
state = STATE_ARGS
elif state == STATE_ARGS:
if line.startswith('{'):
# finished
fargs_str = ' '.join(function_args).rstrip(' )')
fargs = split_arguments(fargs_str)
f = Function(function_name, return_type, fargs,
'\n'.join(doclist))
functions.append(f)
return_type = None
function_name = None
function_args = []
doclist = []
state = SCANNING
else:
function_args.append(line)
except:
print filename, lineno+1
raise
fo.close()
return functions
def should_rebuild(targets, source_files):
from distutils.dep_util import newer_group
for t in targets:
if not os.path.exists(t):
return True
sources = API_FILES + list(source_files) + [__file__]
if newer_group(sources, targets[0], missing='newer'):
return True
return False
# Those *Api classes instances know how to output strings for the generated code
class TypeApi:
def __init__(self, name, index, ptr_cast, api_name):
self.index = index
self.name = name
self.ptr_cast = ptr_cast
self.api_name = api_name
def define_from_array_api_string(self):
return "#define %s (*(%s *)%s[%d])" % (self.name,
self.ptr_cast,
self.api_name,
self.index)
def array_api_define(self):
return " (void *) &%s" % self.name
def internal_define(self):
astr = """\
#ifdef NPY_ENABLE_SEPARATE_COMPILATION
extern NPY_NO_EXPORT PyTypeObject %(type)s;
#else
NPY_NO_EXPORT PyTypeObject %(type)s;
#endif
""" % {'type': self.name}
return astr
class GlobalVarApi:
def __init__(self, name, index, type, api_name):
self.name = name
self.index = index
self.type = type
self.api_name = api_name
def define_from_array_api_string(self):
return "#define %s (*(%s *)%s[%d])" % (self.name,
self.type,
self.api_name,
self.index)
def array_api_define(self):
return " (%s *) &%s" % (self.type, self.name)
def internal_define(self):
astr = """\
#ifdef NPY_ENABLE_SEPARATE_COMPILATION
extern NPY_NO_EXPORT %(type)s %(name)s;
#else
NPY_NO_EXPORT %(type)s %(name)s;
#endif
""" % {'type': self.type, 'name': self.name}
return astr
# Dummy to be able to consistently use *Api instances for all items in the
# array api
class BoolValuesApi:
def __init__(self, name, index, api_name):
self.name = name
self.index = index
self.type = 'PyBoolScalarObject'
self.api_name = api_name
def define_from_array_api_string(self):
return "#define %s ((%s *)%s[%d])" % (self.name,
self.type,
self.api_name,
self.index)
def array_api_define(self):
return " (void *) &%s" % self.name
def internal_define(self):
astr = """\
#ifdef NPY_ENABLE_SEPARATE_COMPILATION
extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
#else
NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
#endif
"""
return astr
class FunctionApi:
def __init__(self, name, index, return_type, args, api_name):
self.name = name
self.index = index
self.return_type = return_type
self.args = args
self.api_name = api_name
def _argtypes_string(self):
if not self.args:
return 'void'
argstr = ', '.join([_repl(a[0]) for a in self.args])
return argstr
def define_from_array_api_string(self):
define = """\
#define %s \\\n (*(%s (*)(%s)) \\
%s[%d])""" % (self.name,
self.return_type,
self._argtypes_string(),
self.api_name,
self.index)
return define
def array_api_define(self):
return " (void *) %s" % self.name
def internal_define(self):
astr = """\
NPY_NO_EXPORT %s %s \\\n (%s);""" % (self.return_type,
self.name,
self._argtypes_string())
return astr
def order_dict(d):
"""Order dict by its values."""
o = d.items()
def cmp(x, y):
return x[1] - y[1]
return sorted(o, cmp=cmp)
def merge_api_dicts(dicts):
ret = {}
for d in dicts:
for k, v in d.items():
ret[k] = v
return ret
def check_api_dict(d):
"""Check that an api dict is valid (does not use the same index twice)."""
# We have if a same index is used twice: we 'revert' the dict so that index
# become keys. If the length is different, it means one index has been used
# at least twice
revert_dict = dict([(v, k) for k, v in d.items()])
if not len(revert_dict) == len(d):
# We compute a dict index -> list of associated items
doubled = {}
for name, index in d.items():
try:
doubled[index].append(name)
except KeyError:
doubled[index] = [name]
msg = """\
Same index has been used twice in api definition: %s
""" % ['index %d -> %s' % (index, names) for index, names in doubled.items() \
if len(names) != 1]
raise ValueError(msg)
# No 'hole' in the indexes may be allowed, and it must starts at 0
indexes = set(d.values())
expected = set(range(len(indexes)))
if not indexes == expected:
diff = expected.symmetric_difference(indexes)
msg = "There are some holes in the API indexing: " \
"(symmetric diff is %s)" % diff
raise ValueError(msg)
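# --- Editor's illustration (the function names below are examples only) ---
# check_api_dict expects a name -> index mapping whose indexes form the
# contiguous range 0..N-1:
#
#   check_api_dict({'PyArray_FromAny': 0, 'PyArray_CopyInto': 1})  # passes
#   check_api_dict({'PyArray_FromAny': 0, 'PyArray_CopyInto': 2})  # raises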
def get_api_functions(tagname, api_dict):
"""Parse source files to get functions tagged by the given tag."""
functions = []
for f in API_FILES:
functions.extend(find_functions(f, tagname))
dfunctions = []
for func in functions:
o = api_dict[func.name]
dfunctions.append( (o, func) )
dfunctions.sort()
return [a[1] for a in dfunctions]
def fullapi_hash(api_dicts):
"""Given a list of api dicts defining the numpy C API, compute a checksum
of the list of items in the API (as a string)."""
a = []
for d in api_dicts:
def sorted_by_values(d):
"""Sort a dictionary by its values. Assume the dictionary items is of
the form func_name -> order"""
return sorted(d.items(), key=lambda (x, y): (y, x))
for name, index in sorted_by_values(d):
a.extend(name)
a.extend(str(index))
return md5new(''.join(a)).hexdigest()
# To parse strings like 'hex = checksum' where hex is e.g. 0x1234567F and
# checksum a 128 bits md5 checksum (hex format as well)
VERRE = re.compile(r'(^0x[\da-f]{8})\s*=\s*([\da-f]{32})')
def get_versions_hash():
d = []
file = os.path.join(os.path.dirname(__file__), 'cversions.txt')
fid = open(file, 'r')
try:
for line in fid.readlines():
m = VERRE.match(line)
if m:
d.append((int(m.group(1), 16), m.group(2)))
finally:
fid.close()
return dict(d)
def main():
    tagname = sys.argv[1]
    order_file = sys.argv[2]
    # NOTE: get_api_functions() expects a name -> index dict as its second
    # argument (see above), so passing the order file path here appears to
    # be a leftover from an older interface.
    functions = get_api_functions(tagname, order_file)
m = md5new(tagname)
for func in functions:
print func
ah = func.api_hash()
m.update(ah)
print hex(int(ah,16))
print hex(int(m.hexdigest()[:8],16))
if __name__ == '__main__':
main()
|
bsd-3-clause
| 8,382,274,081,688,541,000
| 31.427368
| 81
| 0.510355
| false
|
yazdan/AmirAccounting
|
amir/printreport.py
|
1
|
35708
|
import pygtk
import gtk
import pango
import cairo
import pangocairo
import logging
import math
import utility
from amirconfig import config
class PrintReport:
def __init__(self, content, cols_width, heading=None):
# self.lines_per_page = 24
self.cell_margin = 4
self.line = 2 #the thinest possible width of lines.
self.row_height = 2 * (config.contentfont + self.cell_margin)
self.header_height = 0
self.heading_height = 35
self.operation = gtk.PrintOperation()
settings = gtk.PrintSettings()
paper_size = gtk.paper_size_new_from_ppd(config.paper_ppd, config.paper_name, config.paper_width, config.paper_height)
self.page_setup = gtk.PageSetup()
self.page_setup.set_paper_size(paper_size)
self.page_setup.set_orientation(config.paper_orientation)
# self.page_setup = gtk.print_run_page_setup_dialog(None, self.page_setup, settings)
self.page_setup.set_top_margin(config.topmargin, gtk.UNIT_POINTS)
self.page_setup.set_bottom_margin(config.botmargin, gtk.UNIT_POINTS)
self.page_setup.set_right_margin(config.rightmargin, gtk.UNIT_POINTS)
self.page_setup.set_left_margin(config.leftmargin, gtk.UNIT_POINTS)
self.operation.set_default_page_setup(self.page_setup)
self.operation.set_unit(gtk.UNIT_POINTS)
self.content = content
tablewidth = self.page_setup.get_page_width(gtk.UNIT_POINTS)
tablewidth -= (len(cols_width) * (self.line + self.cell_margin)) + self.line + (config.rightmargin + config.leftmargin)
self.cols_width = []
for percent in cols_width:
self.cols_width.append(math.floor((percent * tablewidth) / 100))
# self.cols_width = cols_width
self.heading = heading
self.operation.connect("begin_print", self.beginPrint)
self.operation.connect("draw-page", self.printPage)
self.type = 0
self.title = ""
self.fields = {}
##self.content = data
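    # --- Editor's usage sketch (assumption, not part of the original) ---
    # Calling code is expected to do roughly:
    #
    #   report = PrintReport(rows, [10, 15, 35, 10, 15, 15], heading=cols)
    #   report.drawfunction = "drawDailyNotebook"
    #   report.setHeader(_("Daily Notebook"), {_("Fiscal year"): "1391"})
    #   report.doPrintJob(gtk.PRINT_OPERATION_ACTION_PRINT_DIALOG)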
def setHeader (self, title, fields):
self.title = title
self.fields = fields
def beginPrint(self, operation, context):
tableheight = self.page_setup.get_page_height(gtk.UNIT_POINTS)
name_lineheight = 2 * config.namefont
header_lineheight = 2 * config.headerfont
tableheight -= (math.floor((len(self.fields) + 1) / 2) * header_lineheight) + (config.topmargin + config.botmargin) + self.heading_height + name_lineheight + (self.cell_margin * 2)
self.lines_per_page = int(math.floor(tableheight / self.row_height))
#Subtract two lines that show "Sum of previous page" and "Sum"
self.lines_per_page -= 2
pages = ((len(self.content) - 1) / self.lines_per_page ) + 1
operation.set_n_pages(pages)
def doPrintJob(self, action):
self.operation.run(action)
def printPage(self, operation, context, page_nr):
self.pangolayout = context.create_pango_layout()
self.cairo_context = context.get_cairo_context()
self.pangolayout.set_width(-1)
self.pangocairo = pangocairo.CairoContext(self.cairo_context)
        self.formatHeader()
        # self.drawfunction is expected to hold the name of one of the
        # draw* methods below (e.g. "drawDailyNotebook") and to be set by
        # the calling code before the print job runs.
        getattr(self, self.drawfunction)(page_nr)
        #self.drawDailyNotebook(page_nr)
def formatHeader(self):
LINE_HEIGHT = 2 * (config.namefont)
# MARGIN = self.page_margin
# cwidth = context.get_width()
cwidth = self.page_setup.get_page_width(gtk.UNIT_POINTS)
logging.info("Paper width: " + str(cwidth))
cr = self.cairo_context
fontsize = config.namefont
fdesc = pango.FontDescription("Sans")
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
if self.title != "":
self.pangolayout.set_text(self.title)
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_CENTER)
cr.move_to ((cwidth - width / pango.SCALE) / 2, (LINE_HEIGHT - (height/ pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
# cr.move_to((cwidth + width / pango.SCALE) / 2, LINE_HEIGHT + config.topmargin)
# cr.line_to((cwidth - width / pango.SCALE) / 2, LINE_HEIGHT + config.topmargin)
cr.move_to((cwidth + width / pango.SCALE) / 2, LINE_HEIGHT + self.cell_margin)
cr.line_to((cwidth - width / pango.SCALE) / 2, LINE_HEIGHT + self.cell_margin)
addh = LINE_HEIGHT + self.cell_margin
LINE_HEIGHT = 2 * config.headerfont
fontsize = config.headerfont
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
flag = 1
for k,v in self.fields.items():
self.pangolayout.set_text(k + ": " + v)
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_CENTER)
if flag == 1:
addh += LINE_HEIGHT
cr.move_to (cwidth - (width / pango.SCALE) - config.rightmargin, addh - (height/ pango.SCALE)/2)
flag = 0
else:
cr.move_to ((width / pango.SCALE) + config.leftmargin, addh - (height/ pango.SCALE)/2)
flag = 1
self.pangocairo.show_layout(self.pangolayout)
cr.stroke()
self.header_height = addh + 8
def drawDailyNotebook(self, page_nr):
# RIGHT_EDGE = 570 #(table width + PAGE_MARGIN)
RIGHT_EDGE = self.page_setup.get_page_width(gtk.UNIT_POINTS) - config.rightmargin
HEADER_HEIGHT = self.header_height
HEADING_HEIGHT = self.heading_height
# PAGE_MARGIN = self.page_margin
MARGIN = self.cell_margin
TABLE_TOP = HEADER_HEIGHT + HEADING_HEIGHT + self.cell_margin
ROW_HEIGHT = self.row_height
LINE = self.line
cr = self.cairo_context
fontsize = config.contentfont
fdesc = pango.FontDescription("Sans")
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
# #Table top line
# cr.move_to(PAGE_MARGIN, TABLE_TOP)
# cr.line_to(RIGHT_EDGE, TABLE_TOP)
self.drawTableHeading()
#Draw table data
rindex = page_nr * self.lines_per_page
offset = 0
right_txt = RIGHT_EDGE
cr.move_to(right_txt, TABLE_TOP)
cr.line_to(right_txt, TABLE_TOP + ROW_HEIGHT)
self.pangolayout.set_text("----")
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
for i in range(0, 3):
right_txt -= MARGIN + LINE
cr.move_to (right_txt -(width / pango.SCALE), TABLE_TOP + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[i]
cr.move_to(right_txt, TABLE_TOP)
cr.line_to(right_txt, TABLE_TOP + ROW_HEIGHT)
right_txt -= MARGIN + LINE
fontsize -= 1
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
self.pangolayout.set_text(_("Sum of previous page"))
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), TABLE_TOP + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[3]
cr.move_to(right_txt, TABLE_TOP)
cr.line_to(right_txt, TABLE_TOP + ROW_HEIGHT)
right_txt -= MARGIN + LINE
fontsize = config.contentfont
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
if page_nr == 0:
self.pangolayout.set_text(utility.showNumber(0))
self.debt_sum = 0
else:
self.pangolayout.set_text(utility.showNumber(self.debt_sum))
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), TABLE_TOP + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[4]
cr.move_to(right_txt, TABLE_TOP)
cr.line_to(right_txt, TABLE_TOP + ROW_HEIGHT)
right_txt -= MARGIN + LINE
if page_nr == 0:
self.pangolayout.set_text(utility.showNumber(0))
self.credit_sum = 0
else:
self.pangolayout.set_text(utility.showNumber(self.credit_sum))
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), TABLE_TOP + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[5]
cr.move_to(right_txt, TABLE_TOP)
cr.line_to(right_txt, TABLE_TOP + ROW_HEIGHT)
        addh = ROW_HEIGHT + TABLE_TOP
try:
while (offset < self.lines_per_page):
row = self.content[rindex + offset]
cr.move_to(RIGHT_EDGE, addh)
cr.line_to(RIGHT_EDGE, addh+ROW_HEIGHT)
right_txt = RIGHT_EDGE
dindex = 0
for data in row:
right_txt -= MARGIN+LINE
if dindex == 3:
fontsize -= 1
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
self.pangolayout.set_text(data)
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
fontsize = config.contentfont
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
else:
self.pangolayout.set_text(data)
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[dindex]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
dindex += 1
self.debt_sum += int(row[4].replace(",", ""))
self.credit_sum += int(row[5].replace(",", ""))
addh += ROW_HEIGHT
offset += 1
except IndexError:
pass
right_txt = RIGHT_EDGE
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
self.pangolayout.set_text("----")
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
for i in range(0, 3):
right_txt -= MARGIN + LINE
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[i]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
right_txt -= MARGIN + LINE
fontsize -= 1
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
self.pangolayout.set_text(_("Sum"))
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[3]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
right_txt -= MARGIN + LINE
fontsize = config.contentfont
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
self.pangolayout.set_text(utility.showNumber(self.debt_sum))
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[4]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
right_txt -= MARGIN + LINE
self.pangolayout.set_text(utility.showNumber(self.credit_sum))
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[5]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
#Table top line
cr.move_to(right_txt, TABLE_TOP)
cr.line_to(RIGHT_EDGE, TABLE_TOP)
#Table bottom line
cr.move_to(right_txt, addh + ROW_HEIGHT)
cr.line_to(RIGHT_EDGE, addh + ROW_HEIGHT)
cr.stroke()
    def drawSubjectNotebook(self, page_nr):
        # Mirrors drawDailyNotebook, with two extra columns for the running
        # remaining balance and its debit/credit diagnosis.
# RIGHT_EDGE = 570 #(table width + PAGE_MARGIN)
RIGHT_EDGE = self.page_setup.get_page_width(gtk.UNIT_POINTS) - config.rightmargin
HEADER_HEIGHT = self.header_height
HEADING_HEIGHT = self.heading_height
# PAGE_MARGIN = self.page_margin
MARGIN = self.cell_margin
TABLE_TOP = HEADER_HEIGHT + HEADING_HEIGHT + self.cell_margin
ROW_HEIGHT = self.row_height
LINE = self.line
cr = self.cairo_context
fontsize = config.contentfont
fdesc = pango.FontDescription("Sans")
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
# #Table top line
# cr.move_to(PAGE_MARGIN, TABLE_TOP)
# cr.line_to(RIGHT_EDGE, TABLE_TOP)
self.drawTableHeading()
#Draw table data
rindex = page_nr * self.lines_per_page
offset = 0
right_txt = RIGHT_EDGE
cr.move_to(right_txt, TABLE_TOP)
cr.line_to(right_txt, TABLE_TOP + ROW_HEIGHT)
self.pangolayout.set_text("----")
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
for i in range(0, 2):
right_txt -= MARGIN + LINE
cr.move_to (right_txt -(width / pango.SCALE), TABLE_TOP + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[i]
cr.move_to(right_txt, TABLE_TOP)
cr.line_to(right_txt, TABLE_TOP + ROW_HEIGHT)
right_txt -= MARGIN + LINE
fontsize -= 1
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
self.pangolayout.set_text(_("Sum of previous page"))
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), TABLE_TOP + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[2]
cr.move_to(right_txt, TABLE_TOP)
cr.line_to(right_txt, TABLE_TOP + ROW_HEIGHT)
right_txt -= MARGIN + LINE
fontsize = config.contentfont
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
if page_nr == 0:
self.pangolayout.set_text(utility.showNumber(0))
self.debt_sum = 0
else:
self.pangolayout.set_text(utility.showNumber(self.debt_sum))
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), TABLE_TOP + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[3]
cr.move_to(right_txt, TABLE_TOP)
cr.line_to(right_txt, TABLE_TOP + ROW_HEIGHT)
right_txt -= MARGIN + LINE
if page_nr == 0:
self.pangolayout.set_text(utility.showNumber(0))
self.credit_sum = 0
else:
self.pangolayout.set_text(utility.showNumber(self.credit_sum))
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), TABLE_TOP + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[4]
cr.move_to(right_txt, TABLE_TOP)
cr.line_to(right_txt, TABLE_TOP + ROW_HEIGHT)
if page_nr == 0:
remaining = int(self.content[0][3].replace(",", "")) - int(self.content[0][4].replace(",", ""))
if self.content[0][5] == _("deb"):
remaining -= int(self.content[0][6].replace(",", ""))
else:
remaining += int(self.content[0][6].replace(",", ""))
if remaining < 0:
self.diagnose = _("deb")
self.remaining = utility.showNumber(-(remaining))
else:
if remaining == 0:
self.diagnose = _("equ")
else:
self.diagnose = _("cre")
self.remaining = utility.showNumber(remaining)
right_txt -= MARGIN + LINE
self.pangolayout.set_text(self.diagnose)
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), TABLE_TOP + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[5]
cr.move_to(right_txt, TABLE_TOP)
cr.line_to(right_txt, TABLE_TOP + ROW_HEIGHT)
right_txt -= MARGIN + LINE
self.pangolayout.set_text(self.remaining)
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), TABLE_TOP + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[6]
cr.move_to(right_txt, TABLE_TOP)
cr.line_to(right_txt, TABLE_TOP + ROW_HEIGHT)
        addh = ROW_HEIGHT + TABLE_TOP
try:
while (offset < self.lines_per_page):
row = self.content[rindex + offset]
cr.move_to(RIGHT_EDGE, addh)
cr.line_to(RIGHT_EDGE, addh+ROW_HEIGHT)
right_txt = RIGHT_EDGE
dindex = 0
for data in row:
right_txt -= MARGIN+LINE
if dindex == 2:
fontsize -= 1
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
self.pangolayout.set_text(data)
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
fontsize = config.contentfont
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
else:
self.pangolayout.set_text(data)
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[dindex]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
dindex += 1
self.debt_sum += int(row[3].replace(",", ""))
self.credit_sum += int(row[4].replace(",", ""))
addh += ROW_HEIGHT
offset += 1
except IndexError:
pass
self.diagnose = self.content[rindex + offset - 1][5]
self.remaining = self.content[rindex + offset - 1][6]
right_txt = RIGHT_EDGE
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
self.pangolayout.set_text("----")
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
for i in range(0, 2):
right_txt -= MARGIN + LINE
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[i]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
right_txt -= MARGIN + LINE
fontsize -= 1
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
self.pangolayout.set_text(_("Sum"))
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[2]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
right_txt -= MARGIN + LINE
fontsize = config.contentfont
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
self.pangolayout.set_text(utility.showNumber(self.debt_sum))
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[3]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
right_txt -= MARGIN + LINE
self.pangolayout.set_text(utility.showNumber(self.credit_sum))
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[4]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
right_txt -= MARGIN + LINE
self.pangolayout.set_text(self.diagnose)
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[5]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
right_txt -= MARGIN + LINE
self.pangolayout.set_text(self.remaining)
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[6]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
#Table top line
cr.move_to(right_txt, TABLE_TOP)
cr.line_to(RIGHT_EDGE, TABLE_TOP)
#Table bottom line
# cr.move_to(self.page_margin, addh + ROW_HEIGHT)
cr.move_to(right_txt, addh + ROW_HEIGHT)
cr.line_to(RIGHT_EDGE, addh + ROW_HEIGHT)
cr.stroke()
def drawDocument(self, page_nr):
# RIGHT_EDGE = 570 #(table width + PAGE_MARGIN)
RIGHT_EDGE = self.page_setup.get_page_width(gtk.UNIT_POINTS) - config.rightmargin
HEADER_HEIGHT = self.header_height
HEADING_HEIGHT = self.heading_height
# PAGE_MARGIN = self.page_margin
MARGIN = self.cell_margin
TABLE_TOP = HEADER_HEIGHT + HEADING_HEIGHT + self.cell_margin
ROW_HEIGHT = self.row_height
LINE = self.line
cr = self.cairo_context
fontsize = config.contentfont
fdesc = pango.FontDescription("Sans")
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
# #Table top line
# cr.move_to(PAGE_MARGIN, TABLE_TOP)
# cr.line_to(RIGHT_EDGE, TABLE_TOP)
self.drawTableHeading()
#Draw table data
rindex = page_nr * self.lines_per_page
offset = 0
self.debt_sum = 0
self.credit_sum = 0
addh= TABLE_TOP
try:
while (offset < self.lines_per_page):
row = self.content[rindex + offset]
cr.move_to(RIGHT_EDGE, addh)
cr.line_to(RIGHT_EDGE, addh+ROW_HEIGHT)
right_txt = RIGHT_EDGE
dindex = 0
for data in row:
right_txt -= MARGIN+LINE
if dindex == 2 or dindex == 3:
fontsize -= 1
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
self.pangolayout.set_text(data)
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
fontsize = config.contentfont
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
else:
self.pangolayout.set_text(data)
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[dindex]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
dindex += 1
self.debt_sum += int(row[4].replace(",", ""))
self.credit_sum += int(row[5].replace(",", ""))
addh += ROW_HEIGHT
offset += 1
except IndexError:
pass
right_txt = RIGHT_EDGE
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
right_txt -= 4*(MARGIN + LINE) + self.cols_width[0] + self.cols_width[1] + self.cols_width[2]
self.pangolayout.set_text(_("Sum"))
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[3]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
cr.move_to(RIGHT_EDGE, addh)
cr.line_to(right_txt, addh)
right_txt -= MARGIN + LINE
self.pangolayout.set_text(utility.showNumber(self.debt_sum))
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[4]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
right_txt -= MARGIN + LINE
self.pangolayout.set_text(utility.showNumber(self.credit_sum))
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[5]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
#Table top line
cr.move_to(right_txt, TABLE_TOP)
cr.line_to(RIGHT_EDGE, TABLE_TOP)
#Table bottom line
cr.move_to(right_txt, addh + ROW_HEIGHT)
cr.line_to(RIGHT_EDGE, addh + ROW_HEIGHT)
cr.stroke()
def drawTrialReport(self, page_nr):
RIGHT_EDGE = self.page_setup.get_page_width(gtk.UNIT_POINTS) - config.rightmargin
HEADER_HEIGHT = self.header_height
HEADING_HEIGHT = self.heading_height
MARGIN = self.cell_margin
TABLE_TOP = HEADER_HEIGHT + HEADING_HEIGHT + self.cell_margin
ROW_HEIGHT = self.row_height
LINE = self.line
cr = self.cairo_context
fontsize = config.contentfont
fdesc = pango.FontDescription("Sans")
fdesc.set_size(fontsize * pango.SCALE)
self.pangolayout.set_font_description(fdesc)
self.drawTableHeading()
#Draw table data
rindex = page_nr * self.lines_per_page
offset = 0
addh= TABLE_TOP
try:
while (offset < self.lines_per_page):
row = self.content[rindex + offset]
cr.move_to(RIGHT_EDGE, addh)
cr.line_to(RIGHT_EDGE, addh+ROW_HEIGHT)
right_txt = RIGHT_EDGE
dindex = 0
for data in row:
right_txt -= MARGIN+LINE
self.pangolayout.set_text(data)
(width, height) = self.pangolayout.get_size()
self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), addh + (ROW_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[dindex]
cr.move_to(right_txt, addh)
cr.line_to(right_txt, addh + ROW_HEIGHT)
dindex += 1
addh += ROW_HEIGHT
offset += 1
except IndexError:
pass
#Table top line
cr.move_to(right_txt, TABLE_TOP)
cr.line_to(RIGHT_EDGE, TABLE_TOP)
#Table bottom line
cr.move_to(right_txt, addh)
cr.line_to(RIGHT_EDGE, addh)
cr.stroke()
def setDrawFunction(self, func):
self.drawfunction = func
def drawTableHeading(self):
# RIGHT_EDGE = 570 #(table width + PAGE_MARGIN)
RIGHT_EDGE = self.page_setup.get_page_width(gtk.UNIT_POINTS) - config.rightmargin
HEADING_HEIGHT = self.heading_height
MARGIN = self.cell_margin
LINE = self.line
cr = self.cairo_context
htop = self.header_height + MARGIN
# #Heading top line
# cr.move_to(self.page_margin, htop)
# cr.line_to(RIGHT_EDGE, htop)
cr.move_to(RIGHT_EDGE, htop)
cr.line_to(RIGHT_EDGE, htop + HEADING_HEIGHT)
#Draw table headings
right_txt = RIGHT_EDGE
dindex = 0
for data in self.heading:
right_txt -= MARGIN+LINE
self.pangolayout.set_text(data)
(width, height) = self.pangolayout.get_size()
if (width / pango.SCALE) > self.cols_width[dindex]:
res = data.split()
self.pangolayout.set_text(res[0])
(width, height) = self.pangolayout.get_size()
if (width / pango.SCALE) < self.cols_width[dindex]:
#self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), htop + (HEADING_HEIGHT/2-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
#
self.pangolayout.set_text(res[1])
(width, height) = self.pangolayout.get_size()
#self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), htop + ((HEADING_HEIGHT*3)/2-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
else:
#self.pangolayout.set_alignment(pango.ALIGN_RIGHT)
cr.move_to (right_txt -(width / pango.SCALE), htop + (HEADING_HEIGHT-(height / pango.SCALE))/2)
self.pangocairo.show_layout(self.pangolayout)
right_txt -= self.cols_width[dindex]
cr.move_to(right_txt, htop)
cr.line_to(right_txt, htop + HEADING_HEIGHT)
dindex += 1
#Heading top line
cr.move_to(right_txt, htop)
cr.line_to(RIGHT_EDGE, htop)
# def dailySpecific(self, pos, page):
# pass
#
# def subjectSpecific(self, pos, page):
# pass
#
# def docSpecific(self, pos, page):
# pass
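# --- Editor's note: hedged sketch, not part of the original amir source. ---
# Every cell drawn above repeats one idiom: set the layout text, query its
# size in pango units, then right-align it against a moving x cursor and
# vertically center it in the row. A minimal standalone version of that
# idiom, assuming the same pygtk-era pango/pangocairo objects used above
# (pango is already imported by this module):
def draw_right_aligned(cr, pangocairo_ctx, layout, text, right_x, top_y, row_h):
    """Right-align `text` at x = `right_x`, centered in a row of height `row_h`."""
    layout.set_text(text)
    width, height = layout.get_size()  # reported in pango units, not pixels
    layout.set_alignment(pango.ALIGN_RIGHT)
    cr.move_to(right_x - width / pango.SCALE,
               top_y + (row_h - height / pango.SCALE) / 2)
    pangocairo_ctx.show_layout(layout)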
|
gpl-3.0
| 1,510,233,238,877,947,600
| 42.230024
| 188
| 0.555142
| false
|
tung18tht/ICDAR-2017-Post-OCR-Correction
|
errors_detection/find_suspicious_eng_words.py
|
1
|
1754
|
import os, linecache, re, json
work_directory_path = os.path.dirname(os.path.realpath(__file__))
eng_words_file = open(work_directory_path + "/eng_words.txt", "rU")
eng_words = set()
for word in eng_words_file:
eng_words |= {word.rstrip()}
data_directory_path = work_directory_path + "/ICDAR2017_datasetPostOCR_Evaluation_2M_v1.2"
eng_data_directory_paths = [data_directory_path + "/eng_monograph", data_directory_path + "/eng_periodical"]
output_file = open(work_directory_path + "/Results/result_eng_words.json", "w")
output_file.write("{")
for eng_data_directory_path in eng_data_directory_paths:
for root_path, directories, files in os.walk(eng_data_directory_path):
for file in files:
if os.path.splitext(file)[1] == ".txt":
output_file.write("\n \""+os.path.basename(root_path)+"/"+file+"\": ")
errors = {}
file_path = root_path + "/" + file
ocr_output = linecache.getline(file_path, 1)[14:].strip()
word_begin_index = 0
for i, character in enumerate(ocr_output):
if character == ' ':
word_end_index = i
                        clean_word = re.sub(r'\W+', '', ocr_output[word_begin_index:word_end_index].lower())
if clean_word not in eng_words:
errors[str(word_begin_index)+":1"] = {}
word_begin_index = word_end_index + 1
                clean_word = re.sub(r'\W+', '', ocr_output[word_begin_index:].lower())
if clean_word not in eng_words:
errors[str(word_begin_index)+":1"] = {}
output_file.write(json.dumps(errors, indent=8)+",")
output_file.seek(0, 2)
output_file.truncate(output_file.tell() - 1)
output_file = open(work_directory_path + "/Results/result_eng_words.json", "a")
output_file.write("\n}")
|
mit
| 7,360,763,077,504,529,000
| 36.340426
| 108
| 0.622007
| false
|
jtriley/s3site
|
s3site/static.py
|
1
|
2179
|
"""
Module for storing static data structures
"""
import os
import sys
VERSION = 0.9999
PID = os.getpid()
S3SITE_CFG_DIR = os.path.join(os.path.expanduser('~'), '.s3site')
S3SITE_CFG_FILE = os.path.join(S3SITE_CFG_DIR, 'config')
S3SITE_LOG_DIR = os.path.join(S3SITE_CFG_DIR, 'logs')
S3SITE_META_FILE = '__s3site.cfg'
DEBUG_FILE = os.path.join(S3SITE_LOG_DIR, 'debug.log')
AWS_DEBUG_FILE = os.path.join(S3SITE_LOG_DIR, 'aws-debug.log')
CRASH_FILE = os.path.join(S3SITE_LOG_DIR, 'crash-report-%d.txt' % PID)
GLOBAL_SETTINGS = {
# setting, type, required?, default, options, callback
'enable_experimental': (bool, False, False, None, None),
'web_browser': (str, False, None, None, None),
'include': (list, False, [], None, None),
}
AWS_SETTINGS = {
'aws_access_key_id': (str, True, None, None, None),
'aws_secret_access_key': (str, True, None, None, None),
'aws_user_id': (str, False, None, None, None),
'aws_port': (int, False, None, None, None),
'aws_ec2_path': (str, False, '/', None, None),
'aws_s3_path': (str, False, '/', None, None),
'aws_is_secure': (bool, False, True, None, None),
'aws_region_name': (str, False, None, None, None),
'aws_region_host': (str, False, None, None, None),
'aws_s3_host': (str, False, None, None, None),
'aws_proxy': (str, False, None, None, None),
'aws_proxy_port': (int, False, None, None, None),
'aws_proxy_user': (str, False, None, None, None),
'aws_proxy_pass': (str, False, None, None, None),
}
def __expand_all(path):
path = os.path.expanduser(path)
path = os.path.expandvars(path)
return path
def __makedirs(path, exit_on_failure=False):
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError:
if exit_on_failure:
sys.stderr.write("!!! ERROR - %s *must* be a directory\n" %
path)
elif not os.path.isdir(path) and exit_on_failure:
sys.stderr.write("!!! ERROR - %s *must* be a directory\n" % path)
sys.exit(1)
def create_config_dirs():
__makedirs(S3SITE_CFG_DIR, exit_on_failure=True)
__makedirs(S3SITE_LOG_DIR)
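# --- Editor's note: hedged sketch, not part of the original module. ---
# Each entry in GLOBAL_SETTINGS/AWS_SETTINGS is a (type, required?, default,
# options, callback) tuple, per the comment above. An illustrative consumer
# of that shape (the real validation lives elsewhere in s3site; options and
# callback handling are omitted here):
def validate_setting(table, name, value):
    stype, required, default, options, callback = table[name]
    if value is None:
        if required:
            raise ValueError("missing required setting: %s" % name)
        return default
    if not isinstance(value, stype):
        raise TypeError("%s must be of type %s" % (name, stype.__name__))
    return value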
|
gpl-3.0
| 8,637,566,825,480,623,000
| 32.523077
| 75
| 0.605782
| false
|
guyemerson/pyphon
|
src/pyphon.py
|
1
|
3033
|
#!/usr/bin/env python2
import wx, os, sqlite3
import wxGUI
wx.USE_UNICODE = 1
SRC_DIR = os.getcwd()
DATA_DIR = os.path.join(os.path.split(SRC_DIR)[0], 'data')
def filepath(text):
""" If text contains no slashes, add the default data directory """
directory, filename = os.path.split(text)
if directory == "":
return os.path.join(DATA_DIR, filename)
else:
return text
if __name__ == "__main__":
# Read settings from file, or else create settings file
settingsFile = os.path.join(DATA_DIR, '.pyphonsettings')
settings = dict()
if os.path.exists(settingsFile):
with open(settingsFile, 'r') as fin:
for line in fin:
key, value = line.strip().split('\t')
if value == "TRUE":
value = True
elif value == "FALSE":
value = False
settings[key] = value
else:
settings = {'album':'default_album.db', 'user':'default_user.db', 'copy':True}
with open(settingsFile, 'w') as fout:
for key, value in settings.items():
if value == True:
value = "TRUE"
elif value == False:
value = "FALSE"
fout.write("{}\t{}\n".format(key, value))
albumFile = filepath(settings['album'])
userFile = filepath(settings['user'])
# Open database files, if they exist, or else create empty databases
# Currently, userFile is not implemented
if os.path.exists(albumFile):
with sqlite3.connect(albumFile) as data:
cursor = data.cursor()
cursor.execute("PRAGMA foreign_keys = ON")
else:
with sqlite3.connect(albumFile) as data:
cursor = data.cursor()
cursor.execute("PRAGMA foreign_keys = ON")
cursor.execute('''CREATE TABLE language_set
(language TEXT PRIMARY KEY)''')
cursor.execute('''CREATE TABLE contrast_set
(language TEXT,
contrast TEXT,
FOREIGN KEY (language) REFERENCES language_set(language)
ON DELETE CASCADE ON UPDATE CASCADE,
PRIMARY KEY (language, contrast))''')
cursor.execute('''CREATE TABLE speaker_set
(language TEXT,
speaker TEXT,
FOREIGN KEY (language) REFERENCES language_set(language)
ON DELETE CASCADE ON UPDATE CASCADE,
PRIMARY KEY (language, speaker))''')
cursor.execute('''CREATE TABLE recordings
(file TEXT PRIMARY KEY,
speaker TEXT,
language TEXT,
answer TEXT NOT NULL,
FOREIGN KEY (language, speaker) REFERENCES speaker_set(language, speaker)
ON DELETE CASCADE ON UPDATE CASCADE)''')
cursor.execute('''CREATE TABLE minimal_pairs
(language TEXT,
contrast TEXT,
item_1 TEXT,
item_2 TEXT,
FOREIGN KEY (language, contrast) REFERENCES contrast_set(language, contrast)
ON DELETE CASCADE ON UPDATE CASCADE,
PRIMARY KEY (language, contrast, item_1, item_2))''')
# Open the main window
app = wx.App(False)
frame = wxGUI.MainWindow(None, title="High Variability Phonetic Training software", cursor=cursor)
frame.Show()
app.MainLoop()
# Save database changes after exiting
data.commit()
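# --- Editor's note: hedged sketch, not part of the original script. ---
# The settings file parsed above holds one tab-separated key/value pair per
# line, with booleans spelled TRUE/FALSE, so the defaults written on first
# run produce a '.pyphonsettings' along the lines of:
#
#   album	default_album.db
#   user	default_user.db
#   copy	TRUE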
|
gpl-3.0
| 8,069,458,571,730,618,000
| 28.656566
| 99
| 0.648863
| false
|
adamhaney/pykell
|
tests.py
|
1
|
3811
|
from unittest import TestCase
from .types import expects_type, returns_type, T
@expects_type(a=T(int), b=T(str))
def example_kw_arg_function(a, b):
return a, b
class ExpectsTests(TestCase):
def test_correct_expectations_kw(self):
self.assertEqual(example_kw_arg_function(a=1, b="baz"), (1, "baz"))
@returns_type(T(int))
def add(x, y):
return x + y
@returns_type(T(str))
def bad_add(x, y):
return x + y
class ReturnTests(TestCase):
def test_returns_type_positive(self):
self.assertEqual(add(x=1, y=2), 3)
def test_returns_type_negative(self):
with self.assertRaises(TypeError):
bad_add(x=1, y=2)
class TypeClassTests(TestCase):
def test_type_enforcement_positive(self):
str_type = T(str)
self.assertTrue(str_type.validate("abc"))
def test_type_enforcement_negative(self):
str_type = T(str)
with self.assertRaises(TypeError):
str_type.validate(27)
def test_data_enforcement_positive(self):
z_string = T(str, lambda d: d.startswith('z'))
self.assertTrue(z_string.validate('zab'))
def test_data_enforcement_negative(self):
z_string = T(str, lambda d: d.startswith('z'))
with self.assertRaises(TypeError):
z_string.validate('abc')
def test_multiple_types_positive(self):
"""
make sure we can add two types to the class and that it then
says an object having one of those types is valid
"""
str_int_type = T(int)
str_int_type.contribute_type(str)
self.assertTrue(str_int_type.validate(2))
self.assertTrue(str_int_type.validate("boo"))
def test_multiple_types_negative(self):
str_int_type = T(int)
str_int_type.contribute_type(str)
with self.assertRaises(TypeError):
str_int_type.validate(2.0)
def test_multiple_validators_positive(self):
a_z_type = T(str, lambda d: d.startswith('a'))
a_z_type.contribute_validator(lambda d: d.endswith('z'))
self.assertTrue("abcdz")
def test_multiple_validators_negative(self):
a_z_type = T(str, lambda d: d.startswith('a'))
a_z_type.contribute_validator(lambda d: d.endswith('z'))
with self.assertRaises(TypeError):
a_z_type.validate("abc")
def test_pipe_multi_type_syntax(self):
str_int_type = T(int) | T(str)
self.assertTrue(str_int_type.validate(2))
self.assertTrue(str_int_type.validate("boo"))
class PykellContributionTests(TestCase):
def setUp(self):
self.positive_even_number = T(int, lambda d: d > 0) | T(float, lambda d: d % 2 == 0)
def test_postive_float_is_valid(self):
self.assertTrue(self.positive_even_number.validate(2.0))
def test_positive_integer_is_valid(self):
self.assertTrue(self.positive_even_number.validate(4))
def test_negative_float_is_invalid(self):
with self.assertRaises(TypeError):
self.positive_even_number.validate(-4.0)
def test_negative_int_is_invalid(self):
with self.assertRaises(TypeError):
self.positive_even_number.validate(-4)
def test_odd_float_is_invalid(self):
with self.assertRaises(TypeError):
self.positive_even_number.validate(3.0)
def test_odd_int_is_invalid(self):
with self.assertRaises(TypeError):
self.positive_even_number.validate(3)
class TypeNotRequiredTests(TestCase):
"""
In some cases we may just care that a validator is true, not
what the underlying type is
"""
def setUp(self):
self.positive_something = T(validator=lambda d: d > 0)
def test_validator_without_type(self):
self.assertTrue(self.positive_something.validate(2))
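# --- Editor's note: hedged sketch, not part of the original test module. ---
# Read as documentation, the suite shows that T pairs an optional type check
# with optional value validators, and that `|` unions two T instances, e.g.:
#
#   port = T(int, lambda d: 0 < d < 65536)
#   port.validate(8080)   # True
#   port.validate(70000)  # raises TypeError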
|
mit
| 7,787,685,935,964,463,000
| 29.246032
| 92
| 0.635791
| false
|
marrow/schema
|
test/transform/test_boolean.py
|
1
|
1445
|
from marrow.schema.testing import TransformTest
from marrow.schema.transform.type import Boolean, boolean, WebBoolean, web_boolean
class TestBooleanNative(TransformTest):
transform = boolean.native
invalid = ('x', )
@property
def valid(self):
yield None, None
if boolean.none:
yield '', None
for i in boolean.truthy + ('Y', 'True', True, 1, ['foo']):
yield i, True
for i in boolean.falsy + ('n', 'False', False, 0, []):
yield i, False
class TestBooleanForeign(TransformTest):
transform = boolean.foreign
@property
def valid(self):
if boolean.none:
yield None, ''
for i in (0, 1, False, True, [], [0]):
yield i, boolean.truthy[boolean.use] if bool(i) else boolean.falsy[boolean.use]
for i in boolean.truthy:
yield i, boolean.truthy[boolean.use]
for i in boolean.falsy:
yield i, boolean.falsy[boolean.use]
class TestBooleanNoNoneNative(TransformTest):
transform = Boolean(none=False).native
valid = ((None, False), )
invalid = ('', 'bob')
class TestBooleanNoNoneForeign(TransformTest):
transform = Boolean(none=False).foreign
valid = ((None, 'false'), ('foo', 'true'), ('', 'false'))
class TestWebBooleanNative(TransformTest):
transform = web_boolean.native
valid = (
(['', 'true'], True),
([''], False),
('', False),
)
class TestWebBooleanForeign(TransformTest):
transform = web_boolean.foreign
valid = [(i, bool(i)) for i in (0, 1, False, True)]
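# --- Editor's note: hedged sketch, not part of the original test module. ---
# TransformTest (from marrow.schema.testing) drives each (input, expected)
# pair in `valid` through the transform under test and asserts that every
# entry in `invalid` raises. So, per the cases above, boolean.native('Y')
# yields True and web_boolean.native(['']) yields False.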
|
mit
| -1,062,259,557,086,997,800
| 21.578125
| 82
| 0.667128
| false
|
DirectXMan12/should_be
|
should_be/extensions/sized.py
|
1
|
4985
|
from should_be.core import BaseMixin, alias_method
try:
from collections.abc import Sized
except ImportError:
# python < 3.3
from collections import Sized
class SizedMixin(BaseMixin):
target_class = Sized
def should_be_size(self, target):
msg = '{txt} should have been size {val}, but was size {self_size}'
self.should_follow(len(self) == target, msg,
val=target,
self_size=len(self))
alias_method('should_have_len', should_be_size)
alias_method('should_have_length', should_be_size)
def should_be_size_of(self, target):
msg = ('{txt} should have been the size of {val} ({val_size}), '
'but was size {self_size}')
self.should_follow(len(self) == len(target), msg,
val=target,
val_size=len(target),
self_size=len(self))
alias_method('should_match_size_of', should_be_size_of)
alias_method('should_match_len_of', should_be_size_of)
alias_method('should_match_length_of', should_be_size_of)
def should_be_at_least_size(self, target):
msg = ('{txt} should have been at least size {val}, but '
'was size {self_size}')
self.should_follow(len(self) >= target, msg,
val=target,
self_size=len(self))
alias_method('should_be_at_least_len', should_be_at_least_size)
alias_method('should_be_at_least_length', should_be_at_least_size)
def should_be_at_most_size(self, target):
msg = ('{txt} should have been at most size {val}, but '
'was size {self_size}')
self.should_follow(len(self) <= target, msg,
val=target,
self_size=len(self))
alias_method('should_be_at_most_len', should_be_at_most_size)
alias_method('should_be_at_most_length', should_be_at_most_size)
def should_be_at_least_size_of(self, target):
msg = ('{txt} should have been at least the size of {val} ({val_size})'
', but was size {self_size}')
self.should_follow(len(self) >= len(target), msg,
val=target,
val_size=len(target),
self_size=len(self))
alias_method('should_be_at_least_len_of', should_be_at_least_size_of)
alias_method('should_be_at_least_length_of', should_be_at_least_size_of)
def should_be_at_most_size_of(self, target):
msg = ('{txt} should have been at most the size of {val} ({val_size})'
', but was size {self_size}')
self.should_follow(len(self) <= len(target), msg,
val=target,
val_size=len(target),
self_size=len(self))
alias_method('should_be_at_most_len_of', should_be_at_most_size_of)
alias_method('should_be_at_most_length_of', should_be_at_most_size_of)
def should_be_bigger_than(self, target):
if isinstance(target, Sized):
# we have a sized object
msg = ('{txt} should have been bigger than {val} ({val_size}), '
'but was size {self_size}')
self.should_follow(len(self) > len(target), msg,
val=target,
val_size=len(target),
self_size=len(self))
else:
# have a number
msg = ('{txt} should have had size greater than {val}, but '
'was size {self_size}')
self.should_follow(len(self) > target, msg,
val=target,
self_size=len(self))
alias_method('should_be_longer_than', should_be_bigger_than)
def should_be_smaller_than(self, target):
if isinstance(target, Sized):
# we have a sized object
msg = ('{txt} should have been smaller than {val} ({val_size}), '
'but was size {self_size}')
self.should_follow(len(self) < len(target), msg,
val=target,
val_size=len(target),
self_size=len(self))
else:
# have a number
msg = ('{txt} should have had size less than {val}, but '
'was size {self_size}')
self.should_follow(len(self) < target, msg,
val=target,
self_size=len(self))
alias_method('should_be_shorter_than', should_be_smaller_than)
def should_be_empty(self):
msg = '{txt} should have been empty, but had size {val}'
self.should_follow(len(self) == 0, msg, val=len(self))
def shouldnt_be_empty(self):
msg = '{txt} should not have been empty, but was anyway'
self.should_follow(len(self) > 0, msg)
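# --- Editor's note: hedged sketch, not part of the original module. ---
# Assuming BaseMixin grafts these methods onto Sized types (as target_class
# suggests and as elsewhere in should_be), the checks above read as direct
# assertions on any sized object, e.g.:
#
#   [1, 2, 3].should_be_size(3)
#   'abc'.should_be_longer_than('ab')
#   [].should_be_empty()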
|
isc
| -3,073,971,879,870,417,000
| 40.198347
| 79
| 0.525376
| false
|
marco-lancini/Showcase
|
app_socialnetworks/tumblr.py
|
1
|
7041
|
from __init__ import *
from oauthclient import *
class TumblrClient(OauthClient):
"""
Wrapper for Tumblr APIs
:CONSUMER_KEY: Tumblr App ID
:CONSUMER_SECRET: Tumblr API Secret
:blog: the connected Tumblr blog, if any
:user_auth: account of the user on Showcase
:auth: boolean flag (if True, the operation needs to be authenticated)
.. seealso:: :class:`app_socialnetworks.oauthclient.OauthClient`
"""
CONSUMER_KEY = setting('TUMBLR_CONSUMER_KEY')
CONSUMER_SECRET = setting('TUMBLR_CONSUMER_SECRET')
request_token_url = 'http://www.tumblr.com/oauth/request_token'
authorize_url = 'http://www.tumblr.com/oauth/authorize'
access_token_url = 'http://www.tumblr.com/oauth/access_token'
def __init__(self, blog, user_auth=False, auth=False):
"""
        Instantiate the client: if authentication is needed, proceed with OAuth; otherwise, use a simple HTTP client
:param blog: the connected Tumblr blog, if any
:type blog: string
:param user_auth: account of the user on Showcase
:type user_auth: `User`
:param auth: flag (if True, the operation needs to be authenticated)
:type auth: boolean
"""
self.blog = blog
self.user_auth = user_auth
self.auth = auth
if self.auth:
# Authentication needed, proceed with Oauth
super(TumblrClient, self).__init__(self.CONSUMER_KEY, self.CONSUMER_SECRET)
else:
# Use a simple HTTP client
self.client = httplib2.Http()
def request_token(self, consumer):
"""
Retrieve the access token of the user from his connected accounts data
"""
# Retrieve connected accounts
connected_accounts = self.user_auth.social_auth.filter(user=self.user_auth.id).filter(provider="tumblr")
if len(connected_accounts) == 0:
raise NotConnectedException('Not Connected to Tumblr')
# Retrieve access_token from socialauth
access_token = connected_accounts[0].extra_data['access_token']
access_token = urlparse.parse_qs(access_token)
oauth_token = access_token['oauth_token'][0]
oauth_token_secret = access_token['oauth_token_secret'][0]
return oauth_token, oauth_token_secret
#=========================================================================
# READ
#=========================================================================
def _query(self, method, optionals=None):
"""
Execute a read-only query
"""
url = "http://api.tumblr.com/v2/blog/%s.tumblr.com/%s?api_key=%s" % (self.blog, method, self.CONSUMER_KEY)
if optionals:
url += optionals
try:
resp, content = self.client.request(url, "GET")
content = json.loads(content)['response']
return content
except:
return None
def get_blog_info(self):
"""
        Get general info about the connected blog
"""
method = "info"
return self._query(method)
def get_blog_posts(self):
"""
Fetch last 5 blog posts
"""
method = "posts"
optionals = "&limit=5"
posts = self._query(method, optionals)
if posts:
posts = posts['posts']
for p in posts:
temp = datetime.strptime(p['date'], "%Y-%m-%d %H:%M:%S GMT")
p['date'] = temp.strftime("%d %B %Y")
return posts
else:
return None
#=========================================================================
# WRITE
#=========================================================================
def _post_blog(self, params, media=None):
"""
Execute a write query
"""
url = 'http://api.tumblr.com/v2/blog/%s.tumblr.com/post' % self.blog
if media:
content = self._postOAuth(url, params)
content = content.read()
else:
body = urllib.urlencode(params)
resp, content = self.client.request(url, "POST", body=body)
# Check response
content = json.loads(content)
response = content['meta']['msg']
if response:
if response != 'Created':
if response == 'Not Authorized':
raise ClearanceException("Not an owned blog")
else:
raise UploadException("Error During Upload: %s" % response)
else:
raise UploadException("Error During Upload: %s" % response)
def add_text(self, title, body):
"""
Add a blog of type: *text*
:param title: title of the blog post
:type title: string
:param body: content of the blog post
:type body: string
"""
params = {'type': 'text', 'title': title, 'body': body}
return self._post_blog(params)
def add_link(self, title, url):
"""
Add a blog of type: *link*
:param title: title of the blog post
:type title: string
:param url: url of the link to publish
:type url: string
"""
params = {'type': 'link', 'title': title, 'url': url}
return self._post_blog(params)
def add_quote(self, quote):
"""
Add a blog of type: *quote*
:param quote: quote to publish
:type quote: string
"""
params = {'type': 'quote', 'quote': quote}
return self._post_blog(params)
def add_chat(self, title, conversation):
"""
Add a blog of type: *chat*
:param title: title of the blog post
:type title: string
:param conversation: conversation to publish
:type conversation: string
"""
params = {'type': 'chat', 'title': title, 'conversation': conversation}
return self._post_blog(params)
def add_photo(self, source, photo):
"""
Add a blog of type: *photo*
:param source: url of the photo to publish, if any
:type source: string
:param photo: photo to upload, if any
:type photo: image file
"""
if source:
params = {'type': 'photo', 'source': source}
return self._post_blog(params)
elif photo:
params = {'type': 'photo', 'data[0]': photo.read()}
return self._post_blog(params, media=True)
def add_audio(self, source):
"""
Add a blog of type: *audio*
:param source: url of the audio file to publish
:type source: string
"""
if source:
params = {'type': 'audio', 'external_url': source}
return self._post_blog(params)
# def add_video(self, video):
# params = {'type': 'video', 'data[0]': video.read()}
# return self._post_blog(params, media=True)
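# --- Editor's note: hedged sketch, not part of the original module. ---
# Typical read-only use of the wrapper above needs no OAuth, e.g. for a blog
# named 'example':
#
#   client = TumblrClient('example')
#   info = client.get_blog_info()
#   posts = client.get_blog_posts()
#
# Write calls (add_text, add_photo, ...) require auth=True and a Showcase
# user whose social-auth data includes a connected Tumblr account.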
|
mit
| -552,312,351,703,994,600
| 30.017621
| 115
| 0.530464
| false
|
endlessm/chromium-browser
|
third_party/chromite/lib/image_lib_unittest.py
|
1
|
25414
|
# -*- coding: utf-8 -*-
# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test the image_lib module."""
from __future__ import print_function
import collections
import gc
import glob
import os
import stat
import mock
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import git
from chromite.lib import image_lib
from chromite.lib import osutils
from chromite.lib import retry_util
from chromite.lib import partial_mock
# pylint: disable=protected-access
class FakeException(Exception):
"""Fake exception used for testing exception handling."""
FAKE_PATH = '/imaginary/file'
LOOP_DEV = '/dev/loop9999'
LOOP_PART_COUNT = 12
LOOP_PARTITION_INFO = [
image_lib.PartitionInfo(
1, 2928640, 2957311, 28672, 14680064, 'STATE', ''),
image_lib.PartitionInfo(
2, 20480, 53247, 32768, 16777216, 'KERN-A', ''),
image_lib.PartitionInfo(
3, 286720, 2928639, 2641920, 1352663040, 'ROOT-A', ''),
image_lib.PartitionInfo(
4, 53248, 86015, 32768, 16777216, 'KERN-B', ''),
image_lib.PartitionInfo(
5, 282624, 286719, 4096, 2097152, 'ROOT-B', ''),
image_lib.PartitionInfo(
6, 16448, 16448, 1, 512, 'KERN-C', ''),
image_lib.PartitionInfo(
7, 16449, 16449, 1, 512, 'ROOT-C', ''),
image_lib.PartitionInfo(
8, 86016, 118783, 32768, 16777216, 'OEM', ''),
image_lib.PartitionInfo(
9, 16450, 16450, 1, 512, 'reserved', ''),
image_lib.PartitionInfo(
10, 16451, 16451, 1, 512, 'reserved', ''),
image_lib.PartitionInfo(
11, 64, 16447, 16384, 8388608, 'RWFW', ''),
image_lib.PartitionInfo(
12, 249856, 282623, 32768, 16777216, 'EFI-SYSTEM', ''),
]
LOOP_PARTS_DICT = {
p.number: '%sp%d' % (LOOP_DEV, p.number) for p in LOOP_PARTITION_INFO}
LOOP_PARTS_LIST = LOOP_PARTS_DICT.values()
class LoopbackPartitionsMock(image_lib.LoopbackPartitions):
"""Mocked loopback partition class to use in unit tests."""
# pylint: disable=super-init-not-called
def __init__(self, path, destination=None, part_ids=None, mount_opts=None,
dev=LOOP_DEV, part_count=0):
"""Initialize.
Args:
(shared with LoopbackPartitions)
path: Path to the image file.
destination: destination directory.
part_ids: Mount these partitions at context manager entry.
mount_opts: Use these mount_opts for mounting |part_ids|.
(unique to LoopbackPartitionsMock)
dev: Path for the base loopback device.
part_count: How many partition device files to make up. Default: normal
partition table.
"""
self.path = path
self.dev = dev
self.part_ids = part_ids
self.mount_opts = mount_opts
if destination:
self.destination = destination
else:
self.destination = osutils.TempDir()
if part_count:
self._gpt_table = [
image_lib.PartitionInfo(num, 0, 0, 0, '', 'my-%d' % num, '')
for num in range(1, part_count + 1)]
else:
self._gpt_table = LOOP_PARTITION_INFO
self.parts = {p.number: '%sp%s' % (dev, p.number)
for p in self._gpt_table}
self.enable_rw_called = set()
self.disable_rw_called = set()
# pylint: enable=super-init-not-called
def EnableRwMount(self, part_id, offset=0):
"""Stub out enable rw mount."""
self.enable_rw_called.add((part_id, offset))
def DisableRwMount(self, part_id, offset=0):
"""Stub out disable rw mount."""
self.disable_rw_called.add((part_id, offset))
def _Mount(self, part, mount_opts):
"""Stub out mount operations."""
dest_number, _ = self._GetMountPointAndSymlink(part)
# Don't actually even try to mount it, let alone mark it mounted.
return dest_number
def _Unmount(self, part):
"""Stub out unmount operations."""
def close(self):
pass
class LoopbackPartitionsTest(cros_test_lib.MockTempDirTestCase):
"""Test the loopback partitions class"""
def setUp(self):
self.rc_mock = cros_test_lib.RunCommandMock()
self.StartPatcher(self.rc_mock)
self.rc_mock.SetDefaultCmdResult()
self.rc_mock.AddCmdResult(partial_mock.In('--show'), output=LOOP_DEV)
self.PatchObject(image_lib, 'GetImageDiskPartitionInfo',
return_value=LOOP_PARTITION_INFO)
self.PatchObject(glob, 'glob', return_value=LOOP_PARTS_LIST)
self.mount_mock = self.PatchObject(osutils, 'MountDir')
self.umount_mock = self.PatchObject(osutils, 'UmountDir')
self.retry_mock = self.PatchObject(retry_util, 'RetryException')
def fake_which(val, *_arg, **_kwargs):
return val
self.PatchObject(osutils, 'Which', side_effect=fake_which)
def testContextManager(self):
"""Test using the loopback class as a context manager."""
with image_lib.LoopbackPartitions(FAKE_PATH) as lb:
self.rc_mock.assertCommandContains(['losetup', '--show', '-f', FAKE_PATH])
self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV])
self.rc_mock.assertCommandContains(['partx', '-a', LOOP_DEV])
self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV],
expected=False)
self.assertEqual(lb.parts, LOOP_PARTS_DICT)
self.assertEqual(lb._gpt_table, LOOP_PARTITION_INFO)
self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV])
self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV])
def testContextManagerWithMounts(self):
"""Test using the loopback class as a context manager with mounts."""
syml = self.PatchObject(osutils, 'SafeSymlink')
part_ids = (1, 'ROOT-A')
with image_lib.LoopbackPartitions(
FAKE_PATH, part_ids=part_ids, mount_opts=('ro',)) as lb:
expected_mounts = set()
expected_calls = []
for part_id in part_ids:
for part in LOOP_PARTITION_INFO:
if part.name == part_id or part.number == part_id:
expected_mounts.add(part)
expected_calls.append(
mock.call('dir-%d' % part.number, os.path.join(
lb.destination, 'dir-%s' % part.name)))
break
self.rc_mock.assertCommandContains(['losetup', '--show', '-f', FAKE_PATH])
self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV])
self.rc_mock.assertCommandContains(['partx', '-a', LOOP_DEV])
self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV],
expected=False)
self.assertEqual(lb.parts, LOOP_PARTS_DICT)
self.assertEqual(lb._gpt_table, LOOP_PARTITION_INFO)
self.assertEqual(expected_calls, syml.call_args_list)
self.assertEqual(expected_mounts, lb._mounted)
self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV])
self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV])
def testManual(self):
"""Test using the loopback class closed manually."""
lb = image_lib.LoopbackPartitions(FAKE_PATH)
self.rc_mock.assertCommandContains(['losetup', '--show', '-f', FAKE_PATH])
self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV])
self.rc_mock.assertCommandContains(['partx', '-a', LOOP_DEV])
self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV],
expected=False)
self.assertEqual(lb.parts, LOOP_PARTS_DICT)
self.assertEqual(lb._gpt_table, LOOP_PARTITION_INFO)
lb.close()
self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV])
self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV])
def gcFunc(self):
"""This function isolates a local variable so it'll be garbage collected."""
lb = image_lib.LoopbackPartitions(FAKE_PATH)
self.rc_mock.assertCommandContains(['losetup', '--show', '-f', FAKE_PATH])
self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV])
self.rc_mock.assertCommandContains(['partx', '-a', LOOP_DEV])
self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV],
expected=False)
self.assertEqual(lb.parts, LOOP_PARTS_DICT)
self.assertEqual(lb._gpt_table, LOOP_PARTITION_INFO)
def testGarbageCollected(self):
"""Test using the loopback class closed by garbage collection."""
self.gcFunc()
# Force garbage collection in case python didn't already clean up the
# loopback object.
gc.collect()
self.rc_mock.assertCommandContains(['partx', '-d', LOOP_DEV])
self.rc_mock.assertCommandContains(['losetup', '--detach', LOOP_DEV])
def testMountUnmount(self):
"""Test Mount() and Unmount() entry points."""
lb = image_lib.LoopbackPartitions(FAKE_PATH, destination=self.tempdir)
# Mount four partitions.
lb.Mount((1, 3, 'ROOT-B', 'ROOT-C'))
for p in (1, 3, 5, 7):
self.mount_mock.assert_any_call(
'%sp%d' % (LOOP_DEV, p), '%s/dir-%d' % (self.tempdir, p),
makedirs=True, skip_mtab=False, sudo=True, mount_opts=('ro',))
linkname = '%s/dir-%s' % (self.tempdir, LOOP_PARTITION_INFO[p - 1].name)
self.assertTrue(stat.S_ISLNK(os.lstat(linkname).st_mode))
self.assertEqual(4, self.mount_mock.call_count)
self.umount_mock.assert_not_called()
# Unmount half of them, confirm that they were unmounted.
lb.Unmount((1, 'ROOT-B'))
for p in (1, 5):
self.umount_mock.assert_any_call('%s/dir-%d' % (self.tempdir, p),
cleanup=False)
self.assertEqual(2, self.umount_mock.call_count)
self.umount_mock.reset_mock()
# Close the object, so that we unmount the other half of them.
lb.close()
for p in (3, 7):
self.umount_mock.assert_any_call('%s/dir-%d' % (self.tempdir, p),
cleanup=False)
self.assertEqual(2, self.umount_mock.call_count)
# Verify that the directories were cleaned up.
for p in (1, 3):
self.retry_mock.assert_any_call(
cros_build_lib.RunCommandError, 60, osutils.RmDir,
'%s/dir-%d' % (self.tempdir, p), sudo=True, sleep=1)
def testMountingMountedPartReturnsName(self):
"""Test that Mount returns the directory name even when already mounted."""
lb = image_lib.LoopbackPartitions(FAKE_PATH, destination=self.tempdir)
dirname = '%s/dir-%d' % (self.tempdir, lb._gpt_table[0].number)
# First make sure we get the directory name when we actually mount.
self.assertEqual(dirname, lb._Mount(lb._gpt_table[0], ('ro',)))
# Then make sure we get it when we call it again.
self.assertEqual(dirname, lb._Mount(lb._gpt_table[0], ('ro',)))
lb.close()
def testRemountCallsMount(self):
"""Test that Mount returns the directory name even when already mounted."""
lb = image_lib.LoopbackPartitions(FAKE_PATH, destination=self.tempdir)
devname = '%sp%d' % (LOOP_DEV, lb._gpt_table[0].number)
dirname = '%s/dir-%d' % (self.tempdir, lb._gpt_table[0].number)
# First make sure we get the directory name when we actually mount.
self.assertEqual(dirname, lb._Mount(lb._gpt_table[0], ('ro',)))
self.mount_mock.assert_called_once_with(
devname, dirname,
makedirs=True, skip_mtab=False, sudo=True, mount_opts=('ro',))
# Then make sure we get it when we call it again.
self.assertEqual(dirname, lb._Mount(lb._gpt_table[0], ('remount', 'rw')))
self.assertEqual(
mock.call(devname, dirname, makedirs=True, skip_mtab=False,
sudo=True, mount_opts=('remount', 'rw')),
self.mount_mock.call_args)
lb.close()
def testGetPartitionDevName(self):
"""Test GetPartitionDevName()."""
lb = image_lib.LoopbackPartitions(FAKE_PATH)
for part in LOOP_PARTITION_INFO:
self.assertEqual('%sp%d' % (LOOP_DEV, part.number),
lb.GetPartitionDevName(part.number))
if part.name != 'reserved':
self.assertEqual('%sp%d' % (LOOP_DEV, part.number),
lb.GetPartitionDevName(part.name))
lb.close()
def test_GetMountPointAndSymlink(self):
"""Test _GetMountPointAndSymlink()."""
lb = image_lib.LoopbackPartitions(FAKE_PATH, destination=self.tempdir)
for part in LOOP_PARTITION_INFO:
expected = [os.path.join(lb.destination, 'dir-%s' % n)
for n in (part.number, part.name)]
self.assertEqual(expected, list(lb._GetMountPointAndSymlink(part)))
lb.close()
def testIsExt2OnVarious(self):
"""Test _IsExt2 works with the various partition types."""
FS_PARTITIONS = (1, 3, 8)
# STATE, ROOT-A, and OEM generally have ext2 filesystems.
for x in FS_PARTITIONS:
self.rc_mock.AddCmdResult(
partial_mock.In('if=%sp%d' % (LOOP_DEV, x)),
output=b'\x53\xef')
# Throw errors on all of the partitions that are < 1000 bytes.
for part in LOOP_PARTITION_INFO:
if part.size < 1000:
self.rc_mock.AddCmdResult(
partial_mock.In('if=%sp%d' % (LOOP_DEV, part.number)),
returncode=1, error='Seek failed\n')
lb = image_lib.LoopbackPartitions(FAKE_PATH, destination=self.tempdir)
# We expect that only the partitions in FS_PARTITIONS are ext2.
self.assertEqual(
[part.number in FS_PARTITIONS for part in LOOP_PARTITION_INFO],
[lb._IsExt2(part.name) for part in LOOP_PARTITION_INFO])
lb.close()
class LsbUtilsTest(cros_test_lib.MockTempDirTestCase):
"""Tests the various LSB utilities."""
def setUp(self):
# Patch os.getuid(..) to pretend running as root, so reading/writing the
# lsb-release file doesn't require escalated privileges and the test can
# clean itself up correctly.
self.PatchObject(os, 'getuid', return_value=0)
def testWriteLsbRelease(self):
"""Tests writing out the lsb_release file using WriteLsbRelease(..)."""
rc_mock = self.PatchObject(cros_build_lib, 'sudo_run')
fields = collections.OrderedDict((
('x', '1'), ('y', '2'), ('foo', 'bar'),
))
image_lib.WriteLsbRelease(self.tempdir, fields)
lsb_release_file = os.path.join(self.tempdir, 'etc', 'lsb-release')
expected_content = 'x=1\ny=2\nfoo=bar\n'
self.assertFileContents(lsb_release_file, expected_content)
rc_mock.assert_called_once_with([
'setfattr', '-n', 'security.selinux', '-v',
'u:object_r:cros_conf_file:s0',
os.path.join(self.tempdir, 'etc/lsb-release')])
# Test that WriteLsbRelease(..) correctly handles an existing file.
rc_mock = self.PatchObject(cros_build_lib, 'sudo_run')
fields = collections.OrderedDict((
('newkey1', 'value1'), ('newkey2', 'value2'), ('a', '3'), ('b', '4'),
))
image_lib.WriteLsbRelease(self.tempdir, fields)
expected_content = ('x=1\ny=2\nfoo=bar\nnewkey1=value1\nnewkey2=value2\n'
'a=3\nb=4\n')
self.assertFileContents(lsb_release_file, expected_content)
rc_mock.assert_called_once_with([
'setfattr', '-n', 'security.selinux', '-v',
'u:object_r:cros_conf_file:s0',
os.path.join(self.tempdir, 'etc/lsb-release')])
class BuildImagePathTest(cros_test_lib.MockTempDirTestCase):
"""BuildImagePath tests."""
def setUp(self):
self.board = 'board'
self.board_dir = os.path.join(self.tempdir, self.board)
D = cros_test_lib.Directory
filesystem = (
D(self.board, ('recovery_image.bin', 'other_image.bin')),
'full_path_image.bin',
)
cros_test_lib.CreateOnDiskHierarchy(self.tempdir, filesystem)
self.full_path = os.path.join(self.tempdir, 'full_path_image.bin')
def testBuildImagePath(self):
"""BuildImagePath tests."""
self.PatchObject(image_lib, 'GetLatestImageLink',
return_value=os.path.join(self.tempdir, self.board))
# Board and full image path provided.
result = image_lib.BuildImagePath(self.board, self.full_path)
self.assertEqual(self.full_path, result)
# Only full image path provided.
result = image_lib.BuildImagePath(None, self.full_path)
self.assertEqual(self.full_path, result)
# Full image path provided that does not exist.
with self.assertRaises(image_lib.ImageDoesNotExistError):
image_lib.BuildImagePath(self.board, '/does/not/exist')
with self.assertRaises(image_lib.ImageDoesNotExistError):
image_lib.BuildImagePath(None, '/does/not/exist')
# Default image is used.
result = image_lib.BuildImagePath(self.board, None)
self.assertEqual(os.path.join(self.board_dir, 'recovery_image.bin'), result)
# Image basename provided.
result = image_lib.BuildImagePath(self.board, 'other_image.bin')
self.assertEqual(os.path.join(self.board_dir, 'other_image.bin'), result)
# Image basename provided that does not exist.
with self.assertRaises(image_lib.ImageDoesNotExistError):
image_lib.BuildImagePath(self.board, 'does_not_exist.bin')
default_mock = self.PatchObject(cros_build_lib, 'GetDefaultBoard')
# Nothing provided, and no default.
default_mock.return_value = None
with self.assertRaises(image_lib.ImageDoesNotExistError):
image_lib.BuildImagePath(None, None)
# Nothing provided, with default.
default_mock.return_value = 'board'
result = image_lib.BuildImagePath(None, None)
self.assertEqual(os.path.join(self.board_dir, 'recovery_image.bin'), result)
class SecurityTestConfigTest(cros_test_lib.RunCommandTempDirTestCase):
"""SecurityTestConfig class tests."""
# pylint: disable=protected-access
def setUp(self):
self.image = '/path/to/image.bin'
self.baselines = '/path/to/baselines'
self.vboot_hash = 'abc123'
self.config = image_lib.SecurityTestConfig(self.image, self.baselines,
self.vboot_hash, self.tempdir)
def testVbootCheckout(self):
"""Test normal flow - clone and checkout."""
clone_patch = self.PatchObject(git, 'Clone')
self.config._VbootCheckout()
clone_patch.assert_called_once()
self.assertCommandContains(['git', 'checkout', self.vboot_hash])
# Make sure it doesn't try to clone & checkout again after already having
# done so successfully.
clone_patch = self.PatchObject(git, 'Clone')
self.config._VbootCheckout()
clone_patch.assert_not_called()
def testVbootCheckoutError(self):
"""Test exceptions in a git command."""
rce = cros_build_lib.RunCommandError('error')
self.PatchObject(git, 'Clone', side_effect=rce)
with self.assertRaises(image_lib.VbootCheckoutError):
self.config._VbootCheckout()
def testVbootCheckoutNoDirectory(self):
"""Test the error handling when the directory does not exist."""
# Test directory that does not exist.
self.config.directory = '/DOES/NOT/EXIST'
with self.assertRaises(image_lib.SecurityConfigDirectoryError):
self.config._VbootCheckout()
def testRunCheck(self):
"""RunCheck tests."""
# No config argument when running check.
self.config.RunCheck('check1', False)
check1 = os.path.join(self.config._checks_dir, 'ensure_check1.sh')
config1 = os.path.join(self.baselines, 'ensure_check1.config')
self.assertCommandContains([check1, self.image])
self.assertCommandContains([config1], expected=False)
# Include config argument when running check.
self.config.RunCheck('check2', True)
check2 = os.path.join(self.config._checks_dir, 'ensure_check2.sh')
config2 = os.path.join(self.baselines, 'ensure_check2.config')
self.assertCommandContains([check2, self.image, config2])
class GetImageDiskPartitionInfoTests(cros_test_lib.RunCommandTestCase):
"""Tests the GetImageDiskPartitionInfo function."""
SAMPLE_PARTED = """/foo/chromiumos_qemu_image.bin:\
2271240192B:file:512:512:gpt::;
11:32768B:8421375B:8388608B::RWFW:;
6:8421376B:8421887B:512B::KERN-C:;
7:8421888B:8422399B:512B::ROOT-C:;
9:8422400B:8422911B:512B::reserved:;
10:8422912B:8423423B:512B::reserved:;
2:10485760B:27262975B:16777216B::KERN-A:;
4:27262976B:44040191B:16777216B::KERN-B:;
8:44040192B:60817407B:16777216B:ext4:OEM:msftdata;
12:127926272B:161480703B:33554432B:fat16:EFI-SYSTEM:boot, esp;
5:161480704B:163577855B:2097152B::ROOT-B:;
3:163577856B:2260729855B:2097152000B:ext2:ROOT-A:;
1:2260729856B:2271215615B:10485760B:ext2:STATE:msftdata;
"""
SAMPLE_CGPT = """
start size part contents
0 1 PMBR (Boot GUID: 88FB7EB8-2B3F-B943-B933-\
EEC571FFB6E1)
1 1 Pri GPT header
2 32 Pri GPT table
1921024 2097152 1 Label: "STATE"
Type: Linux data
UUID: EEBD83BE-397E-BD44-878B-0DDDD5A5C510
20480 32768 2 Label: "KERN-A"
Type: ChromeOS kernel
UUID: 7007C2F3-08E5-AB40-A4BC-FF5B01F5460D
Attr: priority=15 tries=15 successful=1
1101824 819200 3 Label: "ROOT-A"
Type: ChromeOS rootfs
UUID: F4C5C3AD-027F-894B-80CD-3DEC57932948
53248 32768 4 Label: "KERN-B"
Type: ChromeOS kernel
UUID: C85FB478-404C-8741-ADB8-11312A35880D
Attr: priority=0 tries=0 successful=0
282624 819200 5 Label: "ROOT-B"
Type: ChromeOS rootfs
UUID: A99F4231-1EC3-C542-AC0C-DF3729F5DB07
16448 1 6 Label: "KERN-C"
Type: ChromeOS kernel
UUID: 81F0E336-FAC9-174D-A08C-864FE627B637
Attr: priority=0 tries=0 successful=0
16449 1 7 Label: "ROOT-C"
Type: ChromeOS rootfs
UUID: 9E127FCA-30C1-044E-A5F2-DF74E6932692
86016 32768 8 Label: "OEM"
Type: Linux data
UUID: 72986347-A37C-684F-9A19-4DBAF41C55A9
16450 1 9 Label: "reserved"
Type: ChromeOS reserved
UUID: BA85A0A7-1850-964D-8EF8-6707AC106C3A
16451 1 10 Label: "reserved"
Type: ChromeOS reserved
UUID: 16C9EC9B-50FA-DD46-98DC-F781360817B4
64 16384 11 Label: "RWFW"
Type: ChromeOS firmware
UUID: BE8AECB9-4F78-7C44-8F23-5A9273B7EC8F
249856 32768 12 Label: "EFI-SYSTEM"
Type: EFI System Partition
UUID: 88FB7EB8-2B3F-B943-B933-EEC571FFB6E1
4050847 32 Sec GPT table
4050879 1 Sec GPT header
"""
def testCgpt(self):
"""Tests that we can list all partitions with `cgpt` correctly."""
self.PatchObject(cros_build_lib, 'IsInsideChroot', return_value=True)
self.rc.AddCmdResult(partial_mock.Ignore(), output=self.SAMPLE_CGPT)
partitions = image_lib.GetImageDiskPartitionInfo('...')
part_dict = {p.name: p for p in partitions}
self.assertEqual(part_dict['STATE'].start, 983564288)
self.assertEqual(part_dict['STATE'].size, 1073741824)
self.assertEqual(part_dict['STATE'].number, 1)
self.assertEqual(part_dict['STATE'].name, 'STATE')
self.assertEqual(part_dict['EFI-SYSTEM'].start, 249856 * 512)
self.assertEqual(part_dict['EFI-SYSTEM'].size, 32768 * 512)
self.assertEqual(part_dict['EFI-SYSTEM'].number, 12)
self.assertEqual(part_dict['EFI-SYSTEM'].name, 'EFI-SYSTEM')
self.assertEqual(12, len(partitions))
def testNormalPath(self):
self.PatchObject(cros_build_lib, 'IsInsideChroot', return_value=False)
self.rc.AddCmdResult(partial_mock.Ignore(), output=self.SAMPLE_PARTED)
partitions = image_lib.GetImageDiskPartitionInfo('_ignored')
part_dict = {p.name: p for p in partitions}
self.assertEqual(12, len(partitions))
self.assertEqual(1, part_dict['STATE'].number)
self.assertEqual(2097152000, part_dict['ROOT-A'].size)
def testKeyedByNumber(self):
self.PatchObject(cros_build_lib, 'IsInsideChroot', return_value=False)
self.rc.AddCmdResult(partial_mock.Ignore(), output=self.SAMPLE_PARTED)
partitions = image_lib.GetImageDiskPartitionInfo(
'_ignored'
)
part_dict = {p.number: p for p in partitions}
self.assertEqual(12, len(part_dict))
self.assertEqual('STATE', part_dict[1].name)
self.assertEqual(2097152000, part_dict[3].size)
self.assertEqual('reserved', part_dict[9].name)
self.assertEqual('reserved', part_dict[10].name)
def testChangeUnitInsideChroot(self):
self.PatchObject(cros_build_lib, 'IsInsideChroot', return_value=True)
self.rc.AddCmdResult(partial_mock.Ignore(), output=self.SAMPLE_CGPT)
partitions = image_lib.GetImageDiskPartitionInfo('_ignored')
part_dict = {p.name: p for p in partitions}
self.assertEqual(part_dict['STATE'].start, 983564288)
self.assertEqual(part_dict['STATE'].size, 1073741824)
|
bsd-3-clause
| -1,855,077,315,737,941,500
| 42.074576
| 80
| 0.638939
| false
|
nojero/pod
|
src/pod/equivalence.py
|
1
|
7197
|
import sat
import ptnet
import z3
from util import *
from encoding import *
class MergingEquivalence :
def __init__ (self, domain) :
self.domain = domain
def is_in_domain (self, it) :
for x in it :
if x not in self.domain :
raise LookupError, "'%s' is not in the domain" % repr (x)
def are_merged (self, x, y) :
self.is_in_domain ([x, y])
return x == y
def class_of (self, x) :
self.is_in_domain ([x])
return [x]
def classes (self) :
return [[x] for x in self.domain]
def assert_is_equivalence (self) :
        # we assert that
        # - every class is disjoint from any other class
        # - every element of the domain is in at least one class
        # to do it we just iterate through all elements of all classes, and
        # check whether we see the same element twice, verifying at the end
        # that we saw all elements of the domain
e2c = {}
for c in self.classes () :
for e in c :
if e in e2c :
# already seen!
                    raise AssertionError, \
                        "Element '%s' is in two classes, %s and %s" % \
                        (repr (e), long_list (c, 5), long_list (e2c[e], 5))
e2c[e] = c
seen = set (e2c.keys ())
if not self.domain <= seen :
print 'seen', seen
print 'domain', self.domain
raise AssertionError, \
"The set of classes contains less elements than the domain!"
if not seen <= self.domain :
print 'seen', seen
print 'domain', self.domain
raise AssertionError, \
"The set of classes contains more elements than the domain!"
def __repr__ (self) :
return str (self.classes ())
def __str__ (self) :
return repr (self)
class Smt2MergingEquivalence (MergingEquivalence) :
def __init__ (self, domain, enc) :
MergingEquivalence.__init__ (self, domain)
self.enc = enc
self.model = enc.z3.model ()
def are_merged (self, x, y) :
self.is_in_domain ([x, y])
if isinstance (x, ptnet.Condition) :
assert (isinstance (y, ptnet.Condition))
vx = self.enc.smt_varmap (x)
vy = self.enc.smt_varmap (y)
# if we didn't generate variable for one of them
# then it is surely possible to have one that
# has the same value as the other, ie, we merge
if (vx == None or vy == None) : return True
return self.model[vx].as_long () == self.model[vy].as_long ()
else :
assert (isinstance (x, ptnet.Event))
assert (isinstance (y, ptnet.Event))
if x.label != y.label : return False
vx = self.enc.smt_varmap (x)
vy = self.enc.smt_varmap (y)
assert (vx != None)
assert (vy != None)
return self.model[vx].as_long () == self.model[vy].as_long ()
def class_of (self, x) :
raise RuntimeError
def classes (self) :
raise RuntimeError
def __str__ (self) :
return str (self.model)
class ComputedMergingEquivalence (MergingEquivalence) :
def __init__ (self, domain) :
MergingEquivalence.__init__ (self, domain)
self.__tag2class = {}
self.__class2tags = {}
self.__mem2class = {}
def __merge_classes (self, c1, c2) :
# optimization: merge the smaller one into the larger one :)
if id (c1) == id (c2) : return
if len (c2) > len (c1) :
c = c1
c1 = c2
c2 = c
# move all elements of c2 into c1
c1.update (c2)
# update the pointer of all members of c2 in mem2class to point to c1
for e in c2 :
self.__mem2class[e] = c1
# same for the tags, all tags pointing to c2 must now point to c1
tagsc2 = self.__class2tags[id(c2)]
for tag in tagsc2 :
self.__tag2class[tag] = c1
# all tags of c2 are now tags of c1
self.__class2tags[id(c1)].update (tagsc2)
del self.__class2tags[id(c2)]
return c1
def tag_class (self, x, tag) :
# find x's class, or create a new one
self.is_in_domain ([x])
try :
c = self.__mem2class[x]
except KeyError :
c = self.__mem2class[x] = set ([x])
self.__class2tags[id(c)] = set ()
# if the tag is new and unknown, update the tables
if tag not in self.__tag2class :
self.__tag2class[tag] = c
self.__class2tags[id(c)].add (tag)
else :
# if it is not new, it already pointed to some class and we
# need to merge x's class and that class
c = self.__merge_classes (c, self.__tag2class[tag])
return c
def __memb_is_known (self, it) :
for x in it :
if x not in self.__mem2class :
raise LookupError, "No equivalence class defined for '%s'" % repr (x)
def __tag_is_known (self, it) :
for tag in it :
if tag not in self.__tag2class :
raise LookupError, "No equivalence class defined for tag '%s'" % repr (tag)
def are_merged (self, x, y) :
self.is_in_domain ([x, y])
self.__memb_is_known ([x, y])
if id (x) == id (y) : return True
return id (self.__mem2class[x]) == id (self.__mem2class[y])
def class_of (self, x ) :
return self.class_of_member (x)
def class_of_member (self, x) :
self.is_in_domain ([x])
self.__memb_is_known ([x])
return self.__mem2class[x]
def class_of_tag (self, tag) :
self.__tag_is_known ([tag])
return self.__tag2class[tag]
def classes (self) :
return list (set (tuple (x) for x in self.__tag2class.values ()))
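# Usage sketch (tag and element names are illustrative): tagging two elements
# with the same tag merges their classes, union-find style, with the smaller
# class folded into the larger one by __merge_classes.
#
#   eq = ComputedMergingEquivalence (set (['a', 'b', 'c']))
#   eq.tag_class ('a', 't1')
#   eq.tag_class ('b', 't1')         # 'a' and 'b' are now in the same class
#   eq.tag_class ('c', 't2')
#   print eq.are_merged ('a', 'b')   # True
#   print eq.are_merged ('a', 'c')   # False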
class Santi2MergingEquivalence (MergingEquivalence) :
def __init__ (self, domain, model) :
MergingEquivalence.__init__ (self, domain)
self.model = model
def are_merged (self, x, y) :
self.is_in_domain ([x, y])
return self.model[z3.Int(repr(x))].as_long() == self.model[z3.Int(repr(y))].as_long()
def class_of (self, x ) :
        # when asked for the class of an element, return its value in the model
return [self.model[z3.Int(repr(x))].as_long()]
def classes (self) :
        # separate the conditions from the events
dt_c = {}
dt_e = {}
for x in self.domain:
            # Dirty; this should be done more cleanly:
            # a condition looks like 'c14', an event like '2:e8',
            # but that naming convention only holds in this example
if 'c' in repr(x):
clase = dt_c.setdefault(self.model[z3.Int(repr(x))].as_long(),[])
clase.append(x)
else:
clase = dt_e.setdefault(self.model[z3.Int(repr(x))].as_long(),[])
clase.append(x)
        # return all the classes
return dt_e.values() + dt_c.values()
class IdentityMergingEquivalence (MergingEquivalence) :
pass
# vi:ts=4:sw=4:et:
|
gpl-3.0
| 4,782,201,330,291,516,000
| 32.474419
| 93
| 0.530777
| false
|
stvstnfrd/edx-platform
|
openedx/core/djangoapps/credit/tests/test_tasks.py
|
1
|
8475
|
"""
Tests for credit course tasks.
"""
from datetime import datetime
import mock
import six
from edx_proctoring.api import create_exam
from openedx.core.djangoapps.credit.api import get_credit_requirements
from openedx.core.djangoapps.credit.exceptions import InvalidCreditRequirements
from openedx.core.djangoapps.credit.models import CreditCourse
from openedx.core.djangoapps.credit.signals import on_course_publish
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
class TestTaskExecution(ModuleStoreTestCase):
"""Set of tests to ensure that the task code will do the right thing when
executed directly.
The test course gets created without the listeners being present, which
allows us to ensure that when the listener is executed, it is done as
expected.
"""
def mocked_set_credit_requirements(course_key, requirements): # pylint: disable=no-self-argument, unused-argument
"""Used as a side effect when mocking method credit api method
'set_credit_requirements'.
"""
raise InvalidCreditRequirements
def setUp(self):
super(TestTaskExecution, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
self.course = CourseFactory.create(start=datetime(2015, 3, 1))
self.section = ItemFactory.create(parent=self.course, category='chapter', display_name='Test Section')
self.subsection = ItemFactory.create(parent=self.section, category='sequential', display_name='Test Subsection')
self.vertical = ItemFactory.create(parent=self.subsection, category='vertical', display_name='Test Unit')
def test_task_adding_requirements_invalid_course(self):
"""
Test that credit requirements cannot be added for non credit course.
"""
requirements = get_credit_requirements(self.course.id)
assert len(requirements) == 0
on_course_publish(self.course.id)
requirements = get_credit_requirements(self.course.id)
assert len(requirements) == 0
def test_task_adding_requirements(self):
"""Test that credit requirements are added properly for credit course.
Make sure that the receiver correctly fires off the task when
invoked by signal.
"""
self.add_credit_course(self.course.id)
requirements = get_credit_requirements(self.course.id)
assert len(requirements) == 0
on_course_publish(self.course.id)
requirements = get_credit_requirements(self.course.id)
assert len(requirements) == 1
def test_proctored_exam_requirements(self):
"""
Make sure that proctored exams are being registered as requirements
"""
self.add_credit_course(self.course.id)
create_exam(
course_id=six.text_type(self.course.id),
content_id=six.text_type(self.subsection.location),
exam_name='A Proctored Exam',
time_limit_mins=10,
is_proctored=True,
is_active=True
)
requirements = get_credit_requirements(self.course.id)
assert len(requirements) == 0
on_course_publish(self.course.id)
requirements = get_credit_requirements(self.course.id)
assert len(requirements) == 2
assert requirements[1]['namespace'] == 'proctored_exam'
assert requirements[1]['name'] == six.text_type(self.subsection.location)
assert requirements[1]['display_name'] == 'A Proctored Exam'
assert requirements[1]['criteria'] == {}
def test_proctored_exam_filtering(self):
"""
Make sure that timed or inactive exams do not end up in the requirements table
        Also practice proctored exams are not a requirement
"""
self.add_credit_course(self.course.id)
create_exam(
course_id=six.text_type(self.course.id),
content_id='foo',
exam_name='A Proctored Exam',
time_limit_mins=10,
is_proctored=False,
is_active=True
)
requirements = get_credit_requirements(self.course.id)
assert len(requirements) == 0
on_course_publish(self.course.id)
requirements = get_credit_requirements(self.course.id)
assert len(requirements) == 1
# make sure we don't have a proctoring requirement
assert not [requirement for requirement in requirements if requirement['namespace'] == 'proctored_exam']
create_exam(
course_id=six.text_type(self.course.id),
content_id='foo2',
exam_name='A Proctored Exam',
time_limit_mins=10,
is_proctored=True,
is_active=False
)
on_course_publish(self.course.id)
requirements = get_credit_requirements(self.course.id)
assert len(requirements) == 1
# make sure we don't have a proctoring requirement
assert not [requirement for requirement in requirements if requirement['namespace'] == 'proctored_exam']
# practice proctored exams aren't requirements
create_exam(
course_id=six.text_type(self.course.id),
content_id='foo3',
exam_name='A Proctored Exam',
time_limit_mins=10,
is_proctored=True,
is_active=True,
is_practice_exam=True
)
on_course_publish(self.course.id)
requirements = get_credit_requirements(self.course.id)
assert len(requirements) == 1
# make sure we don't have a proctoring requirement
assert not [requirement for requirement in requirements if requirement['namespace'] == 'proctored_exam']
@mock.patch(
'openedx.core.djangoapps.credit.tasks.set_credit_requirements',
mock.Mock(
side_effect=mocked_set_credit_requirements
)
)
def test_retry(self):
"""Test that adding credit requirements is retried when
'InvalidCreditRequirements' exception is raised.
Make sure that the receiver correctly fires off the task when
invoked by signal
"""
self.add_credit_course(self.course.id)
requirements = get_credit_requirements(self.course.id)
assert len(requirements) == 0
on_course_publish(self.course.id)
requirements = get_credit_requirements(self.course.id)
assert len(requirements) == 0
def test_credit_requirement_blocks_ordering(self):
"""
Test ordering of proctoring blocks.
"""
self.add_credit_course(self.course.id)
subsection = ItemFactory.create(parent=self.section, category='sequential', display_name='Dummy Subsection')
create_exam(
course_id=six.text_type(self.course.id),
content_id=six.text_type(subsection.location),
exam_name='A Proctored Exam',
time_limit_mins=10,
is_proctored=True,
is_active=True
)
requirements = get_credit_requirements(self.course.id)
assert len(requirements) == 0
on_course_publish(self.course.id)
requirements = get_credit_requirements(self.course.id)
assert len(requirements) == 2
assert requirements[1]['namespace'] == 'proctored_exam'
assert requirements[1]['name'] == six.text_type(subsection.location)
assert requirements[1]['display_name'] == 'A Proctored Exam'
assert requirements[1]['criteria'] == {}
# Primary sort is based on start date
on_course_publish(self.course.id)
requirements = get_credit_requirements(self.course.id)
# grade requirement is added on publish of the requirements
assert len(requirements) == 2
# check requirements are added in the desired order
        # 1st minimum grade, then the blocks with a start date, then other blocks
assert requirements[0]['display_name'] == 'Minimum Grade'
assert requirements[1]['display_name'] == 'A Proctored Exam'
def add_credit_course(self, course_key):
"""Add the course as a credit.
Args:
course_key(CourseKey): Identifier for the course
Returns:
CreditCourse object added
"""
credit_course = CreditCourse(course_key=course_key, enabled=True)
credit_course.save()
return credit_course
|
agpl-3.0
| -2,392,428,938,518,736,000
| 36.171053
| 120
| 0.650029
| false
|