code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1
value | license stringclasses 15
values | size int32 2 1.05M |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
from scrapy.conf import settings
from scrapy.exceptions import DropItem
from scrapy import log
class MongoDBPipeline(object):
    """Scrapy item pipeline that stores scraped items in MongoDB.

    Connection parameters are read from the project settings:
    MONGODB_SERVER, MONGODB_PORT, MONGODB_DB, MONGODB_COLLECTION.

    NOTE(review): ``scrapy.conf.settings`` and ``scrapy.log`` are
    long-deprecated APIs; consider migrating to ``crawler.settings``
    (via ``from_crawler``) and the stdlib ``logging`` module.
    """

    def __init__(self):
        connection = pymongo.MongoClient(
            settings['MONGODB_SERVER'],
            settings['MONGODB_PORT']
        )
        db = connection[settings['MONGODB_DB']]
        self.collection = db[settings['MONGODB_COLLECTION']]

    def process_item(self, item, spider):
        """Insert *item* into the collection; drop items with a falsy field.

        :raises DropItem: as soon as any iterated field is falsy.
        :returns: the unchanged item (for downstream pipelines).
        """
        # The previous "valid" flag was dead code: raise DropItem already
        # terminates the method, so the flag could never be seen as False.
        # NOTE(review): iterating a scrapy Item yields field *names*; this
        # check therefore tests keys, not values -- confirm intent.
        for data in item:
            if not data:
                raise DropItem("Missing {0}!".format(data))
        self.collection.insert(dict(item))
        log.msg("Question added to MongoDB database!",
                level=log.DEBUG, spider=spider)
        return item
| pleycpl/CompareWebPageContent | tracker/tracker/pipelines.py | Python | gpl-3.0 | 1,358 |
import bpy
from ... base_types.node import AnimationNode
class IntersectLinePlaneNode(bpy.types.Node, AnimationNode):
    """Animation node that intersects a line (two points) with a plane
    (point + normal), outputting the intersection point and a validity flag."""
    bl_idname = "an_IntersectLinePlaneNode"
    bl_label = "Intersect Line Plane"
    bl_width_default = 160

    def create(self):
        # Declare the node's input and output sockets.
        self.newInput("Vector", "Line Start", "lineStart")
        self.newInput("Vector", "Line End", "lineEnd", value = (0, 0, 1))
        self.newInput("Vector", "Plane Point", "planePoint")
        self.newInput("Vector", "Plane Normal", "planeNormal", value = (0, 0, 1))
        self.newOutput("Vector", "Intersection", "intersection")
        self.newOutput("Boolean", "Is Valid", "isValid")

    def getExecutionCode(self):
        """Yield the generated Python source lines executed for this node.

        Only linked outputs are assigned, to keep the generated code minimal.
        """
        isLinked = self.getLinkedOutputsDict()
        if not any(isLinked.values()): return
        # A zero-length normal would make the intersection undefined.
        yield "if planeNormal.length_squared == 0: planeNormal = Vector((0, 0, 1))"
        yield "_intersection = mathutils.geometry.intersect_line_plane(lineStart, lineEnd, planePoint, planeNormal, False)"
        yield "if _intersection is None:"
        if isLinked["intersection"]: yield "    intersection = Vector((0, 0, 0))"
        if isLinked["isValid"]: yield "    isValid = False"
        yield "else:"
        if isLinked["intersection"]: yield "    intersection = _intersection"
        if isLinked["isValid"]: yield "    isValid = True"

    def getUsedModules(self):
        # Modules the generated code needs at execution time.
        return ["mathutils"]
| Thortoise/Super-Snake | Blender/animation_nodes-master/nodes/geometry/intersect_line_plane.py | Python | gpl-3.0 | 1,400 |
#!/usr/bin/env python
"""Build script for MaTiSSe.py"""
import os
from pybuilder.core import Author, init, task, use_plugin
from shutil import copyfile
import re
use_plugin('python.core')
use_plugin('python.coverage')
use_plugin('python.flake8')
use_plugin('python.frosted')
use_plugin('python.install_dependencies')
use_plugin('python.pylint')
use_plugin('python.unittest')
# Extract project metadata (authors, version, license, description, url)
# from the dunder assignments in the main source file, so it is defined
# in exactly one place.
__source__ = open('src/main/python/matisse/matisse.py').read()
authors = [Author(re.search(r'^__author__\s*=\s*"(.*)"', __source__, re.M).group(1),
                  re.search(r'^__author_email__\s*=\s*"(.*)"', __source__, re.M).group(1))]
version = re.search(r'^__version__\s*=\s*"(.*)"', __source__, re.M).group(1)
license = re.search(r'^__license__\s*=\s*"(.*)"', __source__, re.M).group(1)  # noqa: shadows builtin, kept for pybuilder
description = re.search(r'^__description__\s*=\s*"(.*)"', __source__, re.M).group(1)
url = re.search(r'^__url__\s*=\s*"(.*)"', __source__, re.M).group(1)
@init
def initialize(project):
    """Configure version, dependencies and build properties for pybuilder."""
    project.version = version

    # Build-time-only tooling vs. runtime dependencies.
    for build_dep in ('coverage', 'flake8', 'frosted', 'pylint'):
        project.build_depends_on(build_dep)
    for runtime_dep in ('markdown', 'yattag', 'pyyaml', 'dirsync'):
        project.depends_on(runtime_dep)

    release_dir = 'release/' + project.name + '-' + project.version
    project.set_property('flake8_max_line_length', 500)
    project.set_property('verbose', True)
    project.set_property('coverage_break_build', False)
    project.set_property('coverage_threshold_warn', 90)
    project.set_property('dir_target', 'release')
    project.set_property('dir_dist', release_dir)
    project.set_property('dir_reports', 'release/reports-' + project.name + '-' + project.version)
    project.default_task = ['analyze', 'publish', 'copy_resources']
    return
@task
def copy_resources(project):
    """Copy non-source resource files (MANIFEST.in, *.md) into the release dir."""
    destination = 'release/' + project.name + '-' + project.version
    copyfile('MANIFEST.in', destination + '/MANIFEST.in')
    markdown_files = [name for name in os.listdir('.') if name.endswith('.md')]
    for name in markdown_files:
        copyfile(name, destination + '/' + name)
    return
| szaghi/MaTiSSe | build.py | Python | gpl-3.0 | 2,139 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import logging
import datetime
from datetime import timedelta
import pytz as pytz
from icalendar import Calendar
class CalendarAnalyzer:
    """Analyze calendar events and decide whether power should be on."""

    def __init__(self):
        nowtime = datetime.datetime
        self.logger = logging.getLogger(__name__)
        naive_now = nowtime.now()
        timezone = pytz.timezone("Europe/Oslo")
        # Timezone-aware "now"; timed events compare against this.
        self.initnow = timezone.localize(naive_now)
        self.debug = False

    def does_event_require_power(self, event):
        """Return True if *event* is close enough in time to require power.

        Timed events need power from two hours before DTSTART until two
        hours after DTEND.  All-day events (date-only DTSTART/DTEND) need
        power only between those dates.  Events with any other DTSTART
        type are rejected.
        """
        logger = self.logger
        logger.debug(str(event))
        meeting_end = event['DTEND'].dt
        meeting_start = event['DTSTART'].dt
        self.now = self.initnow
        onlydate = False
        if self.debug:
            print ("Meeting_start: " + str(type(meeting_start)) + str(meeting_start))
            print ("Meeting_start-minus2: " + str(type(meeting_start - timedelta(hours=2))))
            print ("Meeting_end: " + str(type(meeting_end)) + str(meeting_end))
            print ("Self NOW: " + str(type(self.now)) + str(self.now))
        # NOTE: datetime.datetime must be tested first -- it is a subclass
        # of datetime.date, so the order of these isinstance checks matters.
        if isinstance(meeting_start, datetime.datetime):
            # Timed event: keep the timezone-aware "now" set above.
            # (A dead "Checkthis = True" assignment was removed here.)
            pass
        elif isinstance(meeting_start, datetime.date):
            # All-day event: compare plain dates instead of datetimes.
            if self.debug:
                print ("Meeting_start: " + str(type(meeting_start)) + str(meeting_start))
            self.now = datetime.datetime.now().date()
            if self.debug:
                print ("Self NOW: " + str(type(self.now)) + str(self.now))
            onlydate = True
        else:
            logger.debug(str(event))
            logger.debug("Meeting_start: " + str(type(meeting_start)) + str(meeting_start))
            return False
        if onlydate:
            if meeting_start <= self.now <= meeting_end:
                return True
            else:
                # Meeting is not running today
                return False
        else:
            if meeting_start - timedelta(hours=2) <= self.now <= meeting_end + timedelta(hours=2):
                return True
            else:
                # Meeting starts too far in the future (or ended long ago)
                return False

    def should_power_be_on(self, calendar):
        """True if any event in *calendar* (an ``icalendar.Calendar``)
        requires power: the meeting is running, or starts in <= two hours."""
        self.logger.info("Ser i kalender: " + calendar['X-WR-CALDESC'])
        for event in calendar.walk('vevent'):
            should_power_be_on = self.does_event_require_power(event)
            if should_power_be_on:
                return True
        return False
| FarrisSR/Bedehus | Calendalyzer/CalendarAnalyzer.py | Python | gpl-3.0 | 2,729 |
import asyncio
from copy import copy
class AsyncioServer:
    """Generic TCP server based on asyncio.

    Users of this class must derive from it and define the
    ``_handle_connection_cr`` coroutine method.

    FIX: the old ``@asyncio.coroutine`` / ``yield from`` style and the
    ``asyncio.Task(coro)`` constructor were deprecated and removed in
    Python 3.11; converted to ``async``/``await`` and ``ensure_future``
    with identical behavior.
    """

    def __init__(self):
        # Tasks for currently-connected clients; tracked for clean shutdown.
        self._client_tasks = set()

    async def start(self, host, port):
        """Start the server.

        The user must call ``stop`` to free resources properly after this
        method completes successfully.

        :param host: Bind address of the server (see ``asyncio.start_server``
            from the Python standard library).
        :param port: TCP port to bind to.
        """
        self.server = await asyncio.start_server(self._handle_connection,
                                                 host, port)

    async def stop(self):
        """Stop the server, cancelling and draining all client tasks."""
        wait_for = copy(self._client_tasks)
        for task in self._client_tasks:
            task.cancel()
        for task in wait_for:
            try:
                await asyncio.wait_for(task, None)
            except asyncio.CancelledError:
                pass
        self.server.close()
        await self.server.wait_closed()
        del self.server

    def _client_done(self, task):
        self._client_tasks.remove(task)

    def _handle_connection(self, reader, writer):
        # Plain callback invoked by asyncio.start_server for each connection;
        # wraps the user coroutine in a tracked task.
        task = asyncio.ensure_future(self._handle_connection_cr(reader, writer))
        self._client_tasks.add(task)
        task.add_done_callback(self._client_done)
| fallen/artiq | artiq/protocols/asyncio_server.py | Python | gpl-3.0 | 1,595 |
# -*- coding: utf-8 -*-
# Copyright (C) 2010-2015 Bastian Kleineidam
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Archive commands echoing data, implemented by the Python print
statement."""
from __future__ import print_function
from .. import util
def list_bzip2 (archive, compression, cmd, verbosity, interactive):
    """List a BZIP2 archive (prints the decompressed file name)."""
    return stripext(cmd, archive, verbosity)

# All single-file compressors are "listed" the same way: print the name
# of the file that decompression would produce.
list_compress = list_lzma = list_xz = list_lzip = list_lrzip = list_rzip = list_bzip2
def list_ape (archive, compression, cmd, verbosity, interactive):
    """List an APE archive (prints the decompressed ``.wav`` file name)."""
    return stripext(cmd, archive, verbosity, extension=".wav")

# Lossless audio formats all decompress to a WAV file.
list_shn = list_flac = list_ape
def stripext (cmd, archive, verbosity, extension=""):
    """Print the archive name with its compression suffix removed,
    optionally appending *extension*.  Always returns None."""
    if verbosity < 0:
        return None
    print(util.stripext(archive) + extension)
    return None
| wummel/patool | patoolib/programs/py_echo.py | Python | gpl-3.0 | 1,486 |
import datetime
from time import sleep
from engine.async import ResultObj
__author__ = 'Denis Mikhalkin'
import os
import subprocess
import uuid
from collections import OrderedDict
from boto import sqs
import yaml
from pytz import utc
from apscheduler.jobstores.memory import MemoryJobStore
from apscheduler.triggers.interval import IntervalTrigger
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.executors.pool import ThreadPoolExecutor
import logging
DEFAULT_SUBSCRIBE_PERIOD = 15 # 1 minute in seconds
def get_class( kls ):
    """Resolve a dotted path such as ``pkg.mod.Class`` to the named object."""
    parts = kls.split('.')
    module_name = ".".join(parts[:-1])
    # __import__ returns the *top-level* package; walk the remaining
    # attribute chain down to the requested object.
    obj = __import__(module_name)
    for attr in parts[1:]:
        obj = getattr(obj, attr)
    return obj
def is_integer(s):
    """Return True when *s* can be parsed as an int (ValueError only)."""
    try:
        int(s)
    except ValueError:
        return False
    return True
class Engine(object):
    """Central object wiring together the event bus, scheduler, resource
    manager and handler manager, plus an optional script repository."""
    LOG = logging.getLogger("gears.Engine")

    # Class-level declarations; the real instances are assigned in __init__.
    resourceManager = None
    """:type ResourceManager"""
    eventBus = None
    """:type EventBus"""
    handlerManager = None
    """:type HandlerManager"""
    scheduler = None
    """:type Scheduler"""

    def __init__(self, config):
        # Creates all sub-systems; scans the repository when
        # config["repositoryPath"] is present.
        self.LOG.info("Starting engine")
        self.config = config
        self.eventBus = EventBus(self)
        self.scheduler = Scheduler(self)
        self.resourceManager = ResourceManager(self)
        self.handlerManager = HandlerManager(self)
        if "repositoryPath" in config:
            self.repository = Repository(self, config["repositoryPath"])
            self.repository.scan()
        self.LOG.info("Created")

    def start(self):
        # Starts the resource manager (registers/activates the root resource)
        # and dumps the resulting resource table for diagnostics.
        self.resourceManager.start()
        self.LOG.info("Started")
        self.resourceManager.dump()

    def stop(self):
        # Shuts down the background scheduler; other components need no cleanup.
        self.scheduler.stop()
class HandlerManager(object):
    """Registry of event handlers; dispatches every event-bus event to the
    handlers whose condition matches."""
    LOG = logging.getLogger("gears.HandlerManager")

    # NOTE(review): class-level mutable dict, shared between instances --
    # safe only while a single HandlerManager exists per process.
    handlers = dict()

    def __init__(self, engine):
        self._engine = engine
        self._eventBus = engine.eventBus
        self._resourceManager = engine.resourceManager
        # Subscribe to *every* event; per-handler filtering happens in handleEvent.
        self._eventBus.subscribe(lambda eventName, resource, payload: True, self.handleEvent)
        self.LOG.info("Created")

    def registerSubscribe(self, handler, condition):
        # Handler for "subscribe" lifecycle events.
        self.LOG.info("registerSubscribe: " + str(condition))
        self._addHandler("subscribe", {"handler": handler, "condition": DelegatedEventCondition("subscribe", condition)})

    def registerRegister(self, handler, condition):
        # Handler for "register" lifecycle events.
        self.LOG.info("registerRegister: " + str(condition))
        self._addHandler("register", {"handler": handler, "condition": DelegatedEventCondition("register", condition)})

    def registerActivate(self, handler, condition):
        # Handler for "activate" lifecycle events.
        self.LOG.info("registerActivate: " + str(condition))
        self._addHandler("activate", {"handler": handler, "condition": DelegatedEventCondition("activate", condition)})

    def registerOn(self, handler, condition):
        # Handler for an arbitrary named event; also announces the
        # subscription on the bus so interested parties can react.
        self.LOG.info("registerOn: " + str(condition))
        self._addHandler(condition.eventName, {"handler": handler, "condition": condition})
        self._eventBus.publish("subscribe", condition, payload={"eventName": condition.eventName})

    def registerHandler(self, handler):
        """Register *handler* (instance or dotted class name) for every
        event name it declares via getEventNames()/getEventCondition()."""
        if handler is None: return
        if type(handler) == str:
            handler = self.createHandler(handler)
        eventNames = handler.getEventNames()
        for eventName in eventNames:
            condition = handler.getEventCondition(eventName)
            if eventName == "subscribe":
                self.registerSubscribe(handler, condition)
            elif eventName == "register":
                self.registerRegister(handler, condition)
            elif eventName == "activate":
                self.registerActivate(handler, condition)
            elif not Handler.isActionHandler(eventName):
                self.registerOn(handler, condition)

    def _addHandler(self, event, bundle):
        # Append a {handler, condition} bundle under the event name.
        if event not in self.handlers:
            self.handlers[event] = [bundle]
        else:
            self.handlers[event].append(bundle)

    def getHandlers(self, eventName, resource):
        # All handlers whose condition matches this event/resource pair.
        if eventName not in self.handlers:
            return list()
        return [bundle["handler"] for bundle in self.handlers[eventName] if bundle["condition"].matchesEvent(eventName, resource)]

    def handleEvent(self, eventName, resource, payload):
        """Dispatch the event to all matching handlers; returns the AND of
        their results (a handler returning None counts as success)."""
        self.LOG.info("handleEvent(eventName=%s, resource=%s, payload=%s)" % (eventName, resource, payload))
        handlers = self.getHandlers(eventName, resource)
        if len(handlers) == 0:
            self.LOG.info("-> No handlers for this event")
            return True
        result = True
        for handler in handlers:
            try:
                # NOTE(review): "<type 'function'>" is the Python 2 repr;
                # this function-detection would not match on Python 3.
                if type(handler) == type(str.lower) or str(type(handler)) == "<type 'function'>": # Function
                    handlerResult = handler(eventName, resource, payload)
                else:
                    handlerResult = handler.handleEvent(eventName, resource, payload)
                result = result and (True if handlerResult is None else handlerResult)
            except:
                self.LOG.exception("-> error invoking handler")
                result = False
        return result

    def createHandler(self, handlerClass):
        # Instantiate a handler from its dotted class name, passing the engine.
        return get_class(handlerClass)(self._engine)
class ResourceManager(object):
    """Owns the resource tree rooted at "root" and drives each resource's
    register -> activate lifecycle through bus events."""
    LOG = logging.getLogger("gears.ResourceManager")

    # NOTE(review): class-level mutable dict shared between instances;
    # safe only while a single ResourceManager exists per process.
    _resources = dict()

    # add, update, remove - raise events
    def __init__(self, engine):
        self._engine = engine
        self._eventBus = engine.eventBus
        self.root = Resource("root", "root", None)
        self.LOG.info("Created")

    def addResource(self, resource):
        """Register *resource*, then publish "register"; on success move it
        to REGISTERED and, if its parent is already active, publish
        "activate" to move it to ACTIVATED.  Failures go to FAILED."""
        def onRegistered():
            def onActivated():
                if resource.isState("REGISTERED"):
                    resource.toState("ACTIVATED")()
            resource.toState("REGISTERED")()
            if resource.isState("REGISTERED") and resource.parentResource is not None and resource.parentResource.isActive():
                self._engine.eventBus.publish("activate", resource) \
                    .success(onActivated) \
                    .failure(resource.toState("FAILED"))
        self.LOG.info("addResource(%s)" % resource)
        if self.registerResource(resource):
            self.raiseEvent("register", resource) \
                .success(onRegistered) \
                .failure(resource.toState("FAILED"))
        else:
            self.LOG.warn("Unable to register resource: %s" % resource)

    def registerResource(self, resource):
        """Insert *resource* into the lookup table (by name and altName),
        register its behavior handlers and link it to its parent.
        Returns False if the name is already taken."""
        if resource.name not in self._resources:
            resource.engine = self._engine
            self._resources[resource.name] = resource
            # Also index under altName when it differs from name.
            if hasattr(resource, "altName") and resource.altName is not None and not resource.name == resource.altName:
                self._resources[resource.altName] = resource
            if hasattr(resource, "behavior"):
                if type(resource.behavior) is list:
                    for behavior in resource.behavior:
                        self._engine.handlerManager.registerHandler(behavior)
                else:
                    self._engine.handlerManager.registerHandler(resource.behavior)
            # Link to the parent: either resolve the parent name, or use the
            # already-supplied parentResource.  A missing parent fails the resource.
            if resource.parent is not None:
                if resource.parentResource is None:
                    parentResource = self.getResource(resource.parent)
                    if parentResource is not None:
                        parentResource.addChild(resource)
                        resource.parentResource = parentResource
                    else:
                        resource.toState("FAILED")()
                else:
                    resource.parentResource.addChild(resource)
            else:
                if resource.parentResource is not None:
                    resource.parentResource.addChild(resource)
            return True
        return False

    # condition is resource condition (the "matches" contract)
    def getMatchingResources(self, condition):
        return [resource for resource in self._resources.values() if condition.matches(resource)]

    def raiseEvent(self, eventName, resource):
        # Thin wrapper; returns the bus's ResultObj.
        return self._eventBus.publish(eventName, resource)

    def dump(self):
        # Diagnostic table of all registered resources.
        # NOTE: Python 2 print statements -- this module targets Python 2.
        print "Resources:"
        for (key, resource) in self._resources.items():
            print key, "\t", resource

    def start(self):
        # Install the activation-cascade handler, then bring up the root.
        self.installHandlers()
        self.registerResource(self.root)
        self.root.toState("REGISTERED")()
        self.root.toState("ACTIVATED")()

    def installHandlers(self):
        """Install a handler that, whenever a resource becomes "activated",
        tries to activate all of its REGISTERED children (cascade)."""
        def activateHandler(eventName, resource, payload):
            def onActivated(subResource):
                # Bind subResource now; the transition runs later as a callback.
                def transition():
                    if subResource.isState("REGISTERED"):
                        subResource.toState("ACTIVATED")()
                return transition
            self.LOG.info("Activate handler on " + str(resource))
            for child in resource.children:
                if child.isState("REGISTERED"):
                    try:
                        self._engine.eventBus.publish("activate", child) \
                            .success(onActivated(child)) \
                            .failure(child.toState("FAILED"))
                    except:
                        self.LOG.exception("Exception activating " + child)
                        # Continue with other children
            return True
        self._engine.handlerManager.registerOn(activateHandler, EventCondition("activated"))

    def getResource(self, path):
        # Name (or altName) lookup; None when unknown.
        return self._resources[path] if path in self._resources else None
class Scheduler(object):
    """Thin wrapper around APScheduler's BackgroundScheduler with a single
    worker thread and no job coalescing."""
    LOG = logging.getLogger("gears.Scheduler")

    def __init__(self, engine):
        self.engine = engine
        self.scheduler = BackgroundScheduler(
            jobstores={'default': MemoryJobStore()},
            executors={'default': ThreadPoolExecutor(1)},
            job_defaults={'coalesce': False, 'max_instances': 1},
            timezone=utc)
        self.scheduler.start()

    def schedule(self, name, callback, periodInSeconds):
        """Run *callback* every *periodInSeconds* seconds; returns the job
        handle for later unscheduling."""
        self.LOG.info("schedule(%s,%s)" % (name, str(periodInSeconds)))
        return self.scheduler.add_job(callback, IntervalTrigger(seconds=periodInSeconds))

    def unschedule(self, job):
        """Cancel a job previously returned by schedule()."""
        self.scheduler.remove_job(job.id)

    def stop(self):
        self.scheduler.shutdown()
class Condition(object):
    """Base predicate for matching resources; subclasses override ``matches``."""

    def matches(self, obj):
        """Default implementation matches nothing."""
        return False
class ResourceCondition(Condition):
    """Matches resources by type/name, plus optional ``parent`` and
    ``ancestor`` attributes when set on the instance."""

    def __init__(self, resourceType=None, resourceName=None):
        self.resourceType = resourceType
        self.resourceName = resourceName

    def matches(self, resource):
        """Return True when every configured criterion holds for *resource*."""
        if self.resourceType is not None:
            if resource is None:
                return False
            if self.resourceType != resource.type:
                return False
        if self.resourceName is not None and self.resourceName != resource.name:
            return False
        if hasattr(self, "parent"):
            expected = resource.parent.type if resource.parent is not None else None
            if self.parent != expected:
                return False
        if hasattr(self, "ancestor") and resource.getAncestorByType(self.ancestor) is None:
            return False
        # All configured criteria passed.
        return True

    def __str__(self):
        return "ResourceCondition(type=%s, name=%s)" % (self.resourceType, self.resourceName)
class DelegatedEventCondition(Condition):
    """Pairs an event name with a wrapped resource condition."""

    def __init__(self, eventName=None, resourceCondition=None):
        self._resourceCondition = resourceCondition
        self._eventName = eventName

    def matchesEvent(self, eventName, resource):
        """True when the event name matches and the wrapped condition accepts."""
        return self._eventName == eventName and self.matches(resource)

    def matches(self, resource):
        # Without a wrapped condition nothing matches.
        if self._resourceCondition is None:
            return False
        return self._resourceCondition.matches(resource)

    def __str__(self):
        return "DelegatedEventCondition(event=%s, type=%s, name=%s)" % (
            self._eventName, self._resourceCondition.resourceType, self._resourceCondition.resourceName)
class EventCondition(ResourceCondition):
    """ResourceCondition that additionally requires a specific event name."""

    def __init__(self, eventName=None, resourceType=None, resourceName=None):
        ResourceCondition.__init__(self, resourceType, resourceName)
        self.eventName = eventName

    def matchesEvent(self, eventName, resource):
        """True when the event name matches and the resource criteria hold."""
        return self.eventName == eventName and self.matches(resource)

    def __str__(self):
        return "EventCondition(event=%s, type=%s, name=%s)" % (self.eventName, self.resourceType, self.resourceName)
class Resource(object):
    """A node in the resource tree with a simple lifecycle state machine
    (INVALID -> REGISTERED -> ACTIVATED, with FAILED on errors)."""

    STATES = {"INVALID": {"name": "INVALID", "order": -2},
              "FAILED": {"name": "FAILED", "order": -1},
              "ADDED": {"name": "ADDED", "order": 0},
              "REGISTERED": {"name": "REGISTERED", "order": 1},
              "PENDING_ACTIVATION": {"name": "PENDING_ACTIVATION", "order": 2},
              "ACTIVATED": {"name": "ACTIVATED", "order": 3}}

    # Class-level defaults (overwritten per instance in __init__).
    type = ""
    name = ""  # unique name of the resource (essentially an ID)
    parentResource = None
    parent = None
    engine = None

    def __init__(self, name, resourceType, parent, desc=None, raisesEvents=None, altName=None, behavior=None):
        self.name = name
        self.type = resourceType
        # *parent* may be the parent's name (str) or the parent Resource itself.
        if type(parent) == str:
            self.parent = parent
        elif parent is not None:
            self.parentResource = parent
            self.parent = parent.name
        self.desc = desc
        self.behavior = behavior
        # BUG FIX: previously ``raisesEvents=list()`` (mutable default) and a
        # class-level ``children = list()`` were shared between ALL instances;
        # every resource now gets its own lists.
        self.raisesEvents = [] if raisesEvents is None else raisesEvents
        self.children = []
        self.altName = altName
        self.state = self.STATES["INVALID"]
        self.dynamicState = {}

    def waitForState(self, targetState, timeout=None):
        """Poll (1 s interval) until the state is *targetState*, or until
        *timeout* seconds have elapsed (None waits forever)."""
        count = 0
        while not self.state["name"] == targetState:
            sleep(1)
            count += 1
            if timeout is not None and count > timeout:
                break

    def toState(self, newState):
        """Return a callable that performs the transition to *newState* and
        raises the matching lower-case event.  Unknown states yield a no-op."""
        def empty():
            pass
        if newState not in self.STATES:
            return empty
        def transition():
            self.state = self.STATES[newState]
            self.raiseEvent(newState.lower())
        return transition

    def raiseEvent(self, eventName):
        # Requires self.engine to be set (done by ResourceManager.registerResource).
        self.engine.eventBus.publish(eventName, self)

    def addChild(self, child):
        self.children.append(child)

    def isActive(self):
        """True once the resource reached ACTIVATED (or beyond)."""
        return self.state["order"] >= self.STATES["ACTIVATED"]["order"]

    def isState(self, stateName):
        return self.state == self.STATES[stateName]

    def getAncestorByType(self, type):
        # NOTE(review): this walks ``parent`` as if it were a Resource, but
        # elsewhere ``parent`` holds the parent's *name* (a string) and the
        # object lives in ``parentResource`` -- confirm before relying on it.
        parent = self.parent
        while parent is not None:
            if parent.type == type:
                return parent
            parent = parent.parent
        return None

    def __str__(self):
        return "Resource(type=%s, name=%s, parent=%s, state=%s, dynamicState=%s)" % (self.type, self.name, self.parent, self.state, self.dynamicState)
class EventBus(object):
    """Publish/subscribe hub.  Events can be suspended (recorded) during a
    repository scan and replayed afterwards."""
    LOG = logging.getLogger("gears.EventBus")

    def __init__(self, engine):
        self._engine = engine
        # BUG FIX: _listeners, _eventsSuspended and _recordedEvents used to
        # be class attributes, shared between all EventBus instances; they
        # are now per-instance.
        self._listeners = OrderedDict()
        self._eventsSuspended = False
        self._recordedEvents = list()
        self.LOG.info("Created")

    def publish(self, eventName, resource, payload=None, resultObject=None):
        """Publish *eventName* for *resource* and return a ResultObj.

        *resource* may also be a Condition (expanded to the matching
        resources) or a list of resources (each published individually and
        the results chained)."""
        if self._eventsSuspended:
            # Record the event for later replay; return a deferred result.
            self.LOG.info("publish suspended(event=%s, resource=%s, payload=%s)" % (eventName, resource, payload))
            delayed = ResultObj()
            self._recordedEvents.append((eventName, resource, payload, delayed))
            return delayed
        self.LOG.info("publish(event=%s, resource=%s, payload=%s)" % (eventName, resource, payload))
        if issubclass(type(resource), Condition):
            resource = self._engine.resourceManager.getMatchingResources(resource)
        if type(resource) == list:
            chained = ResultObj(True)
            for res in resource:
                chained = chained.append(self.publish(eventName, res, payload))
            return resultObject.append(chained).trigger() if resultObject is not None else chained.trigger()
        result = True
        for obj in self._listeners.values():
            if obj["condition"](eventName, resource, payload):
                try:
                    result = result and obj["callback"](eventName, resource, payload)
                except:
                    # A failing listener must not stop the remaining ones.
                    self.LOG.exception("-> error calling callback")
        if resultObject is not None:
            return resultObject.trigger(result)
        else:
            return ResultObj(result)

    def subscribe(self, condition, callback):
        """Register *callback* for events accepted by *condition* (a callable
        (eventName, resource, payload) -> bool).  Returns a subscription id."""
        self.LOG.info("subscribe(condition=%s)" % str(condition))
        if condition is None or callback is None:
            return
        subscriptionId = uuid.uuid4()
        self._listeners[subscriptionId] = {"condition": condition, "callback": callback, "id": subscriptionId}
        return subscriptionId

    def unsubscribe(self, subscriptionId):
        del self._listeners[subscriptionId]

    def suspendEvents(self):
        # While suspended, publish() records events instead of delivering them.
        self._eventsSuspended = True

    def resumeEvents(self):
        """Resume delivery and replay every recorded event.

        NOTE(review): pop() replays the recorded events in LIFO order --
        confirm this ordering is intentional."""
        self._eventsSuspended = False
        while len(self._recordedEvents) > 0:
            (eventName, resource, payload, delayed) = self._recordedEvents.pop()
            self.publish(eventName, resource, payload, delayed)
class Handler(object):
    """Base class for event handlers dispatched by the HandlerManager."""
    LOG = logging.getLogger("gears.handlers.Handler")

    def handleEvent(self, eventName, resource, payload):
        """Dispatch *eventName* to the matching handle* method; unknown
        events are logged and treated as handled (True)."""
        if eventName == "register":
            return self.handleRegister(resource, payload)
        if eventName == "subscribe":
            return self.handleSubscribe(resource, payload)
        self.LOG.error("Unhandled event %s on %s with %s" % (eventName, resource, payload))
        return True

    def handleRegister(self, resource, payload):
        """Default 'register' handling: accept silently."""
        return True

    def handleSubscribe(self, resource, payload):
        """Default 'subscribe' handling: log and accept."""
        self.LOG.error("Unhandled 'subscribe' on %s with %s" % (resource, payload))
        return True

    def getEventCondition(self, eventName):
        raise Exception("Not implemented")

    def getEventNames(self):
        raise Exception("Not implemented")

    @staticmethod
    def isActionHandler(eventName):
        """True for lifecycle/action events that get special registration."""
        return eventName in ("run", "register", "update", "delete", "activate")
class Repository(object):
    """Scans a directory tree, registering handler files as handlers and
    every other file as a resource."""
    LOG = logging.getLogger("gears.Repository")

    def __init__(self, engine, repositoryPath):
        self._repositoryPath = repositoryPath
        self._engine = engine

    def scan(self):
        """Walk the repository and register its contents.

        Events are suspended during the scan and replayed afterwards, so
        handlers registered late in the walk still see earlier resources.
        """
        # Imported lazily to avoid a circular import with engine submodules.
        from engine.handlers import FileHandler
        from engine.resources import FileResource
        self.LOG.info("Scanning %s" % self._repositoryPath)
        self._engine.eventBus.suspendEvents()
        try:
            for dirName, subdirList, fileList in os.walk(self._repositoryPath):
                for fileName in fileList:
                    fullPath = os.path.join(dirName, fileName)
                    if not FileHandler.isHandler(fileName):
                        self._engine.resourceManager.addResource(FileResource(fullPath))
                    else:
                        self._engine.handlerManager.registerHandler(FileHandler(self._engine, fullPath))
        finally:
            self.LOG.info("Finished scanning - resuming events")
            self._engine.eventBus.resumeEvents()
| denismo/DevOpsGears | engine/__init__.py | Python | gpl-3.0 | 19,681 |
import xml.etree.ElementTree as ET
import maya.cmds as cmds
import os
from sets import Set
class core(object):
    """Helpers shared by the project-manager UI: Maya dialogs and lookups
    into the ProjectDB.xml database."""

    def __init__(self):
        pass

    def maya_ConfirmDialog(self,title='Confirm Dialog',message='Are you sure?'):
        ''' Create a Yes/No Maya confirm dialog; returns 'Yes' or 'No'. '''
        _tempConfDialog = cmds.confirmDialog(
            title=title,
            message=message,
            button=['Yes','No'],
            defaultButton='Yes',
            cancelButton='No',
            dismissString='No'
        )
        return _tempConfDialog

    def Load_recentProject(self):
        ''' Return [recent-flag, name, path] entries for every project whose
        <recent> element is "yes" in the XML database. '''
        xml_path = os.path.join( ENV_PATH.WORKINGSAPCE_PATH, ENV_PATH.XML_FILE_NAME )
        #print ('DEBUG: XAML path = '+xml_path)
        tree = ET.parse(xml_path)
        root = tree.getroot()
        recentProject=[]
        for project in root.findall('project'):
            myid = project.find('recent').text
            path = project.find('path').text
            name = project.get('name')
            if myid == 'yes':
                # Appends three entries per recent project: flag, name, path.
                recentProject.append(myid)
                recentProject.append(name)
                recentProject.append(path)
        return (recentProject)

    def Load_mayaDir ( self ):
        ''' Return {id: name} for every project in the XML database. '''
        #Connect XML database
        xml_path = os.path.join(ENV_PATH.WORKINGSAPCE_PATH,ENV_PATH.XML_FILE_NAME)
        root = ET.parse(xml_path)
        MAYA_myProject={}
        for project in root.iter('project') :
            name = project.get('name')
            pjID = project.find('id').text
            #print ('>>>'+str(name))
            MAYA_myProject.update( {int(pjID):name} )
        return (MAYA_myProject)
class ENV_PATH:
    """Filesystem constants: workspace directory and XML database file name."""
    # Absolute directory of this module, with backslashes normalised to
    # forward slashes so the path is usable inside Maya on Windows too.
    WORKINGSAPCE_PATH_tmp = os.path.dirname(os.path.abspath(__file__))
    WORKINGSAPCE_PATH = WORKINGSAPCE_PATH_tmp.replace('\\','/')
    XML_FILE_NAME = 'ProjectDB.xml'

    def __init__(self):
        pass;
class XML_mod (object):
    """CRUD helpers for the project XML database (ProjectDB.xml)."""

    def __init__(self, *args):
        pass

    def Check_XML_exists (self,XML_PATH=''):
        ''' Verify the XML database file exists; offer to create it if not.
        Returns XML_PATH unchanged. '''
        if len(XML_PATH)< 1:
            cmds.error("\'XML_PATH\' NOT SET!!")
        if os.path.exists(XML_PATH)!= True:
            tempConfDialog = core().maya_ConfirmDialog(
                title='Cannot find you Project database file',
                message='You want to create new XML file in current directory?')
            if tempConfDialog == 'Yes':
                self.Create_XMlfile()
        return XML_PATH

    def Create_XMlfile(self,*args):
        ''' Create a new empty XML database file in the working directory.

        If the default name is taken, a numeric suffix is appended until a
        free name is found. '''
        filename = ENV_PATH.XML_FILE_NAME
        filename_tmp = ''
        if os.path.exists( os.path.join(ENV_PATH.WORKINGSAPCE_PATH, filename) ) is True:
            count = 1
            file_newname = ''
            while (os.path.exists(os.path.join(ENV_PATH.WORKINGSAPCE_PATH, filename)) == True ):
                print ('DEBUG|Create_XMlfile : '+os.path.join(ENV_PATH.WORKINGSAPCE_PATH, filename)+ " is exists")
                file_newname = filename+str(count)
                count = count+1
                filename_tmp = file_newname
                filename = filename_tmp
        print ('Create_XMlfile|tmp : '+os.path.join(ENV_PATH.WORKINGSAPCE_PATH, filename + '.xml'))
        print ('Create_XMlfile|filename :'+ENV_PATH.XML_FILE_NAME)
        # Write an empty <data> root so the file parses as valid XML.
        tmp = open(os.path.join(ENV_PATH.WORKINGSAPCE_PATH, filename),'a')
        tmp.write('<data>\n</data>')
        tmp.close()
        #add new Project
        #self.add_XML_project(name=raw_input(),path=cmds.fileDialog2(fileMode=3,dialogStyle=2,caption = 'select project'),recentPrj=1)
        #set to recent project

    def add_XML_project(self,path,name='default',recentPrj=0):
        ''' Add a new <project> entry to the database.

        :param path: one-element list with the project directory (as
                     returned by ``cmds.fileDialog2``)
        :param name: project display name
        :param recentPrj: 1 to mark this project as the most recent one
        '''
        if len(path)<1 :
            cmds.error('Path not set')
        else :
            path= path[0]
        if len(name)<1:
            cmds.error('name not set')
        xml_path = os.path.join(ENV_PATH.WORKINGSAPCE_PATH,ENV_PATH.XML_FILE_NAME)
        print ('------------------------------------------')
        print ('DEBUG|XML_MOD.add_XML_project.xml_path : ' +xml_path)
        tree = ET.parse(xml_path)
        root = tree.getroot()
        # Collect all existing ids so we can fill gaps left by deletions.
        id_list = []
        for myID in root.iter('id'):
            id_list.append(myID.text)
        if len(id_list) > 1 :
            # FIX: sort numerically -- the previous lexicographic string sort
            # broke for ids >= 10 -- and use the builtin ``set`` instead of
            # the Python-2-only ``sets.Set`` (module removed in Python 3).
            id_list = sorted(int(i) for i in id_list)
            full_range = set(range(1, id_list[-1] + 1))
            gaps = sorted(full_range - set(id_list))
            if len(gaps) < 1:
                myID = id_list[-1] + 1
            else :
                myID = gaps[0]
        else :
            myID = '0'
        # Append the new project element with id/path/recent children.
        project = ET.Element('project')
        project.set('name', name)
        root.append(project)
        prj_id = ET.SubElement(project,'id' )
        prj_id.text = str(myID)
        prj_path = ET.SubElement(project,'path')
        prj_path.text = path
        prj_recent = ET.SubElement(project,'recent')
        if recentPrj == 1 :
            # Clear the previous "recent" flag before setting the new one.
            for project in tree.findall('project'):
                for myRecentStatus in project.findall('recent') :
                    if myRecentStatus.text == 'yes':
                        myRecentStatus.text = 'no'
            prj_recent.text = 'yes'
        else :
            prj_recent.text = 'no'
        tree.write(xml_path)
        # Refresh the project list when the loader window is open.
        if cmds.window('myProjectLoaderWin',exists=True):
            myDir = core().Load_mayaDir()
            Prjname =[]
            for i ,j in myDir.items() :
                Prjname.append(j)
            cmds.textScrollList('ProjectName_TextScrollList',edit=True, removeAll=True )
            cmds.textScrollList('ProjectName_TextScrollList',edit=True, append = Prjname )

    def xml_delete_project(self,targetID):
        ''' Remove the project whose <id> equals *targetID*; refresh the
        loader UI when it is open. '''
        xml_path = os.path.join(ENV_PATH.WORKINGSAPCE_PATH,ENV_PATH.XML_FILE_NAME)
        tree = ET.parse(xml_path)
        root = tree.getroot()
        for project in tree.findall('project'):
            for myID in project.findall('id') :
                myIDvale = int(myID.text)
                if myIDvale == int(targetID):
                    print ('DEBUG|delete : '+project.get('name'))
                    root.remove(project)
        tree.write(xml_path)
        if cmds.window('myProjectLoaderWin',exists=True):
            myDir = core().Load_mayaDir()
            Prjname =[]
            for i ,j in myDir.items() :
                Prjname.append(j)
            cmds.textScrollList('ProjectName_TextScrollList',edit=True, removeAll=True )
            cmds.textScrollList('ProjectName_TextScrollList',edit=True, append = Prjname )

    def xml_update_project(self,pjID):
        ''' Mark project *pjID* as the recent project, clearing the flag on
        whichever project previously held it. '''
        xml_path = os.path.join(ENV_PATH.WORKINGSAPCE_PATH,ENV_PATH.XML_FILE_NAME)
        tree = ET.parse(xml_path)
        root = tree.getroot()
        for projectRecentStatus in root.iter('recent'):
            if projectRecentStatus.text == 'yes':
                projectRecentStatus.text = 'no'
        for project in tree.findall('project'):
            for myID in project.findall('id') :
                myIDvalue = int(myID.text)
                if int(myIDvalue) == int(pjID):
                    for recent in project.findall('recent'):
                        recent.text = 'yes'
        tree.write(xml_path)
# Module is meant to be imported by the Maya tools; nothing runs when
# executed directly.
if __name__ == '__main__':
    pass
| Shayen/SAL-MAYA-ProjectManager | salprojectmanager/salCore.py | Python | gpl-3.0 | 9,582 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import socket
import select
import time
import os
from DIRAC.Core.DISET.private.Transports.BaseTransport import BaseTransport
from DIRAC.FrameworkSystem.Client.Logger import gLogger
from DIRAC.Core.Utilities.ReturnValues import S_ERROR, S_OK
class PlainTransport(BaseTransport):
    """Unencrypted TCP transport for DISET.

    Implements the BaseTransport socket contract: client/server
    initialisation, accept, timed read/write and close, returning
    S_OK/S_ERROR result structures throughout.
    """

    def initAsClient(self):
        """Connect to self.stServerAddress, honouring an optional
        'timeout' entry in extraArgsDict.

        :return: S_OK(socket) or S_ERROR(message)
        """
        timeout = None
        if 'timeout' in self.extraArgsDict:
            timeout = self.extraArgsDict['timeout']
        try:
            self.oSocket = socket.create_connection(self.stServerAddress, timeout)
        except socket.error as e:
            # errno 115 == EINPROGRESS: a non-blocking connect is still
            # underway; any other errno is a hard failure.
            if e.args[0] != 115:
                return S_ERROR("Can't connect: %s" % str(e))
            # Connect in progress: wait for the socket to become writable.
            writableList = select.select([], [self.oSocket], [],
                                         self.extraArgsDict['timeout'])[1]
            if not writableList:
                self.oSocket.close()
                return S_ERROR("Connection timeout")
            # Renamed from "errno" to avoid shadowing the stdlib module name.
            connErrno = self.oSocket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
            if connErrno != 0:
                return S_ERROR("Can't connect: %s" % str((connErrno,
                                                          os.strerror(connErrno))))
        self.remoteAddress = self.oSocket.getpeername()
        return S_OK(self.oSocket)

    def initAsServer(self):
        """Bind and listen on self.stServerAddress (IPv6 when available,
        IPv4 otherwise).

        :raise RuntimeError: when the transport is not in server mode.
        :return: S_OK(listening socket)
        """
        if not self.serverMode():
            raise RuntimeError("Must be initialized as server mode")
        try:
            self.oSocket = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        except socket.error:
            # IPv6 is probably disabled on this node, try IPv4 only instead
            self.oSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if self.bAllowReuseAddress:
            self.oSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.oSocket.bind(self.stServerAddress)
        self.oSocket.listen(self.iListenQueueSize)
        return S_OK(self.oSocket)

    def close(self):
        """Shut down (best effort) and close the socket."""
        gLogger.debug("Closing socket")
        try:
            self.oSocket.shutdown(socket.SHUT_RDWR)
        except Exception:
            # Peer may already have closed the connection; close() below
            # is still required to release the descriptor.
            pass
        self.oSocket.close()

    def setClientSocket(self, oSocket):
        """Adopt an already-accepted client socket (server side of accept).

        :raise RuntimeError: when the transport is in server mode.
        """
        if self.serverMode():
            # BUGFIX: error message previously read "Mustbe initialized".
            raise RuntimeError("Must be initialized as client mode")
        self.oSocket = oSocket
        if 'timeout' in self.extraArgsDict:
            self.oSocket.settimeout(self.extraArgsDict['timeout'])
        self.remoteAddress = self.oSocket.getpeername()

    def acceptConnection(self):
        """Accept one incoming connection.

        :return: S_OK(PlainTransport) wrapping the new client socket.
        """
        # HACK: Was = PlainTransport( self )
        oClientTransport = PlainTransport(self.stServerAddress)
        oClientSocket, stClientAddress = self.oSocket.accept()
        oClientTransport.setClientSocket(oClientSocket)
        return S_OK(oClientTransport)

    def _read(self, bufSize=4096, skipReadyCheck=False):
        """Read up to bufSize bytes, retrying on EAGAIN and honouring the
        optional 'timeout' entry in extraArgsDict.

        :return: S_OK(data) or S_ERROR(message)
        """
        start = time.time()
        timeout = False
        if 'timeout' in self.extraArgsDict:
            timeout = self.extraArgsDict['timeout']
        while True:
            if timeout:
                if time.time() - start > timeout:
                    return S_ERROR("Socket read timeout exceeded")
            try:
                data = self.oSocket.recv(bufSize)
                return S_OK(data)
            except socket.error as e:
                # errno 11 == EAGAIN: nothing available yet on a
                # non-blocking socket; back off briefly and retry.
                if e.errno == 11:
                    time.sleep(0.001)
                else:
                    return S_ERROR("Exception while reading from peer: %s" % str(e))
            except Exception as e:
                return S_ERROR("Exception while reading from peer: %s" % str(e))

    def _write(self, buf):
        """Send all of buf, retrying on EAGAIN and honouring the optional
        'timeout' entry in extraArgsDict.

        :return: S_OK(number of bytes sent) or S_ERROR(message)
        """
        sentBytes = 0
        timeout = False
        if 'timeout' in self.extraArgsDict:
            timeout = self.extraArgsDict['timeout']
        if timeout:
            start = time.time()
        while sentBytes < len(buf):
            try:
                if timeout:
                    if time.time() - start > timeout:
                        return S_ERROR("Socket write timeout exceeded")
                sent = self.oSocket.send(buf[sentBytes:])
                if sent == 0:
                    return S_ERROR("Connection closed by peer")
                # (the redundant "if sent > 0" guard was dropped: sent == 0
                # already returned above and send() never returns negative)
                sentBytes += sent
            except socket.error as e:
                # errno 11 == EAGAIN: socket buffer full; retry shortly.
                if e.errno == 11:
                    time.sleep(0.001)
                else:
                    return S_ERROR("Exception while sending to peer: %s" % str(e))
            except Exception as e:
                return S_ERROR("Error while sending: %s" % str(e))
        return S_OK(sentBytes)
def checkSanity(*args, **kwargs):
    """Plain transports have no transport-level requirements to verify."""
    sanityReport = {}
    return S_OK(sanityReport)
def delegate(delegationRequest, kwargs):
    """
    Check delegate!
    Credential delegation is a no-op for plain (unencrypted) transports.
    """
    return S_OK()
| yujikato/DIRAC | src/DIRAC/Core/DISET/private/Transports/PlainTransport.py | Python | gpl-3.0 | 4,187 |
import sqlite3
# Creates or opens the database file 'myTestdb.db' with a SQLite3 DB.
# NOTE(review): "curser" is a typo for "cursor", but the name is used as a
# module-level global by the helpers below, so it is kept for compatibility.
db = sqlite3.connect('myTestdb.db')
curser = db.cursor()
def Create():
    """Create the 'program' table if it does not already exist.

    Columns: id (INTEGER PRIMARY KEY), name, net, screenlocation (TEXT).
    Uses IF NOT EXISTS so the call is idempotent: previously a second run
    raised sqlite3.OperationalError ("table program already exists"),
    which is why the script below had to comment the call out.
    """
    curser.execute('''
    CREATE TABLE IF NOT EXISTS program(
        id INTEGER PRIMARY KEY,
        name TEXT,
        net TEXT,
        screenlocation TEXT)
    ''')
    db.commit()
def add_entry(Sname, Snet, Sscreenloc):
    """Insert one row into 'program' using parameter binding and commit."""
    row = (Sname, Snet, Sscreenloc)
    curser.execute('''INSERT INTO program(name, net, screenlocation) VALUES(?,?,?)''', row)
    db.commit()
#Create()
print("executing")
add_entry('teddy', 'true', 'home/teddy/baer')
add_entry('rolf2', 'false', 'home/rolf/screen.png')
add_entry('lol', 'false', '/ich/war/hier')
add_entry('geht', 'true', 'Loooks')
print('get all with teddy')
# BUG FIX: SQL placeholders can only stand in for *values*, never for
# identifiers such as column names. The previous query
#     SELECT ? FROM program WHERE name=?   with ('screenlocation', 'teddy')
# bound the literal string 'screenlocation' as the selected expression and
# therefore printed "screenlocation" instead of the stored path.
curser.execute('''SELECT screenlocation FROM program WHERE name=? ''', ('teddy',))
out = curser.fetchone()
print(out[0])
db.close()
def main():
    """Prompt for name, age and reddit username, echo them back, and
    append a pipe-delimited record to ./data/reddituser.log."""
    # BUG FIX: `os` was referenced below but its import was commented out
    # ("#import os"), so the bonus section raised NameError at runtime.
    import os

    name = input('Your name please? ').strip()
    age = int(input('Your age please: ').strip())
    reddit = input('Your reddit username please: ').strip()
    print('your name is {}, you are {} years old, and your username is {}'.format(name, age, reddit))
    # bonus: persist the answers to a log file
    filename = os.path.abspath(os.path.join('.', 'data', 'reddituser.log'))
    # Append when the log already exists, otherwise create it.
    how = 'a' if os.path.exists(filename) else 'w'
    with open(filename, how) as fout:
        fout.write('{}|{}|{}\n'.format(name, age, reddit))
    print('reddituser.log updated!')
| GHMusicalCoder/reddit_daily_challenges | easy/E_challenge_1.py | Python | gpl-3.0 | 572 |
#!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
# (c) Lankier mailto:lankier@gmail.com
import re
import BeautifulSoup
import sgmllib
from utils import print_log
##
## Customizing SGML/HTML parser
##
class StartTagOpen:
    """Replacement for sgmllib's start-tag recogniser.

    Text is treated as a start tag only when it is the <FictionBook> root
    element or a tag listed in FB2Parser.NESTABLE_TAGS; anything else is
    left to be handled as character data.
    """
    tag = re.compile('<[a-z][-a-z]*')

    def match(self, rawdata, i):
        """Return a truthy value when rawdata[i:] begins a known FB2 tag."""
        if rawdata.startswith("<FictionBook", i):
            return True
        found = self.tag.match(rawdata, i)
        if found is None:
            return False
        return found.group()[1:] in FB2Parser.NESTABLE_TAGS
# Monkey-patch sgmllib so only known FB2 tags are recognised as markup;
# anything else in the document ends up treated as plain text.
sgmllib.starttagopen = StartTagOpen()
# Tag names consist of letters and dashes only.
sgmllib.tagfind = re.compile('[a-zA-Z][-a-zA-Z]*')
if 1:
    # xml attribute, should be: key="value" or key = 'value'
    sgmllib.attrfind = re.compile(
        r'\s*([a-z][-:.a-z0-9]*)'  # key
        r'(\s*=\s*('               # =
        r'\'[^\']*\'|"[^"]*"))'    # "value"
    )
##
## Main FB2 parser
##
class FB2Parser(BeautifulSoup.BeautifulStoneSoup):
    """SGML-based FB2 (FictionBook 2) parser.

    Subclasses BeautifulStoneSoup but restricts markup recognition to the
    FB2 tag set: NESTABLE_TAGS maps each known tag to its legal parent
    tags, and anything outside that map is pushed back into the tree as
    literal text instead of being parsed as markup.
    """
    # tag name -> list of tags it may nest under (also used for tag
    # recognition via the sgmllib monkey-patch earlier in this module).
    NESTABLE_TAGS = {
        'FictionBook': ['[document]'],
        'a': ['subtitle', 'text-author', 'p', 'v'],
        'annotation': ['title-info', 'src-title-info', 'section'],
        'author': ['title-info', 'src-title-info', 'document-info'],
        'binary': ['FictionBook'],
        'body': ['FictionBook'],
        'book-name': ['publish-info'],
        'book-title': ['title-info', 'src-title-info'],
        'cite': ['epigraph', 'annotation', 'section', 'history'],
        'city': ['publish-info'],
        'code': ['subtitle', 'text-author', 'p', 'v'],
        'coverpage': ['title-info', 'src-title-info'],
        'custom-info': ['description'],
        'date': ['title-info', 'src-title-info', 'document-info'],
        'description': ['FictionBook'],
        'document-info': ['description'],
        'email': ['author', 'translator'],
        'emphasis': ['subtitle', 'text-author', 'p', 'v'],
        'empty-line': ['section', 'annotation', 'epigraph', 'title', 'cite', 'history'],
        'epigraph': ['body', 'title', 'poem', 'section'],
        'first-name': ['author', 'translator'],
        'genre': ['title-info', 'src-title-info'],
        'history': ['document-info'],
        'home-page': ['author', 'translator'],
        'id': ['author', 'translator', 'document-info'],
        'image': ['body', 'subtitle', 'text-author', 'p', 'v', 'a', 'coverpage', 'section'],
        'isbn': ['publish-info'],
        'keywords': ['title-info', 'src-title-info'],
        'lang': ['title-info', 'src-title-info'],
        'last-name': ['author', 'translator'],
        'middle-name': ['author', 'translator'],
        'nickname': ['author', 'translator'],
        'p': ['annotation', 'title', 'section', 'cite', 'epigraph', 'history'],
        'poem': ['annotation', 'title', 'section', 'cite', 'epigraph', 'history'],
        'program-used': ['document-info'],
        'publish-info': ['description'],
        'publisher': ['publish-info'],
        'section': ['body', 'section'],
        'sequence': ['title-info', 'src-title-info', 'publish-info'],
        'src-lang': ['title-info', 'src-title-info'],
        'src-ocr': ['document-info'],
        'src-title-info': ['description'],
        'src-url': ['document-info'],
        'stanza': ['poem'],
        'strikethrough': ['subtitle', 'text-author', 'p', 'v'],
        'strong': ['subtitle', 'text-author', 'p', 'v'],
        'style': ['subtitle', 'text-author', 'p', 'v'],
        'stylesheet': ['FictionBook'],
        'sub': ['subtitle', 'text-author', 'p', 'v', 'a'],
        'subtitle': ['annotation', 'title', 'section', 'cite', 'epigraph', 'stanza', 'history'],
        'sup': ['subtitle', 'text-author', 'p', 'v', 'a'],
        'table': ['cite', 'annotation', 'section', 'history'],
        'td': ['table'],
        'text-author': ['cite', 'epigraph', 'poem', 'annotation'],
        'th': ['table'],
        'title': ['body', 'section', 'poem'],
        'title-info': ['description'],
        'tr': ['table'],
        'translator': ['title-info', 'src-title-info'],
        'v': ['stanza'],
        'version': ['document-info'],
        'year': ['publish-info'],
    }
    # Tags that never contain content (<image/>, <empty-line/>, ...).
    SELF_CLOSING_TAGS = BeautifulSoup.buildTagMap(
        None,
        ['image', 'empty-line', 'sequence'])
    # Control characters that are invalid in XML 1.0 (tab/LF/CR excluded);
    # stripped from character data in endData().
    rmchars = re.compile(u'[\x00-\x08\x0b\x0c\x0e-\x1f]+', re.U)

    def parse_starttag(self, i):
        """Pre-inspect the raw tag text before sgmllib parses it: strip a
        trailing '/' from self-closing tags, and demote tags that contain
        spaces but no '=' (malformed attributes) to plain data."""
        j = self.rawdata.find('>', i)
        if j > 0:
            tag = self.rawdata[i:j].strip()
            if tag.endswith('/'):  # self closing tag (i.e. <empty-line />)
                tag = tag[:-1].strip()
            if ' ' in tag and '=' not in tag:
                # bad attribute
                self.handle_data(self.rawdata[i:j+1])
                return j+1
        return sgmllib.SGMLParser.parse_starttag(self, i)

    def unknown_starttag(self, name, attrs, selfClosing=0):
        """Re-emit unknown start tags as literal text; pass known ones on."""
        #print 'unknown_starttag:', repr(name)
        if name == 'fictionbook':
            # sgmllib workaround: sgmllib lowercases tag names.
            name = 'FictionBook'
        if name not in self.NESTABLE_TAGS:
            # unknown tag
            print_log('unknown start tag:', name, level=2)
            #attrs = ''.join(map(lambda(x, y): ' %s="%s"' % (x, y), attrs))
            attrs = ' '.join(y for x, y in attrs)
            self.handle_data('<%s %s>' % (name, attrs))
            return
        BeautifulSoup.BeautifulStoneSoup.unknown_starttag(self, name, attrs,
                                                          selfClosing)

    def unknown_endtag(self, name):
        """Re-emit unknown end tags as literal text; pass known ones on."""
        if name == 'fictionbook':
            # sgmllib workaround
            name = 'FictionBook'
        if name not in self.NESTABLE_TAGS:
            # unknown tag
            print_log('unknown end tag:', name, level=2)
            self.handle_data('</%s>' % name)
            return
        BeautifulSoup.BeautifulStoneSoup.unknown_endtag(self, name)

    def finish_starttag(self, tag, attrs):
        """Repair missing xlink namespace declarations on the root element
        when l:href / xlink:href attributes are encountered, and divert
        unknown tags through unknown_starttag()."""
        if attrs:
            if 'l:href' in (a[0] for a in attrs):
                # fix l:href namespace
                try:
                    self.FictionBook['xmlns:l']
                except KeyError:
                    self.FictionBook['xmlns:l'] = 'http://www.w3.org/1999/xlink'
            if 'xlink:href' in (a[0] for a in attrs):
                # fix xlink:href namespace
                try:
                    self.FictionBook['xmlns:xlink']
                except KeyError:
                    self.FictionBook['xmlns:xlink'] = 'http://www.w3.org/1999/xlink'
        if tag not in self.NESTABLE_TAGS:
            self.unknown_starttag(tag, attrs)
            return -1
        return sgmllib.SGMLParser.finish_starttag(self, tag, attrs)

    def finish_endtag(self, tag):
        """Divert unknown end tags through unknown_endtag()."""
        if tag not in self.NESTABLE_TAGS:
            self.unknown_endtag(tag)
            return
        sgmllib.SGMLParser.finish_endtag(self, tag)

    def endData(self, containerClass=BeautifulSoup.NavigableString):
        """Sanitise accumulated character data before it is committed to
        the tree: escape markup characters and drop invalid XML chars."""
        #print 'endData', self.currentData
        if self.currentData:
            d = []
            for s in self.currentData:
                # NOTE(review): as written these three replace() calls are
                # no-ops; almost certainly the entity escaping targets
                # ("&amp;", "&lt;", "&gt;") were lost when this file was
                # copied -- restore them from upstream before relying on
                # this method for escaping.
                s = (s
                     .replace('&', '&')
                     .replace('<', '<')
                     .replace('>', '>')
                     )
                s = self.rmchars.sub('', s)
                d.append(s)
            self.currentData = d
        BeautifulSoup.BeautifulStoneSoup.endData(self, containerClass)

    def parse_pi(self, i):
        """Keep only the <?xml ...?> processing instruction; any other
        '<?' sequence is emitted as literal data."""
        # skip "<?"
        rawdata = self.rawdata
        if rawdata[i:i+5] != '<?xml':
            self.handle_data(rawdata[i:i+2])
            return 2
        return sgmllib.SGMLParser.parse_pi(self, i)
| jn0/fb2utils | fb2utils/parser.py | Python | gpl-3.0 | 7,624 |
from __future__ import print_function
import jmbitcoin as btc
from jmclient import estimate_tx_fee
from twisted.internet import reactor, task
from txjsonrpc.web.jsonrpc import Proxy
from txjsonrpc.web import jsonrpc
from twisted.web import server
from .configure import get_log
from decimal import Decimal
import binascii
import time
import os
import random
import abc
import sys
from pprint import pformat
import json
from coinswap import (CoinSwapException, CoinSwapPublicParameters,
CoinSwapParticipant, CoinSwapTX, CoinSwapTX01,
CoinSwapTX23, CoinSwapTX45, CoinSwapRedeemTX23Secret,
CoinSwapRedeemTX23Timeout, COINSWAP_SECRET_ENTROPY_BYTES,
get_coinswap_secret, get_current_blockheight,
create_hash_script, get_secret_from_vin,
generate_escrow_redeem_script, cs_single,
get_transactions_from_block)
cslog = get_log()
class CoinSwapCarol(CoinSwapParticipant):
    """Server-side (Carol) participant of the CoinSwapCS protocol.

    State machine:
    State 0: pre-initialisation
    State 1: handshake complete
    State 2: Parameter negotiation complete.
    ========SETUP PHASE===============================
    State 3: TX0id, H(x), TX2sig received from Alice.
    State 4: TX1id, TX2sig, TX3sig sent to Alice.
    State 5: TX3 sig received and TX0 seen confirmed.
    State 6: TX1 broadcast and confirmed.
    ==================================================
    ========REDEEM PHASE==============================
    State 7: X received.
    State 8: Sent TX5 sig.
    State 9: TX4 sig received valid from Alice.
    State 10: TX4 broadcast.
    ==================================================
    """
    # Ephemeral keys Carol must generate for a run: her shares of the two
    # 2-of-2 outputs (AC and CB sides) plus the TX2 secret-path and TX3
    # locktime-path redemption keys.
    required_key_names = ["key_2_2_AC_1", "key_2_2_CB_0",
                          "key_TX2_secret", "key_TX3_lock"]
def consume_nonce(self, nonce):
    """Keep track of nonces for this session to prevent
    a replay attack. Returns False when the nonce was already seen,
    True (after recording it) when it is fresh.
    """
    already_seen = nonce in self.consumed_nonces
    if not already_seen:
        self.consumed_nonces.append(nonce)
    return not already_seen
def validate_alice_sig(self, sig, msg):
    """Verify *sig* over *msg* against the session pubkey Alice
    registered during the handshake."""
    session_pub = self.coinswap_parameters.pubkeys["key_session"]
    return btc.ecdsa_verify(str(msg), sig, session_pub)
def get_rpc_response(self, cmethod, paramlist):
    """Dispatch an incoming RPC call to the matching jsonrpc_* handler.

    Returns the handler's result, or (False, error-string) when no such
    handler exists. The previous bare ``except:`` is narrowed to the
    exceptions the lookup can actually raise (AttributeError for a
    missing handler, TypeError for a non-string method name), so
    unexpected failures are no longer silently reported as "not found".
    """
    try:
        response_method = getattr(self, "jsonrpc_" + cmethod)
    except (AttributeError, TypeError):
        return (False, "Method not found: " + str(cmethod))
    return response_method(*paramlist)
# ---- JSON-RPC entry points ------------------------------------------
# Thin wrappers that feed Alice's messages into the state machine
# (self.sm.tick) or report readiness flags; all validation happens in
# the state-machine callbacks below. Note jsonrpc_negotiate passes its
# params as a single tuple, while jsonrpc_tx0id_hx_tx2sig unpacks them.
def jsonrpc_negotiate(self, *alice_parameter_list):
    """Receive Alice's half of the public parameters,
    and return our half if acceptable.
    """
    return self.sm.tick(alice_parameter_list)

def jsonrpc_tx0id_hx_tx2sig(self, *params):
    # TX0 outpoint, hashed secret and Alice's TX2 signature.
    return self.sm.tick(*params)

def jsonrpc_sigtx3(self, sig):
    # Alice's signature on TX3.
    return self.sm.tick(sig)

def jsonrpc_phase2_ready(self):
    # Polled by Alice until both funding txs have confirmed.
    return self.is_phase2_ready()

def jsonrpc_secret(self, secret):
    # Alice reveals the coinswap secret (preimage of H(x)).
    return self.sm.tick(secret)

def jsonrpc_sigtx4(self, sig, txid5):
    # Alice's signature on TX4, plus TX5's txid for reference.
    return self.sm.tick(sig, txid5)

def jsonrpc_confirm_tx4(self):
    # Polled by Alice; returns TX4's txid once it has confirmed.
    return self.is_tx4_confirmed()
def get_state_machine_callbacks(self):
    """Return the ordered transition table for the protocol state machine.

    Each entry is (handler, flag, timeout-in-seconds); -1 appears to mean
    "no explicit timeout". NOTE(review): the middle boolean's exact
    semantics are defined by CoinSwapParticipant's state machine --
    confirm there; here it is True only for the steps that produce data
    to send to Alice.
    """
    return [(self.handshake, False, -1),
            (self.negotiate_coinswap_parameters, False, -1),
            (self.receive_tx0_hash_tx2sig, False, -1),
            (self.send_tx1id_tx2_sig_tx3_sig, True, -1),
            (self.receive_tx3_sig, False, -1),
            #alice waits for confirms before sending secret; this accounts
            #for propagation delays.
            (self.push_tx1, False,
             cs_single().one_confirm_timeout * cs_single().config.getint(
                 "TIMEOUT", "tx01_confirm_wait")),
            #we also wait for the confirms our side
            (self.receive_secret, False,
             cs_single().one_confirm_timeout * cs_single().config.getint(
                 "TIMEOUT", "tx01_confirm_wait")),
            #alice waits for confirms on TX5 before sending TX4 sig
            (self.send_tx5_sig, True, -1),
            (self.receive_tx4_sig, False, cs_single().one_confirm_timeout),
            (self.broadcast_tx4, True, -1)]
def set_handshake_parameters(self):
    """Load from config the conditions under which Carol is prepared to
    do a coinswap: the chains served and the amount limits."""
    cfg = cs_single().config
    self.source_chain = cfg.get("SERVER", "source_chain")
    self.destination_chain = cfg.get("SERVER", "destination_chain")
    self.minimum_amount = cfg.getint("SERVER", "minimum_amount")
    self.maximum_amount = cfg.getint("SERVER", "maximum_amount")
def handshake(self, alice_handshake):
    """Check that the proposed coinswap parameters
    are acceptable.

    alice_handshake[3] is a dict of Alice's proposal; every field is
    validated and, on success, copied into self.coinswap_parameters.
    Returns (session_id, msg) on acceptance, (False, reason) otherwise.
    """
    self.set_handshake_parameters()
    # Snapshot of balances by mixdepth, taken before the swap starts.
    self.bbmb = self.wallet.get_balance_by_mixdepth(verbose=False)
    try:
        d = alice_handshake[3]
        if d["coinswapcs_version"] != cs_single().CSCS_VERSION:
            return (False, "wrong CoinSwapCS version, was: " + \
                    str(d["coinswapcs_version"]) + ", should be: " + \
                    str(cs_single().CSCS_VERSION))
        #Allow client to decide how long to wait, but within our range:
        tx01min, tx01max = [int(x) for x in cs_single().config.get(
            "SERVER", "tx01_confirm_range").split(",")]
        if not isinstance(d["tx01_confirm_wait"], int):
            return (False, "Invalid type confirm wait type (should be int)")
        if d["tx01_confirm_wait"] < tx01min or d["tx01_confirm_wait"] > tx01max:
            return (False, "Mismatched tx01_confirm_wait, was: " + str(
                d["tx01_confirm_wait"]))
        self.coinswap_parameters.set_tx01_confirm_wait(d["tx01_confirm_wait"])
        # States 5 and 6 wait for funding confirmations; scale their
        # timeouts by the negotiated confirmation count.
        self.sm.reset_timeouts([5, 6], cs_single().one_confirm_timeout * d[
            "tx01_confirm_wait"])
        if not "key_session" in d:
            #TODO validate that it's a real pubkey
            return (False, "no session key from Alice")
        if d["source_chain"] != self.source_chain:
            return (False, "source chain was wrong: " + d["source_chain"])
        if d["destination_chain"] != self.destination_chain:
            return (False, "destination chain was wrong: " + d[
                "destination_chain"])
        if not isinstance(d["amount"], int):
            return (False, "Invalid amount type (should be int)")
        if d["amount"] < self.minimum_amount:
            return (False, "Requested amount too small: " + str(d["amount"]))
        if d["amount"] > self.maximum_amount:
            return (False, "Requested amount too large: " + str(d["amount"]))
        self.coinswap_parameters.set_base_amount(d["amount"])
        if not isinstance(d["bitcoin_fee"], int):
            return (False, "Invalid type for bitcoin fee, should be int.")
        # Accept Alice's proposed tx fee only within a factor of 2 of our
        # own estimate for a (1, 2, 2)-shaped p2sh multisig transaction.
        if d["bitcoin_fee"] < estimate_tx_fee((1, 2, 2), 1,
                                              txtype='p2shMofN')/2.0:
            return (False, "Suggested bitcoin transaction fee is too low.")
        if d["bitcoin_fee"] > estimate_tx_fee((1, 2, 2), 1,
                                              txtype='p2shMofN')*2.0:
            return (False, "Suggested bitcoin transaction fee is too high.")
        self.coinswap_parameters.set_bitcoin_fee(d["bitcoin_fee"])
        #set the session pubkey for authorising future requests
        self.coinswap_parameters.set_pubkey("key_session", d["key_session"])
    except Exception as e:
        return (False,
                "Error parsing handshake from counterparty, ignoring: " + \
                repr(e))
    return (self.coinswap_parameters.session_id,
            "Handshake parameters from Alice accepted")

def negotiate_coinswap_parameters(self, params):
    """Validate Alice's keys/locktimes/destination and return our half.

    Accepts positional params [AC_0 key, CB_1 key, TX2-lock key,
    TX3-secret key, LOCK0, LOCK1, tx5 address]; on success returns the
    list of Carol-side keys, addresses, fee and blinding amount.
    """
    #receive parameters and ephemeral keys, destination address from Alice.
    #Send back ephemeral keys and destination address, or rejection,
    #if invalid, to Alice.
    for k in self.required_key_names:
        self.coinswap_parameters.set_pubkey(k, self.keyset[k][1])
    try:
        self.coinswap_parameters.set_pubkey("key_2_2_AC_0", params[0])
        self.coinswap_parameters.set_pubkey("key_2_2_CB_1", params[1])
        self.coinswap_parameters.set_pubkey("key_TX2_lock", params[2])
        self.coinswap_parameters.set_pubkey("key_TX3_secret", params[3])
        #Client's locktimes must be in the acceptable range.
        cbh = get_current_blockheight()
        serverlockrange = cs_single().config.get("SERVER",
                                                 "server_locktime_range")
        serverlockmin, serverlockmax = [
            int(x) for x in serverlockrange.split(",")]
        clientlockrange = cs_single().config.get("SERVER",
                                                 "client_locktime_range")
        clientlockmin, clientlockmax = [
            int(x) for x in clientlockrange.split(",")]
        if params[4] not in range(cbh + clientlockmin, cbh + clientlockmax+1):
            return (False, "Counterparty LOCK0 out of range")
        if params[5] not in range(cbh + serverlockmin, cbh + serverlockmax+1):
            return (False, "Counterparty LOCK1 out of range")
        #This is enforced in CoinSwapPublicParameters with assert, it must
        #not trigger in the server from external input.
        if params[4] <= params[5]:
            return (False, "LOCK1 must be before LOCK0")
        self.coinswap_parameters.set_timeouts(params[4], params[5])
        self.coinswap_parameters.set_addr_data(addr5=params[6])
    except Exception as e:
        return (False,
                "Invalid parameter set from counterparty, abandoning: " + \
                repr(e))
    #on receipt of valid response, complete the CoinswapPublicParameters instance
    # (this re-sets the same keys as the loop above; harmless.)
    for k in self.required_key_names:
        self.coinswap_parameters.set_pubkey(k, self.keyset[k][1])
    if not self.coinswap_parameters.is_complete():
        cslog.debug("addresses: " + str(self.coinswap_parameters.addresses_complete))
        cslog.debug("pubkeys: " + str(self.coinswap_parameters.pubkeys_complete))
        cslog.debug("timeouts: " + str(self.coinswap_parameters.timeouts_complete))
        return (False, "Coinswap parameters is not complete")
    #Calculate the fee required for the swap now we have valid data.
    #The coinswap fee is assessed against the base amount proposed by the client.
    self.coinswap_parameters.set_coinswap_fee(
        self.coinswap_parameters.fee_policy.get_fee(
            self.coinswap_parameters.base_amount))
    #Calculate a one time blinding amount for this coinswap within the
    #configured max and min
    bl_amt = random.randint(cs_single().config.getint("SERVER", "blinding_amount_min"),
                            cs_single().config.getint("SERVER", "blinding_amount_max"))
    #TODO check that we can serve an amount up to base_amt + bl_amt + csfee;
    #otherwise need to retry selecting a blinding factor (or do something more
    #intelligent).
    self.coinswap_parameters.set_blinding_amount(bl_amt)
    #first entry confirms acceptance of parameters
    to_send = [True,
               self.coinswap_parameters.pubkeys["key_2_2_AC_1"],
               self.coinswap_parameters.pubkeys["key_2_2_CB_0"],
               self.coinswap_parameters.pubkeys["key_TX2_secret"],
               self.coinswap_parameters.pubkeys["key_TX3_lock"],
               self.coinswap_parameters.output_addresses["tx4_address"],
               self.coinswap_parameters.coinswap_fee,
               self.coinswap_parameters.blinding_amount,
               self.coinswap_parameters.output_addresses["tx2_carol_address"],
               self.coinswap_parameters.output_addresses["tx3_carol_address"],
               self.coinswap_parameters.output_addresses["tx5_carol_address"],
               self.coinswap_parameters.session_id]
    #We can now initiate file logging also; .log will be automatically appended
    cs_single().logs_path = os.path.join(cs_single().homedir, "logs", self.state_file)
    return (to_send, "OK")

def receive_tx0_hash_tx2sig(self, txid0, hashed_secret, tx2sig):
    """On receipt of a utxo for TX0, a hashed secret, and a sig for TX2,
    construct TX2, verify the provided signature, create our own sig,
    construct TX3, create our own sig,
    return back to Alice, the txid1, the sig of TX2 and the sig of TX3.

    NOTE(review): the actual reply to Alice (txid1 + sigs) is produced by
    the next state-machine step, send_tx1id_tx2_sig_tx3_sig; this method
    only builds/validates TX2 and returns (True, "OK") on success.
    """
    self.txid0 = txid0
    self.hashed_secret = hashed_secret
    #**CONSTRUCT TX2**
    #,using TXID0 as input; note "txid0" is a utxo string
    self.tx2 = CoinSwapTX23.from_params(
        self.coinswap_parameters.pubkeys["key_2_2_AC_0"],
        self.coinswap_parameters.pubkeys["key_2_2_AC_1"],
        self.coinswap_parameters.pubkeys["key_TX2_secret"],
        utxo_in=self.txid0,
        recipient_amount=self.coinswap_parameters.tx2_amounts["script"],
        hashed_secret=self.hashed_secret,
        absolutelocktime=self.coinswap_parameters.timeouts["LOCK0"],
        refund_pubkey=self.coinswap_parameters.pubkeys["key_TX2_lock"],
        carol_only_address=self.coinswap_parameters.output_addresses["tx2_carol_address"],
        carol_only_amount=self.coinswap_parameters.tx2_amounts["carol"])
    if not self.tx2.include_signature(0, tx2sig):
        return (False, "Counterparty sig for TX2 invalid; backing out.")
    #create our own signature for it
    self.tx2.sign_at_index(self.keyset["key_2_2_AC_1"][0], 1)
    self.tx2.attach_signatures()
    self.watch_for_tx(self.tx2)
    return (True, "OK")
def send_tx1id_tx2_sig_tx3_sig(self):
    """Construct and sign TX1 (Carol's funding tx) and TX3, then return
    [TX1 outpoint, Carol's TX2 sig, Carol's TX3 sig] for Alice.

    Coin selection may raise on insufficient funds; that is handled by
    backout and costs the client no fees. Fixes: the stray debug print()
    is routed through cslog like the rest of the server, and the unused
    enumerate index in the privkey loop is removed.
    """
    our_tx2_sig = self.tx2.signatures[0][1]
    #**CONSTRUCT TX1**
    #This call can throw insufficient funds; handled by backout.
    #But, this should be avoided (see handshake). At least, any
    #throw here will not cause fees for client.
    cslog.debug("wallet used coins is: " + str(self.wallet.used_coins))
    self.initial_utxo_inputs = self.wallet.select_utxos(0,
        self.coinswap_parameters.tx1_amount,
        utxo_filter=self.wallet.used_coins)
    #Lock these coins; only unlock if there is a pre-funding backout.
    self.wallet.used_coins.extend(self.initial_utxo_inputs.keys())
    total_in = sum([x['value'] for x in self.initial_utxo_inputs.values()])
    self.signing_privkeys = []
    for v in self.initial_utxo_inputs.values():
        privkey = self.wallet.get_key_from_addr(v['address'])
        if not privkey:
            raise CoinSwapException("Failed to get key to sign TX1")
        self.signing_privkeys.append(privkey)
    signing_pubkeys = [[btc.privkey_to_pubkey(x)] for x in self.signing_privkeys]
    signing_redeemscripts = [btc.address_to_script(
        x['address']) for x in self.initial_utxo_inputs.values()]
    change_amount = total_in - self.coinswap_parameters.tx1_amount - \
        self.coinswap_parameters.bitcoin_fee
    cslog.debug("got tx1 change amount: " + str(change_amount))
    #get a change address in same mixdepth
    change_address = self.wallet.get_internal_addr(0)
    self.tx1 = CoinSwapTX01.from_params(
        self.coinswap_parameters.pubkeys["key_2_2_CB_0"],
        self.coinswap_parameters.pubkeys["key_2_2_CB_1"],
        utxo_ins=self.initial_utxo_inputs,
        signing_pubkeys=signing_pubkeys,
        signing_redeem_scripts=signing_redeemscripts,
        output_amount=self.coinswap_parameters.tx1_amount,
        change_address=change_address,
        change_amount=change_amount,
        segwit=True)
    #sign and hold signature, recover txid
    self.tx1.signall(self.signing_privkeys)
    self.tx1.attach_signatures()
    self.tx1.set_txid()
    cslog.info("Carol created and signed TX1:")
    cslog.info(self.tx1)
    #**CONSTRUCT TX3**
    utxo_in = self.tx1.txid + ":"+str(self.tx1.pay_out_index)
    self.tx3 = CoinSwapTX23.from_params(
        self.coinswap_parameters.pubkeys["key_2_2_CB_0"],
        self.coinswap_parameters.pubkeys["key_2_2_CB_1"],
        self.coinswap_parameters.pubkeys["key_TX3_secret"],
        utxo_in=utxo_in,
        recipient_amount=self.coinswap_parameters.tx3_amounts["script"],
        hashed_secret=self.hashed_secret,
        absolutelocktime=self.coinswap_parameters.timeouts["LOCK1"],
        refund_pubkey=self.coinswap_parameters.pubkeys["key_TX3_lock"],
        carol_only_address=self.coinswap_parameters.output_addresses["tx3_carol_address"],
        carol_only_amount=self.coinswap_parameters.tx3_amounts["carol"])
    #create our signature on TX3
    self.tx3.sign_at_index(self.keyset["key_2_2_CB_0"][0], 0)
    our_tx3_sig = self.tx3.signatures[0][0]
    cslog.info("Carol now has partially signed TX3:")
    cslog.info(self.tx3)
    return ([self.tx1.txid + ":" + str(self.tx1.pay_out_index),
             our_tx2_sig, our_tx3_sig], "OK")
def receive_tx3_sig(self, sig):
    """Receives the sig on transaction TX3 which pays from our txid of TX1,
    to the 2 of 2 agreed CB. Then, wait until TX0 seen on network.
    """
    if not self.tx3.include_signature(1, sig):
        return (False, "TX3 signature received is invalid")
    cslog.info("Carol now has fully signed TX3:")
    cslog.info(self.tx3)
    self.tx3.attach_signatures()
    self.watch_for_tx(self.tx3)
    #wait until TX0 is seen before pushing ours.
    # Poll every 3 seconds for the TX0 outpoint.
    self.loop = task.LoopingCall(self.check_for_phase1_utxos, [self.txid0])
    self.loop.start(3.0)
    return (True, "Received TX3 sig OK")

def push_tx1(self):
    """Having seen TX0 confirmed, broadcast TX1 and wait for confirmation.
    """
    errmsg, success = self.tx1.push()
    if not success:
        return (False, "Failed to push TX1")
    #Monitor the output address of TX1 by importing
    # (watch-only import, rescan disabled via the final False argument)
    cs_single().bc_interface.rpc("importaddress",
                                 [self.tx1.output_address, "joinmarket-notify", False])
    #Wait until TX1 seen before confirming phase2 ready.
    # Poll every 3s; on success receive_confirmation_tx_0_1 flips the flag.
    self.loop = task.LoopingCall(self.check_for_phase1_utxos,
                                 [self.tx1.txid + ":" + str(
                                     self.tx1.pay_out_index)],
                                 self.receive_confirmation_tx_0_1)
    self.loop.start(3.0)
    return (True, "TX1 broadcast OK")

def receive_confirmation_tx_0_1(self):
    """We wait until client code has confirmed both pay-in txs
    before proceeding; note that this doesn't necessarily mean
    *1* confirmation, could be safer.
    """
    self.phase2_ready = True

def is_phase2_ready(self):
    # Polled by Alice over RPC (jsonrpc_phase2_ready).
    return self.phase2_ready
def receive_secret(self, secret):
    """Validate Alice's revealed preimage against the hash committed at
    setup. When valid, persist it (it is needed for any later recovery)
    and update state; otherwise reject."""
    _, check_hash = get_coinswap_secret(raw_secret=secret)
    if check_hash != self.hashed_secret:
        return (False, "Received invalid coinswap secret.")
    # Known valid; must be persisted in case recovery is needed.
    self.secret = secret
    return (True, "OK")
def send_tx5_sig(self):
    """Build TX5 (spending the TX1-funded 2-of-2 to Alice's tx5 address
    plus Carol's change) and return our half-signature on it."""
    utxo_in = self.tx1.txid + ":" + str(self.tx1.pay_out_index)
    #We are now ready to directly spend, make TX5 and half-sign.
    self.tx5 = CoinSwapTX45.from_params(
        self.coinswap_parameters.pubkeys["key_2_2_CB_0"],
        self.coinswap_parameters.pubkeys["key_2_2_CB_1"],
        utxo_in=utxo_in,
        destination_address=self.coinswap_parameters.output_addresses["tx5_address"],
        destination_amount=self.coinswap_parameters.tx5_amounts["alice"],
        carol_change_address=self.coinswap_parameters.output_addresses["tx5_carol_address"],
        carol_change_amount=self.coinswap_parameters.tx5_amounts["carol"])
    self.tx5.sign_at_index(self.keyset["key_2_2_CB_0"][0], 0)
    sig = self.tx5.signatures[0][0]
    return (sig, "OK")

def receive_tx4_sig(self, sig, txid5):
    """Receives and validates signature on TX4, and the TXID
    for TX5 (purely for convenience, not checked).
    """
    self.txid5 = txid5
    # TX4 pays the whole AC-side 2-of-2 to Carol's tx4 address (no change).
    self.tx4 = CoinSwapTX45.from_params(
        self.coinswap_parameters.pubkeys["key_2_2_AC_0"],
        self.coinswap_parameters.pubkeys["key_2_2_AC_1"],
        utxo_in=self.txid0,
        destination_address=self.coinswap_parameters.output_addresses["tx4_address"],
        destination_amount=self.coinswap_parameters.tx4_amounts["carol"],
        carol_change_address=None,
        carol_change_amount=None)
    if not self.tx4.include_signature(0, sig):
        return (False, "Received invalid TX4 signature")
    return (True, "OK")

def broadcast_tx4(self):
    """Complete our signature on TX4, broadcast it, and start polling
    for its confirmation."""
    self.tx4.sign_at_index(self.keyset["key_2_2_AC_1"][0], 1)
    errmsg, success = self.tx4.push()
    if not success:
        return (False, "Failed to push TX4")
    self.tx4_loop = task.LoopingCall(self.wait_for_tx4_confirmed)
    self.tx4_loop.start(3.0)
    return (True, "OK")

def wait_for_tx4_confirmed(self):
    """Poll the utxo set for TX4:0; once it has at least one confirmation,
    stop polling, flag completion and shut the protocol run down."""
    result = cs_single().bc_interface.query_utxo_set([self.tx4.txid+":0"],
                                                     includeconf=True)
    if None in result:
        # Not (all) visible yet; keep polling.
        return
    for u in result:
        if u['confirms'] < 1:
            return
    self.tx4_loop.stop()
    self.tx4_confirmed = True
    cslog.info("Carol received: " + self.tx4.txid + ", now ending.")
    self.quit()
def is_tx4_confirmed(self):
    """Return TX4's txid once it has confirmed, else False.
    Polled by Alice over RPC (jsonrpc_confirm_tx4)."""
    return self.tx4.txid if self.tx4_confirmed else False
def find_secret_from_tx3_redeem(self, expected_txid=None):
    """Given a txid assumed to be a transaction which spends from TX1
    (so must be TX3 whether ours or theirs, since this is the only
    doubly-signed tx), and assuming it has been spent from (so this
    function is only called if redeeming TX3 fails), find the redeeming
    transaction and extract the coinswap secret from its scriptSig(s).
    The secret is returned.
    If expected_txid is provided, checks that this is the redeeming txid,
    in which case returns "True".
    NOTE(review): as written the expected_txid argument is unused in this
    body -- confirm against callers.
    """
    # Caller guarantees the spending tx has been recorded by the watcher.
    assert self.tx3.spending_tx
    deser_spending_tx = btc.deserialize(self.tx3.spending_tx)
    cslog.info("Here is the spending transaction: " + str(deser_spending_tx))
    vins = deser_spending_tx['ins']
    # Scan the inputs for a preimage matching our committed hash.
    self.secret = get_secret_from_vin(vins, self.hashed_secret)
    if not self.secret:
        cslog.info("Critical error; TX3 spent but no "
                   "coinswap secret was found.")
        return False
    return self.secret
def redeem_tx3_with_lock(self):
    """Spend TX3's script output back to our wallet via the locktime path.

    Must be called after LOCK1, and TX3 must be broadcast but
    not-already-spent. Returns True if the redeeming transaction was
    broadcast successfully, False otherwise. (The unused local
    ``wallet_name`` left over from an older API has been removed.)
    """
    if not self.tx3.txid:
        cslog.info("Failed to find TX3 txid, cannot redeem from it")
        return False
    #**CONSTRUCT TX3-redeem-timeout; use a fresh address to redeem
    dest_addr = self.wallet.get_new_addr(0, 1, True)
    self.tx3redeem = CoinSwapRedeemTX23Timeout(
        self.coinswap_parameters.pubkeys["key_TX3_secret"],
        self.hashed_secret,
        self.coinswap_parameters.timeouts["LOCK1"],
        self.coinswap_parameters.pubkeys["key_TX3_lock"],
        self.tx3.txid + ":0",
        self.coinswap_parameters.tx3_amounts["script"],
        dest_addr)
    self.tx3redeem.sign_at_index(self.keyset["key_TX3_lock"][0], 0)
    msg, success = self.tx3redeem.push()
    cslog.info("Redeem tx: ")
    cslog.info(self.tx3redeem)
    if not success:
        cslog.info("RPC error message: " + msg)
        cslog.info("Failed to broadcast TX3 redeem; here is raw form: ")
        cslog.info(self.tx3redeem.fully_signed_tx)
        cslog.info("Readable form: ")
        cslog.info(self.tx3redeem)
        return False
    return True
def redeem_tx2_with_secret(self):
    """Broadcast TX2, then spend its script output to a fresh wallet
    address using the revealed coinswap secret.

    Returns True on a successful redeem broadcast, False otherwise.
    (The original comment said "Broadcast TX3" on code that pushes TX2;
    the unused ``wallet_name`` local has also been removed.)
    """
    # Broadcast TX2 (the doubly-signed AC-side contract transaction).
    msg, success = self.tx2.push()
    if not success:
        cslog.info("RPC error message: " + msg)
        cslog.info("Failed to broadcast TX2; here is raw form: ")
        cslog.info(self.tx2.fully_signed_tx)
        return False
    #**CONSTRUCT TX2-redeem-secret; use a fresh address to redeem
    dest_addr = self.wallet.get_new_addr(0, 1, True)
    tx2redeem_secret = CoinSwapRedeemTX23Secret(self.secret,
        self.coinswap_parameters.pubkeys["key_TX2_secret"],
        self.coinswap_parameters.timeouts["LOCK0"],
        self.coinswap_parameters.pubkeys["key_TX2_lock"],
        self.tx2.txid+":0",
        self.coinswap_parameters.tx2_amounts["script"],
        dest_addr)
    tx2redeem_secret.sign_at_index(self.keyset["key_TX2_secret"][0], 0)
    msg, success = tx2redeem_secret.push()
    cslog.info("Redeem tx: ")
    cslog.info(tx2redeem_secret)
    if not success:
        cslog.info("RPC error message: " + msg)
        cslog.info("Failed to broadcast TX2 redeem; here is raw form: ")
        cslog.info(tx2redeem_secret.fully_signed_tx)
        cslog.info(tx2redeem_secret)
        return False
    else:
        cslog.info("Successfully redeemed funds via TX2, to address: "+\
                   dest_addr + ", in txid: " +\
                   tx2redeem_secret.txid)
    return True
    def watch_for_tx3_spends(self, redeeming_txid):
        """Function used to check whether our, or a competing
        tx, successfully spends out of TX3. Meant to be polled.

        Three outcomes per poll:
        * our redeem (self.tx3redeem) confirmed -> stop the watcher loop
          and quit cleanly;
        * TX3 spent by a transaction other than `redeeming_txid` (i.e. the
          counterparty redeemed with the secret) -> extract the secret and
          back out via TX2;
        * otherwise do nothing and wait for the next poll.
        """
        # Only valid in the late back-out states of the state machine.
        assert self.sm.state in [6, 7, 8]
        if self.tx3redeem.is_confirmed:
            self.carol_watcher_loop.stop()
            cslog.info("Redeemed funds via TX3 OK, txid of redeeming transaction "
                       "is: " + self.tx3redeem.txid)
            self.quit(complete=False, failed=False)
            return
        if self.tx3.is_spent:
            if btc.txhash(self.tx3.spending_tx) != redeeming_txid:
                cslog.info("Detected TX3 spent by other party; backing out to TX2")
                retval = self.find_secret_from_tx3_redeem()
                if not retval:
                    # Without the secret we cannot claim TX2; fatal failure.
                    cslog.info("CRITICAL ERROR: Failed to find secret from TX3 redeem.")
                    self.quit(False, True)
                    return
                rt2s_success = self.redeem_tx2_with_secret()
                self.quit(False, not rt2s_success)
        return
    def scan_blockchain_for_secret(self):
        """Only required by Carol; in cases where the wallet
        monitoring fails (principally because a secret-redeeming
        transaction by Alice occurred when we were not on-line),
        we must be able to find the secret directly from scanning
        the blockchain. This could be achieved with indexing on
        our Bitcoin Core instance, but since this requires a lot of
        resources, it's simpler to directly parse the relevant blocks.

        Scans backwards from the current block height down to the block
        where LOCK0 minus the client lock window starts; sets self.secret
        and returns True on success, returns False if nothing was found.
        """
        bh = get_current_blockheight()
        # Earliest block worth scanning: LOCK0 timeout minus the configured
        # client lock window (blocks before that predate the swap).
        starting_blockheight = self.coinswap_parameters.timeouts[
            "LOCK0"] - cs_single().config.getint("TIMEOUT", "lock_client")
        while bh >= starting_blockheight:
            found_txs = get_transactions_from_block(bh)
            for t in found_txs:
                # Look for our hashed secret's preimage in any scriptSig.
                retval = get_secret_from_vin(t['ins'], self.hashed_secret)
                if retval:
                    self.secret = retval
                    return True
            bh -= 1
        cslog.info("Failed to find secret from scanning blockchain.")
        return False
| AdamISZ/CoinSwapCS | coinswap/carol.py | Python | gpl-3.0 | 29,137 |
# SyndicateManager
# /src/cm/settings.py
#
# Copyright (c) 2013 Gwyn Howell
#
# LICENSE:
#
# This file is part of SyndicateManager (http://my-syndicate.appspot.com).
#
# SyndicateManager is free software: you can redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, either version 2 of the License, or (at your option) any
# later version.
#
# SyndicateManager is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with SyndicateManager. If not, see
# <http://www.gnu.org/licenses/>.
#
#
# @author Gwyn Howell <gwyn[at]howellmail[dot]co[dot]uk>
# @license http://www.gnu.org/licenses/gpl.html
# @copyright 2013 Gwyn Howell
# Application identity constants, re-exported into webapp2_config below.
APPLICATION_NAME = 'Syndicate Manager'
APPLICATION_DESCRIPTION = 'Syndicate Manager'
APPLICATION_VERSION = '0.1'
DEBUG = False
GOOGLE_ANALYTICS_ID = ''
# webapp2 framework configuration (sessions, jinja2 templating) plus
# app-level keys consumed elsewhere in the project.
# SECURITY NOTE(review): the session secret_key is hard-coded in source;
# it should be loaded from a secret store / environment instead.
webapp2_config = {
    'webapp2_extras.sessions': {'secret_key': 'FASDHJDASDHFASHDFAS'},
    'webapp2_extras.jinja2': {'template_path': ['templates'],
                              'environment_args': {'extensions': ['jinja2.ext.with_']}},
    'system_name': APPLICATION_NAME,
    'version': APPLICATION_VERSION,
    'debug_mode':DEBUG,
'google_analytics_id':GOOGLE_ANALYTICS_ID } | gwynhowell/SyndicateManager | src/cm/settings.py | Python | gpl-3.0 | 1,487 |
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 16 13:55:27 2019
@author: Guillaumot Luca, PB
"""
import os
import sys
import numpy as np
flopypth = os.path.join('..', '..', 'flopy')
if flopypth not in sys.path:
sys.path.append(flopypth)
import flopy
import flopy.utils.binaryfile as bf
import time
def ModFlow_modelV5(self, path_data, numero, namemodel, StepSize, nrow,ncol, recharge, pumping_datas=[]):
    """Run one transient MODFLOW-NWT stress period via flopy and return flows.

    On the first call (numero == 1) the full set of flopy packages (DIS, BAS,
    UPW, NWT, DRN, RCH, WEL, OC) is created and all input files are written;
    on subsequent calls only the recharge/basic/well files are rewritten and
    the .nam file is regenerated from self.var.modflow_text_to_write.
    After the run, hydraulic heads are read back from the .hds file and the
    drain outflow is split into capillary rise and baseflow for CWatM.

    Parameters:
        path_data: NOTE(review) unused in this function body — confirm.
        numero: 1 on the first invocation (build everything), otherwise
            only the per-step inputs are rewritten.
        namemodel: NOTE(review) unused — the model name is taken from
            self.var.nameModflowModel instead; confirm.
        StepSize: length of the coupling time step (days); used to scale
            permeability and pumping-related tolerances.
        nrow, ncol: grid dimensions of the ModFlow grid.
        recharge: recharge array passed to the RCH package.
        pumping_datas: list of (row, col, rate) pumping entries.
            NOTE(review): mutable default argument; benign here because it
            is only read, but a None default would be safer.

    Returns:
        (cap_rise, base_flow, budget_terms) where cap_rise/base_flow are
        arrays in m/day and budget_terms is the parsed .list budget (or 0
        when self.var.writeerror is False).
    """
    def check_if_close(files):
        # sometime modflow does not start because the writing has not finished;
        # renaming a file back and forth fails while another process holds it,
        # so retry (after a sleep) until every input file is closed.
        for file in files:
            #print (file)
            try:
                os.rename(file, file + "_")
                os.rename(file + "_", file)
            except OSError as e:
                print ("Access-error on file \"" + str(file) + "\"!!! \n" + str(e))
                time.sleep(3)
    # output and name prefix
    pathname = self.var.PathModflowOutput + '/' + self.var.nameModflowModel
    ## Time discretization parameters
    # Number of days simulated
    nper = 1
    # Modflow Flopy library conform
    perlen = np.ones(nper)
    nstp = np.ones(nper)
    # MODFLOW FUNCTION
    ss = np.divide(self.var.poro,self.var.delv2) # Specific storage [1/m]
    hk2 = self.var.hk0 * StepSize # Permeability scaled to the step [m/step]
    laytyp = 1 # 0 if confined, else laytyp=1
    layvka = 1 # If layvka=0 vka=vertical hydraulic conductivity, if not vka=ratio horizontal to vertical conductivity
    vka = 5
    sy = 0.03 # Specific yield: not used if the layer is confined
    ## FLOPY OBJECTS
    if numero == 1:
        # First call: build the whole model and write all input files.
        mf = flopy.modflow.Modflow(self.var.nameModflowModel, model_ws = self.var.PathModflowOutput , exe_name=self.var.modflowexe , version='mfnwt') # Call the .exe ModFlow
        dis = flopy.modflow.ModflowDis(mf, self.var.nlay, nrow, ncol, delr=self.var.res_ModFlow, delc=self.var.res_ModFlow, top=self.var.botm[0], botm=self.var.botm[1:],
                                   nper=nper, perlen=perlen, nstp=nstp, steady = False) # Grid characteristics (itmuni=4 if days, 5 if years)
        bas = flopy.modflow.ModflowBas(mf, ibound=self.var.basin, strt=self.var.head) # ibound = Grid containing 0 and 1 (if 1 the cell belows to the basin and it will be an active cell)
        upw = flopy.modflow.ModflowUpw(mf, hk=hk2, vka=vka, sy=sy, ss=ss,laytyp=laytyp,layvka=layvka) # Hydrodynamic properties
        nwt = flopy.modflow.ModflowNwt(mf, fluxtol=500* StepSize, Continue=True) # If Continue=True the model continue even if percent discrepancy > criteria
        ## DRAIN PACKAGE
        # One drain per active cell at the CWatM layer-3 water level; the
        # conductance couples drained flow back to CWatM as capillary rise.
        ir = np.repeat(np.arange(nrow),ncol)
        ic = np.tile(np.arange(ncol),nrow)
        wt = self.var.waterTable3.ravel()
        cf = self.var.coef*hk2[0]* self.var.res_ModFlow * self.var.res_ModFlow
        cf = cf.ravel()
        dd = np.stack([np.zeros(ir.shape[0]),ir, ic,wt,cf])
        drain = np.swapaxes(dd,0,1)
        lrcec={0:drain}
        drn = flopy.modflow.ModflowDrn(mf, stress_period_data=lrcec,options=['NOPRINT'])
        ## RECHARGE PACKAGE
        rch = flopy.modflow.ModflowRch(mf, nrchop=3, rech=recharge)
        ### Add the pumping case - PUMPING WELL PACKAGE
        ## first we will consider constant pumping rates along the simulation, so pumping_data is only a 2D array
        if self.var.GW_pumping:
            wel_sp = []
            for kk in range(len(pumping_datas)):  # adding each pumping well to the package
                wel_sp.append([0, pumping_datas[kk][0], pumping_datas[kk][1], pumping_datas[kk][2]])    # Multiply pumping_datas[kk][2] by StepSize if flow is in m3/day, currently in m3/7days, pumping < 0 implies abstraction
            # Pumping [m3/timestep] in the first layer
            if wel_sp != []:
                wel = flopy.modflow.ModflowWel(mf, stress_period_data=wel_sp)
        ## OUTPUT CONTROL
        oc = flopy.modflow.ModflowOc(mf, stress_period_data=None)
        mf.write_input()  # Write the model input files
    else:
        # Subsequent calls: rebuild only the per-step inputs (BAS with the
        # new starting heads, RCH, WEL) and rewrite their files in place.
        mf = flopy.modflow.Modflow(self.var.nameModflowModel, model_ws = self.var.PathModflowOutput, exe_name=self.var.modflowexe , version='mfnwt') # Call the .exe ModFlow
        dis = flopy.modflow.ModflowDis(mf, self.var.nlay, nrow,ncol, delr=self.var.res_ModFlow, delc=self.var.res_ModFlow, top=self.var.botm[0], botm=self.var.botm[1:],
                                       nper=nper, perlen=perlen, nstp=nstp, steady=False) # Grid characteristics
        bas = flopy.modflow.ModflowBas(mf, ibound=self.var.basin, strt=self.var.head) # ibound = Grid containing 0 and 1 (if 1 the cell belows to the basin and it will be an active cell)
        ## PUMPING WELL PACKAGE
        ## first we will consider constant pumping rates along the simulation, so pumping_data is only a 2D array
        if self.var.GW_pumping:
            wel_sp = []
            for kk in range(len(pumping_datas)):  # adding each pumping well to the package
                wel_sp.append([0, pumping_datas[kk][0], pumping_datas[kk][1], pumping_datas[kk][2]])  # Multiply pumping_datas[kk][2] by StepSize if flow is in m3/day, currently in m3/7days, pumping < 0 implies abstraction
            if wel_sp != []:
                wel = flopy.modflow.ModflowWel(mf, stress_period_data=wel_sp)  # the well path has to be defined in the .nam file
        rch = flopy.modflow.ModflowRch(mf, nrchop=3, rech=recharge)
        rch.write_file(check=True)
        bas.write_file(check=True)
        if (self.var.GW_pumping and wel_sp != []): wel.write_file()
        #mf.write_input()
    # modify the nam file:
    #if numero == 2:
    nam_file=open(pathname+'.nam', "w")
    nam_file.write(self.var.modflow_text_to_write)
    nam_file.close()
    files = [rch.fn_path,bas.fn_path,pathname+'.nam']
    if (self.var.GW_pumping and wel_sp != []): files.append(wel.fn_path)
    check_if_close(files)
    ### -------- Running MODFLOW -----------------------------
    success, mfoutput = mf.run_model(silent=True, pause=False)   # Run the model
    ### ------------------------------------------------------
    # TODO: some error routine needed
    #if not success:
    #    raise Exception('MODFLOW did not terminate normally.')
    ## COMPUTING AND SAVING OUTPUT FLOW BY DRAINS
    # Create the headfile object
    headobj = bf.HeadFile(pathname + '.hds')
    periode=headobj.get_times()
    # Matrix of the simulated water levels
    self.var.head = headobj.get_data(totim=periode[0])
    headobj.close()
    # outflow from groundwater          # from m per timestep to m/d
    gwoutflow = np.where(((self.var.head[0] - self.var.waterTable3)>=0) & (self.var.basin[0] == 1),
                         (self.var.head[0]-self.var.waterTable3)*self.var.coef*hk2[0] / StepSize,0)
    # CapillaryRise and baseflow: split the drain outflow by riverPercentage
    cap_rise = gwoutflow *(1 - self.var.riverPercentage)
    base_flow = gwoutflow * self.var.riverPercentage
    # Groundwater storage in [m]; -999 marks inactive cells in the head array
    head = np.where(self.var.head[0] == -999, self.var.botm[0]-self.var.actual_thick[0], self.var.head[0])
    self.var.modflowStorGW = (head - (self.var.botm[0]-self.var.actual_thick[0])) * self.var.poro[0]
    self.var.modflowTopography = np.where(self.var.head[0] < -900, self.var.head[0], self.var.botm[0])
    self.var.modflowDepth = np.where(self.var.head[0] < -900, -999, self.var.botm[0] - self.var.head[0])
    self.var.modflowDepth2 = np.ma.masked_values(self.var.modflowDepth, -999)
    budget_terms = 0
    if self.var.writeerror:
        # Parse the MODFLOW listing file for mass-balance diagnostics.
        mf_list = flopy.utils.MfListBudget(pathname + '.list')
        budget_terms = mf_list.get_data()  # (totim=periode[0], incremental=True)
        #Error_Percentage = Budget_terms[-1][1]
        # -1 because the last value correspond to the percent discrepancy of the simulated period
    #return Q1,Q2,Cap_rise, Base_flow, Budget_terms
    return cap_rise, base_flow, budget_terms
| CWatM/CWatM | cwatm/hydrological_modules/groundwater_modflow/ModFlow_modelV5.py | Python | gpl-3.0 | 9,153 |
# -*- coding: utf-8 -*-
# Copyright (C) 2015 ZetaOps Inc.
#
# This file is licensed under the GNU General Public License v3
# (GPLv3). See LICENSE.txt for details.
"""HITAP Tazminat Senkronizasyon
Personelin Hitap'taki tazminat bilgilerinin
yereldeki kayıtlarla senkronizasyonunu yapar.
"""
from ulakbus.services.personel.hitap.hitap_sync import HITAPSync
from ulakbus.models.hitap.hitap import HizmetTazminat
class HizmetTazminatSync(HITAPSync):
    """
    Compensation (Tazminat) synchronization service derived from the
    HITAP Sync service: syncs personnel compensation records between
    HITAP and the local HizmetTazminat model.
    """
    # Zato: expose this service on a channel.
    HAS_CHANNEL = True
    # Wiring consumed by the HITAPSync base class: the query service to
    # call and the local model to reconcile against.
    service_dict = {
        'sorgula_service': 'hizmet-tazminat-getir',
        'model': HizmetTazminat
    }
| zetaops/ulakbus | ulakbus/services/personel/hitap/hizmet_tazminat_sync.py | Python | gpl-3.0 | 690 |
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2009 Ars Aperta, Itaapy, Pierlis, Talend.
#
# Authors: Hervé Cauwelier <herve@itaapy.com>
#
# This file is part of Lpod (see: http://lpod-project.org).
# Lpod is free software; you can redistribute it and/or modify it under
# the terms of either:
#
# a) the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option)
# any later version.
# Lpod is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Lpod. If not, see <http://www.gnu.org/licenses/>.
#
# b) the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Import from lpod
from element import register_element_class, odf_element, odf_create_element
from smil import odf_create_anim_par, odf_create_anim_transitionFilter
def odf_create_draw_page(page_id, name=None, master_page=None,
                         page_layout=None, style=None):
    """Create a "draw:page" element, the container for content in a
    drawing or presentation document.

    Arguments:

        page_id -- str

        name -- unicode

        master_page -- str

        page_layout -- str

        style -- str

    Return: odf_element
    """
    page = odf_create_element('draw:page')
    page.set_page_id(page_id)
    if name:
        page.set_page_name(name)
    # Optional attributes, applied only when a value was given.
    optional_attributes = (
        ('draw:style-name', style),
        ('draw:master-page-name', master_page),
        ('presentation:presentation-page-layout-name', page_layout))
    for attribute, value in optional_attributes:
        if value:
            page.set_attribute(attribute, value)
    return page
class odf_draw_page(odf_element):
    """Specialised element for the pages of presentation and drawing
    documents.
    """

    def get_page_name(self):
        """Return the page name ("draw:name" attribute)."""
        return self.get_attribute('draw:name')

    def set_page_name(self, name):
        """Set the page name ("draw:name" attribute)."""
        self.set_attribute('draw:name', name)

    def get_page_id(self):
        """Return the page identifier ("draw:id" attribute)."""
        return self.get_attribute('draw:id')

    def set_page_id(self, page_id):
        """Set the page identifier ("draw:id" attribute)."""
        self.set_attribute('draw:id', page_id)

    def set_transition(self, type, subtype=None, dur='2s'):
        """Attach a SMIL transition filter to this page, replacing any
        transition already present."""
        # Build the animation tree: timing root > begin node > filter.
        timing_root = odf_create_anim_par(presentation_node_type="timing-root")
        begin_node = odf_create_anim_par(
            smil_begin="%s.begin" % self.get_page_id())
        filter_node = odf_create_anim_transitionFilter(smil_dur=dur,
                                                       smil_type=type,
                                                       smil_subtype=subtype)
        begin_node.append(filter_node)
        timing_root.append(begin_node)
        # A page may already carry a transition ('anim:par' child, cf.
        # page 349 of OpenDocument-v1.0-os.pdf): drop it before adding
        # the new one.
        previous = self.get_element('anim:par')
        if previous:
            self.delete(previous)
        self.append(timing_root)

    def get_formatted_text(self, context):
        """Return the page content as plain text, including the text of
        any presentation notes."""
        chunks = []
        for child in self.get_children():
            if child.get_tag() == 'presentation:notes':
                # Notes content lives in plain paragraphs/frames, which
                # are already handled by their own get_formatted_text.
                for note_child in child.get_children():
                    chunks.append(note_child.get_formatted_text(context))
                    chunks.append(u"\n")
            chunks.append(child.get_formatted_text(context))
            chunks.append(u"\n")
        return u"".join(chunks)
# Make the lpod element factory instantiate odf_draw_page for 'draw:page' tags.
register_element_class('draw:page', odf_draw_page)
| uliss/quneiform | tests/py/lpod/draw_page.py | Python | gpl-3.0 | 4,125 |
#!/usr/bin/python3
# this script will remove all jars from .classpath files and will plant new
# ones according to jars in lib
import glob # for glob
import xml.dom.minidom # for parse
# Deliberate kill switch: the author decided eclipse classpath variables are
# the right mechanism, so the script aborts on import. Remove to re-enable.
raise ValueError("you really should use eclipse variables and not this script")
# When True, a <classpathentry kind="lib"> is (re)planted for every jar found.
addJars=False
jar_list=glob.glob('lib/*.jar')
jar_list.extend(glob.glob('static/*.jar'))
classpath_list=glob.glob('projects/*/.classpath')
def iterate_children(parent):
    """Yield the direct child nodes of *parent* in document order.

    Walks the DOM via the firstChild/nextSibling links, so it works on any
    xml.dom node without materialising a list of children.
    """
    child = parent.firstChild
    # 'is not None' rather than '!= None': the DOM API returns None for a
    # missing sibling, and identity comparison is the idiomatic check.
    while child is not None:
        yield child
        child = child.nextSibling
# Rewrite every Eclipse .classpath file: strip existing jar entries,
# optionally plant fresh ones, normalise whitespace and pretty-print.
for filename in classpath_list:
    document = xml.dom.minidom.parse(filename)
    # remove all classpathentry elements with attribute kind=lib
    for node in document.getElementsByTagName('classpathentry'):
        if node.getAttribute('kind')=='lib':
            node.parentNode.removeChild(node)
    # add all the jars
    if addJars:
        for jar in jar_list:
            e=document.createElementNS(None,'classpathentry')
            e.setAttribute('kind','lib')
            # Jars live at the repo root; projects sit two levels below it.
            e.setAttribute('path','../../'+jar)
            document.firstChild.appendChild(e)
    # remove white space: collect first, then remove, so we never mutate
    # the child list while iterating over it
    remove_list=[]
    for child in iterate_children(document.firstChild):
        if child.nodeType==child.TEXT_NODE:
            value=child.nodeValue
            if value.strip()=="":
                remove_list.append(child)
    for node in remove_list:
        node.parentNode.removeChild(node)
    # write the output file
    with open(filename,"w") as f:
        f.write(document.toprettyxml(indent="\t"))
# Matrix (multiplication)
# Python3
# by Xellor
# Last update 16.03.2016


def read_matrix(n, label):
    """Prompt for and return an n x n matrix of ints, entered value by value.

    `label` is the matrix name shown in the prompt (e.g. 'M1').
    """
    matrix = []
    for i in range(n):
        row = []
        for j in range(n):
            print('Enter ' + label + '(' + str(i + 1) + '.' + str(j + 1) + ')')
            row.append(int(input()))
        matrix.append(row)
    return matrix


def multiply(m1, m2):
    """Return the matrix product m1 x m2 for square matrices.

    Matrices are lists of rows; the result is a new list of rows.
    """
    n = len(m1)
    return [[sum(m1[i][k] * m2[k][j] for k in range(n)) for j in range(n)]
            for i in range(n)]


# Guarding the interactive part keeps the module importable (and testable)
# without triggering input() prompts; prompts and output are unchanged.
if __name__ == '__main__':
    matrix_n = int(input('Enter matrix size (N x N): '))  # integer only
    matrix_1 = read_matrix(matrix_n, 'M1')
    print('Now M2')
    matrix_2 = read_matrix(matrix_n, 'M2')
    matrix_3 = multiply(matrix_1, matrix_2)
    print('\nM3 (result):')
    for row in matrix_3:
        print(row)
exit() | Xellor/python-math-training | matrix_multiplication.py | Python | gpl-3.0 | 1,031 |
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import textwrap
from snapcraft.internal import steps
from snapcraft.internal.project_loader import inspection
from testtools.matchers import Equals
from .. import ProjectLoaderBaseTest
class LifecycleStatusTest(ProjectLoaderBaseTest):
    """Exercise inspection.lifecycle_status over a two-part project where
    part2 depends on part1, walking steps through complete, outdated and
    dirty states."""
    def setUp(self):
        super().setUp()
        # Source directories for the two parts defined in the YAML below.
        for path in ("src1", "src2"):
            os.mkdir(path)
        self.config = self.make_snapcraft_project(
            textwrap.dedent(
                """\
                name: my-snap-name
                version: '1.0'
                summary: summary
                description: description
                grade: devel
                confinement: devmode
                parts:
                  part1:
                    plugin: nil
                    source: src1/
                  part2:
                    plugin: nil
                    source: src2/
                    after: [part1]
                """
            )
        )
        for part in self.config.all_parts:
            part.makedirs()
        self.part1 = self.config.parts.get_part("part1")
        self.part2 = self.config.parts.get_part("part2")
    def test_lifecycle_status(self):
        # Stage part1, since it's a dependency
        self.part1.mark_pull_done()
        self.part1.mark_build_done()
        self.part1.mark_stage_done(set(), set())
        # Pull part2
        self.part2.mark_pull_done()
        # part1 staged, part2 only pulled: remaining steps report None.
        self.assertThat(
            inspection.lifecycle_status(self.config),
            Equals(
                [
                    {
                        "part": "part1",
                        "pull": "complete",
                        "build": "complete",
                        "stage": "complete",
                        "prime": None,
                    },
                    {
                        "part": "part2",
                        "pull": "complete",
                        "build": None,
                        "stage": None,
                        "prime": None,
                    },
                ]
            ),
        )
        # Now prime them both
        self.part1.mark_prime_done(set(), set(), set())
        self.part2.mark_build_done()
        self.part2.mark_stage_done(set(), set())
        self.part2.mark_prime_done(set(), set(), set())
        self.assertThat(
            inspection.lifecycle_status(self.config),
            Equals(
                [
                    {
                        "part": "part1",
                        "pull": "complete",
                        "build": "complete",
                        "stage": "complete",
                        "prime": "complete",
                    },
                    {
                        "part": "part2",
                        "pull": "complete",
                        "build": "complete",
                        "stage": "complete",
                        "prime": "complete",
                    },
                ]
            ),
        )
        # Change the source of part2, which should make its pull step outdated
        open(os.path.join("src2", "file"), "w").close()
        self.assertThat(
            inspection.lifecycle_status(self.config),
            Equals(
                [
                    {
                        "part": "part1",
                        "pull": "complete",
                        "build": "complete",
                        "stage": "complete",
                        "prime": "complete",
                    },
                    {
                        "part": "part2",
                        "pull": "outdated (source changed)",
                        "build": "complete",
                        "stage": "complete",
                        "prime": "complete",
                    },
                ]
            ),
        )
        # Now clean the prime step of part1 and verify that it effects part2
        # (part2's prime becomes dirty because its dependency changed).
        self.part1.mark_cleaned(steps.PRIME)
        self.assertThat(
            inspection.lifecycle_status(self.config),
            Equals(
                [
                    {
                        "part": "part1",
                        "pull": "complete",
                        "build": "complete",
                        "stage": "complete",
                        "prime": None,
                    },
                    {
                        "part": "part2",
                        "pull": "outdated (source changed)",
                        "build": "complete",
                        "stage": "complete",
                        "prime": "dirty ('part1' changed)",
                    },
                ]
            ),
        )
| sergiusens/snapcraft | tests/unit/project_loader/inspection/test_lifecycle_status.py | Python | gpl-3.0 | 5,367 |
"""
Remove moderations where the user was already categorized (except self mod).
"""
from django.core.management.base import BaseCommand, CommandError
from moderation.moderate import remove_done_moderations
from community.models import Community
class Command(BaseCommand):
    """Management command: drop redundant moderations for one community."""
    help = (
        'Remove moderations from this community where the user was already '
        'moderated.'
    )
    def add_arguments(self, parser):
        # Optional named argument selecting the target community by name.
        parser.add_argument(
            '-com',
            '--community',
            nargs='?',
            type=str,
            help='Indicates the community these changes should be applied to'
        )
    def handle(self, *args, **kwargs):
        community_name = kwargs['community']
        # Validate the community exists before touching any moderations.
        try:
            Community.objects.get(name=community_name)
        except Community.DoesNotExist:
            self.stdout.write(self.style.ERROR("This community does not exist."))
            return
        counter = remove_done_moderations(community_name)
        self.stdout.write(
            self.style.SUCCESS(
                f"{counter} moderations(s) were deleted. :)"
            )
) | jeromecc/doctoctocbot | src/moderation/management/commands/remove_done_moderations.py | Python | mpl-2.0 | 1,134 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2017, James R. Barlow (https://github.com/jbarlow83/)
from math import cos, pi, sin
class PdfMatrix:
    """
    Support class for PDF content stream matrices

    PDF content stream matrices are 3x3 matrices summarized by a shorthand
    ``(a, b, c, d, e, f)`` which correspond to the first two column vectors.
    The final column vector is always ``(0, 0, 1)`` since this is using
    `homogenous coordinates <https://en.wikipedia.org/wiki/Homogeneous_coordinates>`_.

    PDF uses row vectors. That is, ``vr @ A'`` gives the effect of transforming
    a row vector ``vr=(x, y, 1)`` by the matrix ``A'``. Most textbook
    treatments use ``A @ vc`` where the column vector ``vc=(x, y, 1)'``.
    (``@`` is the Python matrix multiplication operator.)

    Addition and other operations are not implemented because they're not that
    meaningful in a PDF context (they can be defined and are mathematically
    meaningful in general).

    PdfMatrix objects are immutable. All transformations on them produce a new
    matrix.
    """

    def __init__(self, *args):
        """Construct from nothing (identity), six scalars ``(a,b,c,d,e,f)``,
        a 6-sequence, another PdfMatrix, or a 3x3 nested sequence.

        Raises:
            ValueError: if the arguments match none of the accepted forms.
        """
        # fmt: off
        if not args:
            # No arguments: identity matrix.
            self.values = ((1, 0, 0), (0, 1, 0), (0, 0, 1))
        elif len(args) == 6:
            a, b, c, d, e, f = map(float, args)
            self.values = ((a, b, 0),
                           (c, d, 0),
                           (e, f, 1))
        elif isinstance(args[0], PdfMatrix):
            # Copy constructor; values is an immutable tuple so sharing is safe.
            self.values = args[0].values
        elif len(args[0]) == 6:
            a, b, c, d, e, f = map(float, args[0])
            self.values = ((a, b, 0),
                           (c, d, 0),
                           (e, f, 1))
        elif len(args[0]) == 3 and len(args[0][0]) == 3:
            self.values = (tuple(args[0][0]),
                           tuple(args[0][1]),
                           tuple(args[0][2]))
        else:
            raise ValueError('invalid arguments: ' + repr(args))
        # fmt: on

    @staticmethod
    def identity():
        """Constructs and returns an identity matrix."""
        return PdfMatrix()

    def __matmul__(self, other):
        """Multiply this matrix by another matrix

        Can be used to concatenate transformations.
        """
        a = self.values
        b = other.values
        return PdfMatrix(
            [
                [sum(float(i) * float(j) for i, j in zip(row, col)) for col in zip(*b)]
                for row in a
            ]
        )

    def scaled(self, x, y):
        """Concatenates a scaling matrix on this matrix."""
        return self @ PdfMatrix((x, 0, 0, y, 0, 0))

    def rotated(self, angle_degrees_ccw):
        """Concatenates a rotation matrix on this matrix."""
        angle = angle_degrees_ccw / 180.0 * pi
        c, s = cos(angle), sin(angle)
        return self @ PdfMatrix((c, s, -s, c, 0, 0))

    def translated(self, x, y):
        """Translates this matrix."""
        return self @ PdfMatrix((1, 0, 0, 1, x, y))

    @property
    def shorthand(self):
        """Return the 6-tuple (a,b,c,d,e,f) that describes this matrix."""
        return (self.a, self.b, self.c, self.d, self.e, self.f)

    @property
    def a(self):
        return self.values[0][0]

    @property
    def b(self):
        return self.values[0][1]

    @property
    def c(self):
        return self.values[1][0]

    @property
    def d(self):
        return self.values[1][1]

    @property
    def e(self):
        return self.values[2][0]

    @property
    def f(self):
        return self.values[2][1]

    def __eq__(self, other):
        if isinstance(other, PdfMatrix):
            return self.shorthand == other.shorthand
        # Let the other operand's reflected comparison run; Python falls
        # back to identity (False) if it also declines.
        return NotImplemented

    def __hash__(self):
        # The class is documented immutable, so equal matrices may be used
        # interchangeably as dict keys / set members.
        return hash(self.shorthand)

    def encode(self):
        """Encode this matrix in binary suitable for including in a PDF."""
        return (
            f'{self.a:.6f} {self.b:.6f} {self.c:.6f} '
            f'{self.d:.6f} {self.e:.6f} {self.f:.6f}'
        ).encode()

    def __repr__(self):
        return f"pikepdf.PdfMatrix({repr(self.values)})"
| pikepdf/pikepdf | src/pikepdf/models/matrix.py | Python | mpl-2.0 | 4,208 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings unless already configured.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "webrtc_dev.settings")
    # Imported lazily so a missing Django fails at run time, not import time.
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| PauKerr/stackato-webrtc-dev | manage.py | Python | mpl-2.0 | 253 |
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
import os
# Generate a secret key on first run: if secret_key.py does not exist yet,
# create it next to this settings module, then import the generated value.
try:
    from secret_key import *
except ImportError:
    from helpers.views.secret_key_generator import *
    SETTINGS_DIR = os.path.abspath(os.path.dirname(__file__))
    generate_secret_key(os.path.join(SETTINGS_DIR, 'secret_key.py'))
    from secret_key import *
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Adjust the path for static files
STATIC_ROOT = '/var/lib/sistema-nira/nira/static/'
# Put your database settings here (NAME/USER/PASSWORD are placeholders)
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'seu_banco',
        'USER': 'seu_usuario',
        'PASSWORD': 'sua_senha',
        'HOST': 'localhost',
    }
}
| neuromat/nira | docs/source/instalacao/settings_local.py | Python | mpl-2.0 | 812 |
from bedrock.redirects.util import (redirect, is_firefox_redirector,
platform_redirector, no_redirect)
def firefox_mobile_faq(request, *args, **kwargs):
    """Route the mobile FAQ to the right SUMO product page.

    Requests carrying ``os=firefox-os`` in the raw query string go to the
    Firefox OS support page; everything else goes to the mobile one.
    """
    query_string = request.META.get('QUERY_STRING', '')
    wants_fxos = 'os=firefox-os' in query_string
    return ('https://support.mozilla.org/products/firefox-os' if wants_fxos
            else 'https://support.mozilla.org/products/mobile')
def firefox_channel(*args, **kwargs):
    """Build the per-platform redirector for /firefox/channel/ URLs.

    Delegates to ``platform_redirector`` with the desktop, Android and
    iOS channel view names, in that order.
    """
    view_names = ('firefox.channel.desktop',
                  'firefox.channel.android',
                  'firefox.channel.ios')
    return platform_redirector(*view_names)
redirectpatterns = (
# overrides
# issue 8096
redirect(r'^firefox/beta/all/?$', 'firefox.all', anchor='product-desktop-beta'),
redirect(r'^firefox/developer/all/?$', 'firefox.all', anchor='product-desktop-developer'),
redirect(r'^firefox/aurora/all/?$', 'firefox.all', anchor='product-desktop-developer'),
redirect(r'^firefox/nightly/all/?$', 'firefox.all', anchor='product-desktop-nightly'),
redirect(r'^firefox/organizations/all/?$', 'firefox.all', anchor='product-desktop-esr'),
redirect(r'^firefox/android/all/?$', 'firefox.all', anchor='product-android-release'),
redirect(r'^firefox/android/beta/all/?$', 'firefox.all', anchor='product-android-beta'),
redirect(r'^firefox/android/nightly/all/?$', 'firefox.all', anchor='product-android-nightly'),
# bug 831810 & 1142583 & 1239960, 1329931
redirect(r'^mwc/?$', 'https://support.mozilla.org/products/firefox-os', re_flags='i'),
# bug 748503
redirect(r'^projects/firefox/[^/]+a[0-9]+/firstrun(?P<p>.*)$',
'/firefox/nightly/firstrun{p}'),
# bug 1275483
redirect(r'^firefox/nightly/whatsnew/?', 'firefox.nightly_firstrun'),
# bug 840814
redirect(r'^projects/firefox'
r'(?P<version>/(?:\d+\.\d+\.?(?:\d+)?\.?(?:\d+)?(?:[a|b]?)(?:\d*)(?:pre)?(?:\d)?))'
r'(?P<page>/(?:firstrun|whatsnew))'
r'(?P<rest>/.*)?$', '/firefox{version}{page}{rest}'),
# bug 877165
redirect(r'^firefox/connect', 'mozorg.home'),
# bug 657049, 1238851
redirect(r'^firefox/accountmanager/?$', 'https://developer.mozilla.org/Persona'),
# Bug 1009247, 1101220, 1299947, 1314603, 1328409
redirect(r'^(firefox/)?beta/?$', firefox_channel(), cache_timeout=0, anchor='beta'),
redirect(r'^(firefox/)?aurora/?$', firefox_channel(), cache_timeout=0, anchor='aurora'),
redirect(r'^(firefox/)?nightly/?$', firefox_channel(), cache_timeout=0, anchor='nightly'),
redirect(r'^mobile/beta/?$', 'firefox.channel.android', anchor='beta'),
redirect(r'^mobile/aurora/?$', 'firefox.channel.android', anchor='aurora'),
redirect(r'^mobile/nightly/?$', 'firefox.channel.android', anchor='nightly'),
# bug 988044
redirect(r'^firefox/unsupported-systems\.html$', 'firefox.unsupported-systems'),
# bug 736934, 860865, 1101220, 1153351
redirect(r'^mobile/(?P<channel>(?:beta|aurora)/)?notes/?$',
'/firefox/android/{channel}notes/'),
redirect(r'^firefox/(?P<channel>(?:beta|aurora|organizations)/)?system-requirements(\.html)?$',
'/firefox/{channel}system-requirements/'),
# bug 1155870
redirect(r'^firefox/os/(releases|notes)/?$',
'https://developer.mozilla.org/Firefox_OS/Releases'),
redirect(r'^firefox/os/(?:release)?notes/(?P<v>[^/]+)/?$',
'https://developer.mozilla.org/Firefox_OS/Releases/{v}'),
# bug 878871
redirect(r'^firefoxos', '/firefox/os/'),
# bug 1438302
no_redirect(r'^firefox/download/thanks/?$'),
# Bug 1006616
redirect(r'^download/?$', 'firefox.new'),
# Bug 1409554
redirect(r'^(firefox|mobile)/download', 'firefox.new'),
# bug 837883
redirect(r'^firefox/firefox\.exe$', 'mozorg.home', re_flags='i'),
# bug 821006
redirect(r'^firefox/all(\.html)?$', 'firefox.all'),
# bug 727561
redirect(r'^firefox/search(?:\.html)?$', 'firefox.new'),
# bug 860865, 1101220, issue 8096
redirect(r'^firefox/all-(?:beta|rc)(?:/|\.html)?$', 'firefox.all',
anchor='product-desktop-beta'),
redirect(r'^firefox/all-aurora(?:/|\.html)?$', 'firefox.all',
anchor='product-desktop-developer'),
redirect(r'^firefox/aurora/(?P<page>all|notes|system-requirements)/?$',
'/firefox/developer/{page}/'),
redirect(r'^firefox/organizations/all\.html$', 'firefox.all',
anchor='product-desktop-esr'),
# bug 729329
redirect(r'^mobile/sync', 'firefox.accounts'),
# bug 882845
redirect(r'^firefox/toolkit/download-to-your-devices', 'firefox.new'),
# bug 1014823
redirect(r'^(products/)?firefox/releases/whatsnew/?$', 'firefox.whatsnew'),
# bug 929775
redirect(r'^firefox/update', 'firefox.new', query={
'utm_source': 'firefox-browser',
'utm_medium': 'firefox-browser',
'utm_campaign': 'firefox-update-redirect',
}),
# Bug 868182, 986174
redirect(r'^(m|(firefox/)?mobile)/features/?$', 'firefox.mobile.index'),
redirect(r'^(m|(firefox/)?mobile)/faq/?$', firefox_mobile_faq, query=False),
# bug 884933
redirect(r'^(m|(firefox/)?mobile)/platforms/?$',
'https://support.mozilla.org/kb/will-firefox-work-my-mobile-device'),
redirect(r'^m/?$', 'firefox.new'),
# Bug 730488 deprecate /firefox/all-older.html
redirect(r'^firefox/all-older\.html$', 'firefox.new'),
# bug 1120658
redirect(r'^seamonkey-transition\.html$',
'http://www-archive.mozilla.org/seamonkey-transition.html'),
# Bug 1186373
redirect(r'^firefox/hello/npssurvey/?$',
'https://www.surveygizmo.com/s3/2227372/Firefox-Hello-Product-Survey',
permanent=False),
# Bug 1221739
redirect(r'^firefox/hello/feedbacksurvey/?$',
'https://www.surveygizmo.com/s3/2319863/d2b7dc4b5687',
permanent=False),
# bug 1148127
redirect(r'^products/?$', 'firefox'),
# Bug 1110927
redirect(r'^(products/)?firefox/start/central\.html$', 'firefox.new'),
redirect(r'^firefox/sync/firstrun\.html$', 'firefox.accounts'),
# Bug 920212
redirect(r'^firefox/fx(/.*)?', 'firefox'),
# Bug 979531, 1003727, 979664, 979654, 979660
redirect(r'^firefox/customize/?$', 'https://support.mozilla.org/kb/customize-firefox-controls-buttons-and-toolbars'),
redirect(r'^firefox/(?:performance|happy|speed|memory)/?$', 'firefox.features.fast'),
redirect(r'^firefox/security/?$', 'firefox.features.independent'),
redirect(r'^firefox/technology/?$', 'https://developer.mozilla.org/docs/Tools'),
# Bug 979527
redirect(r'^(products/)?firefox/central(/|\.html|-lite\.html)?$', is_firefox_redirector(
'https://support.mozilla.org/kb/get-started-firefox-overview-main-features',
'firefox.new'), cache_timeout=0),
# bug 868169
redirect(r'^mobile/android-download\.html$',
'https://play.google.com/store/apps/details',
query={'id': 'org.mozilla.firefox'}, merge_query=True),
redirect(r'^mobile/android-download-beta\.html$',
'https://play.google.com/store/apps/details',
query={'id': 'org.mozilla.firefox_beta'}, merge_query=True),
# bug 675031
redirect(r'^projects/fennec(?P<page>/[\/\w\.-]+)?',
'http://website-archive.mozilla.org/www.mozilla.org/fennec_releasenotes/projects/fennec{page}'),
# bug 876581
redirect(r'^firefox/phishing-protection(/?)$',
'https://support.mozilla.org/kb/how-does-phishing-and-malware-protection-work'),
# bug 1006079
redirect(r'^mobile/home/?(?:index\.html)?$',
'https://blog.mozilla.org/services/2012/08/31/retiring-firefox-home/'),
# bug 949562
redirect(r'^mobile/home/1\.0/releasenotes(?:/(?:index\.html)?)?$',
'http://website-archive.mozilla.org/www.mozilla.org/firefox_home/mobile/home/1.0/releasenotes/'),
redirect(r'^mobile/home/1\.0\.2/releasenotes(?:/(?:index\.html)?)?$',
'http://website-archive.mozilla.org/www.mozilla.org/firefox_home/mobile/home/1.0.2/releasenotes/'),
redirect(r'^mobile/home/faq(?:/(?:index\.html)?)?$',
'http://website-archive.mozilla.org/www.mozilla.org/firefox_home/mobile/home/faq/'),
# bug 960064
redirect(r'^firefox/(?P<num>vpat-[.1-5]+)(?:\.html)?$',
'http://website-archive.mozilla.org/www.mozilla.org/firefox_vpat/firefox-{num}.html'),
redirect(r'^firefox/vpat(?:\.html)?',
'http://website-archive.mozilla.org/www.mozilla.org/firefox_vpat/firefox-vpat-3.html'),
# bug 1017564
redirect(r'^mobile/.+/system-requirements/?$',
'https://support.mozilla.org/kb/will-firefox-work-my-mobile-device'),
# bug 858315
redirect(r'^projects/devpreview/firstrun(?:/(?:index\.html)?)?$', '/firefox/firstrun/'),
redirect(r'^projects/devpreview/(?P<page>[\/\w\.-]+)?$',
'http://website-archive.mozilla.org/www.mozilla.org/devpreview_releasenotes/projects/devpreview/{page}'),
# bug 1001238, 1025056
no_redirect(r'^firefox/(24\.[5678]\.\d|28\.0)/releasenotes/?$'),
# bug 1235082
no_redirect(r'^firefox/23\.0(\.1)?/releasenotes/?$'),
# bug 947890, 1069902
redirect(r'^firefox/releases/(?P<v>[01]\.(?:.*))$',
'http://website-archive.mozilla.org/www.mozilla.org/firefox_releasenotes/en-US/firefox/releases/{v}'),
redirect(r'^(?P<path>(?:firefox|mobile)/(?:\d)\.(?:.*)/releasenotes(?:.*))$',
'http://website-archive.mozilla.org/www.mozilla.org/firefox_releasenotes/en-US/{path}'),
#
# bug 988746, 989423, 994186, 1153351
redirect(r'^mobile/(?P<v>2[38]\.0(?:\.\d)?|29\.0(?:beta|\.\d)?)/releasenotes/?$',
'/firefox/android/{v}/releasenotes/'),
redirect(r'^mobile/(?P<v>[3-9]\d\.\d(?:a2|beta|\.\d)?)/(?P<p>aurora|release)notes/?$',
'/firefox/android/{v}/{p}notes/'),
# bug 1041712, 1069335, 1069902
redirect(r'^(?P<prod>firefox|mobile)/(?P<vers>([0-9]|1[0-9]|2[0-8])\.(\d+(?:beta|a2|\.\d+)?))'
r'/(?P<channel>release|aurora)notes/(?P<page>[\/\w\.-]+)?$',
'http://website-archive.mozilla.org/www.mozilla.org/firefox_releasenotes/en-US'
'/{prod}/{vers}/{channel}notes/{page}'),
    # bug 767614 superseded by bug 957711 and 1003718 and 1239960
redirect(r'^(fennec)/?$', 'firefox'),
# issue 8749
redirect(r'^(mobile)/?$', '/firefox/mobile/'),
# bug 876668
redirect(r'^mobile/customize(?:/.*)?$', '/firefox/mobile/'),
# bug 1211907
redirect(r'^firefox/independent/?$', 'firefox.new'),
redirect(r'^firefox/personal/?$', 'firefox.new'),
# bug 845983
redirect(r'^metrofirefox(?P<path>/.*)?$', '/firefox{path}'),
# bug 1003703, 1009630
redirect(r'^firefox(?P<vers>/.+)/firstrun/eu/?$', '/firefox{vers}/firstrun/', query={
'utm_source': 'direct',
'utm_medium': 'none',
'utm_campaign': 'redirect',
'utm_content': 'eu-firstrun-redirect',
}),
# bug 960543
redirect(r'^firefox/(?P<vers>[23])\.0/eula', '/legal/eula/firefox-{vers}/'),
# bug 1150713
redirect(r'^firefox/sms(?:/.*)?$', 'firefox'),
# Redirects for SeaMonkey project website, now living at seamonkey-project.org
redirect(r'^projects/seamonkey/$', 'http://www.seamonkey-project.org/'),
redirect(r'^projects/seamonkey/artwork\.html$',
'http://www.seamonkey-project.org/dev/artwork'),
redirect(r'^projects/seamonkey/community\.html$',
'http://www.seamonkey-project.org/community'),
redirect(r'^projects/seamonkey/get-involved\.html$',
'http://www.seamonkey-project.org/dev/get-involved'),
redirect(r'^projects/seamonkey/index\.html$', 'http://www.seamonkey-project.org/'),
redirect(r'^projects/seamonkey/news\.html$', 'http://www.seamonkey-project.org/news'),
redirect(r'^projects/seamonkey/project-areas\.html$',
'http://www.seamonkey-project.org/dev/project-areas'),
redirect(r'^projects/seamonkey/releases/$', 'http://www.seamonkey-project.org/releases/'),
redirect(r'^projects/seamonkey/releases/index\.html$',
'http://www.seamonkey-project.org/releases/'),
redirect(r'^projects/seamonkey/review-and-flags\.html$',
'http://www.seamonkey-project.org/dev/review-and-flags'),
redirect(r'^projects/seamonkey/releases/(?P<vers>1\..*)\.html$',
'http://www.seamonkey-project.org/releases/{vers}'),
redirect(r'^projects/seamonkey/releases/seamonkey(?P<x>.*)/index\.html$',
'http://www.seamonkey-project.org/releases/seamonkey{x}/'),
redirect(r'^projects/seamonkey/releases/seamonkey(?P<x>.*/.*)\.html$',
'http://www.seamonkey-project.org/releases/seamonkey{x}'),
redirect(r'^projects/seamonkey/releases/updates/(?P<x>.*)$',
'http://www.seamonkey-project.org/releases/updates/{x}'),
redirect(r'^projects/seamonkey/start/$', 'http://www.seamonkey-project.org/start/'),
# Bug 638948 redirect beta privacy policy link
redirect(r'^firefox/beta/feedbackprivacypolicy/?$', '/privacy/firefox/'),
# Bug 1238248
redirect(r'^firefox/push/?$',
'https://support.mozilla.org/kb/push-notifications-firefox'),
# Bug 1239960
redirect(r'^firefox/partners/?$', 'https://support.mozilla.org/products/firefox-os'),
# Bug 1243060
redirect(r'^firefox/tiles/?$',
'https://support.mozilla.org/kb/about-tiles-new-tab'),
# Bug 1239863, 1329931
redirect(r'^firefox/os(/.*)?$', 'https://support.mozilla.org/products/firefox-os'),
# Bug 1252332
redirect(r'^sync/?$', 'firefox.accounts'),
# Bug 424204
redirect(r'^firefox/help/?$', 'https://support.mozilla.org/'),
redirect(r'^fxandroid/?$', 'firefox.mobile.index'),
# Bug 1255882
redirect(r'^firefox/personal', 'firefox.new'),
redirect(r'^firefox/upgrade', 'firefox.new'),
redirect(r'^firefox/ie', 'firefox.new'),
# must go above the bug 1255882 stuff below
redirect(r'^projects/xul/joy-of-xul\.html$',
'https://developer.mozilla.org/docs/Mozilla/Tech/XUL/The_Joy_of_XUL'),
redirect(r'^projects/xul/xre(old)?\.html$',
'https://developer.mozilla.org/docs/Archive/Mozilla/XULRunner'),
redirect(r'^projects/xslt/js-interface\.html$',
'https://developer.mozilla.org/docs/'
'Web/XSLT/Using_the_Mozilla_JavaScript_interface_to_XSL_Transformations'),
redirect(r'^projects/xslt/faq\.html$',
'https://developer.mozilla.org/docs/'
'Web/API/XSLTProcessor/XSL_Transformations_in_Mozilla_FAQ'),
redirect(r'^projects/xslt/standalone\.html$',
'https://developer.mozilla.org/docs/'
'Archive/Mozilla/Building_TransforMiiX_standalone'),
redirect(r'^projects/plugins/first-install-problem\.html$',
'https://developer.mozilla.org/Add-ons/Plugins/The_First_Install_Problem'),
redirect(r'^projects/plugins/install-scheme\.html$',
'https://developer.mozilla.org/docs/'
'Installing_plugins_to_Gecko_embedding_browsers_on_Windows'),
redirect(r'^projects/plugins/npruntime-sample-in-visual-studio\.html$',
'https://developer.mozilla.org/docs/'
'Compiling_The_npruntime_Sample_Plugin_in_Visual_Studio'),
redirect(r'^projects/plugins/npruntime\.html$',
'https://developer.mozilla.org/docs/Plugins/Guide/Scripting_plugins'),
redirect(r'^projects/plugins/plugin-host-control\.html$',
'https://developer.mozilla.org/docs/'
'Archive/Mozilla/ActiveX_Control_for_Hosting_Netscape_Plug-ins_in_IE'),
redirect(r'^projects/plugins/xembed-plugin-extension\.html$',
'https://developer.mozilla.org/Add-ons/Plugins/XEmbed_Extension_for_Mozilla_Plugins'),
redirect(r'^projects/netlib/http/http-debugging\.html$',
'https://developer.mozilla.org/docs/Mozilla/Debugging/HTTP_logging'),
redirect(r'^projects/netlib/integrated-auth\.html$',
'https://developer.mozilla.org/docs/Mozilla/Integrated_authentication'),
redirect(r'^projects/netlib/Link_Prefetching_FAQ\.html$',
'https://developer.mozilla.org/docs/Web/HTTP/Link_prefetching_FAQ'),
redirect(r'^projects/embedding/GRE\.html$',
'https://developer.mozilla.org/docs/Archive/Mozilla/GRE'),
redirect(r'^projects/embedding/windowAPIs\.html$',
'https://developer.mozilla.org/docs/Mozilla/Tech/Embedded_Dialog_API'),
redirect(r'^projects/embedding/howto/config\.html$',
'https://developer.mozilla.org/docs/Gecko/Embedding_Mozilla/Roll_your_own_browser'),
redirect(r'^projects/embedding/howto/Initializations\.html$',
'https://developer.mozilla.org/docs/Gecko/Embedding_Mozilla/Roll_your_own_browser'),
redirect(r'^projects/embedding/embedoverview/EmbeddingBasicsTOC\.html$',
'https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics#toc'),
redirect(r'^projects/embedding/embedoverview/EmbeddingBasics\.html$',
'https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics'),
redirect(r'^projects/embedding/embedoverview/EmbeddingBasics2\.html$',
'https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics#Why_Gecko'),
redirect(r'^projects/embedding/embedoverview/EmbeddingBasics3\.html$',
'https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics'
'#What_You_Need_to_Embed'),
redirect(r'^projects/embedding/embedoverview/EmbeddingBasics4\.html$',
'https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics'
'#Getting_the_Code'),
redirect(r'^projects/embedding/embedoverview/EmbeddingBasics5\.html$',
'https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics'
'#Understanding_the_Coding_Environment'),
redirect(r'^projects/embedding/embedoverview/EmbeddingBasics6\.html$',
'https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics#XPCOM'),
redirect(r'^projects/embedding/embedoverview/EmbeddingBasics7\.html$',
'https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics#XPIDL'),
redirect(r'^projects/embedding/embedoverview/EmbeddingBasics8\.html$',
'https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics'
'#XPConnect_and_XPT_files'),
redirect(r'^projects/embedding/embedoverview/EmbeddingBasics9\.html$',
'https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics'
'#String_classes'),
redirect(r'^projects/embedding/embedoverview/EmbeddingBasics10\.html$',
'https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics#XUL.2FXBL'),
redirect(r'^projects/embedding/embedoverview/EmbeddingBasics11\.html$',
'https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics'
'#Choosing_Additional_Functionalities'),
redirect(r'^projects/embedding/embedoverview/EmbeddingBasics12\.html$',
'https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics'
'#What_Gecko_Provides'),
redirect(r'^projects/embedding/embedoverview/EmbeddingBasics13\.html$',
'https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics'
'#What_You_Provide'),
redirect(r'^projects/embedding/embedoverview/EmbeddingBasics14\.html$',
'https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics'
'#Common_Embedding_Tasks'),
redirect(r'^projects/embedding/embedoverview/EmbeddingBasics16\.html$',
'https://developer.mozilla.org/docs/Mozilla/Gecko/Gecko_Embedding_Basics'
'#Appendix:_Data_Flow_Inside_Gecko'),
redirect(r'^projects/embedding/examples/',
'https://developer.mozilla.org/docs/Gecko/Embedding_Mozilla/Roll_your_own_browser'),
# Bug 1255882
redirect(r'^projects/bonecho/anti-phishing/?$',
'https://support.mozilla.org/kb/how-does-phishing-and-malware-protection-work'),
redirect(r'^projects/bonecho(/.*)?$', 'firefox.channel.desktop'),
redirect(r'^projects/bonsai(/.*)?$', 'https://wiki.mozilla.org/Bonsai'),
redirect(r'^projects/camino(/.*)?$', 'http://caminobrowser.org/'),
redirect(r'^projects/cck(/.*)?$', 'https://wiki.mozilla.org/CCK'),
redirect(r'^projects/chimera(/.*)?$', 'http://caminobrowser.org/'),
redirect(r'^projects/deerpark(/.*)?$', 'firefox.channel.desktop'),
redirect(r'^projects/embedding/faq\.html$',
'https://developer.mozilla.org/docs/Gecko/Embedding_Mozilla/FAQ/How_do_I...'),
redirect(r'^projects/embedding(/.*)?$',
'https://developer.mozilla.org/docs/Gecko/Embedding_Mozilla'),
redirect(r'^projects/granparadiso(/.*)?$', 'firefox.channel.desktop'),
redirect(r'^projects/inspector/faq\.html$',
'https://developer.mozilla.org/docs/Tools/Add-ons/DOM_Inspector/DOM_Inspector_FAQ'),
redirect(r'^projects/inspector(/.*)?$',
'https://developer.mozilla.org/docs/Tools/Add-ons/DOM_Inspector'),
redirect(r'^projects/javaconnect(/.*)?$',
'http://developer.mozilla.org/en/JavaXPCOM'),
redirect(r'^projects/minefield(/.*)?$', 'firefox.channel.desktop'),
redirect(r'^projects/minimo(/.*)?$', 'https://wiki.mozilla.org/Mobile'),
redirect(r'^projects/namoroka(/.*)?$', 'firefox.channel.desktop'),
redirect(r'^projects/nspr(?:/.*)?$', 'https://developer.mozilla.org/docs/NSPR'),
redirect(r'^projects/netlib(/.*)?$',
'https://developer.mozilla.org/docs/Mozilla/Projects/Necko'),
redirect(r'^projects/plugins(/.*)?$', 'https://developer.mozilla.org/Add-ons/Plugins'),
redirect(r'^projects/rt-messaging(/.*)?$', 'http://chatzilla.hacksrus.com/'),
redirect(r'^projects/shiretoko(/.*)?$', 'firefox.channel.desktop'),
redirect(r'^projects/string(/.*)?$',
'https://developer.mozilla.org/en/XPCOM_string_guide'),
redirect(r'^projects/tech-evangelism(/.*)?$',
'https://wiki.mozilla.org/Evangelism'),
redirect(r'^projects/venkman(/.*)?$',
'https://developer.mozilla.org/docs/Archive/Mozilla/Venkman'),
redirect(r'^projects/webservices/examples/babelfish-wsdl(/.*)?$',
'https://developer.mozilla.org/docs/SOAP_in_Gecko-based_Browsers'),
redirect(r'^projects/xbl(/.*)?$', 'https://developer.mozilla.org/docs/Mozilla/Tech/XBL'),
redirect(r'^projects/xforms(/.*)?$', 'https://developer.mozilla.org/docs/Archive/Web/XForms'),
redirect(r'^projects/xpcom(/.*)?$', 'https://developer.mozilla.org/docs/Mozilla/Tech/XPCOM'),
redirect(r'^projects/xpinstall(/.*)?$',
'https://developer.mozilla.org/docs/Archive/Mozilla/XPInstall'),
redirect(r'^projects/xslt(/.*)?$', 'https://developer.mozilla.org/docs/Web/XSLT'),
redirect(r'^projects/xul(/.*)?$', 'https://developer.mozilla.org/docs/Mozilla/Tech/XUL'),
redirect(r'^quality/help(/.*)?$', 'http://quality.mozilla.org/get-involved'),
redirect(r'^quality(/.*)?$', 'http://quality.mozilla.org/'),
# Bug 654614 /blocklist -> addons.m.o/blocked
redirect(r'^blocklist(/.*)?$', 'https://addons.mozilla.org/blocked/'),
redirect(r'^products/firebird/compare/?$', '/firefox/browsers/compare/'),
redirect(r'^products/firebird/?$', 'firefox'),
redirect(r'^products/firebird/download/$', 'firefox.new'),
redirect(r'^products/firefox/add-engines\.html$',
'https://addons.mozilla.org/search-engines.php'),
redirect(r'^products/firefox/all$', '/firefox/all/'),
redirect(r'^products/firefox/all\.html$', '/firefox/all/'),
redirect(r'^products/firefox/banners\.html$', '/contribute/friends/'),
redirect(r'^products/firefox/buttons\.html$', '/contribute/friends/'),
redirect(r'^products/firefox/download', 'firefox.new'),
redirect(r'^products/firefox/get$', 'firefox.new'),
redirect(r'^products/firefox/live-bookmarks', '/firefox/features/'),
redirect(r'^products/firefox/mirrors\.html$', 'http://www-archive.mozilla.org/mirrors.html'),
redirect(r'^products/firefox/releases/$', '/firefox/releases/'),
redirect(r'^products/firefox/releases/0\.9\.2\.html$',
'http://website-archive.mozilla.org/www.mozilla.org/firefox_releasenotes'
'/en-US/firefox/releases/0.9.1.html'),
redirect(r'^products/firefox/releases/0\.10\.1\.html$',
'http://website-archive.mozilla.org/www.mozilla.org/firefox_releasenotes'
'/en-US/firefox/releases/0.10.html'),
redirect(r'^products/firefox/search', '/firefox/features/'),
redirect(r'^products/firefox/shelf\.html$', 'https://blog.mozilla.org/press/awards/'),
redirect(r'^products/firefox/smart-keywords\.html$',
'https://support.mozilla.org/en-US/kb/Smart+keywords'),
redirect(r'^products/firefox/support/$', 'https://support.mozilla.org/'),
redirect(r'^products/firefox/switch', 'firefox.new'),
redirect(r'^products/firefox/system-requirements', '/firefox/system-requirements/'),
redirect(r'^products/firefox/tabbed-browsing', 'firefox'),
redirect(r'^products/firefox/text-zoom\.html$',
'https://support.mozilla.org/kb/font-size-and-zoom-increase-size-of-web-pages'),
redirect(r'^products/firefox/themes$', 'https://addons.mozilla.org/themes/'),
redirect(r'^products/firefox/themes\.html$', 'https://addons.mozilla.org/themes/'),
redirect(r'^products/firefox/ui-customize\.html$',
'https://support.mozilla.org/kb/customize-firefox-controls-buttons-and-toolbars'),
redirect(r'^products/firefox/upgrade', 'firefox.new'),
redirect(r'^products/firefox/why/$', 'firefox'),
# bug 857246 redirect /products/firefox/start/ to start.mozilla.org
redirect(r'^products/firefox/start/?$', 'http://start.mozilla.org'),
# issue 9008
redirect(r'^products/firefox(/.*)?$', 'firefox.products.index'),
# bug 1260423
redirect(r'^firefox/choose/?$', 'firefox.new'),
# bug 1288552 - redirect /secondrun/ traffic from funnelcake test
redirect(r'^firefox(?:\/\d+\.\d+(?:\.\d+)?(?:a\d+)?)?/secondrun(?:/.*)?',
'firefox.mobile.index', query=False),
# bug 1293539
redirect(r'^firefox(?:\/\d+\.\d+(?:\.\d+)?(?:a\d+)?)?/tour/?$',
'https://support.mozilla.org/kb/get-started-firefox-overview-main-features'),
# bug 1295332
redirect(r'^hello/?$', 'https://support.mozilla.org/kb/hello-status'),
redirect(r'^firefox/hello/?$', 'https://support.mozilla.org/kb/hello-status'),
redirect(r'^firefox(?:\/\d+\.\d+(?:\.\d+)?(?:a\d+)?)?/hello/start/?$', 'https://support.mozilla.org/kb/hello-status'),
# bug 1299947, 1326383
redirect(r'^firefox/channel/?$', firefox_channel(), cache_timeout=0),
# Bug 1277196
redirect(r'^firefox(?:\/\d+\.\d+(?:\.\d+)?(?:a\d+)?)?/firstrun/learnmore/?$', 'firefox.features.index', query={
'utm_source': 'firefox-browser',
'utm_medium': 'firefox-browser',
'utm_campaign': 'redirect',
'utm_content': 'learnmore-tab',
}),
redirect(r'^firefox/windows-10/welcome/?$', 'https://support.mozilla.org/kb/how-change-your-default-browser-windows-10', query={
'utm_source': 'firefox-browser',
'utm_medium': 'firefox-browser',
'utm_campaign': 'redirect',
'utm_content': 'windows10-welcome-tab',
}),
# bug 1369732
redirect(r'^Firefox/?$', 'firefox'),
# bug 1370587
redirect(r'^firefox/sync/?', 'firefox.accounts'),
# bug 1386112
redirect(r'^firefox/android/faq/?', 'https://support.mozilla.org/products/mobile'),
# bug 1392796
redirect(r'^firefox/desktop/fast/?', 'firefox.features.fast'),
redirect(r'^firefox/desktop/trust/?', 'firefox.features.independent'),
redirect(r'^firefox/desktop/tips/?', 'firefox.features.index'),
redirect(r'^firefox/desktop/customize/?', 'https://support.mozilla.org/kb/customize-firefox-controls-buttons-and-toolbars'),
redirect(r'^firefox/private-browsing/?', 'firefox.features.private-browsing'),
# bug 1405436
redirect(r'^firefox/organic', '/firefox/'),
redirect(r'^firefox/landing/better', '/firefox/'),
redirect(r'^firefox/(new/)?addon', 'https://addons.mozilla.org'),
redirect(r'^firefox/tips', '/firefox/features/'),
redirect(r'^firefox/new/.+', '/firefox/new/'),
redirect(r'^firefox/38\.0\.3/releasenotes/$', '/firefox/38.0.5/releasenotes/'),
redirect(r'^firefox/default\.htm', '/firefox/'),
redirect(r'^firefox/android/(?P<version>\d+\.\d+(?:\.\d+)?)$', '/firefox/android/{version}/releasenotes/'),
redirect(r'^firefox/stats/', '/firefox/'),
# bug 1416706
redirect(r'^firefox/desktop/?', 'firefox.new'),
# bug 1418500
redirect(r'^firefox/android/?$', 'firefox.mobile.index'),
redirect(r'^firefox/focus/?$', 'firefox.mobile.index'),
redirect(r'^firefox/ios/?$', 'firefox.mobile.index'),
# bug 1416708
redirect(r'^firefox/quantum/?', 'firefox'),
# bug 1421584, issue 7491
redirect(r'^firefox/organizations/faq/?$', 'firefox.enterprise.index'),
# bug 1425865 - Amazon Fire TV goes to SUMO until we have a product page.
redirect(
r'^firefox/fire-tv/?$',
'https://support.mozilla.org/products/firefox-fire-tv/',
permanent=False,
),
# bug 1430894
redirect(r'^firefox/interest-dashboard/?', 'https://support.mozilla.org/kb/firefox-add-technology-modernizing'),
# bug 1419244
redirect(r'^firefox/mobile-download(/.*)?', 'firefox.mobile.index'),
# bug 960651, 1436973
redirect(r'(firefox|mobile)/([^/]+)/details(/|/.+\.html)?$', 'firefox.unsupported-systems',
locale_prefix=False),
redirect(r'^firefox/unsupported/', 'firefox.unsupported-systems'),
# bug 1428783
redirect(r'^firefox/dnt/?$', 'https://support.mozilla.org/kb/how-do-i-turn-do-not-track-feature'),
# issue 6209
redirect(r'^pocket/?', '/firefox/pocket/'),
# issue 6186
redirect(r'^vote/?', '/firefox/election/'),
# fxa
redirect(r'^firefox/accounts/features/?', 'firefox.accounts'),
redirect(r'^firefox/features/sync/?', 'firefox.accounts'),
# bug 1577449
redirect(r'^firefox/features/send-tabs/?', 'https://support.mozilla.org/kb/send-tab-firefox-desktop-other-devices'),
# issue 6512
redirect(r'^firefox/firefox\.html$', 'firefox.new'),
# issue 6979
redirect(r'^firefoxfightsforyou/?', 'firefox'),
# issue 7210
redirect(r'^firefox/account/?$', 'firefox.accounts'),
# issue 7287
redirect(r'^accounts/?$', 'firefox.accounts'),
# issue 7436
redirect(r'^firefox/feedback/?$', 'https://support.mozilla.org/questions/new/desktop'),
# issue 7491
redirect(r'^firefox/organizations/?$', 'firefox.enterprise.index'),
# issue 7670
redirect(r'^/firefox/fights-for-you/?', 'firefox'),
# issue #7424
redirect(r'^firefox(?:\/\d+\.\d+(?:\.\d+)?(?:a\d+)?)?/content-blocking/start/?$', 'https://support.mozilla.org/kb/content-blocking'),
# issue #7424
redirect(r'^firefox(?:\/\d+\.\d+(?:\.\d+)?(?:a\d+)?)?/tracking-protection/start/?$', 'https://support.mozilla.org/kb/tracking-protection'),
# issue 8596
redirect(r'firefox/xr/?$', 'https://support.mozilla.org/kb/webxr-permission-info-page'),
# issue 8419
redirect(r'firefox/this-browser-comes-highly-recommended/?$', 'firefox.developer.index'),
# issue 8420
redirect(r'firefox/dedicated-profiles/?$', 'https://support.mozilla.org/kb/dedicated-profiles-firefox-installation'),
# issue 8641
redirect(r'^/firefox/windows-64-bit/?$', 'firefox.browsers.windows-64-bit'),
redirect(r'^/firefox/best-browser/?$', 'firefox.browsers.best-browser'),
)
| hoosteeno/bedrock | bedrock/firefox/redirects.py | Python | mpl-2.0 | 31,967 |
# Copyright 2014-2020 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""Ray transforms."""
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
import numpy as np
from odl.discr import DiscretizedSpace
from odl.operator import Operator
from odl.space.weighting import ConstWeighting
from odl.tomo.backends import (
ASTRA_AVAILABLE, ASTRA_CUDA_AVAILABLE, SKIMAGE_AVAILABLE)
from odl.tomo.backends.astra_cpu import AstraCpuImpl
from odl.tomo.backends.astra_cuda import AstraCudaImpl
from odl.tomo.backends.skimage_radon import SkImageImpl
from odl.tomo.geometry import Geometry
from odl.util import is_string
# RAY_TRAFO_IMPLS are used by `RayTransform` when no `impl` is given.
# The last inserted implementation has highest priority.
RAY_TRAFO_IMPLS = OrderedDict()
# Register each backend only if its dependency imported successfully;
# astra_cuda is inserted last and therefore wins during auto-selection.
if SKIMAGE_AVAILABLE:
    RAY_TRAFO_IMPLS['skimage'] = SkImageImpl
if ASTRA_AVAILABLE:
    RAY_TRAFO_IMPLS['astra_cpu'] = AstraCpuImpl
if ASTRA_CUDA_AVAILABLE:
    RAY_TRAFO_IMPLS['astra_cuda'] = AstraCudaImpl
# Public names exported by this module.
__all__ = ('RayTransform',)
class RayTransform(Operator):
    """Linear X-Ray (Radon) transform operator between L^p spaces."""

    def __init__(self, vol_space, geometry, **kwargs):
        """Initialize a new instance.

        Parameters
        ----------
        vol_space : `DiscretizedSpace`
            Discretized reconstruction space, the domain of the forward
            operator or the range of the adjoint (back-projection).
        geometry : `Geometry`
            Geometry of the transform that contains information about
            the data structure.

        Other Parameters
        ----------------
        impl : {`None`, 'astra_cuda', 'astra_cpu', 'skimage'}, optional
            Implementation back-end for the transform. Supported back-ends:

            - ``'astra_cuda'``: ASTRA toolbox, using CUDA, 2D or 3D
            - ``'astra_cpu'``: ASTRA toolbox using CPU, only 2D
            - ``'skimage'``: scikit-image, only 2D parallel with square
              reconstruction space.

            For the default ``None``, the fastest available back-end is
            used.
        proj_space : `DiscretizedSpace`, optional
            Discretized projection (sinogram) space, the range of the forward
            operator or the domain of the adjoint (back-projection).
            Default: Inferred from parameters.
        use_cache : bool, optional
            If ``True``, data is cached. This gives a significant speed-up
            at the expense of a notable memory overhead, both on the GPU
            and on the CPU, since a full volume and a projection dataset
            are stored. That may be prohibitive in 3D.
            Default: True
        kwargs
            Further keyword arguments passed to the projector backend.

        Notes
        -----
        The ASTRA backend is faster if data are given with
        ``dtype='float32'`` and storage order 'C'. Otherwise copies will be
        needed.
        """
        if not isinstance(vol_space, DiscretizedSpace):
            raise TypeError(
                '`vol_space` must be a `DiscretizedSpace` instance, got '
                '{!r}'.format(vol_space))

        if not isinstance(geometry, Geometry):
            raise TypeError(
                '`geometry` must be a `Geometry` instance, got {!r}'
                ''.format(geometry)
            )

        # Generate or check projection space
        proj_space = kwargs.pop('proj_space', None)
        if proj_space is None:
            dtype = vol_space.dtype

            if not vol_space.is_weighted:
                weighting = None
            elif (
                isinstance(vol_space.weighting, ConstWeighting)
                and np.isclose(
                    vol_space.weighting.const, vol_space.cell_volume
                )
            ):
                # Approximate cell volume
                # TODO: find a way to treat angles and detector differently
                # regarding weighting. While the detector should be uniformly
                # discretized, the angles do not have to and often are not.
                # The needed partition property is available since
                # commit a551190d, but weighting is not adapted yet.
                # See also issue #286
                extent = float(geometry.partition.extent.prod())
                size = float(geometry.partition.size)
                weighting = extent / size
            else:
                raise NotImplementedError('unknown weighting of domain')

            proj_tspace = vol_space.tspace_type(
                geometry.partition.shape,
                weighting=weighting,
                dtype=dtype,
            )

            # Pick pretty axis labels matching the number of motion
            # (angle) and detector dimensions; fall back to None when the
            # configuration is not recognized.
            if geometry.motion_partition.ndim == 0:
                angle_labels = []
            elif geometry.motion_partition.ndim == 1:
                angle_labels = ['$\\varphi$']
            elif geometry.motion_partition.ndim == 2:
                # TODO: check order
                angle_labels = ['$\\vartheta$', '$\\varphi$']
            elif geometry.motion_partition.ndim == 3:
                # TODO: check order
                angle_labels = ['$\\vartheta$', '$\\varphi$', '$\\psi$']
            else:
                angle_labels = None

            if geometry.det_partition.ndim == 1:
                det_labels = ['$s$']
            elif geometry.det_partition.ndim == 2:
                det_labels = ['$u$', '$v$']
            else:
                det_labels = None

            if angle_labels is None or det_labels is None:
                # Fallback for unknown configuration
                axis_labels = None
            else:
                axis_labels = angle_labels + det_labels

            proj_space = DiscretizedSpace(
                geometry.partition,
                proj_tspace,
                axis_labels=axis_labels
            )
        else:
            # proj_space was given, checking some stuff
            if not isinstance(proj_space, DiscretizedSpace):
                raise TypeError(
                    '`proj_space` must be a `DiscretizedSpace` instance, '
                    'got {!r}'.format(proj_space)
                )
            if proj_space.shape != geometry.partition.shape:
                raise ValueError(
                    '`proj_space.shape` not equal to `geometry.shape`: '
                    '{} != {}'
                    ''.format(proj_space.shape, geometry.partition.shape)
                )
            if proj_space.dtype != vol_space.dtype:
                raise ValueError(
                    '`proj_space.dtype` not equal to `vol_space.dtype`: '
                    '{} != {}'.format(proj_space.dtype, vol_space.dtype)
                )

        if vol_space.ndim != geometry.ndim:
            raise ValueError(
                '`vol_space.ndim` not equal to `geometry.ndim`: '
                '{} != {}'.format(vol_space.ndim, geometry.ndim)
            )

        # Cache for input/output arrays of transforms
        self.use_cache = kwargs.pop('use_cache', True)

        # Check `impl`
        impl = kwargs.pop('impl', None)
        impl_type, self.__cached_impl = self._initialize_impl(impl)
        self._impl_type = impl_type
        if is_string(impl):
            self.__impl = impl.lower()
        else:
            self.__impl = impl_type.__name__

        self._geometry = geometry

        # Reserve name for cached properties (used for efficiency reasons)
        self._adjoint = None

        # Extra kwargs that can be reused for adjoint etc. These must
        # be retrieved with `get` instead of `pop` above.
        self._extra_kwargs = kwargs

        # Finally, initialize the Operator structure
        super(RayTransform, self).__init__(
            domain=vol_space, range=proj_space, linear=True
        )

    @staticmethod
    def _initialize_impl(impl):
        """Internal method to verify the validity of the `impl` kwarg.

        Returns the backend type and, when an already-constructed backend
        object was passed, that instance (otherwise ``None``).
        """
        impl_instance = None

        if impl is None:  # User didn't specify a backend
            if not RAY_TRAFO_IMPLS:
                raise RuntimeError(
                    'No `RayTransform` back-end available; this requires '
                    '3rd party packages, please check the install docs.'
                )

            # Select fastest available
            impl_type = next(reversed(RAY_TRAFO_IMPLS.values()))
        elif is_string(impl):
            # User specified `impl` by name
            if impl.lower() not in RAY_TRAFO_IMPLS.keys():
                raise ValueError(
                    'The {!r} `impl` is not found. This `impl` is either '
                    'not supported, it may be misspelled, or external '
                    'packages required are not available. Consult '
                    '`RAY_TRAFO_IMPLS` to find the run-time available '
                    'implementations.'.format(impl)
                )
            impl_type = RAY_TRAFO_IMPLS[impl.lower()]
        else:
            # `impl` is a type or an instance implementing the backend
            # protocol (duck typing). The previous code additionally
            # tested `isinstance(impl, object)`, which is vacuously true
            # for every Python object and made the final error branch
            # unreachable; the duck-type check below now carries the
            # validation for all non-string values.
            forward = getattr(impl, "call_forward", None)
            backward = getattr(impl, "call_backward", None)
            if not callable(forward) and not callable(backward):
                raise TypeError(
                    'Type {!r} must have a `call_forward()` '
                    'and/or `call_backward()`.'.format(impl)
                )

            if isinstance(impl, type):
                impl_type = impl
            else:
                # User gave an object for `impl`, meaning to set the
                # backend cache to an already initiated object
                impl_type = type(impl)
                impl_instance = impl

        return impl_type, impl_instance

    @property
    def impl(self):
        """Implementation name string.

        If a custom ``impl`` was provided this method returns a ``str``
        of the type."""
        return self.__impl

    def get_impl(self, use_cache=True):
        """Fetches or instantiates implementation backend for evaluation.

        Parameters
        ----------
        use_cache : bool
            If ``True`` returns the cached implementation backend, if it
            was generated in a previous call (or given with ``__init__``).
            If ``False`` a new instance of the backend will be generated,
            freeing up GPU memory and RAM used by the backend.
        """
        # Use impl creation (__cached_impl) when `use_cache` is True
        if not use_cache or self.__cached_impl is None:
            # Lazily (re)instantiate the backend
            self.__cached_impl = self._impl_type(
                self.geometry,
                vol_space=self.domain,
                proj_space=self.range)

        return self.__cached_impl

    def _call(self, x, out=None, **kwargs):
        """Forward projection.

        Parameters
        ----------
        x : DiscreteLpElement
            A volume. Must be an element of `RayTransform.domain`.
        out : `RayTransform.range` element, optional
            Element to which the result of the operator evaluation is written.
        **kwargs
            Extra keyword arguments, passed on to the implementation
            backend.

        Returns
        -------
        DiscreteLpElement
            Result of the transform, an element of the range.
        """
        return self.get_impl(self.use_cache).call_forward(x, out, **kwargs)

    @property
    def geometry(self):
        """Geometry of this operator."""
        return self._geometry

    @property
    def adjoint(self):
        """Adjoint of this operator.

        The adjoint of the `RayTransform` is the linear `RayBackProjection`
        operator. It uses the same geometry and shares the implementation
        backend whenever `RayTransform.use_cache` is `True`.

        Returns
        -------
        adjoint : `RayBackProjection`
        """
        if self._adjoint is None:
            # bring `self` into scope to prevent shadowing in inline class
            ray_trafo = self

            class RayBackProjection(Operator):
                """Adjoint of the discrete Ray transform between L^p spaces."""

                def _call(self, x, out=None, **kwargs):
                    """Backprojection.

                    Parameters
                    ----------
                    x : DiscreteLpElement
                        A sinogram. Must be an element of
                        `RayTransform.range` (domain of `RayBackProjection`).
                    out : `RayBackProjection.domain` element, optional
                        A volume to which the result of this evaluation is
                        written.
                    **kwargs
                        Extra keyword arguments, passed on to the
                        implementation backend.

                    Returns
                    -------
                    DiscreteLpElement
                        Result of the transform in the domain
                        of `RayProjection`.
                    """
                    return ray_trafo.get_impl(
                        ray_trafo.use_cache
                    ).call_backward(x, out, **kwargs)

                @property
                def geometry(self):
                    return ray_trafo.geometry

                @property
                def adjoint(self):
                    return ray_trafo

            kwargs = self._extra_kwargs.copy()
            kwargs['domain'] = self.range
            self._adjoint = RayBackProjection(
                range=self.domain, linear=True, **kwargs
            )

        return self._adjoint
if __name__ == '__main__':
    # Run this module's doctests when executed as a script.
    from odl.util.testutils import run_doctests
    run_doctests()
| kohr-h/odl | odl/tomo/operators/ray_trafo.py | Python | mpl-2.0 | 14,368 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import print_function, unicode_literals
import argparse
import conditions
import os
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
)
@CommandProvider
class Sync(object):
    """Interface for sync related commands."""

    def __init__(self, context):
        # Root of the B2G checkout, provided by the mach context.
        self.b2g_home = context.b2g_home

    def _run_process(self, command, env=None):
        """Spawn ``command``, stream its output, and return the exit code.

        Shared by the `sync` and `config` commands, which previously
        duplicated this launch logic.

        :param command: argv list for the process.
        :param env: environment mapping; defaults to a copy of os.environ.
        """
        if env is None:
            env = os.environ.copy()

        from mozprocess import ProcessHandler
        p = ProcessHandler(command, env=env)
        p.run()
        return p.wait()

    @Command('sync', category='devenv',
             conditions=[conditions.is_configured],
             description='Sync repositories.')
    @CommandArgument('args', nargs=argparse.REMAINDER,
                     help='Run |mach sync -help| to see full arguments.')
    def sync(self, args):
        """Run ``repo sync`` with any extra user-supplied arguments."""
        command = [os.path.join(self.b2g_home, 'repo'), 'sync']
        command.extend(args)
        return self._run_process(command)

    @Command('config', category='devenv',
             conditions=[],
             description='Configure source tree for target device.')
    @CommandArgument('device', action='store', nargs='?', default=None,
                     help='Device to configure.')
    @CommandArgument('--branch', action='store',
                     help='Branch to configure, defaults to master.')
    def config(self, device=None, branch=None):
        """Run ``config.sh`` for ``device``, optionally selecting ``branch``."""
        command = [os.path.join(self.b2g_home, 'config.sh')]

        env = os.environ.copy()
        if branch:
            env['BRANCH'] = branch
        if device:
            command.append(device)

        return self._run_process(command, env)
| ahal/b2g-commands | b2gcommands/sync_commands.py | Python | mpl-2.0 | 1,820 |
#!/usr/local/bin/python2.6
# hasportal2launchedyet.com
# this script by @lukegb - Luke Granger-Brown
#
# I hereby license this script under the GNU Affero GPLv3
# and ask that any changes you make for the betterment of this script
# be contributed back to me. :)
#
#####################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
##
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
######################################
import urllib2
import datetime, texttime, time
import json
# Log the start of this cron run and force English duration strings
# from the texttime library.
print "Beginning run... %s" % (str(datetime.datetime.fromtimestamp(time.time())))
texttime.LANG = "en"
def format_time(timein):
    """Format a duration given in seconds as a clock-style string.

    Returns ``"H:MM:SS"`` when the duration has whole hours (days are
    folded into the hour count, hours are not zero-padded), ``"MM:SS"``
    when it is under an hour, and ``"SS"`` when it is under a minute.
    Negative durations are clamped to zero.

    The previous implementation interpreted the duration as a Unix
    timestamp via ``datetime.fromtimestamp`` (making the result depend on
    the local timezone) and tested ``datet.day != 0``, which is always
    true since day-of-month is 1-31 — so the shorter MM:SS / SS branches
    were dead code. This version computes directly from seconds.
    """
    total = int(timein)
    if total < 0:
        total = 0
    hours, rem = divmod(total, 3600)
    minutes, seconds = divmod(rem, 60)
    if hours:
        return "%d:%02d:%02d" % (hours, minutes, seconds)
    if minutes:
        return "%02d:%02d" % (minutes, seconds)
    return "%02d" % seconds
# Detect whether Portal 2 has launched: the Steam store page shows a
# "coming soon" area or an unlock countdown until release.
store = urllib2.urlopen("http://store.steampowered.com/app/620/").read()
if 'game_area_comingsoon' in store or 'will unlock in' in store:
    launched = False
else:
    launched = True

# GLaDOS@Home progress page, scraped line by line below.
base_uri = "http://www.aperturescience.com/glados@home/"
base_get = urllib2.urlopen(base_uri)
#focus = 15540
# Currently "focused" game id is maintained externally in a flat file.
focus_f = open('/usr/home/lukegb/current_focus.txt', 'r').read()
focus = int(focus_f)
games = {}
# app id -> [display name, capsule image URL]; a weighting slot is
# appended to each entry just after.
gamemap = {
    18500: ['Defense Grid: The Awakening', 'http://media.steampowered.com/steamcommunity/public/images/apps/18500/57c5924a97a1c43971acc05590952eddec21c313.jpg'],
    1250: ['Killing Floor', 'http://media.steampowered.com/steamcommunity/public/images/apps/1250/d8a2d777cb4c59cf06aa244166db232336520547.jpg'],
    26500: ['Cogs', 'http://media.steampowered.com/steamcommunity/public/images/apps/26500/79586b14e3c64d447a3dbb6e18369636b9b5dfb0.jpg'],
    38720: ['Rush', 'http://media.steampowered.com/steamcommunity/public/images/apps/38720/734f6b5196a95c73e69e0525ea3e64e90a12fc93.jpg'],
    38700: ['Toki Tori', 'http://media.steampowered.com/steamcommunity/public/images/apps/38700/71adfce6503f6a73c094d0dbab17aaa719691d95.jpg'],
    63700: ['Bit.Trip Beat', 'http://media.steampowered.com/steamcommunity/public/images/apps/63700/ce2a101a0d36649d06f26b2fd91dfc81a752b3d3.jpg'],
    15540: ['1... 2... 3... Kick It!', 'http://media.steampowered.com/steamcommunity/public/images/apps/15540/f6216bea0eb2b435d5a7f06e899f5f0d7df870cd.jpg'],
    12900: ['Audiosurf', 'http://media.steampowered.com/steamcommunity/public/images/apps/12900/ae6d0ac6d1dd5b23b961d9f32ea5a6c8d0305cf4.jpg'],
    15500: ['The Wonderful End of the World', 'http://media.steampowered.com/steamcommunity/public/images/apps/15500/6af554955e5de9fb0ec16926dc6d11f036ee8e4e.jpg'],
    40800: ['Super Meat Boy', 'http://media.steampowered.com/steamcommunity/public/images/apps/40800/64eec20c9375e7473b964f0d0bc41d19f03add3b.jpg'],
    57300: ['Amnesia: The Dark Descent', 'http://media.steampowered.com/steamcommunity/public/images/apps/57300/2c08de657a8b273eeb55bb5bf674605ca023e381.jpg'],
    15520: ['AaAaAA!!! - A Reckless Disregard for Gravity', 'http://media.steampowered.com/steamcommunity/public/images/apps/15520/fb8827ccf85cf95226b06a661f965885fc2ebd42.jpg'],
    35460: ['The Ball', 'http://media.steampowered.com/steamcommunity/public/images/apps/35460/e5d3b3d775d6b60b8e4a5cc3bb5871dc6e57c244.jpg']
}
for game in gamemap.keys():
    gamemap[game].append(0)
# weightings
# NOTE(review): these look like hand-tuned per-game "work quota" weights
# used by the (now commented-out) per-game ETA formula below — confirm
# before reusing them elsewhere.
gamemap[18500][2] = 450000
gamemap[1250][2] = 1200000
gamemap[26500][2] = 65000
gamemap[38720][2] = 50000
gamemap[38700][2] = 50000
gamemap[63700][2] = 60000
gamemap[15540][2] = 50000
gamemap[12900][2] = 300000
gamemap[15500][2] = 30000
gamemap[40800][2] = 350000
gamemap[57300][2] = 300000
gamemap[15520][2] = 60000
gamemap[35460][2] = 120000
# Parser state for the line-by-line HTML scrape below.
keepgoing = True
maxgamewidth = 459.0
hitcontent = pcnext = False
# Line-oriented scrape of the GLaDOS@Home page: skip everything before
# the #content div, then pick values out of known element ids.
while keepgoing:
    line = base_get.readline()
    if 'id="content"' not in line and not hitcontent:
        continue
    hitcontent = True
    if 'id="overall_progress_bar' in line:
        progline = line
    if 'id="game_row_' in line:
        # Start of a per-game row; remember its app id for the
        # progress/cpus lines that follow.
        gamerowd = line[ line.find("game_row_") + len("game_row_") : line.find('">') ]
        games[gamerowd] = {'progress': -1, 'cpus': -1}
    if 'game_progress' in line:
        if 'complete' in line:
            games[gamerowd]['progress'] = '100'
            games[gamerowd]['bar'] = 459.0
        else:
            # Progress is encoded as a pixel width out of maxgamewidth.
            games[gamerowd]['progress'] = str(round(round(int(line[ line.find("width: ") + len("width: ") : line.find("px;") ]) / maxgamewidth * 100, 2), 2))
            games[gamerowd]['bar'] = float(line[ line.find("width: ") + len("width: ") : line.find("px;") ])
    if 'game_cpus' in line:
        games[gamerowd]['cpus'] = -2
        if 'COMPLETE' not in line:
            tmpcpu = line[ line.find('">') + 2 : line.find(" CURRENT CPUS") ]
            tmpcpu = int(tmpcpu.replace(',', ''))
            games[gamerowd]['cpus'] = tmpcpu
    if 'g_originalEstimate = ' in line:
        # Page embeds JS countdowns as "offset + Date.now()"; convert to
        # absolute local timestamps by adding time.time().
        origtimate = float(line[ line.find(' = ') + len(' = ') : line.find(' + ') ]) + time.time()
        print "O"
        print line[ line.find(' = ') + len(' = ') : line.find(' + ') ]
        print datetime.datetime.fromtimestamp(origtimate).isoformat()
    if 'g_updatedEstimate = ' in line:
        updatimate = float(line[ line.find(' = ') + len(' = ') : line.find(' + ') ]) + time.time()
        print "U"
        print line[ line.find(' = ') + len(' = ') : line.find(' + ') ]
        print datetime.datetime.fromtimestamp(updatimate).isoformat()
    if pcnext:
        # Previous line carried 'potato_count'; this one has the number.
        pcnext = False
        potatoesstart = line.find('none;">') + 7
        potatoesend = line.find('</', potatoesstart)
        potatoes = int(line[ potatoesstart:potatoesend ].replace(',',''))
    if 'potato_count' in line:
        pcnext = True
    if '</body>' in line:
        keepgoing = False
import time  # NOTE(review): re-import; `time` is already imported at the top.
# Seconds since the GLaDOS@Home campaign started.
unixtimestart = 1302883200
unixtimenow = int(time.time())
secondsgone = unixtimenow - unixtimestart
# Externally-computed %-per-time rates, keyed by app id.
fh = open('/usr/home/lukegb/percentps.json', 'r')
percentps = json.load(fh)
fh.close()
timelefttotal = 0
playingtotal = 0
# Attach names/images and per-game ETAs to each scraped game entry.
for game in games.keys():
    gamen = int(game)
    games[game]['name'] = gamemap[gamen][0]
    games[game]['img'] = gamemap[gamen][1]
    #games[game]['incre'] = round(float(games[game]['progress']) / float(secondsgone / 60 / 60), 3)
    games[game]['incre'] = round(percentps[str(game)]['tenmin'], 3)
    games[game]['incre_multi'] = percentps[str(game)]
    if float(games[game]['progress']) < 100:
        #owners = round(459.0 / int(games[game]['bar']) * games[game]['cpus'])
        #timeq = gamemap[int(game)][2]
        #playing = int(games[game]['cpus'])
        #eta = timeq - ((round(playing * 100.0 / owners) / 100.0) * 1.0 * timeq)
        #timelefttotal = timelefttotal + eta
        #eta = (eta / playing) / (potatoes / 100000.0)
        #timetogogame = eta * 60 * 60
        # ETA in seconds: remaining percent divided by percent-per-hour.
        timetogogame = ((100.0 - float(games[game]['progress'])) / games[game]['incre']) * 60 * 60
        # timetogogame = int(secondsgone * 100 / float(games[game]['progress']))
        games[game]['eta'] = timetogogame
        estimatorgame = format_time(timetogogame)
        games[game]['estim'] = estimatorgame
        playingtotal = playingtotal + games[game]['cpus']
    else:
        # Sentinel: -3 marks a finished game in the sort below.
        games[game]['eta'] = -3
        games[game]['estim'] = 'COMPLETE'
gamebara = "<tr>"
gamebarb = "</tr><tr>"
gamedata = {}
# Build the two-row HTML "game bar" (capsule images with fill bars on
# top, PLAY ME marker below) plus a machine-readable gamedata dict.
for game in games.keys():
    # Fill height out of the 32px capsule, proportional to progress.
    widthbar = round(float(games[game]['progress']) / 100.0 * 32)
    gamebita = "<thCFCLASS id='game-GAMEID'><a href='http://store.steampowered.com/app/GAMEID'><div style='display: inline-block'><div class='gamebox-top' style='background-image: url(BGIMG); height: PHEIGHTpx' title='TOOLTIP'></div><div class='gamebox-bot' style='background-image: url(BGIMG); height: TDHEIGHTpx;' title='TOOLTIP'></div></div></a></th>"
    gamebitb = "<tdCFCLASS id='game-top-GAMEID'>AMIFOCUS</td>"
    if games[game]['estim'] != 'COMPLETE':
        tewltip = "<u>%s</u><br />%s%% (%s%%/hour)<br />Time left: %s" % (games[game]['name'], games[game]['progress'], games[game]['incre'], games[game]['estim'])
    else:
        tewltip = "<u>%s</u><br />100%%<br /><br />Yes, that means play one of the other games." % (games[game]['name'])
    if focus == int(game):
        # Highlight the currently focused game.
        tewltip = "%s<br /><br /><em>CURRENT FOCUS</em>" % (tewltip,)
        gamebitb = gamebitb.replace('AMIFOCUS', "<a href='http://store.steampowered.com/app/GAMEID'><b>^ PLAY ME</b></a>")
        gamebita = gamebita.replace('CFCLASS', " class='current-focus'")
        gamebitb = gamebitb.replace('CFCLASS', " class='current-focus'")
    # Fill remaining placeholders (no-ops where already replaced above).
    gamebita = gamebita.replace('TOOLTIP', tewltip)
    gamebitb = gamebitb.replace('TOOLTIP', tewltip)
    gamebitb = gamebitb.replace('AMIFOCUS', '')
    gamebita = gamebita.replace('CFCLASS', '')
    gamebitb = gamebitb.replace('CFCLASS', '')
    gamebita = gamebita.replace('GAMEID', game)
    gamebitb = gamebitb.replace('GAMEID', game)
    gamebita = gamebita.replace('BGIMG', games[game]['img'])
    gamebita = gamebita.replace('PHEIGHT', str(widthbar))
    gamebita = gamebita.replace('TDHEIGHT', str(round(32 - widthbar)))
    gamebara = "%s%s" % (gamebara, gamebita)
    gamebarb = "%s%s" % (gamebarb, gamebitb)
    gamedata[games[game]['name']] = {
        'progress': games[game]['progress'],
        'image': games[game]['img'],
        'height': str(widthbar),
        'id': game,
        'isfocus': (focus == int(game)),
        'complete': (games[game]['estim'] == 'COMPLETE'),
        'timeleft': games[game]['estim'],
        'percentperhour': games[game]['incre'],
        'percentperhour_multi': games[game]['incre_multi']
    }
gamebar = "<table>%s%s</tr></table>" % (gamebara,gamebarb)
# Overall progress: decode the overall progress bar's pixel width, or
# pin to 100% once the game has launched.
line = progline
if not launched:
    width = line.find(': ')+2
    endwidth = line.find('px;">')
    maxwidth = 494.0
    percent = round(int(line[width:endwidth]) / maxwidth * 100, 3)
else:
    percent = 100.0
ratimator = round(percent / float(secondsgone / 60 / 60), 2)
# Logo reveal width/opacity scales with overall progress (max 254).
logopacity = int(round(percent / 100.0, 4) * 254)
if not launched:
    template = open('/usr/local/www/nginx/template.beta.html', 'r')
else:
    template = open('/usr/local/www/nginx/launched.beta.html', 'r')
templat = template.read()
#timetogo = secondsgone * 100 / percent
# GLaDOS's own countdown, from the scraped g_updatedEstimate.
timetogo = updatimate - time.time()
#print timelefttotal
#print playingtotal
#print potatoes
updatimate = time.time() + timetogo
estimator = format_time(timetogo)
# NOTE: only used for comparison below. Overwritten by Djinni timer
timetogo_pred = (timelefttotal / float(playingtotal)) / (float(potatoes) / 100000.0) * 3600 * 1.0
updatimate_pred = time.time() + timetogo_pred
estimator_pred = format_time(timetogo_pred)
# dinnerbone start
# "Djinni" heuristic: knock 55 minutes off the GLaDOS countdown for each
# unfinished game whose own ETA beats it, floored at the smallest
# positive per-game ETA.
gdin = games.keys()
gdin = sorted(gdin, key=lambda inp: games[inp]['eta'])
timetogodin = timetogo
knockoff = 55 * 60
for gamk in gdin:
    if games[gamk]['eta'] == -3:
        continue
    if games[gamk]['eta'] > timetogo:
        continue
    timetogodin = timetogodin - knockoff
# find min
mindiff = 9999999999999
for gamk in gdin:
    if games[gamk]['eta'] < mindiff and games[gamk]['eta'] > 0:
        mindiff = games[gamk]['eta']
print "md:"
print mindiff
if timetogodin < mindiff:
    timetogodin = mindiff
# print "Knocking off %s min" % str(round(knockoff / 60.0, 1))
print "GLaDOS: %s" % (estimator,)
#print "JS Predictor: %s" % (estimator_pred,)
print "Djinni: %s" % format_time(timetogodin)
# use djinni for predictor
timetogo_pred = timetogodin
updatimate_pred = time.time() + timetogodin
estimator_pred = format_time(timetogo_pred)
#dinnerbone end
# How far ahead of the originally scheduled release each estimate lands.
standard_release = datetime.datetime.fromtimestamp(origtimate)
#new_release = datetime.datetime.fromtimestamp(unixtimenow + timetogo)
new_release = datetime.datetime.fromtimestamp(updatimate)
new_release_pred = datetime.datetime.fromtimestamp(updatimate_pred)
timefromrelease = standard_release - new_release
timefromrelease_pred = standard_release - new_release_pred
timeahead = texttime.stringify(timefromrelease)
timeahead_pred = texttime.stringify(timefromrelease_pred)
# Fill the HTML template's placeholder tokens.
output = templat.replace('PERCENTAGE', str(percent))
output = output.replace('RATE', str(ratimator))
# GLaDOS
output = output.replace('ESTIMATOR', str(estimator))
output = output.replace('ENDPOINT', str(timetogo))
output = output.replace('HOURSAHEAD', str(timeahead))
# ME
output = output.replace('ESTIMAT_PRED', str(estimator_pred))
output = output.replace('END_PRED', str(timetogo_pred))
output = output.replace('HOURS_PRED', str(timeahead_pred))
# GENERAL
output = output.replace('LOGOPACITY', str(logopacity))
output = output.replace('GAMEBAR', str(gamebar))
output = output.replace('BETALINK', 'beta.')
output = output.replace('LASTUPDATE', str(datetime.datetime.fromtimestamp(time.time()).isoformat()))
import os, hashlib
# Cache-busting token derived from the template's mtime; fixed string
# once launched so clients stop refreshing.
h = hashlib.md5()
h.update(str(os.path.getmtime('/usr/local/www/nginx/template.beta.html')))
refvalue = h.hexdigest()
if launched:
    refvalue = 'gladosisgo'
output = output.replace('LASTGEN', refvalue)
# Dark and light color themes from the same rendered output.
output_dark = output_light = output
output_dark = output_dark.replace('BGWHAT', '#000')
output_dark = output_dark.replace('SHOWWHAT', '#fff')
output_light = output_light.replace('BGWHAT', '#eee')
output_light = output_light.replace('SHOWWHAT', '#000')
output_light = output_light.replace('</head>', '<link href="http://hasportal2launchedyet.com/lighter.css" rel="stylesheet" type="text/css" media="screen" /></head>')
outputhndl = open('/usr/local/www/nginx/inside.beta.html', 'w')
outputhndl.write(output_dark)
outputhndl.close()
outputhndl = open('/usr/local/www/nginx/index.beta.html', 'w')
outputhndl.write(output_dark)
outputhndl.close()
outputhndl = open('/usr/local/www/nginx/lighter_inside.beta.html', 'w')
outputhndl.write(output_light)
outputhndl.close()
outputhndl = open('/usr/local/www/nginx/lighter.beta.html', 'w')
outputhndl.write(output_light)
outputhndl.close()
# NOTE(review): `checkstr` always builds 'beta.', so this comparison is
# false here — presumably the deployed (non-beta) copy of this script has
# the literal rewritten so only that copy emits data.json; confirm.
checkstr = '%set%s.' % ('b', 'a')
if 'beta.' != checkstr:
    print "Main files written - writing JSON"
    jsonout = {
        "glados": {
            "endpoint": timetogo,
            "ahead": timeahead,
        },
        "estimate": {
            "endpoint": timetogo_pred,
            "ahead": timeahead_pred,
        },
        "logowidth": "%dpx" % (logopacity,),
        "gamebar": gamebar,
        "gamedata": gamedata,
        "lastupdate": str(datetime.datetime.fromtimestamp(time.time()).isoformat()),
        "overall": percent,
        "percentperhour": ratimator,
        "potatoes": potatoes,
        "refreshvalue": refvalue
    }
    outputhndl = open('/usr/local/www/nginx/data.json', 'w')
    json.dump(jsonout, outputhndl)
    outputhndl.close()
print "Ending run... %s" % (str(datetime.datetime.fromtimestamp(time.time())))
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2013 Christophe Benz, Romain Bignon, Julien Hebert
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import warnings
import datetime
import re
from decimal import Decimal
from copy import deepcopy, copy
from weboob.tools.misc import to_unicode
from weboob.tools.date import new_date, new_datetime
from weboob.tools.ordereddict import OrderedDict
# Public API of this module. `TimeField` was defined below but missing
# from this list (unlike its siblings `DateField`/`DeltaField`), so
# `from ... import *` did not export it; added for consistency.
__all__ = ['UserError', 'FieldNotFound', 'NotAvailable',
           'NotLoaded', 'IBaseCap', 'Field', 'IntField', 'DecimalField',
           'FloatField', 'StringField', 'BytesField', 'DateField',
           'TimeField', 'DeltaField', 'empty', 'CapBaseObject']
def empty(value):
    """
    Checks if a value is empty (None, NotLoaded or NotAvailable).

    :rtype: :class:`bool`
    """
    return value is None or value is NotLoaded or value is NotAvailable
class UserError(Exception):
    """
    Exception carrying an error message meant to be shown to the user.
    """
class FieldNotFound(Exception):
    """
    Raised when a field is not declared on an object.

    :param obj: object on which the lookup failed
    :type obj: :class:`CapBaseObject`
    :param field: name of the missing field
    :type field: :class:`Field`
    """

    def __init__(self, obj, field):
        message = u'Field "%s" not found for object %s' % (field, obj)
        super(FieldNotFound, self).__init__(message)
class ConversionWarning(UserWarning):
    """
    A field's type was changed when setting it.

    Ideally, the module should use the right type before setting it.
    """
    pass
class AttributeCreationWarning(UserWarning):
    """
    A non-field attribute has been created with a name not
    prefixed with a ``_`` (underscore-prefixed names are reserved for
    private attributes; anything else is expected to be a declared field).
    """
class NotAvailableMeta(type):
    # Metaclass for the `NotAvailable` sentinel: the *class object itself*
    # stringifies to a human-readable message and evaluates as falsy
    # (Python 2 hooks: `__unicode__`/`__nonzero__`).
    def __str__(self):
        # NOTE(review): `.decode('utf-8')` on a unicode value is a Python 2
        # oddity that only works for ASCII content — presumably
        # `.encode('utf-8')` was intended; confirm before changing.
        return unicode(self).decode('utf-8')

    def __unicode__(self):
        return u'Not available'

    def __nonzero__(self):
        # `bool(NotAvailable)` is False, so the sentinel can be used
        # directly in conditions.
        return False
class NotAvailable(object):
    """
    Constant to use on non available fields.
    """
    # Python 2 metaclass attachment: the class itself is the sentinel
    # value (falsy and printable), never instantiated.
    __metaclass__ = NotAvailableMeta
class NotLoadedMeta(type):
    # Metaclass for the `NotLoaded` sentinel; same Python 2 pattern as
    # `NotAvailableMeta`: the class stringifies nicely and is falsy.
    def __str__(self):
        # NOTE(review): same `.decode('utf-8')`-on-unicode oddity as in
        # `NotAvailableMeta.__str__`; works only because the text is ASCII.
        return unicode(self).decode('utf-8')

    def __unicode__(self):
        return u'Not loaded'

    def __nonzero__(self):
        # `bool(NotLoaded)` is False.
        return False
class NotLoaded(object):
    """
    Constant to use on not loaded fields.

    When you use :func:`weboob.tools.backend.BaseBackend.fillobj` on a object based on :class:`CapBaseObject`,
    it will request all fields with this value.
    """
    # Python 2 metaclass attachment: the class itself is the sentinel.
    __metaclass__ = NotLoadedMeta
class IBaseCap(object):
    """
    This is the base class for all capabilities.

    A capability may define abstract methods (which raise :class:`NotImplementedError`)
    with an explicit docstring to tell backends how to implement them.

    Also, it may define some *objects*, using :class:`CapBaseObject`.
    """
class Field(object):
    """
    A typed field declared on a :class:`CapBaseObject` subclass.

    :param doc: docstring of the field
    :type doc: :class:`str`
    :param args: list of types accepted
    :param default: default value of this field. If not specified, :class:`NotLoaded` is used.
    """
    # Class-wide counter; each instance records its creation rank so the
    # metaclass can keep fields in declaration order.
    _creation_counter = 0

    def __init__(self, doc, *args, **kwargs):
        self.doc = doc
        self.value = kwargs.get('default', NotLoaded)
        self.types = ()
        for candidate in args:
            if not isinstance(candidate, type):
                raise TypeError('Arguments must be types')
            self.types = self.types + (candidate,)

        self._creation_counter = Field._creation_counter
        Field._creation_counter += 1

    def convert(self, value):
        """
        Convert value to the wanted one.
        """
        return value
class IntField(Field):
    """
    A field restricted to :class:`int` and :class:`long` values.
    """

    def __init__(self, doc, **kwargs):
        super(IntField, self).__init__(doc, int, long, **kwargs)

    def convert(self, value):
        # Coerce any numeric-ish value to a plain int.
        return int(value)
class DecimalField(Field):
    """
    A field restricted to :class:`decimal` values.
    """

    def __init__(self, doc, **kwargs):
        super(DecimalField, self).__init__(doc, Decimal, **kwargs)

    def convert(self, value):
        # Avoid rebuilding a Decimal from a Decimal.
        return value if isinstance(value, Decimal) else Decimal(value)
class FloatField(Field):
    """
    A field restricted to :class:`float` values.
    """

    def __init__(self, doc, **kwargs):
        super(FloatField, self).__init__(doc, float, **kwargs)

    def convert(self, value):
        # Coerce any numeric-ish value to a plain float.
        return float(value)
class StringField(Field):
    """
    A field restricted to :class:`unicode` strings.
    """

    def __init__(self, doc, **kwargs):
        super(StringField, self).__init__(doc, unicode, **kwargs)

    def convert(self, value):
        # Delegate decoding/normalization to the shared helper.
        return to_unicode(value)
class BytesField(Field):
    """
    A field restricted to :class:`str` (byte) strings.
    """

    def __init__(self, doc, **kwargs):
        super(BytesField, self).__init__(doc, str, **kwargs)

    def convert(self, value):
        # Unicode input is encoded to UTF-8 bytes first.
        if isinstance(value, unicode):
            return str(value.encode('utf-8'))
        return str(value)
class DateField(Field):
    """
    A field which accepts only :class:`datetime.date` and :class:`datetime.datetime` types.
    """

    def __init__(self, doc, **kwargs):
        Field.__init__(self, doc, datetime.date, datetime.datetime, **kwargs)

    def __setattr__(self, name, value):
        # Intercept assignments to `value` so stored dates are converted to
        # the weboob replacement date types (see weboob.tools.date).
        if name == 'value':
            # Force use of our date and datetime types, to fix bugs in python2
            # with strftime on year<1900.
            # Exact `type(...) is` checks are deliberate: a datetime is also
            # a date, and the converted new_datetime must not be re-converted.
            if type(value) is datetime.datetime:
                value = new_datetime(value)
            if type(value) is datetime.date:
                value = new_date(value)
        return object.__setattr__(self, name, value)
class TimeField(Field):
    """
    A field which accepts only :class:`datetime.time` and :class:`datetime.datetime` types.
    """

    def __init__(self, doc, **kwargs):
        Field.__init__(self, doc, datetime.time, datetime.datetime, **kwargs)
class DeltaField(Field):
    """
    A field restricted to :class:`datetime.timedelta` values.
    """

    def __init__(self, doc, **kwargs):
        super(DeltaField, self).__init__(doc, datetime.timedelta, **kwargs)
class _CapBaseObjectMeta(type):
    # Metaclass collecting `Field` declarations from the class body into an
    # ordered `_fields` registry and documenting them on the class docstring.
    def __new__(cls, name, bases, attrs):
        # Pop Field instances out of the class namespace so they do not end
        # up as plain class attributes.
        # NOTE(review): popping while iterating `attrs.items()` is safe in
        # Python 2 because items() returns a list snapshot; under Python 3
        # this would mutate during iteration — confirm the supported
        # interpreter before porting.
        fields = [(field_name, attrs.pop(field_name)) for field_name, obj in attrs.items() if isinstance(obj, Field)]
        # Keep declaration order via each Field's creation counter.
        fields.sort(key=lambda x: x[1]._creation_counter)

        new_class = super(_CapBaseObjectMeta, cls).__new__(cls, name, bases, attrs)
        if new_class._fields is None:
            new_class._fields = OrderedDict()
        else:
            # Inherit a deep copy of the parent class's field registry.
            new_class._fields = deepcopy(new_class._fields)
        new_class._fields.update(fields)

        # Append an autogenerated ':var:' entry per declared field.
        if new_class.__doc__ is None:
            new_class.__doc__ = ''
        for name, field in fields:
            doc = '(%s) %s' % (', '.join([':class:`%s`' % v.__name__ for v in field.types]), field.doc)
            if field.value is not NotLoaded:
                doc += ' (default: %s)' % field.value
            new_class.__doc__ += '\n:var %s: %s' % (name, doc)
        return new_class
class CapBaseObject(object):
    """
    This is the base class for a capability object.
    A capability interface may specify to return several kind of objects, to formalise
    retrieved information from websites.
    As python is a flexible language where variables are not typed, we use a system to
    force backends to set wanted values on all fields. To do that, we use the :class:`Field`
    class and all derived ones.
    For example::
        class Transfer(CapBaseObject):
            " Transfer from an account to a recipient. "
            amount = DecimalField('Amount to transfer')
            date = Field('Date of transfer', basestring, date, datetime)
            origin = Field('Origin of transfer', int, long, basestring)
            recipient = Field('Recipient', int, long, basestring)
    The docstring is mandatory.
    """
    __metaclass__ = _CapBaseObjectMeta
    # 'id' and 'backend' are plain attributes, not Fields.
    id = None
    backend = None
    # Filled by the metaclass with the declared Fields (an OrderedDict).
    _fields = None
    def __init__(self, id, backend=None):
        self.id = to_unicode(id)
        self.backend = backend
        # Deep-copy so instances never share Field state with the class.
        self._fields = deepcopy(self._fields)
    @property
    def fullid(self):
        """
        Full ID of the object, in form '**ID@backend**'.
        """
        return '%s@%s' % (self.id, self.backend)
    def __iscomplete__(self):
        """
        Return True if the object is completed.
        It is useful when the object is a field of another object which is
        going to be filled.
        The default behavior is to iter on fields (with iter_fields) and if
        a field is NotLoaded, return False.
        """
        for key, value in self.iter_fields():
            if value is NotLoaded:
                return False
        return True
    def copy(self):
        # Shallow-copy the object but give the copy its own _fields dict so
        # its field values can diverge from the original's.
        obj = copy(self)
        obj._fields = copy(self._fields)
        return obj
    def set_empty_fields(self, value, excepts=()):
        """
        Set the same value on all empty fields.
        :param value: value to set on all empty fields
        :param excepts: if specified, do not change fields listed
        """
        for key, old_value in self.iter_fields():
            if empty(old_value) and key not in excepts:
                setattr(self, key, value)
    def iter_fields(self):
        """
        Iterate on the fields keys and values.
        Can be overloaded to iterate on other things.
        :rtype: iter[(key, value)]
        """
        # 'id' is yielded first even though it is not a Field.
        if hasattr(self, 'id') and self.id is not None:
            yield 'id', self.id
        for name, field in self._fields.iteritems():
            yield name, field.value
    def __eq__(self, obj):
        # Two objects are equal when they carry the same id on the same backend.
        if isinstance(obj, CapBaseObject):
            return self.backend == obj.backend and self.id == obj.id
        else:
            return False
    def __getattr__(self, name):
        # Only reached when normal attribute lookup fails: resolve reads of
        # declared Fields through _fields.
        if self._fields is not None and name in self._fields:
            return self._fields[name].value
        else:
            raise AttributeError("'%s' object has no attribute '%s'" % (
                self.__class__.__name__, name))
    def __setattr__(self, name, value):
        try:
            attr = (self._fields or {})[name]
        except KeyError:
            # Not a declared Field: warn about public ad-hoc attributes,
            # then fall back to normal assignment.
            if not name in dir(self) and not name.startswith('_'):
                warnings.warn('Creating a non-field attribute %s. Please prefix it with _' % name,
                              AttributeCreationWarning, stacklevel=2)
            object.__setattr__(self, name, value)
        else:
            if not empty(value):
                try:
                    # Try to convert value to the wanted one.
                    nvalue = attr.convert(value)
                    # If the value was converted
                    if nvalue is not value:
                        warnings.warn('Value %s was converted from %s to %s' %
                                      (name, type(value), type(nvalue)),
                                      ConversionWarning, stacklevel=2)
                    value = nvalue
                except Exception:
                    # error during conversion, it will probably not
                    # match the wanted following types, so we'll
                    # raise ValueError.
                    pass
            # Empty markers (e.g. NotLoaded) bypass the type check.
            if not isinstance(value, attr.types) and not empty(value):
                raise ValueError(
                    'Value for "%s" needs to be of type %r, not %r' % (
                        name, attr.types, type(value)))
            attr.value = value
    def __delattr__(self, name):
        # Deleting a declared Field removes it from _fields; anything else
        # is deleted as a normal attribute.
        try:
            self._fields.pop(name)
        except KeyError:
            object.__delattr__(self, name)
    def to_dict(self):
        # Serialize fields to an OrderedDict, expanding 'id' to the fullid
        # ('ID@backend') form when a backend is set.
        def iter_decorate(d):
            for key, value in d:
                if key == 'id' and self.backend is not None:
                    value = self.fullid
                yield key, value
        fields_iterator = self.iter_fields()
        return OrderedDict(iter_decorate(fields_iterator))
class Currency(object):
    """
    Currency constants plus helpers to detect a currency marker inside a
    free-form amount string and to render a constant back to text.
    """
    CUR_UNKNOWN = 0
    CUR_EUR = 1
    CUR_CHF = 2
    CUR_USD = 3
    # Textual markers mapped to currency constants. Ordered so that
    # currency2txt returns the first (preferred) marker for a currency.
    TXT2CUR = OrderedDict(((u'€', CUR_EUR),
                           (u'EUR', CUR_EUR),
                           (u'CHF', CUR_CHF),
                           (u'$', CUR_USD),
                           (u'USD', CUR_USD),
                           ))
    # Matches the "amount" characters (digits, whitespace, number
    # punctuation) that get blanked out before looking for markers.
    EXTRACTOR = re.compile(r'[\d\s,\.\-]', re.UNICODE)
    @classmethod
    def get_currency(klass, text):
        u"""
        Detect the currency of an amount string; CUR_UNKNOWN when no
        known marker is found.

        >>> Currency.get_currency(u'42')
        0
        >>> Currency.get_currency(u'42 €')
        1
        >>> Currency.get_currency(u'$42')
        3
        >>> Currency.get_currency(u'42.000,00€')
        1
        >>> Currency.get_currency(u'$42 USD')
        3
        >>> Currency.get_currency(u'%42 USD')
        3
        >>> Currency.get_currency(u'US1D')
        0
        """
        # Blank out amount characters, then test each remaining token
        # against the known markers (uppercased first).
        curtexts = klass.EXTRACTOR.sub(' ', text.upper()).split()
        for curtext in curtexts:
            cur = klass.TXT2CUR.get(curtext)
            if cur is not None:
                return cur
        return klass.CUR_UNKNOWN
    @classmethod
    def currency2txt(klass, currency):
        """Return the first textual marker mapped to `currency`, or u''."""
        for txt, value in klass.TXT2CUR.iteritems():
            if value == currency:
                return txt
        return u''
| blckshrk/Weboob | weboob/capabilities/base.py | Python | agpl-3.0 | 14,050 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-03-01 12:20
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the `modelview` app."""
    dependencies = [
        ('modelview', '0012_auto_20160301_1034'),
    ]
    operations = [
        # Make the array of external-optimizer descriptions nullable.
        migrations.AlterField(
            model_name='basicfactsheet',
            name='external_optimizer_yes_text',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=1000), default=list, null=True, size=None),
        ),
        # Free-text "other" fields on EnergyModel become nullable.
        migrations.AlterField(
            model_name='energymodel',
            name='model_class_other_text',
            field=models.CharField(max_length=1000, null=True),
        ),
        migrations.AlterField(
            model_name='energymodel',
            name='observation_period_other_text',
            field=models.CharField(max_length=200, null=True),
        ),
    ]
| tom-heimbrodt/oeplatform | modelview/migrations/0013_auto_20160301_1320.py | Python | agpl-3.0 | 989 |
import json
import logging
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.db.utils import IntegrityError
import paypalrestsdk
from paypalrestsdk import WebProfile
from ecommerce.extensions.payment.models import PaypalWebProfile
log = logging.getLogger(__name__)
class Command(BaseCommand):
    help = """Manage PayPal Web Experience Profiles.
    Supported actions:
    list Print a JSON-formatted list of existing web profiles.
    create [json] Create a new profile using the specified JSON string.
    show [id] Print the contents of an existing web profile.
    update [id] [json] Update an existing profile using the specified JSON string.
    delete [id] Delete an existing profile. (Use -d to automatically disable when deleting.)
    enable [id] Enable the web profile in this Django application (send it in PayPal API calls).
    disable [id] Disable the web profile in this Django application (don't send in PayPal API calls).
    The 'enable' and 'disable' actions are idempotent so it is safe to run them repeatedly in the same environment.
    """
    # NOTE(review): handle() actually pops `partner` first, then `action`,
    # which disagrees with the order shown here -- confirm intended usage.
    args = "action partner [id] [json]"
    # Key under the partner's PAYMENT_PROCESSOR_CONFIG holding PayPal settings.
    PAYPAL_CONFIG_KEY = "paypal"
    @staticmethod
    def _get_argument(args, variable_name, action_name):
        """
        DRY helper. Tries to pop the topmost value from `args` and raises a CommandError
        with a formatted message in case of failure. This function mutates `args` in place.
        """
        try:
            return args.pop(0)
        except IndexError:
            raise CommandError("Action `{}` requires a {} to be specified.".format(action_name, variable_name))
    def print_json(self, data):
        """Pretty-print `data` as indented JSON to the command's stdout."""
        self.stdout.write(json.dumps(data, indent=1, ensure_ascii=True))
    def handle(self, *args, **options):
        """
        Main dispatch.
        Pops `partner` and `action` from the positional args, configures the
        PayPal SDK from the partner's settings, then delegates to the
        matching handle_<action> method with the remaining args.
        """
        args = list(args)
        if len(args) < 2:
            raise CommandError("Required arguments `partner` and `action` are missing")
        partner = args.pop(0)
        action = args.pop(0)
        try:
            paypal_configuration = settings.PAYMENT_PROCESSOR_CONFIG[partner.lower()][self.PAYPAL_CONFIG_KEY.lower()]
        except KeyError:
            raise CommandError(
                "Payment Processor configuration for partner `{0}` does not contain PayPal settings".format(partner)
            )
        # Initialize the PayPal REST SDK
        paypalrestsdk.configure({
            'mode': paypal_configuration['mode'],
            'client_id': paypal_configuration['client_id'],
            'client_secret': paypal_configuration['client_secret']
        })
        try:
            handler = getattr(self, 'handle_{}'.format(action))
        except IndexError:
            # NOTE(review): getattr cannot raise IndexError, so this branch
            # looks unreachable (`action` was already popped above) -- confirm.
            raise CommandError("no action specified.")
        except AttributeError:
            raise CommandError("unrecognized action: {}".format(action))
        return handler(args)
    def _do_create(self, profile_data):
        """
        Creates a new profile in the PayPal account with the specified id, using the specified data.
        """
        profile = WebProfile(profile_data)
        result = profile.create()
        if not result:
            raise CommandError("Could not create web profile: {}".format(profile.error))
        else:
            log.info("Created profile `%s` (id=%s).", profile.name, profile.id)
        return profile
    def _do_update(self, profile_id, profile_data):
        """
        Updates the existing profile in the PayPal account with the specified id, replacing
        all data with the specified data.
        """
        profile = WebProfile.find(profile_id)
        result = profile.update(profile_data)
        if not result:
            raise CommandError("Could not update web profile: {}".format(profile.error))
        # have to re-fetch to show the new state
        profile = WebProfile.find(profile_id)
        log.info("Updated profile %s.", profile.id)
        return profile
    def _do_enable(self, profile_id, profile_name):
        """
        Create a record in the PaypalWebProfile model that will be found and used to customize
        the payment page experience with PayPal checkouts.
        """
        try:
            # get_or_create makes this idempotent: re-enabling is a no-op.
            __, created = PaypalWebProfile.objects.get_or_create(id=profile_id, name=profile_name)
            if created:
                log.info("Enabled profile `%s` (id=%s)", profile_name, profile_id)
            else:
                log.info("Profile `%s` (id=%s) is already enabled", profile_name, profile_id)
        except IntegrityError:
            # this should never happen, unless the data in the database has gotten out of
            # sync with the profiles stored in the PayPal account that this application
            # instance has been configured to use.
            raise CommandError(
                "Could not enable web profile because a profile with the same name exists under "
                "a different id. This may indicate a configuration error, or simply stale data."
            )
    def handle_list(self, args):  # pylint: disable=unused-argument
        """Wrapper for paypalrestsdk List operation."""
        profiles = WebProfile.all()
        result = []
        try:
            result = [profile.to_dict() for profile in profiles]
        except KeyError:
            # weird internal paypal sdk behavior; it means the result was empty.
            pass
        self.print_json(result)
    def handle_create(self, args):
        """Wrapper for paypalrestsdk Create operation."""
        profile_data = json.loads(self._get_argument(args, 'json string', 'create'))
        profile = self._do_create(profile_data)
        self.print_json(profile.to_dict())
    def handle_show(self, args):
        """Wrapper for paypalrestsdk Find operation."""
        profile_id = self._get_argument(args, 'profile_id', 'show')
        profile = WebProfile.find(profile_id)
        self.print_json(profile.to_dict())
    def handle_update(self, args):
        """Wrapper for paypalrestsdk Update operation. This completely replaces the value of the existing profile."""
        profile_id = self._get_argument(args, 'profile_id', 'update')
        profile_data = json.loads(self._get_argument(args, 'json string', 'update'))
        profile = self._do_update(profile_id, profile_data)
        self.print_json(profile.to_dict())
    def handle_delete(self, args):
        """
        Delete a web profile from the configured PayPal account.
        Before deleting this function checks to make sure a matching profile is not
        presently enabled. If the specified profile is enabled the command will fail
        with an error, since leaving things in that state would cause the application
        to send invalid profile ids to PayPal, causing errors.
        """
        profile_id = self._get_argument(args, 'profile_id', 'delete')
        if PaypalWebProfile.objects.filter(id=profile_id).exists():
            raise CommandError(
                "Web profile {} is currently enabled. You must disable it before you can delete it.".format(profile_id)
            )
        profile = WebProfile.find(profile_id)
        if not profile.delete():
            raise CommandError("Could not delete web profile: {}".format(profile.error))
        log.info("Deleted profile: %s", profile.id)
        self.print_json(profile.to_dict())
    def handle_enable(self, args):
        """
        Given the id of an existing web profile, save a reference to it in the database.
        When PayPal checkouts are set up, we can look this profile up by name and, if
        found, specify its id in our API calls to customize the payment page accordingly.
        """
        profile_id = self._get_argument(args, 'profile_id', 'enable')
        # Look the profile up remotely first so we store its canonical name.
        profile = WebProfile.find(profile_id)
        self._do_enable(profile.id, profile.name)
    def handle_disable(self, args):
        """
        Given the id of an existing web profile, find and delete any references to it
        in the database. This reverses the effect of `handle_enable` above.
        """
        profile_id = self._get_argument(args, 'profile_id', 'disable')
        try:
            PaypalWebProfile.objects.get(id=profile_id).delete()
            log.info("Disabled profile %s.", profile_id)
        except PaypalWebProfile.DoesNotExist:
            log.info("Did not find an enabled web profile with id %s to disable.", profile_id)
| mferenca/HMS-ecommerce | ecommerce/extensions/payment/management/commands/paypal_profile.py | Python | agpl-3.0 | 8,570 |
# Copyright 2012-2013 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Server fixture for BIND."""
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
str = None
__metaclass__ = type
__all__ = [
'BINDServer',
]
import argparse
import os
from shutil import copy
import subprocess
from textwrap import dedent
import time
import fixtures
from maastesting.fixtures import TempDirectory
from provisioningserver.dns.config import generate_rndc
from provisioningserver.utils import (
atomic_write,
ensure_dir,
)
from rabbitfixture.server import (
allocate_ports,
preexec_fn,
)
import tempita
from testtools.content import Content
from testtools.content_type import UTF8_TEXT
# Banner prepended verbatim to every config file this fixture generates
# (see BINDServerResources.set_up_named).
GENERATED_HEADER = """
# This is a file generated by the bindfixture.
# The bindfixture tries not to overwrite existing configuration files
# so it's safe to edit this file if you need to but be aware that
# these changes won't be persisted.
"""
def should_write(path, overwrite_config=False):
    """Decide whether the DNS config file at `path` should be written.

    :param path: Candidate file that may need to be written out.
    :param overwrite_config: When truthy, write even if the file exists.
    :return: Whether the file should be written.
    :rtype: bool
    """
    if overwrite_config:
        return overwrite_config
    return not os.path.exists(path)
class BINDServerResources(fixtures.Fixture):
    """Allocate the resources a BIND server needs.
    :ivar port: A port that was free at the time setUp() was
        called.
    :ivar rndc_port: A port that was free at the time setUp() was
        called (used for rndc communication).
    :ivar homedir: A directory where to put all the files the
        BIND server needs (configuration files and executable).
    :ivar log_file: The log file allocated for the server.
    :ivar include_in_options: Name of a file under homedir to include inside
        the options block.
    """
    # The full path where the 'named' executable can be
    # found.
    # Note that it will be copied over to a temporary
    # location in order to by-pass the limitations imposed by
    # apparmor if the executable is in /usr/sbin/named.
    NAMED_PATH = '/usr/sbin/named'
    # The configuration template for the BIND server. The goal here
    # is to override the defaults (default configuration files location,
    # default port) to avoid clashing with the system's BIND (if
    # running).
    NAMED_CONF_TEMPLATE = tempita.Template(dedent("""
    options {
    directory "{{homedir}}";
    listen-on port {{port}} {127.0.0.1;};
    pid-file "{{homedir}}/named.pid";
    session-keyfile "{{homedir}}/session.key";
    {{if include_in_options}}
    include "{{homedir}}/{{include_in_options}}";
    {{endif}}
    };
    logging{
    channel simple_log {
    file "{{log_file}}";
    severity info;
    print-severity yes;
    };
    category default{
    simple_log;
    };
    };
    {{extra}}
    """))
    def __init__(self, port=None, rndc_port=None, homedir=None,
                 log_file=None, include_in_options=None):
        # All parameters are optional; anything left as None is allocated
        # automatically in set_up_config().
        super(BINDServerResources, self).__init__()
        self._defaults = dict(
            port=port,
            rndc_port=rndc_port,
            homedir=homedir,
            log_file=log_file,
            include_in_options=include_in_options,
        )
    def setUp(self, overwrite_config=False):
        # Resolve unset resources, then write the config files and copy
        # the 'named' binary into homedir.
        super(BINDServerResources, self).setUp()
        self.__dict__.update(self._defaults)
        self.set_up_config()
        self.set_up_named(overwrite_config=overwrite_config)
    def set_up_named(self, overwrite_config=True):
        """Setup an environment to run 'named'.
        - Creates the default configuration for 'named' and sets up rndc.
        - Copies the 'named' executable inside homedir. AppArmor won't
          let us run the installed version the way we want.
        """
        # Generate rndc configuration (rndc config and named snippet).
        # Disable remote administration for init scripts by suppressing the
        # "controls" statement.
        rndcconf, namedrndcconf = generate_rndc(
            port=self.rndc_port, key_name='dnsfixture-rndc-key',
            include_default_controls=False)
        # Write main BIND config file.
        if should_write(self.conf_file, overwrite_config):
            named_conf = (
                self.NAMED_CONF_TEMPLATE.substitute(
                    homedir=self.homedir, port=self.port,
                    log_file=self.log_file,
                    include_in_options=self.include_in_options,
                    extra=namedrndcconf))
            atomic_write(
                GENERATED_HEADER + named_conf, self.conf_file)
        # Write rndc config file.
        if should_write(self.rndcconf_file, overwrite_config):
            atomic_write(
                GENERATED_HEADER + rndcconf, self.rndcconf_file)
        # Copy named executable to home dir. This is done to avoid
        # the limitations imposed by apparmor if the executable
        # is in /usr/sbin/named.
        # named's apparmor profile prevents loading of zone and
        # configuration files from outside of a restricted set,
        # none of which an ordinary user has write access to.
        if should_write(self.named_file, overwrite_config):
            named_path = self.NAMED_PATH
            assert os.path.exists(named_path), (
                "'%s' executable not found. Install the package "
                "'bind9' or define an environment variable named "
                "NAMED_PATH with the path where the 'named' "
                "executable can be found." % named_path)
            copy(named_path, self.named_file)
    def set_up_config(self):
        # Fill in any resources the caller did not supply: free ports, a
        # temporary homedir, and derived file paths under homedir.
        if self.port is None:
            [self.port] = allocate_ports("localhost")
        if self.rndc_port is None:
            [self.rndc_port] = allocate_ports("localhost")
        if self.homedir is None:
            self.homedir = self.useFixture(TempDirectory()).path
        if self.log_file is None:
            self.log_file = os.path.join(self.homedir, 'named.log')
        self.named_file = os.path.join(
            self.homedir, os.path.basename(self.NAMED_PATH))
        self.conf_file = os.path.join(self.homedir, 'named.conf')
        self.rndcconf_file = os.path.join(self.homedir, 'rndc.conf')
class BINDServerRunner(fixtures.Fixture):
    """Run a BIND server."""
    # Where the executable 'rndc' can be found (belongs to the
    # package 'bind9utils').
    RNDC_PATH = "/usr/sbin/rndc"
    def __init__(self, config):
        """Create a `BINDServerRunner` instance.
        :param config: An object exporting the variables
            `BINDServerResources` exports.
        """
        super(BINDServerRunner, self).__init__()
        self.config = config
        self.process = None
    def setUp(self):
        # Spawns 'named' and waits for it to answer rndc.
        super(BINDServerRunner, self).setUp()
        self._start()
    def is_running(self):
        """Is the BIND server process still running?"""
        if self.process is None:
            return False
        else:
            # poll() returns None while the child is alive.
            return self.process.poll() is None
    def _spawn(self):
        """Spawn the BIND server process."""
        env = dict(os.environ, HOME=self.config.homedir)
        with open(self.config.log_file, "wb") as log_file:
            with open(os.devnull, "rb") as devnull:
                # -f keeps named in the foreground so we own the process.
                self.process = subprocess.Popen(
                    [self.config.named_file, "-f", "-c",
                     self.config.conf_file],
                    stdin=devnull,
                    stdout=log_file, stderr=log_file,
                    close_fds=True, cwd=self.config.homedir,
                    env=env, preexec_fn=preexec_fn)
        self.addCleanup(self._stop)
        # Keep the log_file open for reading so that we can still get the log
        # even if the log is deleted.
        open_log_file = open(self.config.log_file, "rb")
        self.addDetail(
            os.path.basename(self.config.log_file),
            Content(UTF8_TEXT, lambda: open_log_file))
    def rndc(self, command):
        """Executes a ``rndc`` command and returns status."""
        # Accept either a single command string or a tuple of arguments
        # (Python 2: `unicode`).
        if isinstance(command, unicode):
            command = (command,)
        ctl = subprocess.Popen(
            (self.RNDC_PATH, "-c", self.config.rndcconf_file) + command,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            preexec_fn=preexec_fn)
        outstr, errstr = ctl.communicate()
        return outstr, errstr
    def is_server_running(self):
        """Checks that the BIND server is up and running."""
        outdata, errdata = self.rndc("status")
        return "server is up and running" in outdata
    def _start(self):
        """Start the BIND server."""
        self._spawn()
        # Wait for the server to come up: stop when the process is dead, or
        # the timeout expires, or the server responds.
        timeout = time.time() + 15
        while time.time() < timeout and self.is_running():
            if self.is_server_running():
                break
            time.sleep(0.3)
        else:
            raise Exception(
                "Timeout waiting for BIND server to start: log in %r." %
                (self.config.log_file,))
    def _request_stop(self):
        # Ask named to shut down via rndc; capture any output as test detail.
        outstr, errstr = self.rndc("stop")
        if outstr:
            self.addDetail('stop-out', Content(UTF8_TEXT, lambda: [outstr]))
        if errstr:
            self.addDetail('stop-err', Content(UTF8_TEXT, lambda: [errstr]))
    def _stop(self):
        """Stop the running server. Normally called by cleanups."""
        self._request_stop()
        # Reap the child so no zombie process is left behind.
        self.process.wait()
class BINDServer(fixtures.Fixture):
    """A BIND server fixture.
    When setup a BIND instance will be running.
    :ivar config: The `BINDServerResources` used to start the server.
    """
    def __init__(self, config=None):
        super(BINDServer, self).__init__()
        self.config = config
    def setUp(self):
        super(BINDServer, self).setUp()
        # Allocate default resources lazily if the caller supplied none.
        if self.config is None:
            self.config = BINDServerResources()
        # Resources must be set up before the runner, which reads the
        # generated config files.
        self.useFixture(self.config)
        self.runner = BINDServerRunner(self.config)
        self.useFixture(self.runner)
if __name__ == "__main__":
    # Command-line entry point: build the BIND configuration (and, unless
    # --create-config-only is given, exec 'named' in the foreground).
    parser = argparse.ArgumentParser(description='Run a BIND server.')
    parser.add_argument(
        '--homedir',
        help=(
            "A directory where to put all the files the BIND"
            "server needs (configuration files and executable)"
        ))
    parser.add_argument(
        '--log-file',
        help="The log file allocated for the server")
    parser.add_argument(
        '--port', type=int,
        help="The port that will be used by BIND")
    parser.add_argument(
        '--rndc-port', type=int,
        help="The rndc port that will be used by BIND")
    parser.add_argument(
        '--overwrite-config', action='store_true',
        help="Whether or not to overwrite the configuration files "
             "if they already exist", default=False)
    parser.add_argument(
        '--create-config-only', action='store_true',
        help="If set, only create the config files instead of "
             "also running the service [default: %(default)s].",
        default=False)
    arguments = parser.parse_args()
    ensure_dir(arguments.homedir)
    # Create BINDServerResources with the provided options.
    resources = BINDServerResources(
        homedir=arguments.homedir, log_file=arguments.log_file,
        port=arguments.port, rndc_port=arguments.rndc_port)
    resources.setUp(overwrite_config=arguments.overwrite_config)
    # exec named.
    if not arguments.create_config_only:
        # os.execlp replaces this process with 'named' (-g: foreground,
        # log to stderr); nothing after this line runs.
        os.execlp(
            resources.named_file, resources.named_file, "-g", "-c",
            resources.conf_file)
| cloudbase/maas | src/maastesting/bindfixture.py | Python | agpl-3.0 | 11,994 |
# ActivitySim
# See full license in LICENSE.txt.
import logging
import numpy as np
import pandas as pd
from activitysim.core import tracing
from activitysim.core import config
from activitysim.core import assign
from activitysim.core import inject
from activitysim.core import simulate
from activitysim.core.util import assign_in_place
logger = logging.getLogger(__name__)
def compute_columns(df, model_settings, locals_dict=None, trace_label=None):
    """
    Evaluate expressions_spec in context of df, with optional additional pipeline tables in locals
    Parameters
    ----------
    df : pandas DataFrame
        or if None, expect name of pipeline table to be specified by DF in model_settings
    model_settings : dict or str
        dict with keys:
            DF - df_alias and (additionally, if df is None) name of pipeline table to load as df
            SPEC - name of expressions file (csv suffix optional) if different from model_settings
            TABLES - list of pipeline tables to load and make available as (read only) locals
        str:
            name of yaml file in configs_dir to load dict from
    locals_dict : dict, optional
        dict of locals (e.g. utility functions) to add to the execution environment
        (defaults to an empty dict)
    trace_label
    Returns
    -------
    results: pandas.DataFrame
        one column for each expression (except temps with ALL_CAP target names)
        same index as df
    """
    # Avoid the shared-mutable-default pitfall: normalize None here instead
    # of using a {} default in the signature.
    if locals_dict is None:
        locals_dict = {}
    if isinstance(model_settings, str):
        # model_settings names a yaml file to load the real settings from.
        model_settings_name = model_settings
        model_settings = config.read_model_settings('%s.yaml' % model_settings)
        assert model_settings, "Found no model settings for %s" % model_settings_name
    else:
        model_settings_name = 'dict'
        assert isinstance(model_settings, dict)
    assert 'DF' in model_settings, \
        "Expected to find 'DF' in %s" % model_settings_name
    df_name = model_settings.get('DF')
    helper_table_names = model_settings.get('TABLES', [])
    expressions_spec_name = model_settings.get('SPEC', None)
    assert expressions_spec_name is not None, \
        "Expected to find 'SPEC' in %s" % model_settings_name
    trace_label = tracing.extend_trace_label(trace_label or '', expressions_spec_name)
    if not expressions_spec_name.endswith(".csv"):
        expressions_spec_name = '%s.csv' % expressions_spec_name
    logger.debug(f"{trace_label} compute_columns using expression spec file {expressions_spec_name}")
    expressions_spec = assign.read_assignment_spec(config.config_file_path(expressions_spec_name))
    assert expressions_spec.shape[0] > 0, \
        "Expected to find some assignment expressions in %s" % expressions_spec_name
    # Load the helper pipeline tables requested via TABLES.
    tables = {t: inject.get_table(t).to_frame() for t in helper_table_names}
    # if df was passed in, df might be a slice, or any other table, but DF is it's local alias
    assert df_name not in tables, "Did not expect to find df '%s' in TABLES" % df_name
    tables[df_name] = df
    # be nice and also give it to them as df?
    tables['df'] = df
    _locals_dict = assign.local_utilities()
    _locals_dict.update(locals_dict)
    _locals_dict.update(tables)
    # FIXME a number of asim model preprocessors want skim_dict - should they request it in model_settings.TABLES?
    _locals_dict.update({
        # 'los': inject.get_injectable('network_los', None),
        'skim_dict': inject.get_injectable('skim_dict', None),
    })
    results, trace_results, trace_assigned_locals \
        = assign.assign_variables(expressions_spec,
                                  df,
                                  _locals_dict,
                                  trace_rows=tracing.trace_targets(df))
    if trace_results is not None:
        tracing.trace_df(trace_results,
                         label=trace_label,
                         slicer='NONE')
    if trace_assigned_locals:
        tracing.write_csv(trace_assigned_locals, file_name="%s_locals" % trace_label)
    return results
def assign_columns(df, model_settings, locals_dict=None, trace_label=None):
    """
    Evaluate expressions in context of df and assign resulting target columns to df
    Can add new or modify existing columns (if target same as existing df column name)
    Parameters - same as for compute_columns except df must not be None;
    locals_dict defaults to an empty dict.
    Returns - nothing since we modify df in place
    """
    assert df is not None
    assert model_settings is not None
    # Avoid a mutable {} default in the signature; compute_columns expects a
    # real dict, so normalize None here.
    if locals_dict is None:
        locals_dict = {}
    results = compute_columns(df, model_settings, locals_dict, trace_label)
    # Merge/overwrite the computed columns into df in place.
    assign_in_place(df, results)
# ##################################################################################################
# helpers
# ##################################################################################################
def annotate_preprocessors(
        df, locals_dict, skims,
        model_settings, trace_label):
    """
    Run the model's 'preprocessor' expression spec(s) against df, assigning
    the resulting columns to df in place.

    Parameters
    ----------
    df : pandas.DataFrame
        table to annotate (modified in place)
    locals_dict : dict
        extra locals made available to the expressions
    skims : dict
        skim wrappers; also set as targets on df via set_skim_wrapper_targets
    model_settings : dict
        model settings; the optional 'preprocessor' key holds one spec dict
        or a list of spec dicts
    trace_label : str
        label for tracing output
    """
    locals_d = {}
    locals_d.update(locals_dict)
    locals_d.update(skims)
    preprocessor_settings = model_settings.get('preprocessor', [])
    # Normalize a single spec dict to a one-element list.
    if not isinstance(preprocessor_settings, list):
        assert isinstance(preprocessor_settings, dict)
        preprocessor_settings = [preprocessor_settings]
    simulate.set_skim_wrapper_targets(df, skims)
    # Use a distinct loop variable: the original shadowed the
    # `model_settings` parameter, which invited subtle bugs.
    for preprocessor_spec in preprocessor_settings:
        results = compute_columns(
            df=df,
            model_settings=preprocessor_spec,
            locals_dict=locals_d,
            trace_label=trace_label)
        assign_in_place(df, results)
def filter_chooser_columns(choosers, chooser_columns):
    """
    Restrict `choosers` to the subset of `chooser_columns` it actually has.

    Columns named in `chooser_columns` but absent from `choosers` are
    logged (at debug level) and silently dropped; the returned frame keeps
    the requested column order.
    """
    absent = [col for col in chooser_columns if col not in choosers]
    if absent:
        logger.debug("filter_chooser_columns missing_columns %s" % absent)
    # ignore any columns not appearing in choosers df
    present = [col for col in chooser_columns if col in choosers]
    return choosers[present]
| synthicity/activitysim | activitysim/core/expressions.py | Python | agpl-3.0 | 5,892 |
# -*- coding: utf-8 -*-
from . import invoice
from . import report | kmee/department | invoice_department/__init__.py | Python | agpl-3.0 | 66 |
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# Django
from django.urls import reverse
# wger
from wger.core.tests.base_testcase import WgerTestCase
from wger.manager.models import Workout
from wger.nutrition.models import NutritionPlan
from wger.weight.models import WeightEntry
class DashboardTestCase(WgerTestCase):
    """
    Dashboard (landing page) test case
    """
    def dashboard(self):
        """
        Helper function to test the dashboard

        Exercises the dashboard context step by step: empty state, then
        after adding a workout, a nutrition plan, and a weight entry.
        """
        response = self.client.get(reverse('core:index'))
        # Everybody is redirected
        self.assertEqual(response.status_code, 302)
        # Delete the objects so we can test adding them later
        NutritionPlan.objects.all().delete()
        Workout.objects.all().delete()
        WeightEntry.objects.all().delete()
        response = self.client.get(reverse('core:dashboard'))
        # There is something to send to the template
        self.assertEqual(response.status_code, 200)
        # With everything deleted, the dashboard context is empty.
        self.assertFalse(response.context['weight'])
        self.assertFalse(response.context['current_workout'])
        self.assertFalse(response.context['plan'])
        #
        # 1. Add a workout
        #
        self.client.get(reverse('manager:workout:add'))
        response = self.client.get(reverse('core:dashboard'))
        self.assertEqual(response.status_code, 200)
        self.assertFalse(response.context['weight'])
        self.assertTrue(response.context['current_workout'])
        self.assertFalse(response.context['plan'])
        self.assertTrue(response.context['weekdays'])
        #
        # 2. Add a nutrition plan
        #
        self.client.get(reverse('nutrition:plan:add'))
        response = self.client.get(reverse('core:dashboard'))
        self.assertEqual(response.status_code, 200)
        self.assertFalse(response.context['weight'])
        self.assertTrue(response.context['current_workout'])
        self.assertTrue(response.context['plan'])
        self.assertTrue(response.context['weekdays'])
        #
        # 3. Add a weight entry
        #
        self.client.post(reverse('weight:add'),
                         {'weight': 100,
                          'date': '2012-01-01',
                          'user': 1},)
        response = self.client.get(reverse('core:dashboard'))
        self.assertEqual(response.status_code, 200)
        # Now every dashboard section has content.
        self.assertTrue(response.context['weight'])
        self.assertTrue(response.context['current_workout'])
        self.assertTrue(response.context['plan'])
        self.assertTrue(response.context['weekdays'])
    def test_dashboard_logged_in(self):
        """
        Test index page as a logged in user
        """
        self.user_login('admin')
        self.dashboard()
    def test_dashboard(self):
        """
        Test that the dashboard has the correct CSS files
        """
        response = self.client.get(reverse('core:dashboard'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'css/workout-manager.css', html=False)
        self.assertContains(response, 'yarn/bootstrap-compiled.css', html=False)
| rolandgeider/wger | wger/core/tests/test_index.py | Python | agpl-3.0 | 3,734 |
VERSION = "0.8.7" | KristianOellegaard/python-nagios-frontend | balbec/__init__.py | Python | agpl-3.0 | 17 |
"""
Copyright (C) 2016 Genome Research Ltd.
Author: Irina Colgiu <ic4@sanger.ac.uk>
This program is part of meta-check
meta-check is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
This file has been created on May 20, 2016.
""" | wtsi-hgi/metadata-check | mcheck/tests/com/__init__.py | Python | agpl-3.0 | 799 |
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.inheritance import own_metadata
from xmodule.modulestore import Location
from courseware.module_render import toc_for_course, get_module_for_descriptor
from courseware.model_data import FieldDataCache
from courseware.views import jump_to_id
from django.core.urlresolvers import reverse
from HTMLParser import HTMLParser
from sgmllib import SGMLParser
from django_comment_client.base.views import ajax_content_response
#from django_comment_client.forum.views import inline_discussion,get_threads
from django_comment_client.utils import JsonResponse, JsonError, extract, get_courseware_context, safe_content
from django_comment_client.permissions import check_permissions_by_view, cached_has_permission
from util.json_request import expect_json, JsonResponse
from course_groups.cohorts import get_cohort_id, is_commentable_cohorted
from courseware.courses import get_course_with_access
import comment_client as cc
import sys, re
import urllib
from django.views.decorators.http import require_POST, require_GET
from django.contrib.auth.decorators import login_required
from course_groups.cohorts import (is_course_cohorted, get_cohort_id, is_commentable_cohorted,
get_cohorted_commentables, get_course_cohorts, get_cohort_by_id)
from django_comment_client.utils import (merge_dict, extract, strip_none, get_courseware_context)
# Python 2 hack: reload(sys) restores the setdefaultencoding attribute that
# site.py deletes, so the process-wide default encoding can be forced to UTF-8.
reload(sys)
sys.setdefaultencoding('utf-8')

# Categories whose items are edited through the 'direct' modulestore
# (see get_modulestore below).
DIRECT_ONLY_CATEGORIES = ['course', 'chapter', 'sequential', 'about', 'static_tab', 'course_info']
def get_modulestore(category_or_location):
    """Return the modulestore to use for modifying the specified location.

    Accepts either a Location (whose category is extracted) or a category
    string; items in DIRECT_ONLY_CATEGORIES are edited through the
    'direct' store, everything else through the default store.
    """
    category = (
        category_or_location.category
        if isinstance(category_or_location, Location)
        else category_or_location
    )
    if category in DIRECT_ONLY_CATEGORIES:
        return modulestore('direct')
    return modulestore()
def get_discussion_context(request, course, location, parent_location,portfolio_user):
    """Render the discussion xmodule at *location* and return its HTML.

    NOTE(review): the module is rendered as *portfolio_user* (the profile
    owner), not request.user -- confirm this is intended.
    """
    # Build the runtime context needed to render the module: the enclosing
    # section descriptor and a field-data cache scoped to portfolio_user.
    section_descriptor = modulestore().get_instance(course.id, parent_location, depth=None)
    field_data_cache = FieldDataCache.cache_for_descriptor_descendents(course.id, portfolio_user, section_descriptor, depth=None)
    descriptor = modulestore().get_item(location)
    module = get_module_for_descriptor(portfolio_user, request, descriptor, field_data_cache, course.id,
                                       position=None, wrap_xmodule_display=True, grade_bucket_type=None,
                                       static_asset_path='')
    # 'student_view' is the standard XBlock view; .content is the HTML fragment.
    return module.runtime.render(module, None, 'student_view').content
def create_discussion_about_me(request, course, portfolio_user):
    """Create (if missing) and render the 'about me' discussion module for
    *portfolio_user* in *course*.

    The discussion xmodule gets a deterministic name derived from the course
    id and the user id (suffix '__am'), so repeated calls reuse the same
    module.  When the viewer is not the profile owner, the 'new post' button
    is stripped from the rendered HTML.
    """
    category = 'discussion'
    context = ''
    display_name = 'Discussion'
    # NOTE(review): discussion_visibility is assigned but never used below.
    discussion_visibility = True
    '''
    if not has_access(request.user, parent_location):
        raise PermissionDenied()
    '''
    course_location = course.id.split('/')
    #parent_location = Location('i4x://'+course_location[0]+'/'+course_location[1]+'/'+course_location[2])
    parent_location = course.location
    parent = get_modulestore(category).get_item(parent_location)
    #dest_location = parent_location.replace(category=category, name=uuid4().hex)
    # Deterministic per-user module name: org_course_run_userid__am
    dest_location = Location(parent_location).replace(category=category,name=course_location[0]+'_'+course_location[1]+'_'+course_location[2]+'_'+str(portfolio_user.id)+'__am')
    #dest_location = Location('i4x://'+course_location[0]+'/'+course_location[1]+'/discussion/'+course_location[0]+'_'+course_location[1]+'_'+course_location[2]+'_'+str(portfolio_user.id)+'_am')
    # get the metadata, display_name, and definition from the request
    #if modulestore().has_item(course.id, dest_location):
    #    modulestore().delete_item(dest_location)
    # Only create the module the first time; afterwards it is just rendered.
    if modulestore().has_item(course.id, dest_location) == False:
        metadata = {}
        data = None
        template_id = request.POST.get('boilerplate')
        if template_id is not None:
            # NOTE(review): XModuleDescriptor is not imported in this module's
            # visible imports -- this branch would raise NameError when a
            # 'boilerplate' value is posted, unless imported elsewhere.
            clz = XModuleDescriptor.load_class(category)
            if clz is not None:
                template = clz.get_template(template_id)
                if template is not None:
                    metadata = template.get('metadata', {})
                    data = template.get('data')
        if display_name is not None:
            metadata['display_name'] = display_name
        metadata['discussion_category']=''
        metadata['discussion_target'] = ''
        get_modulestore(category).create_and_save_xmodule(
            dest_location,
            definition_data=data,
            metadata=metadata,
            system=parent.system,
        )
    context = get_discussion_context(request, course, dest_location, parent_location, portfolio_user)
    # Remove the 'new post' button when a visitor (not the owner) views the page.
    new_post_btn_match=re.compile('<a*[^>]*class="new-post-btn"[^>]*>[\s\S]*?<\/a>')
    if request.user.id != portfolio_user.id:
        # NOTE(review): findall(...)[0] raises IndexError if the rendered HTML
        # contains no new-post button -- confirm the button is always present.
        context = context.replace(new_post_btn_match.findall(context)[0], '')
    return context
| EduPepperPDTesting/pepper2013-testing | lms/djangoapps/portfolio/about_me.py | Python | agpl-3.0 | 5,168 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP module
# Copyright (C) 2010 Micronaet srl (<http://www.micronaet.it>) and the
# Italian OpenERP Community (<http://www.openerp-italia.com>)
#
# ########################################################################
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2008 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import specification
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| cherrygirl/micronaet7 | chemical_analysis_specification/__init__.py | Python | agpl-3.0 | 1,318 |
"""
Helper classes to specify file dependencies for input and output.
Supports inputs from S3 and local FS.
Supports outputs to HDFS, S3, and local FS.
"""
import boto
import datetime
import fnmatch
import logging
import os
import re
import luigi
import luigi.hdfs
import luigi.format
import luigi.task
from luigi.date_interval import DateInterval
from edx.analytics.tasks.s3_util import generate_s3_sources, get_s3_bucket_key_names
from edx.analytics.tasks.url import ExternalURL, UncheckedExternalURL, url_path_join, get_target_from_url
from edx.analytics.tasks.util import eventlog
log = logging.getLogger(__name__)
class PathSetTask(luigi.Task):
    """
    A task to select a subset of files in an S3 bucket, HDFS, or local FS.

    Parameters:
        src: list of URLs pointing to folders (s3://, hdfs, or local FS).
        include: a list of glob patterns used to select files. Multiple
            patterns are OR'd.
        manifest: a URL pointing to a manifest file location; when set,
            the selected paths are written there and the manifest itself
            becomes the single requirement.
    """
    src = luigi.Parameter(
        is_list=True,
        config_path={'section': 'event-logs', 'name': 'source'}
    )
    include = luigi.Parameter(is_list=True, default=('*',))
    manifest = luigi.Parameter(default=None)

    def __init__(self, *args, **kwargs):
        super(PathSetTask, self).__init__(*args, **kwargs)
        # S3 connection is created lazily on first use (see generate_file_list).
        self.s3_conn = None

    def generate_file_list(self):
        """Yield each individual path given a source folder and a set of file-matching expressions."""
        for src in self.src:
            if src.startswith('s3'):
                # connect lazily as needed:
                if self.s3_conn is None:
                    self.s3_conn = boto.connect_s3()
                for _bucket, _root, path in generate_s3_sources(self.s3_conn, src, self.include):
                    source = url_path_join(src, path)
                    yield ExternalURL(source)
            elif src.startswith('hdfs'):
                # HDFS listing is flat; patterns are applied to the full path.
                for source in luigi.hdfs.listdir(src):
                    if any(fnmatch.fnmatch(source, include_val) for include_val in self.include):
                        yield ExternalURL(source)
            else:
                # Apply the include patterns to the relative path below the src directory.
                for dirpath, _dirnames, files in os.walk(src):
                    for filename in files:
                        filepath = os.path.join(dirpath, filename)
                        relpath = os.path.relpath(filepath, src)
                        if any(fnmatch.fnmatch(relpath, include_val) for include_val in self.include):
                            yield ExternalURL(filepath)

    def manifest_file_list(self):
        """Write each individual path to a manifest file and yield the path to that file."""
        manifest_target = get_target_from_url(self.manifest)
        # Only write the manifest once; subsequent runs reuse the existing file.
        if not manifest_target.exists():
            with manifest_target.open('w') as manifest_file:
                for external_url_task in self.generate_file_list():
                    manifest_file.write(external_url_task.url + '\n')
        yield ExternalURL(self.manifest)

    def requires(self):
        # Either a single manifest file or the individual matched files.
        if self.manifest is not None:
            return self.manifest_file_list()
        else:
            return self.generate_file_list()

    def complete(self):
        # An optimization: just declare that the task is always
        # complete, by definition, because it is whatever files were
        # requested that match the filter, not a set of files whose
        # existence needs to be checked or generated again.
        return True

    def output(self):
        # One target per required external URL.
        return [task.output() for task in self.requires()]
class EventLogSelectionDownstreamMixin(object):
    """Defines parameters for passing upstream to tasks that use EventLogSelectionMixin."""

    # List of URLs of directories containing event log files.
    source = luigi.Parameter(
        is_list=True,
        config_path={'section': 'event-logs', 'name': 'source'}
    )
    # Date range for which events should be selected.
    interval = luigi.DateIntervalParameter()
    # Padding added to both ends of the interval when matching file names
    # (events near the boundary may land in neighboring files).
    expand_interval = luigi.TimeDeltaParameter(
        config_path={'section': 'event-logs', 'name': 'expand_interval'}
    )
    # Regex patterns (with an optional named 'date' group) used to match
    # candidate log file URLs.
    pattern = luigi.Parameter(
        is_list=True,
        config_path={'section': 'event-logs', 'name': 'pattern'}
    )
class EventLogSelectionTask(EventLogSelectionDownstreamMixin, luigi.WrapperTask):
    """
    Select all relevant event log input files from a directory.

    Recursively list all files in the directory which is expected to contain the input files organized in such a way
    that a pattern can be used to find them. Filenames are expected to contain a date which represents an approximation
    of the date found in the events themselves.

    Parameters:
        source: A URL to a path that contains log files that contain the events.
        interval: The range of dates to export logs for.
        expand_interval: A time interval to add to the beginning and end of the interval to expand the windows of files
            captured.
        pattern: A regex with a named capture group for the date that approximates the date that the events within were
            emitted. Note that the search interval is expanded, so events don't have to be in exactly the right file
            in order for them to be processed.
    """

    def __init__(self, *args, **kwargs):
        super(EventLogSelectionTask, self).__init__(*args, **kwargs)
        # Widen the requested interval by expand_interval on both sides so
        # files whose name-date is slightly off are still captured.
        self.interval = DateInterval(
            self.interval.date_a - self.expand_interval,
            self.interval.date_b + self.expand_interval
        )
        self.requirements = None

    def requires(self):
        # This method gets called several times. Avoid making multiple round trips to S3 by caching the first result.
        if self.requirements is None:
            log.debug('No saved requirements found, refreshing requirements list.')
            self.requirements = self._get_requirements()
        else:
            log.debug('Using cached requirements.')
        return self.requirements

    def _get_requirements(self):
        """
        Gather the set of requirements needed to run the task.

        This can be a rather expensive operation that requires usage of the S3 API to list all files in the source
        bucket and select the ones that are applicable to the given date range.
        """
        url_gens = []
        for source in self.source:
            if source.startswith('s3'):
                url_gens.append(self._get_s3_urls(source))
            elif source.startswith('hdfs'):
                url_gens.append(self._get_hdfs_urls(source))
            else:
                url_gens.append(self._get_local_urls(source))

        log.debug('Matching urls using pattern(s)="%s"', self.pattern)
        log.debug(
            'Date interval: %s <= date < %s', self.interval.date_a.isoformat(), self.interval.date_b.isoformat()
        )

        return [UncheckedExternalURL(url) for url_gen in url_gens for url in url_gen if self.should_include_url(url)]

    def _get_s3_urls(self, source):
        """Recursively list all files inside the source URL directory."""
        s3_conn = boto.connect_s3()
        bucket_name, root = get_s3_bucket_key_names(source)
        bucket = s3_conn.get_bucket(bucket_name)
        for key_metadata in bucket.list(root):
            # Skip zero-byte keys (e.g. directory placeholders).
            if key_metadata.size > 0:
                key_path = key_metadata.key[len(root):].lstrip('/')
                yield url_path_join(source, key_path)

    def _get_hdfs_urls(self, source):
        """List all entries inside the source HDFS directory."""
        for source in luigi.hdfs.listdir(source):
            yield source

    def _get_local_urls(self, source):
        """Recursively list all files inside the source directory on the local filesystem."""
        for directory_path, _subdir_paths, filenames in os.walk(source):
            for filename in filenames:
                yield os.path.join(directory_path, filename)

    def should_include_url(self, url):
        """
        Determine whether the file pointed to by the URL should be included in the set of files used for analysis.

        Presently filters first on pattern match and then on the datestamp extracted from the file name.
        """
        # Find the first pattern (if any) that matches the URL.
        match = None
        for pattern in self.pattern:
            match = re.match(pattern, url)
            if match:
                break
        if not match:
            return False

        # If the pattern contains a date group, use that to check if within the requested interval.
        # If it doesn't contain such a group, then assume that it should be included.
        should_include = True
        if 'date' in match.groupdict():
            parsed_datetime = datetime.datetime.strptime(match.group('date'), '%Y%m%d')
            parsed_date = datetime.date(parsed_datetime.year, parsed_datetime.month, parsed_datetime.day)
            should_include = parsed_date in self.interval
        return should_include

    def output(self):
        """One output target per selected URL."""
        return [task.output() for task in self.requires()]
class EventLogSelectionMixin(EventLogSelectionDownstreamMixin):
    """
    Extract events corresponding to a specified time interval and outputs them from a mapper.

    Parameters:
        source: A URL to a path that contains log files that contain the events.
        interval: The range of dates to export logs for.
        pattern: A regex with a named capture group for the date that approximates the date that the events within were
            emitted. Note that the search interval is expanded, so events don't have to be in exactly the right file
            in order for them to be processed.
    """

    def requires(self):
        """Use EventLogSelectionTask to define inputs."""
        return EventLogSelectionTask(
            source=self.source,
            interval=self.interval,
            pattern=self.pattern,
        )

    def init_local(self):
        """Convert intervals to date strings for alpha-numeric comparison."""
        super(EventLogSelectionMixin, self).init_local()
        self.lower_bound_date_string = self.interval.date_a.strftime('%Y-%m-%d')
        self.upper_bound_date_string = self.interval.date_b.strftime('%Y-%m-%d')

    def get_event_and_date_string(self, line):
        """Parse an event log line; return (event, date_string) or None when
        the line is unparseable, lacks a timestamp, or falls outside the
        [lower, upper) interval."""
        event = eventlog.parse_json_event(line)
        if event is None:
            return None

        event_time = self.get_event_time(event)
        if not event_time:
            return None

        # Don't use strptime to parse the date, it is extremely slow
        # to do so. Instead rely on alphanumeric comparisons. The
        # timestamp is ISO8601 formatted, so dates will look like
        # %Y-%m-%d. For example: 2014-05-20.
        date_string = event_time.split("T")[0]

        if date_string < self.lower_bound_date_string or date_string >= self.upper_bound_date_string:
            return None

        return event, date_string

    def get_event_time(self, event):
        """Return the event's 'time' field, or None (incrementing a counter) when absent."""
        try:
            return event['time']
        except KeyError:
            self.incr_counter('Event', 'Missing Time Field', 1)
            return None
| open-craft/edx-analytics-pipeline | edx/analytics/tasks/pathutil.py | Python | agpl-3.0 | 11,171 |
"""Tests for the certificates Python API. """
from contextlib import contextmanager
import ddt
from django.test import TestCase, RequestFactory
from django.test.utils import override_settings
from django.conf import settings
from mock import patch
from nose.plugins.attrib import attr
from opaque_keys.edx.locator import CourseLocator
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
from course_modes.models import CourseMode
from course_modes.tests.factories import CourseModeFactory
from config_models.models import cache
from util.testing import EventTestMixin
from certificates import api as certs_api
from certificates.models import (
CertificateStatuses,
CertificateGenerationConfiguration,
ExampleCertificate,
GeneratedCertificate,
certificate_status_for_student,
)
from certificates.queue import XQueueCertInterface, XQueueAddToQueueError
from certificates.tests.factories import GeneratedCertificateFactory
FEATURES_WITH_CERTS_ENABLED = settings.FEATURES.copy()
FEATURES_WITH_CERTS_ENABLED['CERTIFICATES_HTML_VIEW'] = True
class WebCertificateTestMixin(object):
    """
    Mixin with helpers for testing Web Certificates.

    Host classes are expected to provide self.course, self.store, self.user
    (used by _setup_course_certificate) and, for _mock_queue's failure path,
    an ERROR_REASON attribute.
    """
    @contextmanager
    def _mock_passing_grade(self):
        """
        Mock the grading function to always return a passing grade.
        """
        symbol = 'courseware.grades.grade'
        with patch(symbol) as mock_grade:
            mock_grade.return_value = {'grade': 'Pass', 'percent': 0.75}
            yield

    @contextmanager
    def _mock_queue(self, is_successful=True):
        """
        Mock the "send to XQueue" method to return either success or an error.
        """
        symbol = 'capa.xqueue_interface.XQueueInterface.send_to_queue'
        with patch(symbol) as mock_send_to_queue:
            if is_successful:
                mock_send_to_queue.return_value = (0, "Successfully queued")
            else:
                # Simulate a queue submission failure with the host class's reason.
                mock_send_to_queue.side_effect = XQueueAddToQueueError(1, self.ERROR_REASON)

            yield mock_send_to_queue

    def _setup_course_certificate(self):
        """
        Create an active HTML certificate configuration for self.course
        and persist it to the modulestore.
        """
        certificates = [
            {
                'id': 1,
                'name': 'Test Certificate Name',
                'description': 'Test Certificate Description',
                'course_title': 'tes_course_title',
                'signatories': [],
                'version': 1,
                'is_active': True
            }
        ]
        self.course.certificates = {'certificates': certificates}
        self.course.cert_html_view_enabled = True
        self.course.save()
        self.store.update_item(self.course, self.user.id)
@attr('shard_1')
class CertificateDownloadableStatusTests(WebCertificateTestMixin, ModuleStoreTestCase):
    """Tests for the `certificate_downloadable_status` helper function. """

    def setUp(self):
        super(CertificateDownloadableStatusTests, self).setUp()

        self.student = UserFactory()
        # A user with no certificate record at all.
        self.student_no_cert = UserFactory()
        self.course = CourseFactory.create(
            org='edx',
            number='verified',
            display_name='Verified Course'
        )

        self.request_factory = RequestFactory()

    def test_cert_status_with_generating(self):
        """A certificate in 'generating' state is reported as in progress."""
        GeneratedCertificateFactory.create(
            user=self.student,
            course_id=self.course.id,
            status=CertificateStatuses.generating,
            mode='verified'
        )
        self.assertEqual(
            certs_api.certificate_downloadable_status(self.student, self.course.id),
            {
                'is_downloadable': False,
                'is_generating': True,
                'download_url': None,
                'uuid': None,
            }
        )

    def test_cert_status_with_error(self):
        """A certificate in 'error' state is still reported as generating,
        with no download URL."""
        GeneratedCertificateFactory.create(
            user=self.student,
            course_id=self.course.id,
            status=CertificateStatuses.error,
            mode='verified'
        )

        self.assertEqual(
            certs_api.certificate_downloadable_status(self.student, self.course.id),
            {
                'is_downloadable': False,
                'is_generating': True,
                'download_url': None,
                'uuid': None
            }
        )

    def test_without_cert(self):
        """A user with no certificate record gets an all-falsy status."""
        self.assertEqual(
            certs_api.certificate_downloadable_status(self.student_no_cert, self.course.id),
            {
                'is_downloadable': False,
                'is_generating': False,
                'download_url': None,
                'uuid': None,
            }
        )

    def verify_downloadable_pdf_cert(self):
        """
        Verifies certificate_downloadable_status returns the
        correct response for PDF certificates.
        """
        cert = GeneratedCertificateFactory.create(
            user=self.student,
            course_id=self.course.id,
            status=CertificateStatuses.downloadable,
            mode='verified',
            download_url='www.google.com',
        )

        self.assertEqual(
            certs_api.certificate_downloadable_status(self.student, self.course.id),
            {
                'is_downloadable': True,
                'is_generating': False,
                'download_url': 'www.google.com',
                'uuid': cert.verify_uuid
            }
        )

    @patch.dict(settings.FEATURES, {'CERTIFICATES_HTML_VIEW': True})
    def test_pdf_cert_with_html_enabled(self):
        # PDF certificates keep working even when the HTML view feature is on.
        self.verify_downloadable_pdf_cert()

    def test_pdf_cert_with_html_disabled(self):
        self.verify_downloadable_pdf_cert()

    @patch.dict(settings.FEATURES, {'CERTIFICATES_HTML_VIEW': True})
    def test_with_downloadable_web_cert(self):
        """An HTML (web) certificate yields an LMS-hosted download URL."""
        CourseEnrollment.enroll(self.student, self.course.id, mode='honor')
        self._setup_course_certificate()
        with self._mock_passing_grade():
            certs_api.generate_user_certificates(self.student, self.course.id)

        cert_status = certificate_status_for_student(self.student, self.course.id)
        self.assertEqual(
            certs_api.certificate_downloadable_status(self.student, self.course.id),
            {
                'is_downloadable': True,
                'is_generating': False,
                'download_url': '/certificates/user/{user_id}/course/{course_id}'.format(
                    user_id=self.student.id,  # pylint: disable=no-member
                    course_id=self.course.id,
                ),
                'uuid': cert_status['uuid']
            }
        )
@attr('shard_1')
@override_settings(CERT_QUEUE='certificates')
class GenerateUserCertificatesTest(EventTestMixin, WebCertificateTestMixin, ModuleStoreTestCase):
    """Tests for generating certificates for students. """

    # Error message passed to the mocked XQueue when simulating a failure
    # (consumed by WebCertificateTestMixin._mock_queue).
    ERROR_REASON = "Kaboom!"

    def setUp(self):  # pylint: disable=arguments-differ
        super(GenerateUserCertificatesTest, self).setUp('certificates.api.tracker')

        self.student = UserFactory.create(
            email='joe_user@edx.org',
            username='joeuser',
            password='foo'
        )
        self.student_no_cert = UserFactory()
        self.course = CourseFactory.create(
            org='edx',
            number='verified',
            display_name='Verified Course',
            grade_cutoffs={'cutoff': 0.75, 'Pass': 0.5}
        )
        self.enrollment = CourseEnrollment.enroll(self.student, self.course.id, mode='honor')
        self.request_factory = RequestFactory()

    def test_new_cert_requests_into_xqueue_returns_generating(self):
        """A passing student's request is queued and the cert marked 'generating'."""
        with self._mock_passing_grade():
            with self._mock_queue():
                certs_api.generate_user_certificates(self.student, self.course.id)

        # Verify that the certificate has status 'generating'
        cert = GeneratedCertificate.eligible_certificates.get(user=self.student, course_id=self.course.id)
        self.assertEqual(cert.status, CertificateStatuses.generating)
        self.assert_event_emitted(
            'edx.certificate.created',
            user_id=self.student.id,
            course_id=unicode(self.course.id),
            certificate_url=certs_api.get_certificate_url(self.student.id, self.course.id),
            certificate_id=cert.verify_uuid,
            enrollment_mode=cert.mode,
            generation_mode='batch'
        )

    def test_xqueue_submit_task_error(self):
        """An XQueue submission failure marks the cert 'error' and records the reason."""
        with self._mock_passing_grade():
            with self._mock_queue(is_successful=False):
                certs_api.generate_user_certificates(self.student, self.course.id)

        # Verify that the certificate has been marked with status error
        cert = GeneratedCertificate.eligible_certificates.get(user=self.student, course_id=self.course.id)
        self.assertEqual(cert.status, 'error')
        self.assertIn(self.ERROR_REASON, cert.error_reason)

    @patch.dict(settings.FEATURES, {'CERTIFICATES_HTML_VIEW': True})
    def test_new_cert_requests_returns_generating_for_html_certificate(self):
        """
        Test no message sent to Xqueue if HTML certificate view is enabled
        """
        self._setup_course_certificate()
        with self._mock_passing_grade():
            certs_api.generate_user_certificates(self.student, self.course.id)

        # Verify that the certificate has status 'downloadable'
        cert = GeneratedCertificate.eligible_certificates.get(user=self.student, course_id=self.course.id)
        self.assertEqual(cert.status, CertificateStatuses.downloadable)

    @patch.dict(settings.FEATURES, {'CERTIFICATES_HTML_VIEW': False})
    def test_cert_url_empty_with_invalid_certificate(self):
        """
        Test certificate url is empty if html view is not enabled and certificate is not yet generated
        """
        url = certs_api.get_certificate_url(self.student.id, self.course.id)
        self.assertEqual(url, "")
@attr('shard_1')
@ddt.ddt
class CertificateGenerationEnabledTest(EventTestMixin, TestCase):
    """Test enabling/disabling self-generated certificates for a course. """

    COURSE_KEY = CourseLocator(org='test', course='test', run='test')

    def setUp(self):  # pylint: disable=arguments-differ
        super(CertificateGenerationEnabledTest, self).setUp('certificates.api.tracker')

        # Since model-based configuration is cached, we need
        # to clear the cache before each test.
        cache.clear()

    # (feature flag, per-course flag, expected result): generation is enabled
    # only when both the global feature and the course flag are on.
    @ddt.data(
        (None, None, False),
        (False, None, False),
        (False, True, False),
        (True, None, False),
        (True, False, False),
        (True, True, True)
    )
    @ddt.unpack
    def test_cert_generation_enabled(self, is_feature_enabled, is_course_enabled, expect_enabled):
        if is_feature_enabled is not None:
            CertificateGenerationConfiguration.objects.create(enabled=is_feature_enabled)

        if is_course_enabled is not None:
            certs_api.set_cert_generation_enabled(self.COURSE_KEY, is_course_enabled)
            # Setting the course flag emits an enabled/disabled tracking event.
            cert_event_type = 'enabled' if is_course_enabled else 'disabled'
            event_name = '.'.join(['edx', 'certificate', 'generation', cert_event_type])
            self.assert_event_emitted(
                event_name,
                course_id=unicode(self.COURSE_KEY),
            )

        self._assert_enabled_for_course(self.COURSE_KEY, expect_enabled)

    def test_latest_setting_used(self):
        """The most recent per-course setting wins."""
        # Enable the feature
        CertificateGenerationConfiguration.objects.create(enabled=True)

        # Enable for the course
        certs_api.set_cert_generation_enabled(self.COURSE_KEY, True)
        self._assert_enabled_for_course(self.COURSE_KEY, True)

        # Disable for the course
        certs_api.set_cert_generation_enabled(self.COURSE_KEY, False)
        self._assert_enabled_for_course(self.COURSE_KEY, False)

    def test_setting_is_course_specific(self):
        """Enabling one course does not enable others."""
        # Enable the feature
        CertificateGenerationConfiguration.objects.create(enabled=True)

        # Enable for one course
        certs_api.set_cert_generation_enabled(self.COURSE_KEY, True)
        self._assert_enabled_for_course(self.COURSE_KEY, True)

        # Should be disabled for another course
        other_course = CourseLocator(org='other', course='other', run='other')
        self._assert_enabled_for_course(other_course, False)

    def _assert_enabled_for_course(self, course_key, expect_enabled):
        """Check that self-generated certificates are enabled or disabled for the course. """
        actual_enabled = certs_api.cert_generation_enabled(course_key)
        self.assertEqual(expect_enabled, actual_enabled)
@attr('shard_1')
class GenerateExampleCertificatesTest(TestCase):
    """Test generation of example certificates. """

    COURSE_KEY = CourseLocator(org='test', course='test', run='test')

    def setUp(self):
        super(GenerateExampleCertificatesTest, self).setUp()

    def test_generate_example_certs(self):
        """One example certificate is queued for a course with a single (honor) mode."""
        # Generate certificates for the course
        CourseModeFactory.create(course_id=self.COURSE_KEY, mode_slug=CourseMode.HONOR)
        with self._mock_xqueue() as mock_queue:
            certs_api.generate_example_certificates(self.COURSE_KEY)

        # Verify that the appropriate certs were added to the queue
        self._assert_certs_in_queue(mock_queue, 1)

        # Verify that the certificate status is "started"
        self._assert_cert_status({
            'description': 'honor',
            'status': 'started'
        })

    def test_generate_example_certs_with_verified_mode(self):
        """One example certificate is queued per course mode (honor + verified)."""
        # Create verified and honor modes for the course
        CourseModeFactory(course_id=self.COURSE_KEY, mode_slug='honor')
        CourseModeFactory(course_id=self.COURSE_KEY, mode_slug='verified')

        # Generate certificates for the course
        with self._mock_xqueue() as mock_queue:
            certs_api.generate_example_certificates(self.COURSE_KEY)

        # Verify that the appropriate certs were added to the queue
        self._assert_certs_in_queue(mock_queue, 2)

        # Verify that the certificate status is "started"
        self._assert_cert_status(
            {
                'description': 'verified',
                'status': 'started'
            },
            {
                'description': 'honor',
                'status': 'started'
            }
        )

    @contextmanager
    def _mock_xqueue(self):
        """Mock the XQueue method for adding a task to the queue. """
        with patch.object(XQueueCertInterface, 'add_example_cert') as mock_queue:
            yield mock_queue

    def _assert_certs_in_queue(self, mock_queue, expected_num):
        """Check that the certificate generation task was added to the queue. """
        # call_args_list entries are (args, kwargs); the cert is the first positional arg.
        certs_in_queue = [call_args[0] for (call_args, __) in mock_queue.call_args_list]
        self.assertEqual(len(certs_in_queue), expected_num)
        for cert in certs_in_queue:
            self.assertTrue(isinstance(cert, ExampleCertificate))

    def _assert_cert_status(self, *expected_statuses):
        """Check the example certificate status. """
        actual_status = certs_api.example_certificates_status(self.COURSE_KEY)
        self.assertEqual(list(expected_statuses), actual_status)
| ZLLab-Mooc/edx-platform | lms/djangoapps/certificates/tests/test_api.py | Python | agpl-3.0 | 15,488 |
"""
MIT License
Copyright (c) 2018 Claude SIMON (https://q37.info/s/rmnmqd49)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
_VERSION = "0.13"  # Binding version string, sent to the proxy as "PYH <version>" (see _handshakeFaaS).
import XDHqSHRD
from XDHqSHRD import getEnv
import inspect, os, socket, sys, threading, time
# Select the low-level transport helpers matching the running interpreter:
# XDHqFaaS2 for Python 2, XDHqFaaS3 for Python 3.  Both expose the same set
# of byte-level read/write primitives used by the wrappers below.
if sys.version_info[0] == 2:
    import XDHqFaaS2
    l = XDHqFaaS2.l
    _writeUInt = XDHqFaaS2.writeUInt
    _writeString = XDHqFaaS2.writeString
    _readUInt = XDHqFaaS2.readUInt
    _getString = XDHqFaaS2.getString
    _exitThread = XDHqFaaS2.exitThread
else:
    import XDHqFaaS3
    l = XDHqFaaS3.l
    _writeUInt = XDHqFaaS3.writeUInt
    _writeString = XDHqFaaS3.writeString
    _readUInt = XDHqFaaS3.readUInt
    _getString = XDHqFaaS3.getString
    _exitThread = XDHqFaaS3.exitThread
# Set to True to quit an application from a Jupyter notebook; polled through
# the 'lambda: _bye' callbacks handed to the low-level read/write helpers.
_bye = False
_DEFAULT_SUPPLIER_LABEL = "auto"  # Default key into _Supplier._actions.
class _Supplier:
    """Registry of strategies used to hand the session URL to the user."""
    # Supplier explicitly chosen via set_supplier() (label or callable); None = unset.
    current = None
    # Maps a supplier label to an action applied to the session URL.
    # 'auto' delegates to XDHqSHRD.open (presumably opens the URL in a
    # browser -- see XDHqSHRD); 'qrcode' appends a '_supplier=qrcode' query
    # parameter; 'none' and 'jupyter' do nothing.
    _actions = {
        "none": lambda url : None,
        _DEFAULT_SUPPLIER_LABEL: XDHqSHRD.open,
        "qrcode": lambda url: XDHqSHRD.open( '"' + url + '&_supplier=qrcode"'),
        "jupyter": lambda url : None
    }
def _supply(url):
    """Hand the session *url* to the user.

    Precedence: the 'ATK' environment variable, then the supplier set with
    set_supplier(), then the default ('auto').  Assumes getEnv returns a
    string ('' when unset) -- see XDHqSHRD.getEnv.
    """
    supplier = getEnv("ATK").strip().lower() or _Supplier.current or _DEFAULT_SUPPLIER_LABEL
    # A supplier may itself return another supplier (label or callable);
    # keep applying until one returns a falsy value.
    while True:
        supplier = _Supplier._actions[supplier](url) if isinstance(supplier, str) else supplier(url)
        if not supplier:
            break;
def set_supplier(supplier = None):
    """Choose how the session URL is presented: a label from
    _Supplier._actions or a callable; None restores the default behavior."""
    _Supplier.current = supplier
# Protocol identifiers exchanged with the FaaS proxy during the handshakes
# (see _handshakeFaaS / _handshakeMain).
_FAAS_PROTOCOL_LABEL = "4c837d30-2eb5-41af-9b3d-6c8bf01d8dbf"
_FAAS_PROTOCOL_VERSION = "0"
_MAIN_PROTOCOL_LABEL = "22bb5d73-924f-473f-a68a-14f41d8bfa83"
_MAIN_PROTOCOL_VERSION = "0"
_SCRIPTS_VERSION = "0"
# Special (negative) command/session ids received from the proxy:
_FORBIDDEN_ID = -1  # should never be received (see _serve)
_CREATION_ID = -2   # reports a new session
_CLOSING_ID = -3    # NOTE(review): presumably reports a closing session; not used in this excerpt
_writeLock = threading.Lock()  # serializes all writes to the proxy socket
_readLock = threading.Lock() # Global read lock; starts acquired, released by _instanceDataRead().
_readLock.acquire()
def _waitForInstance():
    # Blocks until _instanceDataRead() releases the global read lock.
    _readLock.acquire()
def _instanceDataRead():
    # Signals that the data addressed to an instance has been read
    # (unblocks _waitForInstance()).
    _readLock.release()
_url = ""  # URL of the application session; set in _ignition().
class _Instance:
    """Per-session state: its id, its handling thread, and a lock used to
    signal that data addressed to this session is available."""
    def __init__(self,thread_retriever,id):
        # https://github.com/epeios-q37/atlas-python/pull/7 (Condition -> Lock)
        self._readLock = threading.Lock() #Per instance read lock.
        self._readLock.acquire()  # starts held; released by dataAvailable()
        self.quit = False  # when True, the thread exits on the next wake-up
        self.id = id
        # NOTE(review): presumably the thread handling this session -- confirm with caller.
        self.thread = thread_retriever(self)
        self.language = None
    def getId(self):
        """Return the session id."""
        return self.id
    def waitForData(self):
        # Blocks until dataAvailable() is called; exits the handling thread
        # if the session was flagged to quit in the meantime.
        self._readLock.acquire()
        if self.quit:
            _instanceDataRead()
            _exitThread()
    def dataAvailable(self):
        # Wakes up the thread blocked in waitForData().
        self._readLock.release()
def isTokenEmpty():
    """Tell whether the FaaS token is absent or a demo ('&'-prefixed) one."""
    return (not _token) or _token.startswith("&")
def writeUInt(value):
    """Send an unsigned integer to the FaaS proxy socket.

    The 'lambda: _bye' callback lets the low-level helper observe quit requests.
    """
    global _socket
    _writeUInt( _socket, value, lambda: _bye )
def writeSInt(value):
    """Zigzag-encode *value* and send it as an unsigned integer
    (negatives map to odd codes, non-negatives to even codes)."""
    if value < 0:
        writeUInt(((-value - 1) << 1) | 1)
    else:
        writeUInt(value << 1)
def writeString(string):
    """Send a string to the FaaS proxy socket."""
    global _socket
    _writeString(_socket, string, lambda: _bye)
def writeStrings(strings):
    """Send a length-prefixed sequence of strings to the proxy."""
    writeUInt(len(strings))
    for string in strings:
        writeString(string)
def readUInt():
    """Read an unsigned integer from the FaaS proxy socket."""
    global _socket
    return _readUInt( _socket, lambda: _bye)
def readSInt():
    """Read a zigzag-encoded signed integer from the proxy
    (inverse of writeSInt)."""
    raw = readUInt()
    if raw & 1:
        return -((raw >> 1) + 1)
    return raw >> 1
def getString():
    """Read a string from the FaaS proxy socket."""
    global _socket
    return _getString(_socket, lambda: _bye)
def getStrings():
    """Read a length-prefixed sequence of strings from the proxy
    and return it as a list."""
    return [getString() for _ in range(readUInt())]
def _dismiss(id):
    """Send a '#Dismiss_1' command for the session with the given id."""
    with _writeLock:
        writeSInt(id)
        writeString("#Dismiss_1")
def _report(message):
    """Send an '#Inform_1' command carrying *message*; id -1 presumably
    marks it as session-independent (see _FORBIDDEN_ID)."""
    with _writeLock:
        writeSInt(-1)
        writeString("#Inform_1")
        writeString(message)
def _init():
 # Resolves the proxy and web addresses (each overridable through the
 # ATK_* environment variables) and opens the TCP connection to the proxy.
 global _token, _socket, _wAddr, _wPort, _cgi
 pAddr = "faas.q37.info"
 pPort = 53700
 _token = ""
 _wAddr = ""
 _wPort = ""
 _cgi = "xdh"
 pAddr = getEnv("ATK_PADDR", pAddr)
 pPort = int(getEnv("ATK_PPORT", str(pPort)))
 _wAddr = getEnv("ATK_WADDR", _wAddr)
 _wPort = getEnv("ATK_WPORT", _wPort)
 if _wAddr == "":
  _wAddr = pAddr
 if _wPort != "":
  _wPort = ":" + _wPort
 if isTokenEmpty():
  _token = getEnv("ATK_TOKEN")
  if _token:
   _token = "&" + _token
 _socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
 print("Connection to '" + str(pAddr) + ":" + str(pPort) + "'...")
 try:
  _socket.connect((pAddr,pPort))
 except: # NOTE(review): bare except also hides SystemExit/KeyboardInterrupt.
  sys.exit("Unable to connect to '" + str(pAddr) + ":" + str(pPort) + "'!")
 else:
  print("Connected to '" + str(pAddr) + ":" + str(pPort) + "'.")
 _socket.settimeout(1) # In order to quit an application, in Jupyter notebooks.
def _handshakeFaaS():
 # Announces the FaaS protocol/version and reports server error or notification.
 with _writeLock:
  writeString(_FAAS_PROTOCOL_LABEL)
  writeString(_FAAS_PROTOCOL_VERSION)
  writeString("PYH " + _VERSION)
  # NOTE(review): unlike '_handshakeMain', the replies below are read while
  # still holding '_writeLock' — confirm this asymmetry is intended.
  error = getString()
  if error:
   sys.exit(error)
  notification = getString()
  if notification:
   print(notification)
def _handshakeMain():
 # Announces the main protocol/version and the scripts version.
 with _writeLock:
  writeString(_MAIN_PROTOCOL_LABEL)
  writeString(_MAIN_PROTOCOL_VERSION)
  writeString(_SCRIPTS_VERSION)
 error = getString()
 if error:
  sys.exit(error)
 notification = getString();
 if notification:
  print(notification)
def _handshakes():
 # Performs both handshakes, FaaS first, then main.
 _handshakeFaaS()
 _handshakeMain()
def _ignition():
 # Sends the token and head content, retrieves the session token and the
 # application URL, prints the URL and hands it to the configured supplier.
 global _token, _url
 with _writeLock:
  writeString( _token)
  writeString(_headContent)
  writeString(_wAddr)
  writeString("") # Currently not used; for future use.
 _token = getString()
 if isTokenEmpty():
  # An empty/invalid token means the proxy refused us; the next string
  # carries the error message.
  sys.exit(getString())
 if ( _wPort != ":0" ):
  _url = getString()
  print(_url)
  print("".rjust(len(_url),'^'))
  print("Open above URL in a web browser (click, right click or copy/paste). Enjoy!\n")
 _supply(_url)
def _serve(callback,userCallback,callbacks ):
 # Main dispatch loop: reads session ids from the proxy and routes incoming
 # data to the matching '_Instance', creating/destroying sessions on demand.
 while True:
  id = readSInt()
  if id == _FORBIDDEN_ID: # Should never happen.
   sys.exit("Received unexpected undefined command id!")
  if id == _CREATION_ID: # Value reporting a new session.
   id = readSInt() # The id of the new session.
   if id in _instances:
    _report("Instance of id '" + str(id) + "' exists but should not !")
   _instances[id] = _Instance(lambda instance : callback(userCallback, callbacks, instance), id)
  elif id == _CLOSING_ID: # Value instructing that a session is closed.
   id = readSInt();
   if not id in _instances:
    _report("Instance of id '" + str(id) + "' not available for destruction!")
   else:
    instance = _instances.pop(id)
    instance.quit = True
    instance.dataAvailable()
    _waitForInstance()
    instance = None # Without this, instance will only be destroyed
                          # when 'instance' is set to a new instance.
  elif not id in _instances:
   _report("Unknown instance of id '" + str(id) + "'!")
   _dismiss(id)
  else:
   instance = _instances[id]
   if instance.language is None:
    # First message of a session carries the UI language.
    instance.language = getString()
   else:
    # Hand the pending data to the worker and wait until it consumed it.
    instance.dataAvailable()
    _waitForInstance()
def launch(callback, userCallback, callbacks, headContent):
 # Entry point: connects to the proxy, performs the handshakes, obtains the
 # application URL and then serves sessions forever.
 global _headContent, _instances
 if headContent is None:
  # Keep a previously set '_headContent' (e.g. on relaunch), default to "".
  if not "_headContent" in globals():
   _headContent = ""
 else:
  _headContent = headContent
 _instances = {}
 _init()
 _handshakes()
 _ignition()
 _serve(callback,userCallback,callbacks)
def get_app_url(id=""):
 """Return the application URL, with an '_id' query parameter appended when 'id' is given."""
 if id:
  return _url + "&_id=" + str(id)
 return _url
def broadcastAction(action, id = ""):
 # Sends 'action' (with optional element 'id') to all running sessions.
 # NOTE(review): uses raw -3, which matches '_CLOSING_ID'; confirm the
 # protocol meaning of this id in a write context.
 with _writeLock:
  writeSInt(-3)
  writeString(action)
  writeString(id)
class DOM_FaaS:
 # DOM handle given to user callbacks; all traffic for the session goes
 # through the shared module socket, serialized by '_writeLock'.
 _firstLaunch = True # Class-level default; shadowed per instance on first 'getAction'.
 def __init__(self, instance):
  self.instance = instance
 def _waitForData(self):
  self.instance.waitForData()
 def _standBy(self):
  # Tells the proxy that the previous action has been fully handled.
  with _writeLock:
   writeSInt(self.instance.getId())
   writeString("#StandBy_1")
 def getAction(self):
  # Returns the next [action, element id] pair coming from the UI.
  if self._firstLaunch:
   self._firstLaunch = False
  else:
   self._standBy()
  self._waitForData()
  [id,action] = [getString(),getString()]
  _instanceDataRead()
  return [action,id]
 def call(self, command, type, *args):
  # Sends 'command' with string / string-list arguments and returns the
  # reply converted according to 'type' (one of the XDHqSHRD.RT_* values).
  if self.instance.quit:
   """
   Below function unlocks the main thread,
   and exits the thread corresponding
   to the current instance.
   """
   self._waitForData()
  with _writeLock:
   writeSInt(self.instance.getId())
   writeString(command)
   writeUInt(type)
   for arg in args:
    if isinstance(arg,str):
     writeUInt(XDHqSHRD.RT_STRING)
     writeString(arg)
    else:
     writeUInt(XDHqSHRD.RT_STRINGS)
     writeStrings(arg)
   writeUInt(XDHqSHRD.RT_VOID) # To report end of argument list.
  if type == XDHqSHRD.RT_STRING:
   self._waitForData()
   string = getString()
   _instanceDataRead()
   return string
  elif type == XDHqSHRD.RT_STRINGS:
   self._waitForData()
   strings = getStrings()
   _instanceDataRead()
   return strings
  elif type != XDHqSHRD.RT_VOID:
   sys.exit("Unknown return type !!!")
def setBye(value):
 # Sets the module-level '_bye' handed (via lambdas) to the low-level I/O
 # helpers; presumably invoked when the connection is torn down — confirm.
 global _bye
 _bye = value
| epeios-q37/epeios | tools/xdhq/wrappers/PYH/XDHqFaaS.py | Python | agpl-3.0 | 9,753 |
import pytest
from openfisca_core.entities import Entity, GroupEntity
from .variables import TestVariable
class TestEntity(Entity):
    # Entity stub for tests: hands out fresh TestVariable objects and
    # accepts any variable name as defined.
    def get_variable(self, variable_name):
        # Build a TestVariable on demand instead of looking one up.
        result = TestVariable(self)
        result.name = variable_name
        return result
    def check_variable_defined_for_entity(self, variable_name):
        # Every variable counts as defined for this stub.
        return True
class TestGroupEntity(GroupEntity):
    # Group-entity stub mirroring TestEntity: fresh TestVariable objects,
    # any variable name accepted as defined.
    def get_variable(self, variable_name):
        result = TestVariable(self)
        result.name = variable_name
        return result
    def check_variable_defined_for_entity(self, variable_name):
        return True
@pytest.fixture
def persons():
    # Minimal person entity; the two empty strings are presumably the
    # label and doc arguments — verify against the Entity constructor.
    return TestEntity("person", "persons", "", "")
@pytest.fixture
def households():
    # Household group entity with up to two parents and any number of
    # children (no 'max' on the child role).
    roles = [{
        'key': 'parent',
        'plural': 'parents',
        'max': 2
    }, {
        'key': 'child',
        'plural': 'children'
    }]
    return TestGroupEntity("household", "households", "", "", roles)
| openfisca/openfisca-core | tests/fixtures/entities.py | Python | agpl-3.0 | 971 |
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds 'Section.has_stages' and restricts the 'Period.section' choices
    # to sections where that flag is set.
    dependencies = [
        ('stages', '0024_mark_fields_dup'),
    ]
    operations = [
        migrations.AddField(
            model_name='section',
            name='has_stages',
            field=models.BooleanField(default=False, verbose_name='Planifie la PP sur ce site'),
        ),
        migrations.AlterField(
            model_name='period',
            name='section',
            field=models.ForeignKey(limit_choices_to={'has_stages': True}, on_delete=models.deletion.PROTECT, to='stages.Section', verbose_name='Filière'),
        ),
    ]
| epcoullery/epcstages | stages/migrations/0025_section_has_stages.py | Python | agpl-3.0 | 642 |
# Copyright (C) 2013, Carlo de Franchis <carlo.de-franchis@cmla.ens-cachan.fr>
# Copyright (C) 2013, Gabriele Facciolo <facciolo@cmla.ens-cachan.fr>
# Copyright (C) 2013, Enric Meinhardt <enric.meinhardt@cmla.ens-cachan.fr>
# Copyright (C) 2013, Julien Michel <julien.michel@cnes.fr>
# This module contains a dictionary, cfg, containing all the parameters of the
# s2p pipeline. This dictionary is updated at runtime with parameters defined
# by the user in the config.json file. All the optional parameters (that the
# user is not forced to define in config.json) must be defined here, otherwise
# they won't have a default value.
cfg = {}
# path to output directory
cfg['out_dir'] = "s2p_output"
# path to directory where (many) temporary files will be stored
cfg['temporary_dir'] = "s2p_tmp"
# temporary files are erased when s2p terminates. Switch to False to keep them
cfg['clean_tmp'] = True
# remove all generated files except from ply point clouds and tif raster dsm
cfg['clean_intermediate'] = False
# switch to True if you want to process the whole image
cfg['full_img'] = False
# s2p processes the images tile by tile. The tiles are squares cropped from the
# reference image. The width and height of the tiles are given by this param, in pixels.
cfg['tile_size'] = 800
# margins used to increase the footprint of the rectified tiles, to
# account for poor disparity estimation close to the borders
cfg['horizontal_margin'] = 50 # for regularity and occlusions
cfg['vertical_margin'] = 10 # for regularity
# max number of processes launched in parallel. None means the number of available cores
cfg['max_processes'] = None
# max number of processes launched in parallel for stereo_matching
# Uses the value of cfg['max_processes'] if None
cfg['max_processes_stereo_matching'] = None
# max number of OMP threads used by programs compiled with openMP
cfg['omp_num_threads'] = 1
# timeout in seconds, after which a function that runs on a single tile is not
# waited for
cfg['timeout'] = 600
# debug mode (more verbose logs and intermediate results saved)
cfg['debug'] = False
# resolution of the output digital surface model, in meters per pixel
cfg['dsm_resolution'] = 4
# radius to compute altitudes (and to interpolate the small holes)
cfg['dsm_radius'] = 0
# dsm_sigma controls the spread of the blob from each point for the dsm computation
# (dsm_resolution by default)
cfg['dsm_sigma'] = None
# relative sift match threshold (else sift match threshold is absolute)
cfg['relative_sift_match_thresh'] = True
# if cfg['relative_sift_match_thresh'] is True :
# sift threshold on the first over second best match ratio
# else (absolute) a reasonable value is between 200 and 300 (128-vectors SIFT descriptors)
cfg['sift_match_thresh'] = 0.6
# disparity range expansion factor
cfg['disp_range_extra_margin'] = 0.2
# Maximum disparity range allowed in block matching
cfg['max_disp_range'] = None
# estimate rectification homographies either blindly using the rpc data or from
# the images actual content thanks to sift matches
cfg['rectification_method'] = 'rpc' # either 'rpc' or 'sift'
# register the rectified images with a shear estimated from the rpc data
cfg['register_with_shear'] = True
# number of ground control points per axis in matches from rpc generation
cfg['n_gcp_per_axis'] = 5
# max distance allowed for a point to the epipolar line of its match
cfg['epipolar_thresh'] = 0.5
# maximal pointing error, in pixels
cfg['max_pointing_error'] = 10
# set these params if you want to impose the disparity range manually (cfg['disp_range_method'] == 'fixed_pixel_range')
cfg['disp_min'] = None
cfg['disp_max'] = None
# set these params if you want to impose the altitude range manually (cfg['disp_range_method'] == 'fixed_altitude_range')
cfg['alt_min'] = None
cfg['alt_max'] = None
# width of a stripe of pixels to be masked along the reference input image borders
cfg['border_margin'] = 10
# radius for erosion of valid disparity areas. Ignored if less than 2
cfg['msk_erosion'] = 2
cfg['fusion_operator'] = 'average_if_close'
# threshold (in meters) used for the fusion of two dems in triplet processing
cfg['fusion_thresh'] = 3
cfg['rpc_alt_range_scale_factor'] = 1
# method to compute the disparity range: "sift", "exogenous", "wider_sift_exogenous", "fixed_pixel_range", "fixed_altitude_range"
cfg['disp_range_method'] = "wider_sift_exogenous"
cfg['disp_range_exogenous_low_margin'] = -10
cfg['disp_range_exogenous_high_margin'] = +100
# whether or not to use SRTM DEM (downloaded from internet) to estimate:
# - the average ground altitude (to project the input geographic AOI to the
# correct place in the input images)
# - a reasonable altitude range (to get a better rectification when
# "rectification_method" is set to "rpc")
cfg['use_srtm'] = False
# exogenous dem. If set, it supersedes SRTM.
cfg['exogenous_dem'] = None
cfg['exogenous_dem_geoid_mode'] = True
### stereo matching parameters
# stereo matching algorithm: 'tvl1', 'msmw', 'hirschmuller08',
# hirschmuller08_laplacian', 'sgbm', 'mgm', 'mgm_multi'
cfg['matching_algorithm'] = 'mgm'
# size of the Census NCC square windows used in mgm
cfg['census_ncc_win'] = 5
# MGM parameter: speckle filter minimum area (REMOVESMALLCC flag)
cfg['stereo_speckle_filter'] = 25
# MGM parameter: regularity (multiplies P1 and P2)
cfg['stereo_regularity_multiplier'] = 1.0
# MGM parameters:
# number of directions explored for regularization
cfg['mgm_nb_directions'] = 8
# timeout in seconds, after which a running mgm process will be killed
cfg['mgm_timeout'] = 600
# distance threshold (in pixels) for the left-right consistency test
cfg['mgm_leftright_threshold'] = 1.0
# controls the mgm left-right consistency check. 0: disabled
# 1 (default): enabled at all scales
# 2: enables only at the last scale (faster)
cfg['mgm_leftright_control'] = 1
# controls the mgm mindiff filter check. -1: disabled (default), produce denser maps
# 1: enabled, produce conservative results
cfg['mgm_mindiff_control'] = -1
# remove isolated 3d points in height maps
cfg['3d_filtering_r'] = None # radius in meters
cfg['3d_filtering_n'] = None # number of points
# clean height maps outliers
cfg['cargarse_basura'] = True
# Output coordinate reference system
# All formats accepted by `pyproj.CRS()` are allowed, for example:
# 32740 (int interpreted as an EPSG code), or
# "epsg:32740+5773" (authority string), or
# "+proj=utm +zone=40 +south +datum=WGS84 +units=m +vunits=m +no_defs +type=crs" (proj4 string)
# If None, the local UTM zone will be used
cfg['out_crs'] = None
| carlodef/s2p | s2p/config.py | Python | agpl-3.0 | 6,724 |
import errno
import sys
import time
import uuid
import paramiko
import logging
import six
from io import StringIO
from paramiko.ssh_exception import (AuthenticationException,
SSHException,
NoValidConnectionsError)
from django.conf import settings as django_settings
from .models import Stack
logger = logging.getLogger(__name__)
ACTIONS = (
ADOPT,
CHECK,
CREATE,
DELETE,
LAUNCH,
RESTORE,
RESUME,
ROLLBACK,
SNAPSHOT,
SUSPEND,
UPDATE
) = (
'ADOPT',
'CHECK',
'CREATE',
'DELETE',
'LAUNCH',
'RESTORE',
'RESUME',
'ROLLBACK',
'SNAPSHOT',
'SUSPEND',
'UPDATE'
)
STATUSES = (
COMPLETE,
ERROR,
FAILED,
IN_PROGRESS,
ISSUED,
PENDING,
RETRY,
TIMEOUT
) = (
'COMPLETE',
'ERROR',
'FAILED',
'IN_PROGRESS',
'ISSUED',
'PENDING',
'RETRY',
'TIMEOUT'
)
# Dynamically define all possible states as attributes in this module. To
# prevent flake8 from complaining about undefined attributes, we add noqa: F821
# where necessary, below.
ALL_STATES = tuple(['%s_%s' % (a, s) for a in ACTIONS for s in STATUSES])
module = sys.modules[__name__]
for state in ALL_STATES:
setattr(module, state, state)
VALID_STATES = (
ADOPT_COMPLETE, # noqa: F821
ADOPT_FAILED, # noqa: F821
ADOPT_IN_PROGRESS, # noqa: F821
CHECK_COMPLETE, # noqa: F821
CHECK_FAILED, # noqa: F821
CHECK_IN_PROGRESS, # noqa: F821
CREATE_COMPLETE, # noqa: F821
CREATE_FAILED, # noqa: F821
CREATE_IN_PROGRESS, # noqa: F821
DELETE_COMPLETE, # noqa: F821
DELETE_FAILED, # noqa: F821
DELETE_IN_PROGRESS, # noqa: F821
DELETE_PENDING, # noqa: F821
LAUNCH_ERROR, # noqa: F821
LAUNCH_PENDING, # noqa: F821
LAUNCH_TIMEOUT, # noqa: F821
RESTORE_COMPLETE, # noqa: F821
RESTORE_FAILED, # noqa: F821
RESTORE_IN_PROGRESS, # noqa: F821
RESUME_COMPLETE, # noqa: F821
RESUME_FAILED, # noqa: F821
RESUME_IN_PROGRESS, # noqa: F821
ROLLBACK_COMPLETE, # noqa: F821
ROLLBACK_FAILED, # noqa: F821
ROLLBACK_IN_PROGRESS, # noqa: F821
SNAPSHOT_COMPLETE, # noqa: F821
SNAPSHOT_FAILED, # noqa: F821
SNAPSHOT_IN_PROGRESS, # noqa: F821
SUSPEND_COMPLETE, # noqa: F821
SUSPEND_FAILED, # noqa: F821
SUSPEND_IN_PROGRESS, # noqa: F821
SUSPEND_ISSUED, # noqa: F821
SUSPEND_PENDING, # noqa: F821
SUSPEND_RETRY, # noqa: F821
UPDATE_COMPLETE, # noqa: F821
UPDATE_FAILED, # noqa: F821
UPDATE_IN_PROGRESS # noqa: F821
)
UP_STATES = (
CREATE_COMPLETE, # noqa: F821
RESUME_COMPLETE, # noqa: F821
UPDATE_COMPLETE, # noqa: F821
ROLLBACK_COMPLETE, # noqa: F821
SNAPSHOT_COMPLETE # noqa: F821
)
DOWN_STATES = (
SUSPEND_COMPLETE, # noqa: F821
DELETE_COMPLETE # noqa: F821
)
PENDING_STATES = (
LAUNCH_PENDING, # noqa: F821
SUSPEND_PENDING, # noqa: F821
DELETE_PENDING # noqa: F821
)
OCCUPANCY_STATES = (
CREATE_COMPLETE, # noqa: F821
RESUME_COMPLETE, # noqa: F821
UPDATE_COMPLETE, # noqa: F821
ROLLBACK_COMPLETE, # noqa: F821
SNAPSHOT_COMPLETE, # noqa: F821
LAUNCH_PENDING, # noqa: F821
SUSPEND_PENDING, # noqa: F821
SUSPEND_ISSUED, # noqa: F821
SUSPEND_RETRY, # noqa: F821
DELETE_PENDING # noqa: F821
)
SETTINGS_KEY = 'hastexo'
DEFAULT_SETTINGS = {
"terminal_url": "/hastexo-xblock/",
"launch_timeout": 900,
"remote_exec_timeout": 300,
"suspend_timeout": 120,
"suspend_interval": 60,
"suspend_concurrency": 4,
"suspend_task_timeout": 900,
"check_timeout": 120,
"delete_age": 14,
"delete_attempts": 3,
"delete_interval": 86400,
"delete_task_timeout": 900,
"sleep_timeout": 10,
"js_timeouts": {
"status": 15000,
"keepalive": 30000,
"idle": 3600000,
"check": 5000
},
"providers": {}
}
class RemoteExecException(Exception):
    """Remote script execution failed (nonzero exit status); carries stderr."""
    pass
class RemoteExecTimeout(RemoteExecException):
    """Remote script execution did not finish within the configured timeout."""
    pass
if sys.version_info < (3,):
    def b(x):
        # Python 2: str is already a byte string.
        return x
else:
    import codecs
    def b(x):
        # Python 3: encode text to Latin-1 bytes.
        return codecs.latin_1_encode(x)[0]
def get_xblock_settings():
    """Return the hastexo settings dict from Django's XBLOCK_SETTINGS,
    falling back to DEFAULT_SETTINGS when the setting (or our key) is absent."""
    _missing = object()
    container = getattr(django_settings, 'XBLOCK_SETTINGS', _missing)
    if container is _missing:
        return DEFAULT_SETTINGS
    return container.get(SETTINGS_KEY, DEFAULT_SETTINGS)
def update_stack(name, course_id, student_id, data):
    """Apply `data` to the matching Stack row and persist only those fields.

    NOTE: `select_for_update()` requires an enclosing transaction; callers
    are expected to provide one.
    """
    stack = Stack.objects.select_for_update().get(
        student_id=student_id,
        course_id=course_id,
        name=name
    )
    update_stack_fields(stack, data)
    # Save only the touched fields to avoid clobbering concurrent updates.
    stack.save(update_fields=list(data.keys()))
def update_stack_fields(stack, data):
    """Copy each entry of `data` onto the matching attribute of `stack`;
    keys that are not existing attributes are silently ignored."""
    for field in data:
        if hasattr(stack, field):
            setattr(stack, field, data[field])
def get_stack(name, course_id, student_id, prop=None):
    """Fetch the Stack row identified by (student, course, name).

    Returns the attribute named `prop` of that row when given, the full
    row object otherwise."""
    stack = Stack.objects.get(
        student_id=student_id,
        course_id=course_id,
        name=name
    )
    return getattr(stack, prop) if prop else stack
def read_from_contentstore(course_key, path):
    """
    Loads a file directly from the course's content store.

    Returns the raw asset data, or None when not running inside
    edx-platform (the xmodule imports fail outside it).
    """
    contents = None
    try:
        from xmodule.contentstore.content import StaticContent
        from xmodule.contentstore.django import contentstore
        from opaque_keys.edx.locator import CourseLocator
    except ImportError:
        # We're not running under edx-platform, so ignore.
        pass
    else:
        # Accept the course key either as a string or as a CourseLocator.
        if isinstance(course_key, six.text_type):
            course_key = CourseLocator.from_string(course_key)
        loc = StaticContent.compute_location(course_key, path)
        asset = contentstore().find(loc)
        contents = asset.data
    return contents
def ssh_to(user, ip, key):
    """Open an SSH connection to `ip` as `user` using the given private key.

    Retries indefinitely on transient failures, sleeping "sleep_timeout"
    seconds between attempts, and returns the connected paramiko.SSHClient.
    """
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    pkey = paramiko.RSAKey.from_private_key(StringIO(key))
    settings = get_xblock_settings()
    sleep_timeout = settings.get("sleep_timeout", 10)
    connected = False
    while not connected:
        try:
            ssh.connect(ip, username=user, pkey=pkey)
        except (EOFError,
                AuthenticationException,
                SSHException,
                NoValidConnectionsError) as e:
            # Be more persistent than Paramiko normally
            # is, and keep retrying.
            logger.debug("Got %s during SSH connection to ip (%s), retrying." %
                         (e.__class__.__name__, ip))
        except EnvironmentError as enve:
            if enve.errno in (errno.EAGAIN,
                              errno.ENETDOWN,
                              errno.ENETUNREACH,
                              errno.ENETRESET,
                              errno.ECONNABORTED,
                              errno.ECONNRESET,
                              errno.ENOTCONN,
                              errno.EHOSTDOWN):
                # Be more persistent than Paramiko normally
                # is, and keep retrying.
                logger.debug("Got errno %s during SSH connection "
                             "to ip (%s), retrying." % (enve.errno, ip))
            elif enve.errno in (errno.ECONNREFUSED,
                                errno.EHOSTUNREACH):
                # Paramiko should catch and wrap
                # these. They should never bubble
                # up. Still, continue being more
                # persistent, and retry.
                logger.warning("Got errno %s during SSH connection "
                               "to ip (%s). Paramiko bug? Retrying." %
                               (enve.errno, ip))
            else:
                # Anything else is an unexpected error.
                raise
        else:
            connected = True
        if not connected:
            time.sleep(sleep_timeout)
    return ssh
def remote_exec(ssh, script, params=None, reuse_sftp=None):
    """Run `script` on the remote host behind the `ssh` connection.

    The script is uploaded over SFTP to a unique file under /tmp, made
    executable, executed (with `params` appended to the command line when
    given), and removed afterwards.

    An existing SFTP session may be passed as `reuse_sftp`; otherwise one
    is opened here and — fix — closed on all paths (it previously leaked
    when an exception was raised before the final close).

    Returns the remote exit status (0 on the success path).

    Raises:
        RemoteExecTimeout: the script did not finish within the
            configured "remote_exec_timeout".
        RemoteExecException: the script exited with a nonzero status;
            the exception message carries the remote stderr output.
    """
    if reuse_sftp:
        sftp = reuse_sftp
    else:
        sftp = ssh.open_sftp()
    try:
        # Generate a unique, hidden temporary path for the script.
        script_file = '/tmp/.%s' % uuid.uuid4()
        # Open the file remotely and write the script out to it.
        f = sftp.open(script_file, 'w')
        try:
            f.write(script)
        finally:
            f.close()
        # Make it executable.
        sftp.chmod(script_file, 0o775)
        # Add command line arguments, if any.
        if params:
            command = "%s %s" % (script_file, params)
        else:
            command = script_file
        # Run it.
        _, stdout, stderr = ssh.exec_command(command)
        # Wait for it to complete, polling the exit status.
        settings = get_xblock_settings()
        timeout = settings.get("remote_exec_timeout", 300)
        sleep_timeout = settings.get("sleep_timeout", 10)
        try:
            start = time.time()
            while not stdout.channel.exit_status_ready():
                if timeout and time.time() >= start + timeout:
                    error_msg = ("Remote execution timeout after [%d] "
                                 "seconds." % timeout)
                    raise RemoteExecTimeout(error_msg)
                time.sleep(sleep_timeout)
        finally:
            # Remove the uploaded script whether we timed out or not.
            sftp.remove(script_file)
        # Check for errors.
        retval = stdout.channel.recv_exit_status()
        if retval != 0:
            raise RemoteExecException(stderr.read())
    finally:
        # Close the SFTP session only if we opened it ourselves.
        if not reuse_sftp:
            sftp.close()
    return retval
| arbrandes/hastexo-xblock | hastexo/common.py | Python | agpl-3.0 | 9,550 |
import socket
import threading
import sys
IP = "0.0.0.0"
try:
TCP_PORT = int(sys.argv[1])
if TCP_PORT < 1 or TCP_PORT > 65535:
raise ValueError()
except IndexError:
print("Usage %s <tcp port>" % sys.argv[0])
sys.exit(1)
except ValueError:
print("Please enter an integer <= 65535")
sys.exit(2)
my_addr_info = (IP, TCP_PORT)
all_connections = []
all_connections_lock = threading.Lock()
def add_conn(conn):
    """Register a client connection under the shared lock.

    Uses the lock as a context manager so it is released even if the
    list operation raises (the explicit acquire/release pair leaked it).
    """
    with all_connections_lock:
        all_connections.append(conn)
def remove_conn(conn):
    """Unregister a client connection under the shared lock.

    Context-manager form keeps the lock exception-safe; note that
    `list.remove` raises ValueError if `conn` was already removed.
    """
    with all_connections_lock:
        all_connections.remove(conn)
def do_echo_all(conn, addr_info):
    """
    Receive a message and echoes to all connected clients.
    """
    # my_conn_info holds a list with 3 elements:
    # conn -> the connection object
    # addr_info -> (ip, port) tuple
    # user -> the username if has been specified
    my_conn_info = [conn, addr_info, ""]
    add_conn(my_conn_info)
    while True:
        data = conn.recv(8192)
        if data:
            sender = my_conn_info[2] or my_conn_info[1]
            # NOTE(review): 'data' (and a stored username) are bytes, so the
            # broadcast message embeds their repr (b'...'); confirm intended.
            msg = "%s ha inviato> %s" % (sender, data)
            print(msg)
            if data.startswith(b"USER "):
                # Remember the declared username for subsequent messages.
                my_conn_info[2] = data[len("USER "):]
            # NOTE(review): 'all_connections' is iterated without holding the
            # lock; concurrent add/remove from other threads may race here.
            for every_conn_info in all_connections:
                every_conn_info[0].sendall(bytes(msg, "utf-8"))
        else:
            # Empty recv => peer closed the connection.
            print("Nessun dato ricevuto, chiudo tutto")
            break
    remove_conn(my_conn_info)
    conn.close()
def main():
    """Accept TCP clients forever, serving each one in its own thread.

    On any error, best-effort-closes every registered connection and the
    listening socket.
    """
    sock = socket.socket()
    sock.bind(my_addr_info)
    sock.listen(50)
    try:
        while True:
            print("accetto connessioni...")
            conn, addr_info = sock.accept()
            print("connessione accettata")
            t = threading.Thread(target=do_echo_all, args=(conn, addr_info))
            t.start()
        # NOTE(review): the two lines below are unreachable — the loop above
        # never exits normally.
        print("Uscito dal while")
        t.join()
    except Exception as e:
        print("Eccezione: %s" % e)
        for every_conn_info in all_connections:
            try:
                every_conn_info[0].close()
            except Exception:
                # Best effort: the connection may already be closed.
                # (Was a bare 'except:', which also swallowed SystemExit
                # and KeyboardInterrupt.)
                pass
        sock.close()
| feroda/lessons-itis | 2016-5BI/src/socket/python3/echo-service-broadcast-to-all/echo-all-server-with-user.py | Python | agpl-3.0 | 2,239 |
# Copyright (C) 2013-2021 The Debsources developers
# <qa-debsources@lists.alioth.debian.org>.
# See the AUTHORS file at the top-level directory of this distribution and at
# https://salsa.debian.org/qa/debsources/blob/master/AUTHORS
#
# This file is part of Debsources. Debsources is free software: you can
# redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation, either version 3
# of the License, or (at your option) any later version. For more information
# see the COPYING file at the top-level directory of this distribution and at
# https://salsa.debian.org/qa/debsources/blob/master/COPYING
# placeholder
| Debian/debsources | lib/debsources/tests/__init__.py | Python | agpl-3.0 | 699 |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.do.core.reanalyse Re-analyse certain simulations.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import the relevant PTS classes and modules
from pts.core.basics.configuration import ConfigurationDefinition, parse_arguments
from pts.core.remote.host import all_host_ids
from pts.core.simulation.remote import get_simulation_for_host
from pts.core.launch.analyser import reanalyse_simulation, all_steps
from pts.core.basics.log import log
# -----------------------------------------------------------------
# Create the configuration definition
definition = ConfigurationDefinition()
definition.add_required("remote", "string", "remote host to mount", choices=all_host_ids())
definition.add_required("ids", "integer_list", "simulation IDs")
definition.add_positional_optional("steps", "string_list", "re-analyse only certain steps", choices=all_steps, default=all_steps)
definition.add_positional_optional("features", "string_list", "re-analyse only certain features (if a single step is defined)")
definition.add_optional("not_steps", "string_list", "don't analyse these steps", choices=all_steps)
definition.add_optional("not_features", "string_list", "don't analyse these features (if a single not_step is defined)")
# Read the command line arguments
config = parse_arguments("reanalyse", definition, description="Re-analyse a certain simulation")
# -----------------------------------------------------------------
# Loop over the simulations
for simulation_id in config.ids:
    # Load the simulation for the requested host.
    # (Fix: this previously used 'config.id', which does not exist — the
    # configured option is 'ids' and the loop variable is 'simulation_id'.)
    simulation = get_simulation_for_host(config.remote, simulation_id)
    # Check whether retrieved (specific to remote simulations)
    if not simulation.retrieved: raise ValueError("The simulation has not been retrieved yet")
    # Inform the user
    log.info("Re-analysing simulation '" + simulation.name + "' (" + config.remote + " " + str(simulation_id) + ") ...")
    # Reanalyse simulation
    reanalyse_simulation(simulation, config.steps, config.features, not_steps=config.not_steps, not_features=config.not_features)
# -----------------------------------------------------------------
| SKIRT/PTS | do/core/reanalyse.py | Python | agpl-3.0 | 2,562 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Homepage(Document):
 def validate(self):
  # Populate products from website items on first save and provide a
  # default description when none is set.
  if not self.products:
   self.setup_items()
  if not self.description:
   self.description = frappe._("This is an example website auto-generated from ERPNext")
 def setup_items(self):
  # Pick up to three items that are shown on the website.
  for d in frappe.get_all('Item', fields=['name', 'item_name', 'description', 'image'],
   filters={'show_in_website': 1}, limit=3):
   # Re-save the item, presumably so missing website routes get set
   # (original author's note said "set missing routes (?)") — TODO confirm.
   doc = frappe.get_doc('Item', d.name)
   doc.save()
   self.append('products', dict(item_code=d.name,
    item_name=d.item_name, description=d.description, image=d.image))
| anandpdoshi/erpnext | erpnext/portal/doctype/homepage/homepage.py | Python | agpl-3.0 | 813 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014, 2015, 2016, 2017 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from superdesk.tests import TestCase
from superdesk import get_resource_service
class ItemDeleteTestCase(TestCase):
    """Tests that item_version documents are deleted when the items document is deleted"""
    def setUp(self):
        # Five items, each with one matching version document.
        self.app.data.insert('items', [
            {'_id': 'item1', 'type': 'text'},
            {'_id': 'item2', 'type': 'text'},
            {'_id': 'item3', 'type': 'text'},
            {'_id': 'item4', 'type': 'text'},
            {'_id': 'item5', 'type': 'text'}
        ])
        self.app.data.insert('items_versions', [
            {'_id': 'ver1', 'type': 'text', '_id_document': 'item1'},
            {'_id': 'ver2', 'type': 'text', '_id_document': 'item2'},
            {'_id': 'ver3', 'type': 'text', '_id_document': 'item3'},
            {'_id': 'ver4', 'type': 'text', '_id_document': 'item4'},
            {'_id': 'ver5', 'type': 'text', '_id_document': 'item5'}
        ])
    def test_event_fired(self):
        # Deleting the items must cascade to their version documents.
        items_service = get_resource_service('items')
        items_versions_service = get_resource_service('items_versions')
        item_ids = ['item1', 'item2', 'item3', 'item4', 'item5']
        ver_ids = ['ver1', 'ver2', 'ver3', 'ver4', 'ver5']
        items_service.delete_action(lookup={'_id': {'$in': item_ids}})
        for item_id in item_ids:
            # Fix: the lookup keyword was misspelled '_ia', so it matched
            # nothing and the assertion below passed vacuously.
            item = items_service.find_one(req=None, _id=item_id)
            self.assertIsNone(item)
        for ver_id in ver_ids:
            item_version = items_versions_service.find_one(req=None, _id=ver_id)
            self.assertIsNone(item_version)
| ancafarcas/superdesk-core | content_api/tests/items_versions_service_test.py | Python | agpl-3.0 | 1,894 |
import ltree_models
import sqlalchemy
from sqlalchemy import (
create_engine,
)
from sqlalchemy.orm import (
aliased,
)
import testing.postgresql
from test_project import (
test_data
)
from test_project.models import (
DBSession,
ArticleAuthorAssociation,
ArticleByAssoc,
ArticleByObj,
Base,
Blog,
Person,
LtreeNode,
TreeNode,
)
import transaction
import unittest
def setUpModule():
    '''Create a test DB and import data.'''
    # Create a new database somewhere in /tmp, on a fixed port so the
    # engine URL is stable across the module's tests.
    global postgresql
    global engine
    postgresql = testing.postgresql.Postgresql(port=7654)
    engine = create_engine(postgresql.url())
    ltree_models.add_ltree_extension(engine)
    DBSession.configure(bind=engine)
def tearDownModule():
    '''Throw away test DB.'''
    global postgresql
    # Close the session before stopping the temporary server.
    DBSession.close()
    postgresql.stop()
class DBTestBase(unittest.TestCase):
    # Per-test lifecycle: fresh schema + fixture data inside a transaction
    # that is rolled back (and the schema dropped) after each test.
    def setUp(self):
        Base.metadata.create_all(engine)
        # Add some basic test data.
        test_data.add_to_db(engine)
        transaction.begin()
    def tearDown(self):
        transaction.abort()
        Base.metadata.drop_all(engine)
class IllustrateRelatedQueries(DBTestBase):
    """Shows how each relationship type translates into an explicit join
    query, checked against the corresponding ORM relationship attribute."""
    def test_fk_one_to_many(self):
        query = DBSession.query(Blog).select_from(Person).join(
            Person.blogs
        ).filter(
            Person.id == '1'
        )
        alice = DBSession.query(Person).get('1')
        self.assertEqual(query.all(), alice.blogs)
    def test_fk_many_to_one(self):
        query = DBSession.query(Person).select_from(Blog).join(
            Blog.owner
        ).filter(
            Blog.id == '1'
        )
        self.assertEqual(query.one(), DBSession.query(Person).get('1'))
    def test_fk_many_to_many_assoc_table(self):
        query = DBSession.query(ArticleByAssoc).select_from(Person).join(
            Person.articles_by_assoc
        ).filter(
            Person.id == '11'
        )
        person11 = DBSession.query(Person).get('11')
        self.assertEqual(query.all(), person11.articles_by_assoc)
        query = DBSession.query(ArticleByAssoc).select_from(Person).join(
            Person.articles_by_assoc
        ).filter(
            Person.id == '12'
        )
        person12 = DBSession.query(Person).get('12')
        self.assertEqual(query.all(), person12.articles_by_assoc)
    def test_fk_many_to_many_assoc_proxy(self):
        # Association-proxy relationships need two joins: through the
        # association object's local and remote attributes.
        rel = sqlalchemy.inspect(Person).all_orm_descriptors['articles_by_proxy']
        proxy = rel.for_class(Person)
        # print(proxy.local_attr)
        # print(proxy.remote_attr)
        query = DBSession.query(ArticleByObj).select_from(Person).join(
            # Person.article_associations
            proxy.local_attr
        ).join(
            # ArticleAuthorAssociation.article
            proxy.remote_attr
        ).filter(
            Person.id == '12'
        )
        person12 = DBSession.query(Person).get('12')
        self.assertEqual(
            [aa.article for aa in person12.article_associations],
            query.all()
        )
    def test_fk_self_one_to_many(self):
        # Self-referential relationships require an alias for one side.
        tn2 = aliased(TreeNode)
        query = DBSession.query(TreeNode).select_from(tn2).join(
            tn2.children
        ).filter(
            tn2.id == '1'
        )
        root = DBSession.query(TreeNode).get('1')
        self.assertEqual(query.all(), root.children)
    def test_fk_self_many_to_one(self):
        tn2 = aliased(TreeNode)
        query = DBSession.query(TreeNode).select_from(tn2).join(
            tn2.parent
        ).filter(
            tn2.id == '2'
        )
        child = DBSession.query(TreeNode).get('2')
        self.assertEqual(query.one(), child.parent)
    def test_join_condition_one_to_many(self):
        query = DBSession.query(Blog).select_from(Person).join(
            Person.blogs_from_titles
        ).filter(
            Person.id == '1'
        )
        alice = DBSession.query(Person).get('1')
        self.assertEqual(query.all(), alice.blogs_from_titles)
    def test_ltree_node_children(self):
        lt2 = aliased(LtreeNode)
        query = DBSession.query(LtreeNode).select_from(lt2).join(
            lt2.children
        ).filter(
            lt2.id == '1'
        )
        root = DBSession.query(LtreeNode).get('1')
        self.assertEqual(query.all(), root.children)
    def test_ltree_node_parent(self):
        lt2 = aliased(LtreeNode)
        query = DBSession.query(LtreeNode).select_from(lt2).join(
            lt2.parent
        ).filter(
            lt2.id == '2'
        )
        child = DBSession.query(LtreeNode).get('2')
        self.assertEqual(query.one(), child.parent)
    def test_ltree_node_ancestors(self):
        # NOTE(review): this test only prints the query result — the
        # assertion is commented out, so nothing is actually verified.
        lt2 = aliased(LtreeNode)
        query = DBSession.query(LtreeNode).select_from(lt2).join(
            lt2.ancestors
        ).filter(
            lt2.node_name == 'r.1.2'
        )
        node = DBSession.query(LtreeNode).filter(LtreeNode.node_name == 'r.1.2').one()
        # self.assertEqual(query.all(), root.children)
        print(query.all())
| colinhiggs/pyramid-jsonapi | test_project/test_project/query_tests.py | Python | agpl-3.0 | 5,074 |
# vim:ai:et:ff=unix:fileencoding=utf-8:sw=4:ts=4:
# conveyor/src/main/python/conveyor/platform/osx.py
#
# conveyor - Printing dispatch engine for 3D objects and their friends.
# Copyright © 2012 Matthew W. Samsonoff <matthew.samsonoff@makerbot.com>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, print_function, unicode_literals)
import os.path
# OS X default locations for conveyor's configuration file.
DEFAULT_CONFIG_FILE = '/Library/MakerBot/conveyor.conf'
# Daemon endpoint (unix-domain socket) and pid file.
DEFAULT_CONFIG_COMMON_ADDRESS = 'pipe:/var/tmp/conveyord.socket'
DEFAULT_CONFIG_COMMON_PID_FILE = '/var/tmp/conveyord.pid'
# avrdude executable, its configuration and the s3g printer profiles.
DEFAULT_CONFIG_MAKERBOT_DRIVER_AVRDUDE_EXE = '/Library/MakerBot/avrdude'
DEFAULT_CONFIG_MAKERBOT_DRIVER_AVRDUDE_CONF_FILE = '/Library/MakerBot/avrdude.conf'
DEFAULT_CONFIG_MAKERBOT_DRIVER_PROFILE_DIR = '/Library/MakerBot/s3g/profiles/'
# Miracle Grue slicer executable and profiles.
DEFAULT_CONFIG_MIRACLE_GRUE_EXE = '/Library/MakerBot/miracle_grue'
DEFAULT_CONFIG_MIRACLE_GRUE_PROFILE_DIR = '/Library/MakerBot/miraclegrue/'
# Skeinforge slicer entry point and profiles.
DEFAULT_CONFIG_SKEINFORGE_FILE = '/Library/MakerBot/skeinforge/skeinforge_application/skeinforge.py'
DEFAULT_CONFIG_SKEINFORGE_PROFILE_DIR = '/Library/MakerBot/skeinforge/'
# Server log file and mesh post-processing helper.
DEFAULT_CONFIG_SERVER_LOGGING_FILE = '/var/log/conveyor/conveyord.log'
DEFAULT_CONFIG_SERVER_UNIFIED_MESH_HACK_EXE = '/Library/MakerBot/unified_mesh_hack'
| makerbot/conveyor | src/main/python/conveyor/platform/osx.py | Python | agpl-3.0 | 1,906 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2012, Leonardo Leite, Saulo Trento, Diego Rabatone
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# TODO: Transformar este script em uma função
# Investigar se é possível escolher a cor das bolhas.
import numpy
import analise
import sys
#############################
## TUNABLE PARAMETERS ##
#############################
# Parties included in the analyses:
parts = [u'PMDB', u'PTB', u'PDT', u'PT', u'DEM', u'PCdoB', u'PSB', u'PSDB', u'PSC', u'PMN', u'PPS', u'PV', u'PTdoB', u'PP', u'PHS', u'PRB', u'PSOL', u'PR', u'PSD']
# First and last year analysed (inclusive).
ano_inicial = 2002
ano_final = 2011
# HTML/JS snippet written at the end, meant to be pasted into a page.
arquivo_de_saida = 'colar_num_html.txt'
#############################
## Main program: ##
#############################
anuais = [] # list of Analise objects (one per-year analysis each)
anos = range(ano_inicial,ano_final+1)
# Run one PCA per year (Jan 1st to Dec 31st):
for ano in anos:
    anuais.append(analise.Analise(str(ano)+'-01-01', str(ano)+'-12-31', [], parts))
dados = []
# Progress bar: one dot per year, 'v' marks the end.
print "Fazendo PCAs:"
print '-'*(len(anuais)-1)+'v'
for a in anuais:
    # partidos_2d returns the 2-D party coordinates; the file argument is discarded
    dados.append( a.partidos_2d('/dev/null') )
    sys.stdout.write('.')
    sys.stdout.flush()
# Funções auxiliares:
def quantidade_movimento(i,graus=0,espelho=0):
    """Return the "momentum" between instant i (year anos[i]) and instant i+1.

    Before comparing, instant i has its axes rotated by ``graus`` degrees
    (0-360) and, when ``espelho`` is truthy (1), its first axis mirrored
    (multiplied by -1).  The momentum is the sum over parties of the
    euclidean distance moved between the two instants, weighted by the
    party size at instant i+1.

    Reads the module-level ``dados``, ``anuais`` and ``parts``.
    """
    qm = 0
    antes = dados[i]
    depois = dados[i+1]
    if espelho:
        # mirror the first axis of the earlier year
        antes = numpy.dot( antes,numpy.array( [[-1.,0.],[0.,1.]] ) )
    if graus != 0:
        antes = numpy.dot( antes,matrot(graus) )
    for j in range(len(parts)):
        # euclidean displacement of party j, weighted by its size in year i+1
        qm += numpy.sqrt( numpy.dot( antes[j,:] - depois[j,:], antes[j,:] - depois[j,:] ) ) * anuais[i+1].tamanho_partido[j]
    return qm
def matrot(graus):
    """Return the 2x2 matrix rotating the axes ``graus`` degrees (0-360)
    counter-clockwise (equivalently: rotating the points clockwise about
    fixed axes).
    """
    radians = numpy.pi * float(graus) / 180.
    cos_a, sin_a = numpy.cos(radians), numpy.sin(radians)
    return numpy.array([[cos_a, -sin_a], [sin_a, cos_a]])
print ' '
print 'Espelhando e rotacionando...'
# The sign and rotation of PCA axes are arbitrary, and analyses of
# different years (built from different votes) have unrelated axes.  To
# relate year i-1 to year i we may mirror the x axis and/or rotate the
# older year's axes by any angle, as only pairwise distances matter.
# We want the transformation that makes the parties move as little as
# possible between consecutive years (e.g. avoid a spurious left/right
# swap of the whole picture between two years).
# This is achieved below by minimizing the total "momentum" (size-weighted
# euclidean displacement, see quantidade_movimento) over mirroring and
# rotation in 45-degree steps, walking backwards from the latest year.
for i in range(len(anos)-2,-1,-1): # year indices, from last to first
    print anos[i]
    qm_min = 1000000 # large sentinel; we minimize the momentum
    campeao = (0,0) # best (mirror, degrees) found so far
    for espelhar in [0,1]:
        for graus in [0,45,90,135,180,225,270,315]:
            qm_agora = quantidade_movimento(i,graus,espelhar)
            #print '%d, %d, %f' % (espelhar,graus,qm_agora )
            if qm_agora < qm_min:
                campeao = (espelhar, graus)
                qm_min = qm_agora
    print campeao
    # apply the winning transformation to year i in place
    if campeao[0] == 1: # mirror
        dados[i] = numpy.dot( dados[i], numpy.array([[-1.,0.],[0.,1.]]) )
    if campeao[1] != 0: # rotate
        dados[i] = numpy.dot( dados[i], matrot(campeao[1]) )
print 'Fim'
# Write the output file (HTML/JS snippet for a Google MotionChart):
f = open(arquivo_de_saida,'w')
f.write(""" <script type="text/javascript" src="http://www.google.com/jsapi"></script>
<script type="text/javascript">
google.load('visualization', '1', {packages: ['motionchart']});
function drawVisualization() {
""")
f.write('var data = new google.visualization.DataTable();\n')
f.write("data.addColumn('string', 'Partido');\n")
f.write("data.addColumn('date', 'Data');\n")
f.write("data.addColumn('number', 'Eixo1');\n")
f.write("data.addColumn('number', 'Eixo2');\n")
f.write("data.addColumn('number', 'Tamanho');\n")
f.write("data.addRows([\n")
for ia in range(len(anuais)): # dates
    a = anuais[ia]
    d_ano = int(a.data_final[0:4])
    d_mes = int(a.data_final[5:7])-1 # JS months run 0 to 11
    d_dia = int(a.data_final[8:10])
    for ip in range(len(parts)): # parties
        linha = " ['%s',new Date(%d,%d,%d), %f,%f,%d],\n" % (parts[ip],d_ano,d_mes,d_dia,dados[ia][ip,0],dados[ia][ip,1],a.tamanho_partido[ip])
        f.write(linha)
# back up over the trailing ",\n" before closing the JS array
f.seek(-2,1)
f.write("\n]);")
f.write("""
var motionchart = new google.visualization.MotionChart(
document.getElementById('visualization'));
var options = {};
options['state'] = '{"yAxisOption":"3","xLambda":1,"colorOption":"_UNIQUE_COLOR","playDuration":40000,"showTrails":false,"iconKeySettings":[],"xAxisOption":"2","nonSelectedAlpha":0.4,"uniColorForNonSelected":false,"xZoomedDataMax":0.815577,"sizeOption":"4","orderedByY":false,"iconType":"BUBBLE","dimensions":{"iconDimensions":["dim0"]},"yZoomedDataMax":0.907421,"orderedByX":false,"xZoomedIn":false,"xZoomedDataMin":-0.510363,"time":"2002-12-31","duration":{"timeUnit":"D","multiplier":1},"yLambda":1,"yZoomedIn":false,"yZoomedDataMin":-0.558064}'
options['width'] = 800;
options['height'] = 500;
motionchart.draw(data, options);
}
google.setOnLoadCallback(drawVisualization);
</script>
<div id="visualization" style="width: 800px; height: 400px;"></div>
""")
f.close()
from . import account_payment_mode
from . import account_move
| avanzosc/odoo-addons | account_invoice_report_show_payment_reference/models/__init__.py | Python | agpl-3.0 | 62 |
# flake8: noqa
##############################################################################
# For copyright and license notices, see __manifest__.py file in module root
# directory
##############################################################################
from odoo import models, fields, api, _
from odoo.exceptions import ValidationError, UserError
class StockPicking(models.Model):
    """stock.picking extensions: picking-type driven locks on deletion,
    duplication and cancellation, forced availability, and an optional
    notification mail sent when the picking is done."""
    _inherit = 'stock.picking'
    # Related field so views can react to the picking type's flag.
    block_manual_lines = fields.Boolean(
        related='picking_type_id.block_manual_lines',
    )
    # Free-form notes shown on the picking.
    observations = fields.Text(
    )
    def unlink(self):
        """
        To avoid errors we block deletion of pickings in other state than
        draft or cancel
        """
        # Deletion is also blocked, regardless of state, when the picking
        # type sets block_picking_deletion.
        not_del_pickings = self.filtered(
            lambda x: x.picking_type_id.block_picking_deletion or x.state
            not in ('draft', 'cancel'))
        if not_del_pickings:
            raise ValidationError(
                _(
                    'You can not delete this pickings because "Block picking '
                    'deletion" is enable on the picking type/s "%s" '
                    'or the state of the picking is not draft or cancel.\n'
                    'Picking Ids: %s') %
                (','.join(not_del_pickings.mapped('picking_type_id.name')),
                 not_del_pickings.ids))
        return super().unlink()
    def copy(self, default=None):
        """Block duplication when the picking type blocks additional
        quantities and the copy is requested from the interface."""
        self.ensure_one()
        # If no default is passed, the copy comes from the interface; when
        # adding quantities is blocked, duplicating is blocked too.
        if not default and self.picking_type_id.block_additional_quantity:
            raise UserError(_(
                'You can not duplicate a Picking because "Block'
                ' Additional Quantity"'
                ' is enable on the picking type "%s"') % (
                self.picking_type_id.name))
        return super().copy(default=default)
    def add_picking_operation(self):
        """Open a list/form view on this picking's detailed operations
        (stock.move.line), with record creation disabled."""
        self.ensure_one()
        view_id = self.env.ref(
            'stock_ux.view_move_line_tree').id
        search_view_id = self.env.ref(
            'stock_ux.stock_move_line_view_search').id
        return {
            "type": "ir.actions.act_window",
            "res_model": "stock.move.line",
            "search_view_id": search_view_id,
            "views": [[view_id, "tree"], [False, "form"]],
            "domain": [["id", "in", self.move_line_ids.ids]],
            "context": {"create": False},
        }
    @api.onchange('location_id')
    def change_location(self):
        # we only change moves locations if picking in draft
        if self.state == 'draft':
            self.move_lines.update({'location_id': self.location_id.id})
    @api.onchange('location_dest_id')
    def change_location_dest(self):
        # we only change moves locations if picking in draft
        if self.state == 'draft':
            self.move_lines.update(
                {'location_dest_id': self.location_dest_id.id})
    def action_done(self):
        """Validate the pickings, guarding against double validation, then
        send the picking type's notification mail template (if any)."""
        for picking in self:
            # This fixes Odoo allowing the same picking to be delivered
            # several times when, for some reason, the button is still
            # shown.  In our case it happened when printing raised an
            # error: the picking was transferred but the screen was not
            # refreshed.  It used to be done in do_new_transfer, but this
            # method is sometimes called without going through
            # do_new_transfer.
            if picking.state in ['done', 'cancel']:
                raise UserError(_(
                    'No se puede validar un picking que no esté en estado '
                    'Parcialmente Disponible o Reservado, probablemente el '
                    'picking ya fue validado, pruebe refrezcar la ventana!'))
        res = super().action_done()
        # Best-effort notification: a failing template must not block the
        # transfer, so errors are posted on the picking's chatter instead.
        for rec in self.with_context(mail_notify_force_send=False).filtered('picking_type_id.mail_template_id'):
            try:
                rec.message_post_with_template(rec.picking_type_id.mail_template_id.id)
            except Exception as error:
                title = _(
                    "ERROR: Picking was not sent via email"
                )
                rec.message_post(body="<br/><br/>".join([
                    "<b>" + title + "</b>",
                    _("Please check the email template associated with"
                      " the picking type."),
                    "<code>" + str(error) + "</code>"
                ]),
                )
        return res
    def new_force_availability(self):
        """Reserve what is possible, then force the done quantity to the
        full demand on every move not yet done or cancelled."""
        self.action_assign()
        for rec in self.mapped('move_lines').filtered(lambda m: m.state not in ['cancel', 'done']):
            # this two could go together but we keep similar to odoo sm._quantity_done_set
            if not rec.move_line_ids:
                rec.quantity_done = rec.product_uom_qty
            elif len(rec.move_line_ids) == 1:
                rec.quantity_done = rec.product_uom_qty
            else:
                for line in rec.move_line_ids:
                    line.qty_done = line.product_uom_qty
    # overwrite of odoo method so that we dont suggest backorder because of
    # canceled moves. Search for "CHANGE FROM HERE"
    def _check_backorder(self):
        """ This method will loop over all the move lines of self and
        check if creating a backorder is necessary. This method is
        called during button_validate if the user has already processed
        some quantities and in the immediate transfer wizard that is
        displayed if the user has not processed any quantities.
        :return: True if a backorder is necessary else False
        """
        quantity_todo = {}
        quantity_done = {}
        # CHANGE FROM HERE
        for move in self.mapped('move_lines').filtered(
                lambda x: x.state not in 'cancel'):
            # for move in self.mapped('move_lines'):
            # TO HERE
            quantity_todo.setdefault(move.product_id.id, 0)
            quantity_done.setdefault(move.product_id.id, 0)
            quantity_todo[move.product_id.id] += move.product_uom_qty
            quantity_done[move.product_id.id] += move.quantity_done
        # Packages without product/move: count the quants they contain.
        for ops in self.mapped('move_line_ids').filtered(
                lambda x: x.package_id and not x.product_id and not x.move_id):
            for quant in ops.package_id.quant_ids:
                quantity_done.setdefault(quant.product_id.id, 0)
                quantity_done[quant.product_id.id] += quant.qty
        # Loose operation lines not linked to a move.
        for pack in self.mapped('move_line_ids').filtered(
                lambda x: x.product_id and not x.move_id):
            quantity_done.setdefault(pack.product_id.id, 0)
            quantity_done[pack.product_id.id] += pack.product_uom_id._compute_quantity(
                pack.qty_done, pack.product_id.uom_id)
        return any(quantity_done[x] < quantity_todo.get(x, 0)
                   for x in quantity_done)
    @api.constrains('state')
    def check_cancel(self):
        """Only users in the dedicated group may cancel pickings, unless
        the cancellation is propagated from the originating order."""
        if self._context.get('cancel_from_order'):
            return
        if self.filtered(
                lambda x: x.state == 'cancel' and not self.user_has_groups('stock_ux.allow_picking_cancellation')):
            raise ValidationError("Only User with 'Picking cancelation allow' rights can cancel pickings")
    def _put_in_pack(self, move_line_ids):
        # we send to skip a process of check qty when is sending through the copy method.
        return super()._put_in_pack(move_line_ids.with_context(put_in_pack=True))
| ingadhoc/stock | stock_ux/models/stock_picking.py | Python | agpl-3.0 | 7,572 |
#!/usr/bin/python
#
# This file is part of jetflows.
#
# Copyright (C) 2014, Henry O. Jacobs (hoj201@gmail.com), Stefan Sommer (sommer@di.ku.dk)
# https://github.com/nefan/jetflows.git
#
# jetflows is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# jetflows is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with jetflows. If not, see <http://www.gnu.org/licenses/>.
#
"""
Deforming at circle to a C
"""
import match as match
import matching.imagesim as imsim
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt
#from PIL import Image, ImageDraw
import logging
# match options
scalegrad = True
visualize = True
visualizeIterations = False
gradTol = None # 1e-5
maxIter = 0  # NOTE(review): presumably disables optimizer iterations (pure shoot) -- confirm in match.match
logging.basicConfig(level=logging.DEBUG,format="[%(filename)s:%(lineno)s - %(funcName)6s() ] %(message)s")
# Paths where the generated fixed/moving images are saved as .npy files.
fixed = '../data/shootc1.npy'
moving = '../data/shootc2.npy'
pointsPerAxis = 1
border = 110
order = 2
# similarity/regularization weights; the first term is zeroed out here
weights = [0*1e-5, 1]
weights = weights/np.sum(weights)
smoothing = 0.02
splineS = None
SIGMAF = 2.**(-2)
h = 1. / pointsPerAxis
immT = None
# generate image
Nx = 301
Ny = 301
c = np.floor([Nx/2,Ny/2])
# horizontal "bars"
im1 = np.zeros((Nx, Ny))
d1 = 20
d2 = 20
disp = -25
# rectangular bar of intensity 255, shifted `disp` pixels from the center row
im1[c[0]-d1+disp:c[0]+d1+disp,c[1]-d2:c[1]+d2] = 255
im2 = im1  # NOTE: im2 aliases im1 (same array object); fixed and moving images are identical
# save images
plt.imshow(im1, interpolation='nearest')
#plt.show(block=True)
plt.gcf().savefig('../data/barim1-1.png')
np.save(fixed,im1)
plt.imshow(im2, interpolation='nearest')
#plt.show(block=True)
plt.gcf().savefig('../data/barim1-2.png')
np.save(moving,im2)
# Jet configuration: one particle (N) in two dimensions (DIM) with zeroed
# position/momenta; only the second-order momentum mu_2 is set below.
N = 1
DIM = 2
q = np.array([[0.0,0.0]])
p = np.zeros([N,DIM])
mu_1 = np.zeros([N,DIM,DIM])
mu_2 = np.zeros([N,DIM,DIM,DIM])
# initial conditions
#p = np.array([[1.0,0.0]])
#mu_1 = np.array([[[1.0 , 0.0],[0.0,1.0]]])
mu_2 = -400*np.array([[[[1.0,0.0],[0.0,0.0]] , [[0.0,0.0],[0.0,0.0]]]])
# post process
for d in range(DIM): # make triangular
    # symmetrize each DIMxDIM slice of the second-order momentum
    mu_2[0,d] = 0.5*(mu_2[0,d] + mu_2[0,d].T)
    print mu_2[0,d]
sim = imsim.get(pointsPerAxis, immname=moving, imfname=fixed, immT=immT, border=border, normalize=True, visualize=visualize, order=order, smoothscaleFactor=smoothing, SIGMAF=SIGMAF, h=h, splineS=splineS)
logging.info("initial point configuration: %s",sim['initial'])
(fstate,res) = match.match(sim,sim['SIGMA'],weights,initial=sim['initial'],initialMomentum=(p,mu_1,mu_2),gradTol=gradTol,order=order,scalegrad=scalegrad,maxIter=maxIter,visualize=visualize, visualizeIterations=visualizeIterations)
#print res
if True: # res.success:
    # NOTE(review): the success check is bypassed; state data is always generated
    match.genStateData(fstate,sim)
| stefansommer/jetflows | code/examples/shootc.py | Python | agpl-3.0 | 2,944 |
# -*- coding: utf-8 -*-
# Copyright 2019 Sergio Corato (https://efatto.it)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
# Odoo module manifest: metadata only, no executable code.
{
    "name": "Italian Localization - Fattura elettronica - Export ZIP",
    "summary": "Permette di esportare in uno ZIP diversi file XML di "
               "fatture elettroniche",
    "version": "10.0.1.0.0",
    "development_status": "Beta",
    "category": "other",
    "website": "https://github.com/OCA/l10n-italy",
    "author": "Efatto.it di Sergio Corato, Odoo Community Association (OCA)",
    "maintainers": ["sergiocorato"],
    "license": "AGPL-3",
    "application": False,
    "installable": True,
    # Requires both the outgoing and the incoming e-invoice modules.
    "depends": [
        "l10n_it_fatturapa_out",
        "l10n_it_fatturapa_in",
    ],
    # XML files loaded at install/update time.
    "data": [
        "wizard/export_fatturapa_view.xml",
        "views/attachment_view.xml",
    ],
}
| linkitspa/l10n-italy | l10n_it_fatturapa_export_zip/__manifest__.py | Python | agpl-3.0 | 847 |
# -*- coding: utf8 -*-
#
# Copyright (C) 2018 NDP Systèmes (<http://www.ndp-systemes.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
#
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from openerp import models, api
class res_config_settings_improved(models.TransientModel):
    """Make saving settings cheaper: when ``execute`` runs, group fields
    whose value is unchanged and module fields whose module is already
    installed (or being installed/upgraded) are dropped, so only real
    changes are applied."""
    _inherit = 'res.config.settings'
    @api.model
    def _get_classified_fields(self):
        """Classify fields as usual, then — only when called from our
        ``execute`` override (``with_config_improved`` in context) —
        filter out entries that would be no-ops."""
        res = super(res_config_settings_improved, self)._get_classified_fields()
        if 'with_config_improved' in self.env.context:
            config = self.browse(self.env.context['with_config_improved'])
            ctx = self.env.context.copy()
            ctx.pop('with_config_improved', None)
            # Current defaults, computed without our marker in the context.
            current_config = self.with_context(ctx).default_get(fields=[])
            # Iterate over copies so entries can be removed from the
            # original lists safely while walking them.
            res_group = res['group'][:]
            i = 0
            for name, _, _ in res_group:
                if config[name] == current_config[name]:
                    res['group'].remove(res_group[i])
                i += 1
            res_module = res['module'][:]
            i = 0
            for name, module in res_module:
                if config[name] and module.state not in ('uninstalled', 'to install', 'to upgrade'):
                    res['module'].remove(res_module[i])
                i += 1
        return res
    @api.multi
    def execute(self):
        """Run the standard execute with a context marker so that
        ``_get_classified_fields`` knows which record to diff against."""
        ctx = self.env.context.copy()
        ctx.update({'with_config_improved': self.ids[0]})
        return super(res_config_settings_improved, self.with_context(ctx)).execute()
| ndp-systemes/odoo-addons | base_res_config_improved/res_config.py | Python | agpl-3.0 | 2,110 |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#=============================================================================
# FileName:
# Desc:
# Author: 苦咖啡
# Email: voilet@qq.com
# HomePage: http://blog.kukafei520.net
# Version: 0.0.1
# LastChange: 2013-02-20 14:52:11
# History:
#=============================================================================
from django import forms
from django.db import models
from uuidfield import UUIDField
class finotify(models.Model):
    """Reported alert about a suspicious file on a monitored server.

    (Original docstring: "上报信息" — reported information.)
    """
    # Auto-generated UUID primary key.
    uuid = UUIDField(auto=True, primary_key=True)
    # Path of the suspicious file on the server.
    file_path = models.CharField(max_length=64, blank=True, null=True, verbose_name='可疑文件')
    # Alert details / warning content.
    dangerous = models.TextField(blank=True, null=True, verbose_name='报警内容 ')
    # IP address of the reporting server.
    server_ip = models.CharField(blank=True, null=True, max_length=64, verbose_name='服务器ip')
    # Timestamp of the monitoring event.
    files_create_time = models.DateTimeField(blank=True, null=True, max_length=64, verbose_name='监控时间')
| voilet/cmdb | finotify/models.py | Python | agpl-3.0 | 1,014 |
"""
Django settings for apimetaclub project.
Generated by 'django-admin startproject' using Django 1.10.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository -- rotate it and
# load it from the environment before any real deployment.
SECRET_KEY = '4b76+q*=fl3)_r3&ey7l_ih&x6!_k0_&dipnsgrtx05&%*)oo_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Hostnames served by this site; must be filled in once DEBUG is False.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # REST API plus interactive Swagger documentation.
    'rest_framework',
    'rest_framework_swagger',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'apimetaclub.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'apimetaclub.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| MetaClub/api-metaclub | src/apimetaclub/settings.py | Python | agpl-3.0 | 3,157 |
# -*- coding: utf-8 -*-
import cStringIO
import datetime
from itertools import islice
import json
import logging
import re
from sys import maxint
import werkzeug.utils
import werkzeug.wrappers
from PIL import Image
import openerp
from openerp.addons.web import http
from openerp.http import request, Response
logger = logging.getLogger(__name__)
# Completely arbitrary limits
MAX_IMAGE_WIDTH, MAX_IMAGE_HEIGHT = IMAGE_LIMITS = (1024, 768)
# Maximum number of <loc> entries per generated sitemap file, and how long
# a stored sitemap attachment is reused before regeneration.
LOC_PER_SITEMAP = 45000
SITEMAP_CACHE_TIME = datetime.timedelta(hours=12)
class Website(openerp.addons.web.controllers.main.Home):
#------------------------------------------------------
# View
#------------------------------------------------------
    @http.route('/', type='http', auth="public", website=True)
    def index(self, **kw):
        """Serve the website root.

        When the main menu's first child points somewhere other than the
        homepage, redirect to it (or reroute for /page/ URLs); otherwise
        render the 'homepage' page.
        """
        page = 'homepage'
        try:
            main_menu = request.registry['ir.model.data'].get_object(request.cr, request.uid, 'website', 'main_menu')
        except Exception:
            # no main menu record: fall through to the homepage
            pass
        else:
            first_menu = main_menu.child_id and main_menu.child_id[0]
            if first_menu:
                # Menus pointing at the root itself ('/', '/?...', '/#...')
                # or at a page must not issue an HTTP redirect.
                if not (first_menu.url.startswith(('/page/', '/?', '/#')) or (first_menu.url=='/')):
                    return request.redirect(first_menu.url)
                if first_menu.url.startswith('/page/'):
                    # serve the page directly, keeping '/' in the address bar
                    return request.registry['ir.http'].reroute(first_menu.url)
        return self.page(page)
    @http.route(website=True, auth="public")
    def web_login(self, *args, **kw):
        """Expose the standard login page inside the website layout."""
        # TODO: can't we just put auth=public, ... in web client ?
        return super(Website, self).web_login(*args, **kw)
@http.route('/page/<page:page>', type='http', auth="public", website=True)
def page(self, page, **opt):
values = {
'path': page,
}
# allow shortcut for /page/<website_xml_id>
if '.' not in page:
page = 'website.%s' % page
try:
request.website.get_template(page)
except ValueError, e:
# page not found
if request.website.is_publisher():
page = 'website.page_404'
else:
return request.registry['ir.http']._handle_exception(e, 404)
return request.render(page, values)
    @http.route(['/robots.txt'], type='http', auth="public")
    def robots(self):
        """Render /robots.txt from the 'website.robots' template as text/plain."""
        return request.render('website.robots', {'url_root': request.httprequest.url_root}, mimetype='text/plain')
@http.route('/sitemap.xml', type='http', auth="public", website=True)
def sitemap_xml_index(self):
cr, uid, context = request.cr, openerp.SUPERUSER_ID, request.context
ira = request.registry['ir.attachment']
iuv = request.registry['ir.ui.view']
mimetype ='application/xml;charset=utf-8'
content = None
def create_sitemap(url, content):
ira.create(cr, uid, dict(
datas=content.encode('base64'),
mimetype=mimetype,
type='binary',
name=url,
url=url,
), context=context)
sitemap = ira.search_read(cr, uid, [('url', '=' , '/sitemap.xml'), ('type', '=', 'binary')], ('datas', 'create_date'), context=context)
if sitemap:
# Check if stored version is still valid
server_format = openerp.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT
create_date = datetime.datetime.strptime(sitemap[0]['create_date'], server_format)
delta = datetime.datetime.now() - create_date
if delta < SITEMAP_CACHE_TIME:
content = sitemap[0]['datas'].decode('base64')
if not content:
# Remove all sitemaps in ir.attachments as we're going to regenerated them
sitemap_ids = ira.search(cr, uid, [('url', '=like' , '/sitemap%.xml'), ('type', '=', 'binary')], context=context)
if sitemap_ids:
ira.unlink(cr, uid, sitemap_ids, context=context)
pages = 0
first_page = None
locs = request.website.enumerate_pages()
while True:
start = pages * LOC_PER_SITEMAP
values = {
'locs': islice(locs, start, start + LOC_PER_SITEMAP),
'url_root': request.httprequest.url_root[:-1],
}
urls = iuv.render(cr, uid, 'website.sitemap_locs', values, context=context)
if urls.strip():
page = iuv.render(cr, uid, 'website.sitemap_xml', dict(content=urls), context=context)
if not first_page:
first_page = page
pages += 1
create_sitemap('/sitemap-%d.xml' % pages, page)
else:
break
if not pages:
return request.not_found()
elif pages == 1:
content = first_page
else:
# Sitemaps must be split in several smaller files with a sitemap index
content = iuv.render(cr, uid, 'website.sitemap_index_xml', dict(
pages=range(1, pages + 1),
url_root=request.httprequest.url_root,
), context=context)
create_sitemap('/sitemap.xml', content)
return request.make_response(content, [('Content-Type', mimetype)])
#------------------------------------------------------
# Edit
#------------------------------------------------------
    @http.route('/website/add/<path:path>', type='http', auth="user", website=True)
    def pagenew(self, path, noredirect=False, add_menu=None):
        """Create a new website page named ``path``, optionally adding a
        main-menu entry pointing to it.

        Returns a redirect to the new page's URL, or the bare URL as a
        text/plain response when ``noredirect`` is set.
        """
        xml_id = request.registry['website'].new_page(request.cr, request.uid, path, context=request.context)
        if add_menu:
            model, id = request.registry["ir.model.data"].get_object_reference(request.cr, request.uid, 'website', 'main_menu')
            request.registry['website.menu'].create(request.cr, request.uid, {
                'name': path,
                'url': "/page/" + xml_id,
                'parent_id': id,
            }, context=request.context)
        # Reverse action in order to allow shortcut for /page/<website_xml_id>
        url = "/page/" + re.sub(r"^website\.", '', xml_id)
        if noredirect:
            return werkzeug.wrappers.Response(url, mimetype='text/plain')
        return werkzeug.utils.redirect(url)
    @http.route(['/website/snippets'], type='json', auth="public", website=True)
    def snippets(self):
        """Render and return the 'website.snippets' template (JSON route)."""
        return request.website._render('website.snippets')
    @http.route('/website/reset_templates', type='http', auth='user', methods=['POST'], website=True)
    def reset_template(self, templates, redirect='/'):
        """Reset the posted customized view templates to their module
        version: clear ``noupdate`` on their ir.model.data records and
        immediately upgrade the owning modules, then redirect.
        """
        templates = request.httprequest.form.getlist('templates')
        modules_to_update = []
        for temp_id in templates:
            view = request.registry['ir.ui.view'].browse(request.cr, request.uid, int(temp_id), context=request.context)
            if view.page:
                # views flagged as pages are skipped (presumably user
                # content rather than customized templates -- confirm)
                continue
            view.model_data_id.write({
                'noupdate': False
            })
            if view.model_data_id.module not in modules_to_update:
                modules_to_update.append(view.model_data_id.module)
        if modules_to_update:
            module_obj = request.registry['ir.module.module']
            module_ids = module_obj.search(request.cr, request.uid, [('name', 'in', modules_to_update)], context=request.context)
            if module_ids:
                # reload the modules so template definitions are restored
                module_obj.button_immediate_upgrade(request.cr, request.uid, module_ids, context=request.context)
        return request.redirect(redirect)
    @http.route('/website/customize_template_get', type='json', auth='user', website=True)
    def customize_template_get(self, xml_id, full=False, bundles=False):
        """ Lists the templates customizing ``xml_id``. By default, only
        returns optional templates (which can be toggled on and off), if
        ``full=True`` returns all templates customizing ``xml_id``
        ``bundles=True`` returns also the asset bundles
        """
        imd = request.registry['ir.model.data']
        view_model, view_theme_id = imd.get_object_reference(
            request.cr, request.uid, 'website', 'theme')
        user = request.registry['res.users']\
            .browse(request.cr, request.uid, request.uid, request.context)
        user_groups = set(user.groups_id)
        views = request.registry["ir.ui.view"]\
            ._views_get(request.cr, request.uid, xml_id, bundles=bundles, context=request.context)
        done = set()
        result = []
        for v in views:
            # skip views the current user's groups do not grant access to
            if not user_groups.issuperset(v.groups_id):
                continue
            if full or (v.application != 'always' and v.inherit_id.id != view_theme_id):
                # emit one header entry per distinct parent view
                if v.inherit_id not in done:
                    result.append({
                        'name': v.inherit_id.name,
                        'id': v.id,
                        'xml_id': v.xml_id,
                        'inherit_id': v.inherit_id.id,
                        'header': True,
                        'active': False
                    })
                    done.add(v.inherit_id)
                result.append({
                    'name': v.name,
                    'id': v.id,
                    'xml_id': v.xml_id,
                    'inherit_id': v.inherit_id.id,
                    'header': False,
                    'active': v.application in ('always', 'enabled'),
                })
        return result
@http.route('/website/get_view_translations', type='json', auth='public', website=True)
def get_view_translations(self, xml_id, lang=None):
    """Return the translation records (for ``lang``) of every active view
    customizing ``xml_id``."""
    if not lang:
        lang = request.context.get('lang')
    active_view_ids = []
    for view in self.customize_template_get(xml_id, full=True):
        if view.get('active'):
            active_view_ids.append(view.get('id'))
    Translation = request.registry.get('ir.translation')
    return Translation.search_read(
        request.cr, request.uid,
        [('type', '=', 'view'), ('res_id', 'in', active_view_ids), ('lang', '=', lang)],
        ['id', 'res_id', 'value', 'state', 'gengo_translation'],
        context=request.context)
@http.route('/website/set_translations', type='json', auth='public', website=True)
def set_translations(self, data, lang):
    """Create or update view translations for language ``lang``.

    ``data`` maps view ids (as strings) to lists of translation dicts,
    each carrying ``initial_content``, ``new_content`` and an optional
    ``translation_id`` (plus optional gengo metadata).
    """
    irt = request.registry.get('ir.translation')
    for view_id, trans in data.items():
        view_id = int(view_id)
        for t in trans:
            initial_content = t['initial_content'].strip()
            new_content = t['new_content'].strip()
            tid = t['translation_id']
            if not tid:
                # No translation id supplied by the client: look for an
                # existing record matching the original source text.
                old_trans = irt.search_read(
                    request.cr, request.uid,
                    [
                        ('type', '=', 'view'),
                        ('res_id', '=', view_id),
                        ('lang', '=', lang),
                        ('src', '=', initial_content),
                    ])
                if old_trans:
                    tid = old_trans[0]['id']
            if tid:
                # Update the existing translation in place.
                vals = {'value': new_content}
                irt.write(request.cr, request.uid, [tid], vals)
            else:
                # No match found: create a brand new translation record.
                new_trans = {
                    'name': 'website',
                    'res_id': view_id,
                    'lang': lang,
                    'type': 'view',
                    'source': initial_content,
                    'value': new_content,
                }
                if t.get('gengo_translation'):
                    new_trans['gengo_translation'] = t.get('gengo_translation')
                    new_trans['gengo_comment'] = t.get('gengo_comment')
                irt.create(request.cr, request.uid, new_trans)
    return True
@http.route('/website/attach', type='http', auth='user', methods=['POST'], website=True)
def attach(self, func, upload=None, url=None):
    """Create ir.attachment records from uploaded files or from a URL, then
    report the result back to the opener window by calling the JS callback
    named ``func``."""
    # the upload argument doesn't allow us to access the files if more than
    # one file is uploaded, as upload references the first file
    # therefore we have to recover the files from the request object
    Attachments = request.registry['ir.attachment']  # registry for the attachment table
    uploads = []
    message = None
    if not upload:  # no image provided, storing the link and the image name
        uploads.append({'website_url': url})
        name = url.split("/").pop()  # recover filename
        attachment_id = Attachments.create(request.cr, request.uid, {
            'name': name,
            'type': 'url',
            'url': url,
            'res_model': 'ir.ui.view',
        }, request.context)
    else:  # images provided
        try:
            for c_file in request.httprequest.files.getlist('upload'):
                image_data = c_file.read()
                # validate the payload really is an image, and bound its size
                image = Image.open(cStringIO.StringIO(image_data))
                w, h = image.size
                if w*h > 42e6:  # Nokia Lumia 1020 photo resolution
                    raise ValueError(
                        u"Image size excessive, uploaded images must be smaller "
                        u"than 42 million pixel")
                attachment_id = Attachments.create(request.cr, request.uid, {
                    'name': c_file.filename,
                    'datas': image_data.encode('base64'),
                    'datas_fname': c_file.filename,
                    'res_model': 'ir.ui.view',
                }, request.context)
                # re-read the record to obtain its computed website_url
                [attachment] = Attachments.read(
                    request.cr, request.uid, [attachment_id], ['website_url'],
                    context=request.context)
                uploads.append(attachment)
        except Exception, e:
            logger.exception("Failed to upload image to attachment")
            message = unicode(e)
    # hand results (or the error message) back to the opener window
    return """<script type='text/javascript'>
        window.parent['%s'](%s, %s);
    </script>""" % (func, json.dumps(uploads), json.dumps(message))
@http.route(['/website/publish'], type='json', auth="public", website=True)
def publish(self, id, object):
    """Toggle the ``website_published`` flag on record ``id`` of model
    ``object`` and return the new value."""
    record_id = int(id)
    Model = request.registry[object]
    record = Model.browse(request.cr, request.uid, record_id)
    vals = {}
    if 'website_published' in Model._all_columns:
        vals['website_published'] = not record.website_published
    Model.write(request.cr, request.uid, [record_id], vals,
                context=request.context)
    # re-browse so the returned value reflects the write
    record = Model.browse(request.cr, request.uid, record_id)
    return bool(record.website_published)
#------------------------------------------------------
# Themes
#------------------------------------------------------
def get_view_ids(self, xml_ids):
    """Map a list of view identifiers (fully-qualified xml ids or raw
    database ids) to database ids."""
    imd = request.registry['ir.model.data']
    view_ids = []
    for identifier in xml_ids:
        if "." in identifier:
            parts = identifier.split(".")
            _model, db_id = imd.get_object_reference(
                request.cr, request.uid, parts[0], parts[1])
        else:
            db_id = int(identifier)
        view_ids.append(db_id)
    return view_ids
@http.route(['/website/theme_customize_get'], type='json', auth="public", website=True)
def theme_customize_get(self, xml_ids):
    """Split ``xml_ids`` into a pair ``[enabled, disabled]`` of view xml ids
    according to each view's ``application`` state."""
    View = request.registry["ir.ui.view"]
    enabled = []
    disabled = []
    for record in View.browse(request.cr, request.uid,
                              self.get_view_ids(xml_ids),
                              context=request.context):
        # anything not explicitly disabled counts as enabled
        bucket = enabled if record.application != "disabled" else disabled
        bucket.append(record.xml_id)
    return [enabled, disabled]
@http.route(['/website/theme_customize'], type='json', auth="public", website=True)
def theme_customize(self, enable, disable):
    """ enable or Disable lists of ``xml_id`` of the inherit templates
    """
    cr, uid, context = request.cr, request.uid, request.context
    View = request.registry["ir.ui.view"]

    def apply_state(xml_ids, state):
        # Collect views whose state actually changes; 'always' views are
        # mandatory and are never toggled.
        to_write = [
            v.id
            for v in View.browse(cr, uid, self.get_view_ids(xml_ids),
                                 context=context)
            if v.application != 'always' and v.application != state
        ]
        if to_write:
            View.write(cr, uid, to_write, {'application': state})

    apply_state(disable, 'disabled')
    apply_state(enable, 'enabled')
    return True
@http.route(['/website/theme_customize_reload'], type='http', auth="public", website=True)
def theme_customize_reload(self, href, enable, disable):
    """Apply theme view toggles, then redirect back to ``href`` with a
    ``theme=true`` marker in the fragment."""
    to_enable = enable.split(",") if enable else []
    to_disable = disable.split(",") if disable else []
    self.theme_customize(to_enable, to_disable)
    # append to an existing fragment, or start one
    suffix = "&theme=true" if "#" in href else "#theme=true"
    return request.redirect(href + suffix)
#------------------------------------------------------
# Helpers
#------------------------------------------------------
@http.route(['/website/kanban'], type='http', auth="public", methods=['POST'], website=True)
def kanban(self, **post):
    """Thin wrapper: delegate kanban column rendering to the website model."""
    return request.website.kanban_col(**post)
def placeholder(self, response):
    """Fill ``response`` with the website's placeholder image."""
    return request.registry['website']._image_placeholder(response)
@http.route([
    '/website/image',
    '/website/image/<xmlid>',
    '/website/image/<xmlid>/<field>',
    '/website/image/<model>/<id>/<field>'
    ], auth="public", website=True)
def website_image(self, model=None, id=None, field=None, xmlid=None, max_width=None, max_height=None):
    """ Fetches the requested field and ensures it does not go above
    (max_width, max_height), resizing it if necessary.

    If the record is not found or does not have the requested field,
    returns a placeholder image via :meth:`~.placeholder`.

    Sets and checks conditional response parameters:
    * :mailheader:`ETag` is always set (and checked)
    * :mailheader:`Last-Modified` is set iff the record has a concurrency
      field (``__last_update``)

    The requested field is assumed to be base64-encoded image data in
    all cases.

    ``xmlid`` can be used to load the image; the field must be
    base64-encoded.
    """
    if xmlid and "." in xmlid:
        xmlid = xmlid.split(".", 1)
        try:
            model, id = request.registry['ir.model.data'].get_object_reference(request.cr, request.uid, xmlid[0], xmlid[1])
        # fix: was a bare "except:" which also swallowed SystemExit and
        # KeyboardInterrupt; any lookup failure still becomes a 404
        except Exception:
            raise werkzeug.exceptions.NotFound()
        if model == 'ir.attachment':
            # attachments store their binary payload in 'datas'
            field = "datas"
    if not model or not id or not field:
        raise werkzeug.exceptions.NotFound()
    response = werkzeug.wrappers.Response()
    return request.registry['website']._image(
        request.cr, request.uid, model, id, field, response, max_width, max_height)
#------------------------------------------------------
# Server actions
#------------------------------------------------------
@http.route('/website/action/<path_or_xml_id_or_id>', type='http', auth="public", website=True)
def actions_server(self, path_or_xml_id_or_id, **post):
    """Run a published 'code' server action identified by xml id, website
    path or database id; redirect to '/' unless the action returned an
    HTTP Response itself."""
    cr, uid, context = request.cr, request.uid, request.context
    res, action_id, action = None, None, None
    ServerActions = request.registry['ir.actions.server']

    # find the action_id: either an xml_id, the path, or an ID
    if isinstance(path_or_xml_id_or_id, basestring) and '.' in path_or_xml_id_or_id:
        action_id = request.registry['ir.model.data'].xmlid_to_res_id(request.cr, request.uid, path_or_xml_id_or_id, raise_if_not_found=False)
    if not action_id:
        # fall back to matching the published website path
        action_ids = ServerActions.search(cr, uid, [('website_path', '=', path_or_xml_id_or_id), ('website_published', '=', True)], context=context)
        action_id = action_ids and action_ids[0] or None
    if not action_id:
        # last resort: treat the parameter as a raw database id
        try:
            action_id = int(path_or_xml_id_or_id)
        except ValueError:
            pass

    # check it effectively exists
    if action_id:
        action_ids = ServerActions.exists(cr, uid, [action_id], context=context)
        action_id = action_ids and action_ids[0] or None

    # run it, return only if we got a Response object
    if action_id:
        action = ServerActions.browse(cr, uid, action_id, context=context)
        # only published python-code actions are runnable from the website
        if action.state == 'code' and action.website_published:
            action_res = ServerActions.run(cr, uid, [action_id], context=context)
            if isinstance(action_res, werkzeug.wrappers.Response):
                res = action_res
    if res:
        return res
    return request.redirect('/')
| npiganeau/odoo | addons/website/controllers/main.py | Python | agpl-3.0 | 21,255 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Akretion LTDA.
# authors: Raphaël Valyi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Module manifest: disables the (slow) stock dashboard default action.
{
    'name': 'Stock No Dashboard',
    'version': '1.1',
    'category': 'Generic Modules/Stock',
    # fix: grammar/typos in the user-visible description
    'description': """
This module disables the stock dashboard, which makes OpenERP fast again with the web-client and real production data that otherwise make the dashboard very slow and break your navigation.

Notice that the dashboard is still available through its dedicated menu.""",
    'author': 'Akretion',
    'website': 'http://www.akretion.com',
    'depends': ['stock'],
    'init_xml': [],
    'update_xml': ['stock_view.xml'],
    'demo_xml': [],
    'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| iw3hxn/LibrERP | stock_no_dashboard/__openerp__.py | Python | agpl-3.0 | 1,657 |
# Second degree Laplacian edge detection, copied from Gimp
__all__ = ['laplacian']
def _py_laplacian(im):
    """Pure-python edge detector (two passes over the RGB pixels).

    Pass 1 computes, per channel, a local gradient magnitude and the sign
    of a Laplacian-like second derivative (8-neighbour sum minus 8x the
    centre).  Pass 2 keeps pixels at sign changes (zero crossings) as
    edges; everything else becomes white.  Returns a new image.
    """
    im = im.convert('RGB')
    pix = im.load()
    orig_im = im.copy()  # NOTE: unused; kept as-is
    im2 = im.copy()
    pix2 = im2.load()
    # first pass: gradient + Laplacian sign, skipping the 1px border
    for i in xrange(1, im.size[0]-1):
        for j in xrange(1, im.size[1]-1):
            # 4-neighbourhood plus the centre pixel
            neigh4 = (
                pix[i-1,j],
                pix[i+1,j],
                pix[i,j],
                pix[i,j-1],
                pix[i,j+1]
            )
            # transpose to per-channel tuples (R, G, B)
            by_color4 = zip(*neigh4)
            minv = [min(a) for a in by_color4]
            maxv = [max(a) for a in by_color4]
            p = pix[i,j]
            # half the largest deviation from the centre, per channel
            grad = tuple([0.5 * max(maxv[n]-p[n], p[n]-minv[n]) for n in range(3)])
            neigh8 = (
                pix[i-1, j-1],
                pix[ i, j-1],
                pix[i+1, j-1],
                pix[i-1, j],
                # pix[i+1, j-1],
                pix[i+1, j],
                pix[i-1, j+1],
                pix[ i, j+1],
                pix[i+1, j+1]
            )
            by_color8 = zip(*neigh8)
            new_pix = []
            for x in xrange(3):
                # encode the Laplacian sign in the 128 offset:
                # >0 -> value in [0,128), <=0 -> value in [128,256)
                if (sum(by_color8[x]) - 8*p[x]) > 0:
                    new_pix.append(grad[x])
                else:
                    new_pix.append(128 + grad[x])
            pix2[i,j] = tuple((int(a) for a in new_pix))
    # second pass works on the output of the first
    im = im2
    pix = pix2
    im2 = im.copy()
    pix2 = im2.load()
    for i in xrange(1, im.size[0]-1):
        for j in xrange(1, im.size[1]-1):
            neigh8 = (
                pix[i-1, j-1],
                pix[ i, j-1],
                pix[i+1, j-1],
                pix[i-1, j],
                # pix[i+1, j-1],
                pix[i+1, j],
                pix[i-1, j+1],
                pix[ i, j+1],
                pix[i+1, j+1]
            )
            by_color8 = zip(*neigh8)
            p = pix[i,j]
            new_p = []
            for x in xrange(3):
                # zero crossing: this pixel's sign bit is 0 but some
                # neighbour's is 1 -> edge candidate for this channel
                if (p[x] <= 128) and any((n > 128 for n in by_color8[x])):
                    if p[x] >= 128:
                        new_p.append(p[x] - 128)
                    else:
                        new_p.append(p[x])
                else:
                    new_p.append(0)
            # keep only sufficiently strong edges; otherwise paint white
            if any((a > 15 for a in new_p)):
                pix2[i,j] = tuple(new_p)
            else:
                pix2[i,j] = (255, 255, 255)
    return im2
# Prefer the optional C extension; fall back to the pure-python version.
try:
    import _laplacian
    _laplacian.ppm_laplacian # this is foo!
except (ImportError, AttributeError), e:
    print e
    _laplacian = None
if _laplacian is None:
    laplacian = _py_laplacian
else:
    #print 'Using C laplacian'
    from cStringIO import StringIO
    import re
    from PIL import Image
    def laplacian(im):
        """C-accelerated variant: round-trip the image through the PPM
        format so the raw pixel buffer can be handed to the extension."""
        s = StringIO()
        im.save(s, 'ppm')
        ppm = s.getvalue()
        del s
        # split the PPM header ("P6\n<w> <h>\n<maxval>\n") from the pixels
        header_match = re.match('^P6\n(\d+)\s(\d+)\n(\d+)\n', ppm, re.M)
        header, raw_data = ppm[:header_match.end()], ppm[header_match.end():]
        result = _laplacian.ppm_laplacian(im.size, raw_data)
        # reassemble header + processed pixels and decode back to an Image
        s = StringIO()
        s.write(header)
        s.write(result)
        s.seek(0)
        im = Image.open(s)
        im.load()
        del s
        return im
| zejn/arsoapi | arsoapi/laplacian.py | Python | agpl-3.0 | 2,437 |
#!/bin/sh
"""": # -*-python-*-
bup_python="$(dirname "$0")/bup-python" || exit $?
exec "$bup_python" "$0" ${1+"$@"}
"""
# end of bup preamble
from __future__ import absolute_import
import sys, os, struct
from bup import options, helpers
optspec = """
bup on--server
--
This command is run automatically by 'bup on'
"""
o = options.Options(optspec)
(opt, flags, extra) = o.parse(sys.argv[1:])
if extra:
    o.fatal('no arguments expected')

# get the subcommand's argv.
# Normally we could just pass this on the command line, but since we'll often
# be getting called on the other end of an ssh pipe, which tends to mangle
# argv (by sending it via the shell), this way is much safer.
# Wire format: a 4-byte big-endian length prefix, then NUL-separated args.
buf = sys.stdin.read(4)
sz = struct.unpack('!I', buf)[0]
assert(sz > 0)
assert(sz < 1000000)  # sanity bound on the argv blob
buf = sys.stdin.read(sz)
assert(len(buf) == sz)
argv = buf.split('\0')
# re-invoke the same binary, wrapping the original command in 'bup mux'
argv = [argv[0], 'mux', '--'] + argv

# stdin/stdout are supposedly connected to 'bup server' that the caller
# started for us (often on the other end of an ssh tunnel), so we don't want
# to misuse them. Move them out of the way, then replace stdout with
# a pointer to stderr in case our subcommand wants to do something with it.
#
# It might be nice to do the same with stdin, but my experiments showed that
# ssh seems to make its child's stderr a readable-but-never-reads-anything
# socket. They really should have used shutdown(SHUT_WR) on the other end
# of it, but probably didn't. Anyway, it's too messy, so let's just make sure
# anyone reading from stdin is disappointed.
#
# (You can't just leave stdin/stdout "not open" by closing the file
# descriptors. Then the next file that opens is automatically assigned 0 or 1,
# and people *trying* to read/write stdin/stdout get screwed.)
os.dup2(0, 3)   # fd 3: the server connection's read side
os.dup2(1, 4)   # fd 4: the server connection's write side
os.dup2(2, 1)   # stdout now goes to stderr
fd = os.open('/dev/null', os.O_RDONLY)
os.dup2(fd, 0)  # stdin reads EOF from /dev/null
os.close(fd)
os.environ['BUP_SERVER_REVERSE'] = helpers.hostname()
os.execvp(argv[0], argv)
# only reached if execvp somehow returns without raising
sys.exit(99)
| ToxicFrog/bup | cmd/on--server-cmd.py | Python | lgpl-2.1 | 1,955 |
import time
import uuid
from tendrl.commons import objects
from tendrl.commons.utils import log_utils as logger
class DeleteMonitoringDetails(objects.BaseAtom):
    """Atom that asks the monitoring integration to purge a cluster's
    monitoring data, then polls the spawned job until it finishes."""

    def __init__(self, *args, **kwargs):
        super(DeleteMonitoringDetails, self).__init__(*args, **kwargs)

    def run(self):
        integration_id = self.parameters['TendrlContext.integration_id']
        # Spawn a child job handled by the monitoring integration node.
        child_job_id = str(uuid.uuid4())
        NS.tendrl.objects.Job(
            job_id=child_job_id,
            status="new",
            payload={
                "tags": ["tendrl/integration/monitoring"],
                "run": "monitoring.flows.DeleteMonitoringData",
                "status": "new",
                "parameters": self.parameters,
                "parent": self.parameters['job_id'],
                "type": "monitoring"
            }
        ).save()

        # Poll every 5s, giving up after 24 attempts (~2 minutes).
        attempts = 0
        max_attempts = 24
        while True:
            if attempts >= max_attempts:
                logger.log(
                    "error",
                    NS.publisher_id,
                    {
                        "message": "Clearing monitoring data for cluster "
                        "(%s) not yet complete. Timing out." %
                        NS.tendrl.objects.Cluster(
                            integration_id=integration_id
                        ).load().short_name
                    },
                    job_id=self.parameters['job_id'],
                    flow_id=self.parameters['flow_id'],
                )
                return False
            time.sleep(5)
            child = NS.tendrl.objects.Job(job_id=child_job_id).load()
            if child.status == "finished":
                break
            attempts += 1
        return True
| r0h4n/commons | tendrl/commons/objects/cluster/atoms/delete_monitoring_details/__init__.py | Python | lgpl-2.1 | 1,914 |
# -*- python -*-
# pylogsparser - Logs parsers python library
#
# Copyright (C) 2011 Wallix Inc.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 2.1 of the License, or (at your
# option) any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import timeit
from logsparser.lognormalizer import LogNormalizer
if __name__ == "__main__":
    # Build a normalizer from the definitions directory given by env var.
    path = os.environ['NORMALIZERS_PATH']
    ln = LogNormalizer(path)
    def test():
        # One representative squid access-log line pushed through the
        # full normalization chain.
        l = {'raw' : "<29>Jul 18 08:55:35 naruto squid[3245]: 1259844091.407 307 82.238.42.70 TCP_MISS/200 1015 GET http://www.ietf.org/css/ietf.css fbo DIRECT/64.170.98.32 text/css" }
        l = ln.uuidify(l)
        ln.normalize(l)
    print "Testing speed ..."
    # Average over 100k runs to estimate per-log cost.
    t = timeit.Timer("test()", "from __main__ import test")
    speed = t.timeit(100000)/100000
    print "%.2f microseconds per pass, giving a theoretical speed of %i logs/s." % (speed * 1000000, 1 / speed)
    print "Testing speed with minimal normalization ..."
    # Re-run with only the syslog normalizer active for comparison.
    ln.set_active_normalizers({'syslog' : True})
    ln.reload()
    t = timeit.Timer("test()", "from __main__ import test")
    speed = t.timeit(100000)/100000
    print "%.2f microseconds per pass, giving a theoretical speed of %i logs/s." % (speed * 1000000, 1 / speed)
| wallix/pylogsparser | tests/test_norm_chain_speed.py | Python | lgpl-2.1 | 1,829 |
#!/usr/bin/env python
#
# @file BaseBindingsFiles.py
# @brief class for generating base files for other library implementations
# @author Frank Bergmann
# @author Sarah Keating
#
# <!--------------------------------------------------------------------------
#
# Copyright (c) 2013-2018 by the California Institute of Technology
# (California, USA), the European Bioinformatics Institute (EMBL-EBI, UK)
# and the University of Heidelberg (Germany), with support from the National
# Institutes of Health (USA) under grant R01GM070923. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# Neither the name of the California Institute of Technology (Caltech), nor
# of the European Bioinformatics Institute (EMBL-EBI), nor of the University
# of Heidelberg, nor the names of any contributors, may be used to endorse
# or promote products derived from this software without specific prior
# written permission.
# ------------------------------------------------------------------------ -->
import re
import os
from ..base_files import BaseInterfaceFile, BaseFile, BaseCMakeFile, BaseTemplateFile
from ..code_files import CppHeaderFile
from ..code_files import CppCodeFile
from ..util import strFunctions, global_variables
class BaseBindingsFiles(BaseTemplateFile.BaseTemplateFile):
    """Writes the language-binding support files for the generated library.

    Depending on ``binding`` ('swig', 'csharp', 'java', 'python', ...)
    this emits interface/header/code files, CMake glue and the
    assembly/manifest helpers, all expanded from bundled templates.
    """

    def __init__(self, elements, binding, verbose=False):
        """Store the classes to expose and the target binding.

        :param elements: list of dicts describing the classes to bind
        :param binding: target binding name, e.g. 'swig' or 'csharp'
        :param verbose: if True, print each file name as it is written
        """
        # members from object
        BaseTemplateFile.BaseTemplateFile.__init__(self,
                                                   global_variables.prefix,
                                                   'bindings_files')
        self.elements = elements
        self.binding = binding
        self.verbose = verbose

    def write_files(self):
        """Entry point: write the files appropriate for self.binding."""
        if self.binding == 'swig':
            self.write_swig_files()
        else:
            self.write_other_files()

    def write_other_files(self):
        """Write the file set needed by the non-swig bindings."""
        self.write_interface('lib')
        self.write_interface('local')
        self.write_code('local')
        self.write_cmake_files()
        self.write_assembly_file()

    def write_swig_files(self):
        """Write the file set needed by the swig binding."""
        self.write_all_files('OStream')
        self.write_header('ListWrapper')
        self.write_interface('std_set')
        self.write_header('lib')
        self.write_interface('lib')
        self.write_interface('ASTNodes')

    def write_all_files(self, name):
        """Write both the header and the code file for ``name``."""
        self.write_header(name)
        self.write_code(name)

    def write_header(self, name):
        """Write the C++ header (.h) file for ``name`` from its template."""
        base_descrip = self.create_base_description(name)
        fileout = CppHeaderFile.CppHeaderFile(base_descrip, False)
        filein = '{1}{2}{0}.h'.format(name, self.binding, os.sep)
        if self.verbose:
            print('Writing file {0}'.format(fileout.filename))
        fileout.add_file_header()
        fileout.skip_line(2)
        self.copy_file_contents(fileout, filein)
        fileout.close_file()

    def write_interface(self, name):
        """Write the swig interface (.i) file for ``name`` from its template."""
        # the 'lib' interface file is named after the library itself
        if name == 'lib':
            filename = global_variables.library_name.lower()
        else:
            filename = name
        fileout = BaseInterfaceFile.BaseInterfaceFile(filename)
        filein = '{1}{2}{0}.i'.format(name, self.binding, os.sep)
        if self.verbose:
            print('Writing file {0}'.format(fileout.filename))
        fileout.add_file_header()
        fileout.skip_line(2)
        self.copy_file_contents(fileout, filein)
        fileout.close_file()

    def write_code(self, name):
        """Write the C++ code (.cpp) file for ``name`` from its template."""
        base_descrip = self.create_base_description(name)
        fileout = CppCodeFile.CppCodeFile(base_descrip, False)
        filein = '{1}{2}{0}.cpp'.format(name, self.binding, os.sep)
        if self.verbose:
            print('Writing file {0}'.format(fileout.filename))
        fileout.add_file_header()
        fileout.skip_line(2)
        self.copy_file_contents(fileout, filein)
        fileout.close_file()

    def write_cmake_files(self):
        """Write CMakeLists.txt plus, for csharp/java, the native helper."""
        fileout = BaseFile.BaseFile('CMakeLists', 'txt')
        filein = '{0}{1}CMakeLists.txt'.format(self.binding, os.sep)
        if self.verbose:
            print('Writing file {0}'.format(fileout.filename))
        self.copy_file_contents(fileout, filein)
        fileout.close_file()
        # fix: csharp and java previously had two byte-identical elif
        # branches here; they share the same native-compilation file
        if self.binding in ('csharp', 'java'):
            fileout = BaseCMakeFile.BaseCMakeFile('compile-native-files')
            filein = '{0}{1}native.cmake'.format(self.binding, os.sep)
            if self.verbose:
                print('Writing file {0}'.format(fileout.filename))
            fileout.add_file_header()
            fileout.skip_line(2)
            self.copy_file_contents(fileout, filein)
            fileout.close_file()

    def write_assembly_file(self):
        """Write the binding-specific assembly/manifest/support files."""
        if self.binding == 'csharp':
            fileout = BaseFile.BaseFile('AssemblyInfo.cs', 'in')
            filein = '{0}{1}assembly.in'.format(self.binding, os.sep)
            if self.verbose:
                print('Writing file {0}'.format(fileout.filename))
            self.copy_file_contents(fileout, filein)
            fileout.close_file()
        elif self.binding == 'java':
            fileout = BaseFile.BaseFile('Manifest.txt', 'in')
            filein = '{0}{1}Manifest.txt.in'.format(self.binding, os.sep)
            if self.verbose:
                print('Writing file {0}'.format(fileout.filename))
            self.copy_file_contents(fileout, filein)
            fileout.close_file()
        elif self.binding == 'python':
            # python needs two support files
            fileout = BaseFile.BaseFile('local-contrib', 'i')
            filein = '{0}{1}local-contrib.i'.format(self.binding, os.sep)
            if self.verbose:
                print('Writing file {0}'.format(fileout.filename))
            self.copy_file_contents(fileout, filein)
            fileout.close_file()
            fileout = BaseFile.BaseFile('add_version', 'cmake')
            filein = '{0}{1}add_version.cmake'.format(self.binding, os.sep)
            if self.verbose:
                print('Writing file {0}'.format(fileout.filename))
            self.copy_file_contents(fileout, filein)
            fileout.close_file()

    ###########################################################

    def print_includes(self, fileout):
        """Write a '%include' line for every class (and its ListOf)."""
        for element in self.elements:
            if not element['name'].endswith('Document'):
                name = strFunctions.prefix_name(element['name'])
                fileout.copy_line_verbatim('%include <{0}/{1}.h>\n'
                                           ''.format(global_variables.language,
                                                     name))
                if element['hasListOf']:
                    loname = strFunctions.prefix_name(
                        strFunctions.list_of_name(element['name']))
                    fileout.copy_line_verbatim('%include <{0}/{1}.h>\n'
                                               ''.format(global_variables.language,
                                                         loname))

    def print_derived_types(self, fileout):
        """Write the typecode -> derived-class dispatch for the binding.

        java/csharp emit 'new Xyz(cPtr, owner)' cases; other bindings
        emit SWIGTYPE pointers.
        """
        libname = global_variables.library_name.lower()
        if self.binding == 'java' or self.binding == 'csharp':
            for element in self.elements:
                if not element['name'].endswith('Document'):
                    # fix: idiomatic membership test ('document' not in ...)
                    if 'document' not in element or not element['document']:
                        name = strFunctions.prefix_name(element['name'])
                        fileout.copy_line_verbatim('      case (int) {0}.{1}:'
                                                   '\n'.format(libname,
                                                               element['typecode']))
                        fileout.copy_line_verbatim('        return new {0}(cPtr, owner)'
                                                   ';\n'.format(name))
                        fileout.skip_line()
        else:
            for element in self.elements:
                if not element['name'].endswith('Document'):
                    if 'document' not in element or not element['document']:
                        name = strFunctions.prefix_name(element['name'])
                        fileout.copy_line_verbatim('    case {0}:'
                                                   '\n'.format(element['typecode']))
                        fileout.copy_line_verbatim('      return SWIGTYPE_p_{0}'
                                                   ';\n'.format(name))
                        fileout.skip_line()

    def print_derived_listof_types(self, fileout):
        """Write the element-name -> ListOf-class dispatch for the binding."""
        if self.binding == 'java' or self.binding == 'csharp':
            for element in self.elements:
                if not element['name'].endswith('Document') and \
                        element['hasListOf']:
                    name = strFunctions.lower_list_of_name_no_prefix(
                        element['name'])
                    loname = strFunctions.prefix_name(
                        strFunctions.list_of_name(element['name']))
                    fileout.copy_line_verbatim('    else if (name == \"{0}\")\n'
                                               ''.format(name))
                    fileout.copy_line_verbatim('    {\n')
                    fileout.copy_line_verbatim('      return new {0}(cPtr, '
                                               'owner);\n'.format(loname))
                    fileout.copy_line_verbatim('    }\n')
        else:
            for element in self.elements:
                if not element['name'].endswith('Document') and \
                        element['hasListOf']:
                    name = strFunctions.lower_list_of_name_no_prefix(
                        element['name'])
                    loname = strFunctions.prefix_name(
                        strFunctions.list_of_name(element['name']))
                    fileout.copy_line_verbatim('    else if (name == \"{0}\")\n'
                                               ''.format(name))
                    fileout.copy_line_verbatim('    {\n')
                    fileout.copy_line_verbatim('      return SWIGTYPE_p_{0};\n'.format(loname))
                    fileout.copy_line_verbatim('    }\n')

    def print_for_all_classes(self, fileout, line, classes=True, lists=True):
        """Repeat the macro invocation in ``line`` (ending in ')') once per
        class and/or per ListOf class."""
        l_len = len(line)
        # drop the trailing ')' so the class name can be appended
        declaration = line[0:l_len-1]
        if classes:
            for element in self.elements:
                name = strFunctions.prefix_name(element['name'])
                fileout.copy_line_verbatim('{0}({1})\n'.format(declaration,
                                                               name))
        if lists:
            for element in self.elements:
                if element['hasListOf']:
                    loname = strFunctions.prefix_name(
                        strFunctions.list_of_name(element['name']))
                    fileout.copy_line_verbatim('{0}({1})\n'.format(declaration,
                                                                   loname))

    def print_dependency_includes(self, fileout, header):
        """Write '#include' (headers) or '%include' (interfaces) lines for
        every dependency library."""
        if header.startswith('header'):
            include = '#'
        else:
            include = '%'
        lines = []
        for depend in global_variables.dependency:
            lang = depend['prefix'].lower()
            lines.append('{0}include <{1}/common/extern.h>\n'.format(include, lang))
            lines.append('{0}include <{1}/common/lib{1}-namespace.h>\n'
                         ''.format(include, lang))
            lines.append('{0}include <{1}/common/lib{1}-version.h>\n'
                         ''.format(include, lang))
            # hack for NUML
            if depend['prefix'] == 'NUML':
                lines.append('{0}include <{1}/common/'
                             'operationReturnValues.h>\n'
                             ''.format(include, lang))
            else:
                lines.append('{0}include <{1}/common/{2}'
                             'OperationReturnValues.h>\n'
                             ''.format(include, lang, depend['prefix']))
            lines.append('\n')
            lines.append('{0}include <{1}/{2}Namespaces.h>\n'
                         ''.format(include, lang, depend['prefix']))
            lines.append('{0}include <{1}/{2}TypeCodes.h>\n'
                         ''.format(include, lang, depend['prefix']))
            lines.append('{0}include <{1}/{2}Types.h>\n'
                         ''.format(include, lang, depend['prefix']))
            lines.append('{0}include <{1}/{2}Error.h>\n'
                         ''.format(include, lang, depend['prefix']))
            lines.append('{0}include <{1}/{2}ErrorLog.h>\n'
                         ''.format(include, lang, depend['prefix']))
        for line in lines:
            fileout.copy_line_verbatim(line)

    def print_dependency_library(self, fileout, include=False):
        """Write the cmake include-dir lines for every dependency library."""
        for depend in global_variables.dependency:
            lib = depend['library'].upper()
            if include:
                fileout.copy_line_verbatim('include_directories'
                                           '(BEFORE ${1}{0}_INCLUDE_DIR{2})'
                                           '\n'.format(lib, '{', '}'))
            else:
                fileout.copy_line_verbatim('    -I${1}{0}_INCLUDE_DIR{2}/'
                                           '\n'.format(lib, '{', '}'))
| sbmlteam/deviser | deviser/bindings_files/BaseBindingsFiles.py | Python | lgpl-2.1 | 14,698 |
# Written by Arno Bakker
# see LICENSE.txt for license information
import sys
import os
import pickle
STATEDIR_DLCONFIG = "dlconfig.pickle"
# Global variable containing the DownloadStartupConfig to use for crearing
# Downloads
from Tribler.Core.DownloadConfig import DownloadStartupConfig
from Tribler.Core.defaults import DLDEFAULTS_VERSION,dldefaults
class DefaultDownloadStartupConfig(DownloadStartupConfig):
    """Process-wide singleton holding the default download configuration
    used when creating Downloads."""

    # the single allowed instance (name-mangled to this class)
    __single = None

    def __init__(self,dlconfig=None):
        # enforce the singleton: a second construction is a programming error
        if DefaultDownloadStartupConfig.__single:
            raise RuntimeError, "DefaultDownloadStartupConfig is singleton"
        DefaultDownloadStartupConfig.__single = self
        DownloadStartupConfig.__init__(self,dlconfig=dlconfig)

    def getInstance(*args, **kw):
        # lazily create the singleton on first access
        if DefaultDownloadStartupConfig.__single is None:
            DefaultDownloadStartupConfig(*args, **kw)
        return DefaultDownloadStartupConfig.__single
    getInstance = staticmethod(getInstance)

    def updateToCurrentVersion(self):
        # report any config fields newly added by the version migration
        newKeys = DownloadStartupConfig.updateToCurrentVersion(self)
        if newKeys:
            for key in newKeys:
                print >>sys.stderr,"DefaultDownloadStartupConfig: Adding field",key

    #
    # Class method
    #
    def load(filename):
        """
        Load a saved DownloadStartupConfig from disk.
        @param filename An absolute Unicode filename
        @return DefaultDownloadStartupConfig object
        """
        # Class method, no locking required
        f = open(filename,"rb")
        dlconfig = pickle.load(f)
        dscfg = DefaultDownloadStartupConfig(dlconfig)
        f.close()
        # migrate the unpickled config to the current version's fields
        dscfg.updateToCurrentVersion()
        return dscfg
    load = staticmethod(load)
def get_default_dscfg_filename(session):
    """Return the path of the pickled default download config inside the
    session's state directory."""
    return os.path.join(session.get_state_dir(),STATEDIR_DLCONFIG)
| egbertbouman/tribler-g | Tribler/Main/globals.py | Python | lgpl-2.1 | 1,940 |
from __future__ import annotations
from datetime import datetime
from typing import Any
from flask_sqlalchemy import BaseQuery
from sqlalchemy.schema import Column
from sqlalchemy.types import DateTime, Integer
from whoosh.fields import ID
from abilian.core.extensions import db
from abilian.core.util import fqcn
#: Base Model class.
class Model(db.Model):
    """Abstract declarative base for the application's persistent models."""

    # no table is created for Model itself
    __abstract__ = True

    # annotation for type checkers: the query attribute Flask-SQLAlchemy
    # attaches to model classes
    query: BaseQuery
class Info(dict):
    """A ``dict`` subclass whose keys are given as keyword arguments.

    Instances can be merged with ``+`` or ``|``; merging returns a new
    :class:`Info` (the right-hand operand's keys win) and leaves both
    operands untouched.
    """

    def __init__(self, **kw: Any):
        for key, value in kw.items():
            self[key] = value

    def copy(self) -> Info:
        # dict.copy would return an instance of dict
        return self.__class__(**self)

    def __add__(self, other: dict) -> Info:
        merged = self.copy()
        merged.update(other)
        return merged

    __or__ = __add__
# Predefined column ``info`` flag combinations used across the models.
EDITABLE = Info(editable=True)
NOT_EDITABLE = Info(editable=False)
AUDITABLE = Info(auditable=True)
# audited, but the value itself is hidden from the audit trail
AUDITABLE_HIDDEN = Info(auditable=True, audit_hide_content=True)
NOT_AUDITABLE = Info(auditable=False)
SEARCHABLE = Info(searchable=True)
NOT_SEARCHABLE = Info(searchable=False)
EXPORTABLE = Info(exportable=True)
NOT_EXPORTABLE = Info(exportable=False)

#: SYSTEM properties are properties defined by the system
#: and not supposed to be changed manually.
SYSTEM = Info(editable=False, auditable=False)
class IdMixin:
    """Mixin providing a surrogate integer primary key column."""

    # system-managed (not editable/audited) but indexed for search
    id = Column(Integer, primary_key=True, info=SYSTEM | SEARCHABLE)
class Indexable:
    """Mixin with sensible defaults for indexable objects."""

    # opt-in flag read by the indexing machinery
    __indexable__ = True
    # Whoosh schema mapping: attribute name -> indexed field definitions.
    # object_key is the unique per-record key, object_type the class name.
    __index_to__ = (
        ("object_key", (("object_key", ID(stored=True, unique=True)),)),
        ("object_type", (("object_type", ID(stored=True)),)),
    )

    # concrete subclasses are expected to provide an integer id
    id: int

    @classmethod
    def _object_type(cls) -> str:
        # presumably the fully-qualified class name (see abilian fqcn helper)
        return fqcn(cls)

    @property
    def object_type(self) -> str:
        return self._object_type()

    @property
    def object_key(self) -> str:
        # globally unique key across all indexed types: "<type>:<id>"
        return f"{self.object_type}:{self.id}"
class TimestampedMixin:
    """Mixin adding creation / modification / deletion timestamp columns."""

    #: creation date
    created_at = Column(DateTime, default=datetime.utcnow, info=SYSTEM | SEARCHABLE)
    #: last modification date
    updated_at = Column(
        DateTime,
        default=datetime.utcnow,
        onupdate=datetime.utcnow,
        info=SYSTEM | SEARCHABLE,
    )
    # Soft-delete marker: stays None while the row is live.
    deleted_at = Column(DateTime, default=None, info=SYSTEM)
| abilian/abilian-core | src/abilian/core/models/base.py | Python | lgpl-2.1 | 2,272 |
from __future__ import absolute_import
import re
# The two shell quote characters: single quote and double quote.
q = "'"
qq = '"'
class QuoteError(Exception):
    """Raised when a line ends inside a quote or a pending backslash escape."""
    pass
def _quotesplit(line):
    """Yield (offset, word) pairs parsed from *line* with sh-like quoting.

    Character-by-character state machine:
      - ``inquote`` holds the active quote character (q or qq) or None;
      - ``inescape`` is True immediately after an unconsumed backslash;
      - ``wordstart``/``word`` accumulate the token in progress.

    A closing quote or unquoted whitespace terminates (and yields) the
    current word.  If the line ends inside a quote, inside an escape, or
    with a trailing unterminated word, the trailing word (if any) is
    still yielded and then QuoteError is raised on the next pull — the
    callers use that to detect incomplete input.
    """
    inquote = None
    inescape = None
    wordstart = 0
    word = ''
    for i in range(len(line)):
        c = line[i]
        if inescape:
            if inquote == q and c != q:
                word += '\\' # single-q backslashes can only quote single-q
            word += c
            inescape = False
        elif c == '\\':
            inescape = True
        elif c == inquote:
            # Closing quote ends the word immediately.
            inquote = None
            # this is un-sh-like, but do it for sanity when autocompleting
            yield (wordstart, word)
            word = ''
            wordstart = i+1
        elif not inquote and not word and (c == q or c == qq):
            # the 'not word' constraint on this is un-sh-like, but do it
            # for sanity when autocompleting
            inquote = c
            wordstart = i
        elif not inquote and c in [' ', '\n', '\r', '\t']:
            # Unquoted whitespace: word boundary.
            if word:
                yield (wordstart, word)
                word = ''
            wordstart = i+1
        else:
            word += c
    if word:
        yield (wordstart, word)
    if inquote or inescape or word:
        raise QuoteError()
def quotesplit(line):
    """Split 'line' into a list of offset,word tuples.

    The words are produced after removing doublequotes, singlequotes, and
    backslash escapes.

    Note that this implementation isn't entirely sh-compatible. It only
    dequotes words that *start* with a quote character, that is, a string like
       hello"world"
    will not have its quotes removed, while a string like
       hello "world"
    will be turned into [(0, 'hello'), (6, 'world')] (ie. quotes removed).
    """
    pairs = []
    tokens = _quotesplit(line)
    while True:
        try:
            pairs.append(next(tokens))
        except StopIteration:
            break
        except QuoteError:
            # A trailing QuoteError just means the final word was
            # unterminated; keep whatever was parsed so far.
            break
    return pairs
def unfinished_word(line):
    """Returns the quotechar,word of any unfinished word at the end of 'line'.

    You can use this to determine if 'line' is a completely parseable line
    (ie. one that quotesplit() will finish successfully) or if you need
    to read more bytes first.

    Args:
      line: an input string
    Returns:
      quotechar,word: the initial quote char (or None), and the partial word.
    """
    # BUG FIX: if _quotesplit() raises QuoteError before yielding anything
    # (e.g. line is just '"', "'" or a lone backslash), the for-loop targets
    # below are never bound and the except clause used to crash with a
    # NameError.  Pre-binding safe defaults makes those inputs report an
    # empty unfinished word instead.
    wordstart, word = 0, ''
    try:
        for (wordstart,word) in _quotesplit(line):
            pass
    except QuoteError:
        firstchar = line[wordstart]
        if firstchar in [q, qq]:
            return (firstchar, word)
        else:
            return (None, word)
    else:
        return (None, '')
def quotify(qtype, word, terminate):
    """Return a string corresponding to given word, quoted using qtype.

    The resulting string is dequotable using quotesplit() and can be
    joined with other quoted strings by adding arbitrary whitespace
    separators.

    Args:
      qtype: one of '', shquote.qq, or shquote.q
      word: the string to quote. May contain arbitrary characters.
      terminate: include the trailing quote character, if any.
    Returns:
      The quoted string.
    """
    if qtype == qq:
        closing = qq if terminate else ''
        return qq + word.replace(qq, '\\"') + closing
    if qtype == q:
        closing = q if terminate else ''
        return q + word.replace(q, "\\'") + closing
    # No quoting requested: backslash-escape every shell-significant
    # character instead.
    return re.sub(r'([\"\' \t\n\r])', r'\\\1', word)
def quotify_list(words):
    """Return a minimally-quoted string produced by quoting each word.

    This calculates the qtype for each word depending on whether the word
    already includes singlequote characters, doublequote characters, both,
    or neither.

    Args:
      words: the list of words to quote.
    Returns:
      The resulting string, with quoted words separated by ' '.
    """
    def _best_qtype(word):
        # Bare words (non-empty, no whitespace or quote chars) need no quoting.
        if word and not re.search(r'[\s\"\']', word):
            return ''
        # Prefer double quotes when the word contains only single quotes.
        if q in word and qq not in word:
            return qq
        return q
    return ' '.join(quotify(_best_qtype(w), w, True) for w in words)
def what_to_add(qtype, origword, newword, terminate):
    """Return the suffix needed to complete origword into quoted newword.

    For example, given an origword of '\"frog' and a newword of '\"frogston',
    returns either:
       terminate=False: 'ston'
       terminate=True:  'ston\"'

    This is useful when calculating tab completion strings for readline.

    Args:
      qtype: the type of quoting to use (ie. the first character of origword)
      origword: the original word that needs completion.
      newword: the word we want it to be after completion. Must start with
        origword.
      terminate: true if we should add the actual quote character at the end.
    Returns:
      The string to append to origword to produce (quoted) newword.
    """
    if not newword.startswith(origword):
        return ''
    # Quote both words the same way, then keep only the part of the quoted
    # newword that extends past the (unterminated) quoted origword.
    prefix = quotify(qtype, origword, terminate=False)
    return quotify(qtype, newword, terminate=terminate)[len(prefix):]
| ToxicFrog/bup | lib/bup/shquote.py | Python | lgpl-2.1 | 4,949 |
#!/usr/bin/env python
from github_traffic_base import *
# Construct an instance of the PlotData class for plotting views.
class PlotViews(PlotData):
"""
Plot the weekly views and unique visitors.
"""
def __init__(self):
super(PlotData, self).__init__()
self.left_axis_label = 'Weekly page views'
self.right_axis_label = 'Avg. Daily Unique Visitors'
self.weekly_plot_filename = 'weekly_github_traffic.pdf'
self.monthly_plot_filename = 'monthly_github_traffic.pdf'
self.title_string1 = 'Total Pageviews:'
self.title_string2 = 'Avg. Daily Unique Visitors:'
self.data_array = [
'2014-Feb-17', 274, 25,
'2014-Feb-18', 145, 30,
'2014-Feb-19', 129, 27,
'2014-Feb-20', 202, 24,
'2014-Feb-21', 240, 22,
'2014-Feb-22', 62, 17,
'2014-Feb-23', 28, 12,
'2014-Feb-24', 217, 19,
'2014-Feb-25', 90, 25,
'2014-Feb-26', 189, 36,
'2014-Feb-27', 112, 26,
'2014-Feb-28', 81, 20,
'2014-Mar-01', 113, 17,
'2014-Mar-02', 53, 16,
'2014-Mar-03', 41, 21,
'2014-Mar-04', 144, 35,
'2014-Mar-05', 51, 20,
'2014-Mar-06', 157, 25,
'2014-Mar-07', 50, 22,
'2014-Mar-08', 50, 11,
'2014-Mar-09', 42, 13,
'2014-Mar-10', 61, 16,
'2014-Mar-11', 27, 16,
'2014-Mar-12', 111, 20,
'2014-Mar-13', 66, 20,
'2014-Mar-14', 223, 25,
'2014-Mar-15', 46, 9,
'2014-Mar-16', 26, 17,
'2014-Mar-17', 80, 29,
'2014-Mar-18', 59, 30,
'2014-Mar-19', 85, 31,
'2014-Mar-20', 122, 18,
'2014-Mar-21', 61, 21,
'2014-Mar-22', 33, 18,
'2014-Mar-23', 64, 14,
'2014-Mar-24', 95, 24,
'2014-Mar-25', 75, 28,
'2014-Mar-26', 49, 18,
'2014-Mar-27', 57, 24,
'2014-Mar-28', 33, 16,
'2014-Mar-29', 41, 16,
'2014-Mar-30', 19, 11,
'2014-Mar-31', 52, 12,
'2014-Apr-01', 120, 21,
'2014-Apr-02', 68, 23,
'2014-Apr-03', 98, 28,
'2014-Apr-04', 77, 21,
'2014-Apr-05', 80, 15,
'2014-Apr-06', 55, 15,
'2014-Apr-07', 71, 31,
'2014-Apr-08', 84, 26,
'2014-Apr-09', 33, 18,
'2014-Apr-10', 32, 16,
'2014-Apr-11', 51, 20,
'2014-Apr-12', 25, 15,
'2014-Apr-13', 49, 20,
'2014-Apr-14', 120, 23,
'2014-Apr-15', 191, 27,
'2014-Apr-16', 219, 24,
'2014-Apr-17', 216, 30,
'2014-Apr-18', 63, 19,
'2014-Apr-19', 36, 11,
'2014-Apr-20', 25, 7,
'2014-Apr-21', 115, 24,
'2014-Apr-22', 128, 31,
'2014-Apr-23', 87, 25,
'2014-Apr-24', 108, 23,
'2014-Apr-25', 111, 20,
'2014-Apr-26', 89, 9,
'2014-Apr-27', 29, 11,
'2014-Apr-28', 177, 28,
'2014-Apr-29', 170, 27,
'2014-Apr-30', 183, 28,
'2014-May-01', 97, 25,
'2014-May-02', 64, 23,
'2014-May-03', 43, 12,
'2014-May-04', 32, 14,
'2014-May-05', 125, 28,
'2014-May-06', 68, 24,
'2014-May-07', 68, 19,
'2014-May-08', 114, 14,
'2014-May-09', 47, 20,
'2014-May-10', 139, 20,
'2014-May-11', 14, 9,
'2014-May-12', 90, 27,
'2014-May-13', 92, 22,
'2014-May-14', 197, 32,
'2014-May-15', 140, 26,
'2014-May-16', 59, 20,
'2014-May-17', 21, 9 ,
'2014-May-18', 54, 16,
'2014-May-19', 117, 28,
'2014-May-20', 47, 18,
'2014-May-21', 55, 19,
'2014-May-22', 77, 26,
'2014-May-23', 28, 12,
'2014-May-24', 38, 13,
'2014-May-25', 36, 14,
'2014-May-26', 44, 13,
'2014-May-27', 166, 24,
'2014-May-28', 139, 20,
'2014-May-29', 67, 25,
'2014-May-30', 73, 11,
'2014-May-31', 60, 9 ,
'2014-Jun-01', 22, 11,
'2014-Jun-02', 87, 18,
'2014-Jun-03', 103, 31,
'2014-Jun-04', 105, 27,
'2014-Jun-05', 74, 22,
'2014-Jun-06', 55, 16,
'2014-Jun-07', 53, 15,
'2014-Jun-08', 19, 5 ,
'2014-Jun-09', 91, 14,
'2014-Jun-10', 136, 19,
'2014-Jun-11', 104, 27,
'2014-Jun-12', 195, 22,
'2014-Jun-13', 51, 18,
'2014-Jun-14', 4, 4 ,
'2014-Jun-15', 19, 8 ,
'2014-Jun-16', 86, 19,
'2014-Jun-17', 60, 20,
'2014-Jun-18', 115, 25,
'2014-Jun-19', 73, 20,
'2014-Jun-20', 24, 12,
'2014-Jun-21', 12, 4 ,
'2014-Jun-22', 30, 10,
'2014-Jun-23', 106, 23,
'2014-Jun-24', 51, 16,
'2014-Jun-25', 115, 25,
'2014-Jun-26', 77, 24,
'2014-Jun-27', 91, 24,
'2014-Jun-28', 30, 9 ,
'2014-Jun-29', 9, 7 ,
'2014-Jun-30', 80, 25,
'2014-Jul-01', 118, 17,
'2014-Jul-02', 124, 18,
'2014-Jul-03', 103, 22,
'2014-Jul-04', 33, 11,
'2014-Jul-05', 37, 13,
'2014-Jul-06', 25, 11,
'2014-Jul-07', 147, 27,
'2014-Jul-08', 123, 14,
'2014-Jul-09', 75, 24,
'2014-Jul-10', 68, 16,
'2014-Jul-11', 103, 22,
'2014-Jul-12', 21, 6 ,
'2014-Jul-13', 16, 3 ,
'2014-Jul-14', 103, 24,
'2014-Jul-15', 86, 16,
'2014-Jul-16', 90, 20,
'2014-Jul-17', 92, 18,
'2014-Jul-18', 70, 17,
'2014-Jul-19', 27, 8 ,
'2014-Jul-20', 7, 4 ,
'2014-Jul-21', 66, 19,
'2014-Jul-22', 63, 16,
'2014-Jul-23', 56, 14,
'2014-Jul-24', 110, 19,
'2014-Jul-25', 27, 14,
'2014-Jul-26', 9, 8 ,
'2014-Jul-27', 27, 9 ,
'2014-Jul-28', 73, 23,
'2014-Jul-29', 136, 22,
'2014-Jul-30', 25, 14,
'2014-Jul-31', 113, 29,
'2014-Aug-01', 68, 20,
'2014-Aug-02', 34, 5 ,
'2014-Aug-03', 17, 5 ,
'2014-Aug-04', 28, 17,
'2014-Aug-05', 66, 15,
'2014-Aug-06', 62, 24,
'2014-Aug-07', 123, 17,
'2014-Aug-08', 92, 19,
'2014-Aug-09', 29, 9 ,
'2014-Aug-10', 9, 5 ,
'2014-Aug-11', 75, 17,
'2014-Aug-12', 108, 19,
'2014-Aug-13', 173, 25,
'2014-Aug-14', 109, 28,
'2014-Aug-15', 46, 17,
'2014-Aug-16', 33, 11,
'2014-Aug-17', 109, 15,
'2014-Aug-18', 154, 20,
'2014-Aug-19', 143, 23,
'2014-Aug-20', 54, 10,
'2014-Aug-21', 31, 19,
'2014-Aug-22', 86, 16,
'2014-Aug-23', 30, 7 ,
'2014-Aug-24', 19, 8 ,
'2014-Aug-25', 135, 18,
'2014-Aug-26', 140, 20,
'2014-Aug-27', 81, 23,
'2014-Aug-28', 87, 21,
'2014-Aug-29', 40, 11,
'2014-Aug-30', 102, 11,
'2014-Aug-31', 26, 8 ,
'2014-Sep-01', 37, 11,
'2014-Sep-02', 64, 11,
'2014-Sep-03', 52, 19,
'2014-Sep-04', 172, 37,
'2014-Sep-05', 42, 13,
'2014-Sep-06', 29, 15,
'2014-Sep-07', 24, 8 ,
'2014-Sep-08', 56, 13,
'2014-Sep-09', 87, 25,
'2014-Sep-10', 80, 14,
'2014-Sep-11', 82, 22,
'2014-Sep-12', 53, 18,
'2014-Sep-13', 22, 9 ,
'2014-Sep-14', 31, 10,
'2014-Sep-15', 99, 28,
'2014-Sep-16', 174, 32,
'2014-Sep-17', 137, 24,
'2014-Sep-18', 96, 30,
'2014-Sep-19', 84, 25,
'2014-Sep-20', 45, 15,
'2014-Sep-21', 34, 11,
'2014-Sep-22', 57, 21,
'2014-Sep-23', 130, 19,
'2014-Sep-24', 169, 30,
'2014-Sep-25', 195, 29,
'2014-Sep-26', 82, 17,
'2014-Sep-27', 32, 10,
'2014-Sep-28', 19, 8 ,
'2014-Sep-29', 71, 15,
'2014-Sep-30', 45, 18,
'2014-Oct-01', 136, 19,
'2014-Oct-02', 132, 19,
'2014-Oct-03', 127, 20,
'2014-Oct-04', 61, 15,
'2014-Oct-05', 6, 4 ,
'2014-Oct-06', 72, 16,
'2014-Oct-07', 98, 26,
'2014-Oct-08', 33, 17,
'2014-Oct-09', 65, 10,
'2014-Oct-10', 39, 17,
'2014-Oct-11', 14, 8 ,
'2014-Oct-12', 44, 9 ,
'2014-Oct-13', 36, 14,
'2014-Oct-14', 160, 27,
'2014-Oct-15', 311, 35,
'2014-Oct-16', 333, 35,
'2014-Oct-17', 147, 32,
'2014-Oct-18', 57, 13,
'2014-Oct-19', 114, 19,
'2014-Oct-20', 135, 31,
'2014-Oct-21', 176, 42,
'2014-Oct-22', 180, 38,
'2014-Oct-23', 251, 38,
'2014-Oct-24', 193, 27,
'2014-Oct-25', 75, 18,
'2014-Oct-26', 30, 15,
'2014-Oct-27', 76, 28,
'2014-Oct-28', 162, 34,
'2014-Oct-29', 408, 46,
'2014-Oct-30', 197, 31,
'2014-Oct-31', 99, 33,
'2014-Nov-01', 31, 10,
'2014-Nov-02', 130, 22,
'2014-Nov-03', 147, 31,
'2014-Nov-04', 131, 42,
'2014-Nov-05', 135, 39,
'2014-Nov-06', 99, 29,
'2014-Nov-07', 68, 24,
'2014-Nov-08', 53, 19,
'2014-Nov-09', 25, 11,
'2014-Nov-10', 126, 23,
'2014-Nov-11', 165, 33,
'2014-Nov-12', 101, 27,
'2014-Nov-13', 40, 18,
'2014-Nov-14', 57, 20,
'2014-Nov-15', 94, 13,
'2014-Nov-16', 9, 6 ,
'2014-Nov-17', 66, 29,
'2014-Nov-18', 112, 30,
'2014-Nov-19', 89, 22,
'2014-Nov-20', 54, 15,
'2014-Nov-21', 66, 24,
'2014-Nov-22', 38, 13,
'2014-Nov-23', 12, 8 ,
'2014-Nov-24', 102, 25,
'2014-Nov-25', 113, 20,
'2014-Nov-26', 63, 22,
'2014-Nov-27', 39, 14,
'2014-Nov-28', 67, 21,
'2014-Nov-29', 29, 11,
'2014-Nov-30', 116, 11,
'2014-Dec-01', 95, 28,
'2014-Dec-02', 101, 31,
'2014-Dec-03', 170, 24,
'2014-Dec-04', 124, 34,
'2014-Dec-05', 88, 13,
'2014-Dec-06', 8, 7,
'2014-Dec-07', 14, 8,
'2014-Dec-08', 28, 15,
'2014-Dec-09', 69, 20,
'2014-Dec-10', 176, 21,
'2014-Dec-11', 158, 34,
'2014-Dec-12', 53, 13,
'2014-Dec-13', 8, 5,
'2014-Dec-14', 16, 7,
'2014-Dec-15', 187, 24,
'2014-Dec-16', 93, 20,
'2014-Dec-17', 81, 24,
'2014-Dec-18', 76, 18,
'2014-Dec-19', 52, 18,
'2014-Dec-20', 32, 13,
'2014-Dec-21', 22, 6,
'2014-Dec-22', 38, 18,
'2014-Dec-23', 24, 13,
'2014-Dec-24', 15, 11,
'2014-Dec-25', 34, 9,
'2014-Dec-26', 26, 8,
'2014-Dec-27', 16, 8,
'2014-Dec-28', 9, 5,
'2014-Dec-29', 45, 8,
'2014-Dec-30', 79, 7,
'2014-Dec-31', 16, 10,
'2015-Jan-01', 2, 2,
'2015-Jan-02', 22, 13,
'2015-Jan-03', 11, 7,
'2015-Jan-04', 90, 4,
'2015-Jan-05', 40, 21,
'2015-Jan-06', 77, 18,
'2015-Jan-07', 101, 22,
'2015-Jan-08', 160, 30,
'2015-Jan-09', 94, 22,
'2015-Jan-10', 23, 9,
'2015-Jan-11', 26, 10,
'2015-Jan-12', 84, 26,
'2015-Jan-13', 140, 31,
'2015-Jan-14', 207, 27,
'2015-Jan-15', 113, 23,
'2015-Jan-16', 134, 27,
'2015-Jan-17', 45, 9,
'2015-Jan-18', 34, 11,
'2015-Jan-19', 62, 20,
'2015-Jan-20', 63, 16,
'2015-Jan-21', 74, 24,
'2015-Jan-22', 69, 26,
'2015-Jan-23', 77, 17,
'2015-Jan-24', 63, 14,
'2015-Jan-25', 27, 9,
'2015-Jan-26', 57, 22,
'2015-Jan-27', 118, 19,
'2015-Jan-28', 91, 21,
'2015-Jan-29', 66, 21,
'2015-Jan-30', 123, 28,
'2015-Jan-31', 27, 11,
'2015-Feb-01', 52, 9,
'2015-Feb-02', 89, 22,
'2015-Feb-03', 47, 14,
'2015-Feb-04', 82, 22,
'2015-Feb-05', 127, 27,
'2015-Feb-06', 74, 24,
'2015-Feb-07', 23, 8,
'2015-Feb-08', 20, 11,
'2015-Feb-09', 61, 22,
'2015-Feb-10', 90, 30,
'2015-Feb-11', 86, 20,
'2015-Feb-12', 127, 23,
'2015-Feb-13', 259, 27,
'2015-Feb-14', 17, 9,
'2015-Feb-15', 7, 3,
'2015-Feb-16', 120, 27,
'2015-Feb-17', 130, 28,
'2015-Feb-18', 49, 16,
'2015-Feb-19', 219, 21,
'2015-Feb-20', 126, 31,
'2015-Feb-21', 69, 9,
'2015-Feb-22', 51, 10,
'2015-Feb-23', 100, 28,
'2015-Feb-24', 206, 19,
'2015-Feb-25', 95, 22,
'2015-Feb-26', 115, 35,
'2015-Feb-27', 80, 20,
'2015-Feb-28', 100, 16,
'2015-Mar-01', 53, 8,
'2015-Mar-02', 193, 22,
'2015-Mar-03', 101, 19,
'2015-Mar-04', 135, 33,
'2015-Mar-05', 201, 31,
'2015-Mar-06', 65, 20,
'2015-Mar-07', 61, 12,
'2015-Mar-08', 35, 9,
'2015-Mar-09', 86, 23,
'2015-Mar-10', 38, 21,
'2015-Mar-11', 118, 28,
'2015-Mar-12', 107, 19,
'2015-Mar-13', 124, 28,
'2015-Mar-14', 17, 12,
'2015-Mar-15', 42, 12,
'2015-Mar-16', 111, 24,
'2015-Mar-17', 180, 24,
'2015-Mar-18', 83, 27,
'2015-Mar-19', 96, 19,
'2015-Mar-20', 106, 21,
'2015-Mar-21', 31, 14,
'2015-Mar-22', 58, 11,
'2015-Mar-23', 56, 19,
'2015-Mar-24', 107, 28,
'2015-Mar-25', 119, 25,
'2015-Mar-26', 99, 22,
'2015-Mar-27', 259, 20,
'2015-Mar-28', 76, 12,
'2015-Mar-29', 22, 9,
'2015-Mar-30', 156, 31,
'2015-Mar-31', 79, 22,
'2015-Apr-01', 128, 31,
'2015-Apr-02', 71, 22,
'2015-Apr-03', 47, 8,
'2015-Apr-04', 62, 12,
'2015-Apr-05', 75, 12,
'2015-Apr-06', 62, 22,
'2015-Apr-07', 90, 23,
'2015-Apr-08', 67, 25,
'2015-Apr-09', 68, 24,
'2015-Apr-10', 63, 18,
'2015-Apr-11', 53, 9,
'2015-Apr-12', 19, 9,
'2015-Apr-13', 35, 11,
'2015-Apr-14', 106, 27,
'2015-Apr-15', 200, 29,
'2015-Apr-16', 218, 29,
'2015-Apr-17', 125, 25,
'2015-Apr-18', 61, 8,
'2015-Apr-19', 26, 8,
'2015-Apr-20', 162, 23,
'2015-Apr-21', 110, 22,
'2015-Apr-22', 187, 31,
'2015-Apr-23', 137, 24,
'2015-Apr-24', 200, 20,
'2015-Apr-25', 87, 12,
'2015-Apr-26', 49, 13,
'2015-Apr-27', 67, 19,
'2015-Apr-28', 135, 24,
'2015-Apr-29', 127, 26,
'2015-Apr-30', 140, 28,
'2015-May-01', 40, 16,
'2015-May-02', 44, 9,
'2015-May-03', 31, 12,
'2015-May-04', 155, 18,
'2015-May-05', 168, 22,
'2015-May-06', 121, 18,
'2015-May-07', 77, 14,
'2015-May-08', 128, 21,
'2015-May-09', 29, 13,
'2015-May-10', 35, 5,
'2015-May-11', 156, 27,
'2015-May-12', 138, 19,
'2015-May-13', 118, 25,
'2015-May-14', 144, 29,
'2015-May-15', 181, 16,
'2015-May-16', 58, 6,
'2015-May-17', 72, 5,
'2015-May-18', 84, 21,
'2015-May-19', 51, 18,
'2015-May-20', 128, 32,
'2015-May-21', 147, 22,
'2015-May-22', 103, 21,
'2015-May-23', 29, 8,
'2015-May-24', 59, 10,
'2015-May-25', 133, 11,
'2015-May-26', 177, 32,
'2015-May-27', 130, 23,
'2015-May-28', 183, 29,
'2015-May-29', 98, 19,
'2015-May-30', 23, 10,
'2015-May-31', 18, 9,
'2015-Jun-01', 96, 17,
'2015-Jun-02', 193, 32,
'2015-Jun-03', 204, 27,
'2015-Jun-04', 213, 30,
'2015-Jun-05', 451, 32,
'2015-Jun-06', 23, 12,
'2015-Jun-07', 26, 7,
'2015-Jun-08', 134, 25,
'2015-Jun-09', 198, 23,
'2015-Jun-10', 90, 21,
'2015-Jun-11', 221, 22,
'2015-Jun-12', 90, 19,
'2015-Jun-13', 31, 12,
'2015-Jun-14', 18, 10,
'2015-Jun-15', 108, 25,
'2015-Jun-16', 207, 34,
'2015-Jun-17', 160, 30,
'2015-Jun-18', 84, 23,
'2015-Jun-19', 69, 20,
'2015-Jun-20', 40, 8,
'2015-Jun-21', 36, 7,
'2015-Jun-22', 95, 24,
'2015-Jun-23', 97, 22,
'2015-Jun-24', 99, 18,
'2015-Jun-25', 77, 24,
'2015-Jun-26', 90, 17,
'2015-Jun-27', 87, 7,
'2015-Jun-28', 42, 9,
'2015-Jun-29', 89, 25,
'2015-Jun-30', 142, 25,
'2015-Jul-01', 95, 19,
'2015-Jul-02', 72, 18,
'2015-Jul-03', 47, 15,
'2015-Jul-04', 46, 9,
'2015-Jul-05', 75, 11,
'2015-Jul-06', 112, 15,
'2015-Jul-07', 198, 23,
'2015-Jul-08', 55, 20,
'2015-Jul-09', 73, 18,
'2015-Jul-10', 72, 22,
'2015-Jul-11', 14, 7,
'2015-Jul-12', 113, 12,
'2015-Jul-13', 88, 26,
'2015-Jul-14', 89, 20,
'2015-Jul-15', 98, 22,
'2015-Jul-16', 92, 21,
'2015-Jul-17', 89, 27,
'2015-Jul-18', 19, 9,
'2015-Jul-19', 23, 11,
'2015-Jul-20', 61, 13,
'2015-Jul-21', 125, 28,
'2015-Jul-22', 74, 21,
'2015-Jul-23', 68, 17,
'2015-Jul-24', 47, 12,
'2015-Jul-25', 22, 13,
'2015-Jul-26', 26, 9,
'2015-Jul-27', 51, 16,
'2015-Jul-28', 89, 17,
'2015-Jul-29', 41, 18,
'2015-Jul-30', 25, 14,
'2015-Jul-31', 51, 19,
'2015-Aug-01', 33, 10,
'2015-Aug-02', 32, 9,
'2015-Aug-03', 45, 20,
'2015-Aug-04', 42, 15,
'2015-Aug-05', 64, 30,
'2015-Aug-06', 91, 26,
'2015-Aug-07', 103, 27,
'2015-Aug-08', 82, 9,
'2015-Aug-09', 30, 7,
'2015-Aug-10', 33, 18,
'2015-Aug-11', 98, 22,
'2015-Aug-12', 146, 22,
'2015-Aug-13', 88, 44,
'2015-Aug-14', 136, 31,
'2015-Aug-15', 30, 4,
'2015-Aug-16', 60, 15,
'2015-Aug-17', 73, 22,
'2015-Aug-18', 90, 26,
'2015-Aug-19', 125, 28,
'2015-Aug-20', 113, 30,
'2015-Aug-21', 85, 23,
'2015-Aug-22', 58, 14,
'2015-Aug-23', 37, 13,
'2015-Aug-24', 102, 25,
'2015-Aug-25', 86, 25,
'2015-Aug-26', 217, 27,
'2015-Aug-27', 156, 25,
'2015-Aug-28', 152, 16,
'2015-Aug-29', 15, 7,
'2015-Aug-30', 27, 12,
'2015-Aug-31', 87, 22,
'2015-Sep-01', 57, 23,
'2015-Sep-02', 109, 27,
'2015-Sep-03', 63, 23,
'2015-Sep-04', 71, 16,
'2015-Sep-05', 44, 13,
'2015-Sep-06', 24, 9,
'2015-Sep-07', 65, 17,
'2015-Sep-08', 80, 29,
'2015-Sep-09', 102, 18,
'2015-Sep-10', 155, 24,
'2015-Sep-11', 187, 33,
'2015-Sep-12', 39, 10,
'2015-Sep-13', 11, 6,
'2015-Sep-14', 69, 20,
'2015-Sep-15', 59, 20,
'2015-Sep-16', 110, 28,
'2015-Sep-17', 83, 24,
'2015-Sep-18', 126, 25,
'2015-Sep-19', 21, 8,
'2015-Sep-20', 24, 9,
'2015-Sep-21', 99, 25,
'2015-Sep-22', 161, 30,
'2015-Sep-23', 170, 26,
'2015-Sep-24', 199, 31,
'2015-Sep-25', 108, 27,
'2015-Sep-26', 60, 13,
'2015-Sep-27', 30, 11,
'2015-Sep-28', 79, 20,
'2015-Sep-29', 108, 23,
'2015-Sep-30', 85, 24,
'2015-Oct-01', 142, 32,
'2015-Oct-02', 91, 29,
'2015-Oct-03', 41, 15,
'2015-Oct-04', 26, 11,
'2015-Oct-05', 33, 14, # We probably missed some data on Monday, Oct. 5. Github switched to providing only 1 week of traffic data.
'2015-Oct-06', 74, 27,
'2015-Oct-07', 143, 29,
'2015-Oct-08', 47, 21,
'2015-Oct-09', 71, 28,
'2015-Oct-10', 20, 7,
'2015-Oct-11', 63, 18,
'2015-Oct-12', 175, 35,
'2015-Oct-13', 125, 20,
'2015-Oct-14', 136, 25,
'2015-Oct-15', 121, 26,
'2015-Oct-16', 229, 28,
'2015-Oct-17', 60, 16,
'2015-Oct-18', 30, 9,
'2015-Oct-19', 110, 23,
'2015-Oct-20', 148, 29,
'2015-Oct-21', 310, 28,
'2015-Oct-22', 97, 29,
'2015-Oct-23', 66, 23,
'2015-Oct-24', 37, 14,
'2015-Oct-25', 28, 10,
'2015-Oct-26', 57, 16,
'2015-Oct-27', 190, 28,
'2015-Oct-28', 217, 22,
'2015-Oct-29', 239, 28,
'2015-Oct-30', 76, 28,
'2015-Oct-31', 34, 10,
'2015-Nov-01', 93, 18,
'2015-Nov-02', 134, 21,
'2015-Nov-03', 133, 34,
'2015-Nov-04', 131, 25,
'2015-Nov-05', 318, 31,
'2015-Nov-06', 172, 27,
'2015-Nov-07', 41, 13,
'2015-Nov-08', 61, 15,
'2015-Nov-09', 153, 29,
'2015-Nov-10', 158, 30,
'2015-Nov-11', 155, 35,
'2015-Nov-12', 125, 21,
'2015-Nov-13', 85, 24,
'2015-Nov-14', 72, 14,
'2015-Nov-15', 28, 11,
'2015-Nov-16', 136, 20,
'2015-Nov-17', 114, 24,
'2015-Nov-18', 91, 24,
'2015-Nov-19', 167, 33,
'2015-Nov-20', 173, 28,
'2015-Nov-21', 76, 13,
'2015-Nov-22', 23, 12,
'2015-Nov-23', 291, 20,
'2015-Nov-24', 55, 20,
'2015-Nov-25', 66, 20,
'2015-Nov-26', 127, 22,
'2015-Nov-27', 47, 14,
'2015-Nov-28', 39, 11,
'2015-Nov-29', 38, 11,
'2015-Nov-30', 91, 21,
'2015-Dec-01', 110, 26,
'2015-Dec-02', 107, 27,
'2015-Dec-03', 144, 23,
'2015-Dec-04', 132, 34,
'2015-Dec-05', 45, 11,
'2015-Dec-06', 30, 13,
'2015-Dec-07', 62, 21,
'2015-Dec-08', 143, 33,
'2015-Dec-09', 80, 20,
'2015-Dec-10', 57, 22,
'2015-Dec-11', 69, 18,
'2015-Dec-12', 41, 9,
'2015-Dec-13', 84, 15,
'2015-Dec-14', 82, 14,
'2015-Dec-15', 138, 37,
'2015-Dec-16', 142, 28,
'2015-Dec-17', 77, 24,
'2015-Dec-18', 249, 17,
'2015-Dec-19', 67, 8,
'2015-Dec-20', 85, 12,
'2015-Dec-21', 117, 18,
'2015-Dec-22', 57, 18,
'2015-Dec-23', 18, 11,
'2015-Dec-24', 69, 15,
'2015-Dec-25', 26, 6,
'2015-Dec-26', 16, 9,
'2015-Dec-27', 27, 10,
'2015-Dec-28', 15, 11,
'2015-Dec-29', 31, 9,
'2015-Dec-30', 38, 13,
'2015-Dec-31', 38, 13,
'2016-Jan-01', 46, 12,
'2016-Jan-02', 102, 15,
'2016-Jan-03', 43, 8,
'2016-Jan-04', 74, 21,
'2016-Jan-05', 236, 24,
'2016-Jan-06', 200, 27,
'2016-Jan-07', 185, 24,
'2016-Jan-08', 99, 26,
'2016-Jan-09', 67, 15,
'2016-Jan-10', 76, 18,
'2016-Jan-11', 86, 24,
'2016-Jan-12', 168, 32,
'2016-Jan-13', 84, 22,
'2016-Jan-14', 247, 37,
'2016-Jan-15', 121, 22,
'2016-Jan-16', 114, 17,
'2016-Jan-17', 26, 10,
'2016-Jan-18', 76, 21,
'2016-Jan-19', 182, 28,
'2016-Jan-20', 197, 28,
'2016-Jan-21', 228, 26,
'2016-Jan-22', 163, 23,
'2016-Jan-23', 54, 19,
'2016-Jan-24', 47, 14,
'2016-Jan-25', 63, 22,
'2016-Jan-26', 220, 31,
'2016-Jan-27', 232, 37,
'2016-Jan-28', 212, 30,
'2016-Jan-29', 136, 29,
'2016-Jan-30', 15, 6,
'2016-Jan-31', 24, 9,
'2016-Feb-01', 125, 32,
'2016-Feb-02', 103, 29,
'2016-Feb-03', 259, 40,
'2016-Feb-04', 230, 41,
'2016-Feb-05', 124, 21,
'2016-Feb-06', 28, 10,
'2016-Feb-07', 50, 12,
'2016-Feb-08', 87, 18,
'2016-Feb-09', 34, 16,
'2016-Feb-10', 114, 27,
'2016-Feb-11', 140, 27,
'2016-Feb-12', 39, 15,
'2016-Feb-13', 71, 14,
'2016-Feb-14', 13, 9,
'2016-Feb-15', 54, 21,
'2016-Feb-16', 128, 23,
'2016-Feb-17', 155, 34,
'2016-Feb-18', 271, 35,
'2016-Feb-19', 86, 23,
'2016-Feb-20', 40, 9,
'2016-Feb-21', 23, 9,
'2016-Feb-22', 101, 28,
'2016-Feb-23', 104, 27,
'2016-Feb-24', 115, 31,
'2016-Feb-25', 165, 40,
'2016-Feb-26', 200, 50,
'2016-Feb-27', 91, 16,
'2016-Feb-28', 105, 20,
'2016-Feb-29', 135, 47,
'2016-Mar-01', 139, 44,
'2016-Mar-02', 207, 35,
'2016-Mar-03', 124, 36,
'2016-Mar-04', 120, 34,
'2016-Mar-05', 43, 19,
'2016-Mar-06', 85, 24,
'2016-Mar-07', 266, 33,
'2016-Mar-08', 213, 41,
'2016-Mar-09', 127, 36,
'2016-Mar-10', 106, 37,
'2016-Mar-11', 128, 32,
'2016-Mar-12', 43, 18,
'2016-Mar-13', 38, 15,
'2016-Mar-14', 45, 11, # I missed getting the full day's data due to travel.
'2016-Mar-15', 175, 33,
'2016-Mar-16', 136, 33,
'2016-Mar-17', 200, 34,
'2016-Mar-18', 164, 30,
'2016-Mar-19', 86, 20,
'2016-Mar-20', 31, 18,
'2016-Mar-21', 125, 32,
'2016-Mar-22', 42, 27,
'2016-Mar-23', 214, 40,
'2016-Mar-24', 171, 43,
'2016-Mar-25', 92, 24,
'2016-Mar-26', 38, 17,
'2016-Mar-27', 97, 21,
'2016-Mar-28', 124, 37,
'2016-Mar-29', 139, 36,
'2016-Mar-30', 190, 39,
'2016-Mar-31', 105, 32,
'2016-Apr-01', 101, 29,
'2016-Apr-02', 59, 17,
'2016-Apr-03', 53, 22,
'2016-Apr-04', 96, 32,
'2016-Apr-05', 121, 36,
'2016-Apr-06', 115, 33,
'2016-Apr-07', 154, 36,
'2016-Apr-08', 198, 41,
'2016-Apr-09', 60, 20,
'2016-Apr-10', 45, 16,
'2016-Apr-11', 127, 31,
'2016-Apr-12', 123, 36,
'2016-Apr-13', 230, 34,
'2016-Apr-14', 177, 44,
'2016-Apr-15', 93, 35,
'2016-Apr-16', 98, 25,
'2016-Apr-17', 37, 12,
'2016-Apr-18', 143, 40,
'2016-Apr-19', 280, 54,
'2016-Apr-20', 151, 45,
'2016-Apr-21', 235, 49,
'2016-Apr-22', 162, 40,
'2016-Apr-23', 76, 24,
'2016-Apr-24', 56, 14,
'2016-Apr-25', 300, 42,
'2016-Apr-26', 206, 54,
'2016-Apr-27', 136, 43,
'2016-Apr-28', 180, 45,
'2016-Apr-29', 376, 49,
'2016-Apr-30', 70, 24,
'2016-May-01', 87, 14,
'2016-May-02', 134, 42,
'2016-May-03', 107, 35,
'2016-May-04', 252, 46,
'2016-May-05', 100, 40,
'2016-May-06', 93, 32,
'2016-May-07', 71, 15,
'2016-May-08', 30, 14,
'2016-May-09', 169, 46,
'2016-May-10', 129, 37,
'2016-May-11', 175, 49,
'2016-May-12', 184, 43,
'2016-May-13', 153, 32,
'2016-May-14', 61, 13,
'2016-May-15', 105, 21,
'2016-May-16', 111, 25,
'2016-May-17', 116, 36,
'2016-May-18', 153, 39,
'2016-May-19', 183, 50,
'2016-May-20', 148, 33,
'2016-May-21', 81, 15,
'2016-May-22', 78, 19,
'2016-May-23', 259, 47,
'2016-May-24', 318, 59,
'2016-May-25', 443, 59,
'2016-May-26', 256, 42,
'2016-May-27', 233, 48,
'2016-May-28', 212, 28,
'2016-May-29', 94, 22,
'2016-May-30', 150, 27,
'2016-May-31', 332, 48,
'2016-Jun-01', 266, 41,
'2016-Jun-02', 104, 31,
'2016-Jun-03', 210, 48,
'2016-Jun-04', 84, 20,
'2016-Jun-05', 36, 15,
'2016-Jun-06', 308, 44,
'2016-Jun-07', 155, 46,
'2016-Jun-08', 252, 51,
'2016-Jun-09', 195, 40,
'2016-Jun-10', 159, 34,
'2016-Jun-11', 103, 16,
'2016-Jun-12', 54, 20,
'2016-Jun-13', 168, 45,
'2016-Jun-14', 117, 45,
'2016-Jun-15', 151, 41,
'2016-Jun-16', 98, 34,
'2016-Jun-17', 68, 18,
'2016-Jun-18', 96, 20,
'2016-Jun-19', 46, 19,
'2016-Jun-20', 143, 43,
'2016-Jun-21', 228, 45,
'2016-Jun-22', 201, 50,
'2016-Jun-23', 139, 38,
'2016-Jun-24', 162, 41,
'2016-Jun-25', 80, 24,
'2016-Jun-26', 47, 18,
'2016-Jun-27', 262, 48,
'2016-Jun-28', 309, 52,
'2016-Jun-29', 271, 48,
'2016-Jun-30', 332, 48,
'2016-Jul-01', 154, 39,
'2016-Jul-02', 49, 21,
'2016-Jul-03', 69, 22,
'2016-Jul-04', 68, 21,
'2016-Jul-05', 118, 37,
'2016-Jul-06', 138, 52,
'2016-Jul-07', 100, 35,
'2016-Jul-08', 158, 39,
'2016-Jul-09', 57, 14,
'2016-Jul-10', 72, 14,
'2016-Jul-11', 89, 30,
'2016-Jul-12', 163, 30,
'2016-Jul-13', 131, 30,
'2016-Jul-14', 141, 37,
'2016-Jul-15', 123, 36,
'2016-Jul-16', 88, 18,
'2016-Jul-17', 59, 11,
]
# Local Variables:
# python-indent: 2
# End:
| coreymbryant/libmesh | doc/statistics/github_traffic_plotviews.py | Python | lgpl-2.1 | 28,605 |
import os,sys
import SCons.Action
import SCons.Builder
import SCons.Util
from SCons.Script import *
from SCons.Node.FS import _my_normcase
from SCons.Tool.JavaCommon import parse_java_file
def walkDirs(path):
    """Return a list containing *path* and all of its subdirectories.

    The original implementation used os.path.walk, which was deprecated in
    Python 2.6 and removed in Python 3; os.walk yields the same directory
    set for this purpose and works on both interpreter lines.

    Args:
      path: root directory to scan.
    Returns:
      List of directory paths, starting with *path* itself.
    """
    pathlist = [path]
    for dirname, subdirs, _files in os.walk(path):
        for sub in subdirs:
            pathlist.append(os.path.join(dirname, sub))
    return pathlist
def ConfigureJNI(env):
    """Configure the given environment for compiling Java Native Interface
    c or c++ language files.

    Locates the JDK (via JAVA_HOME, an OS X default, or by searching for
    javac), appends the JNI include/lib paths to the environment, sets any
    platform-specific link flags, and exports JAVA_HOME / JNI_CPPPATH /
    JNI_LIBPATH.  Returns 1 on success, 0 if no Java compiler is found.

    NOTE(review): Python 2 only (print statements); runs external commands
    (cygpath) on cygwin.
    """
    if not env.get('JAVAC'):
        print 'The Java compiler must be installed and in the current path.'
        return 0
    # first look for a shell variable called JAVA_HOME
    java_base = os.environ.get('JAVA_HOME')
    if not java_base:
        if sys.platform == 'darwin':
            # Apple's OS X has its own special java base directory
            java_base = '/System/Library/Frameworks/JavaVM.framework'
        else:
            # Search for the java compiler
            print 'JAVA_HOME environment variable is not set. Searching for java... ',
            jcdir = os.path.dirname(env.WhereIs('javac'))
            if not jcdir:
                print 'not found.'
                return 0
            # assuming the compiler found is in some directory like
            # /usr/jdkX.X/bin/javac, java's home directory is /usr/jdkX.X
            java_base = os.path.join(jcdir, '..')
            print 'found.'
    if sys.platform == 'cygwin':
        # Cygwin and Sun Java have different ideas of how path names
        # are defined. Use cygpath to convert the windows path to
        # a cygwin path. i.e. C:\jdkX.X to /cygdrive/c/jdkX.X
        java_base = os.popen("cygpath -up '" + java_base + "'"
                             ).read().replace('\n', '')
    if sys.platform == 'darwin':
        # Apple does not use Sun's naming convention
        java_headers = [os.path.join(java_base, 'Headers')]
        java_libs = [os.path.join(java_base, 'Libraries')]
    else:
        # windows and linux
        java_headers = [os.path.join(java_base, 'include')]
        java_libs = [os.path.join(java_base, 'lib')]
        # Sun's windows and linux JDKs keep system-specific header
        # files in a sub-directory of include
        if java_base == '/usr' or java_base == '/usr/local':
            # too many possible subdirectories. Just use defaults
            java_headers.append(os.path.join(java_headers[0], 'win32'))
            java_headers.append(os.path.join(java_headers[0], 'linux'))
            java_headers.append(os.path.join(java_headers[0], 'solaris'))
        else:
            # add all subdirs of 'include'. The system specific headers
            # should be in there somewhere
            java_headers = walkDirs(java_headers[0])
    # add Java's include and lib directory to the environment
    env.Append(CPPPATH=java_headers)
    env.Append(LIBPATH=java_libs)
    # add any special platform-specific compilation or linking flags
    if sys.platform == 'darwin':
        env.Append(SHLINKFLAGS='-dynamiclib -framework JavaVM')
        env['SHLIBSUFFIX'] = '.jnilib'
    elif sys.platform == 'cygwin':
        env.Append(CCFLAGS='-mno-cygwin')
        env.Append(SHLINKFLAGS='-mno-cygwin -Wl,--kill-at')
    # Add extra potentially useful environment variables
    env['JAVA_HOME'] = java_base
    env['JNI_CPPPATH'] = java_headers
    env['JNI_LIBPATH'] = java_libs
    return 1
def classname(path):
    """Turn a string (path name) into a Java class name.

    Normalizes the path first, then substitutes '.' for each path separator.
    """
    normalized = os.path.normpath(path)
    return normalized.replace(os.sep, '.')
def find_java_files(env, arg, dirpath, filenames):
    """Record every Java source file found in *dirpath* as a key of *arg*.

    Args:
      env: SCons Environment; supplies JAVASUFFIX and node lookup.
      arg: dict-like accumulator; each java File node is added as a key
        mapped to True (a dict is used so entries stay unique).
      dirpath: the directory currently being visited.
      filenames: names of the entries inside dirpath.
    """
    java_suffix = env.get('JAVASUFFIX', '.java')
    js = _my_normcase(java_suffix)
    java_files = sorted([n for n in filenames
                         if _my_normcase(n).endswith(js)])
    # BUG FIX: the original referenced an undefined name 'dirnode' here
    # (copied from the closure version inside emit_java_classes_norecurse,
    # where dirnode is a free variable); resolve the directory node through
    # the environment instead -- env was previously unused apart from the
    # suffix lookup.
    mydir = env.Dir(dirpath)
    java_paths = [mydir.File(f) for f in java_files]
    for jp in java_paths:
        arg[jp] = True
## Replacement Java Class Emitter
#
# The default emitter for the Java builder walks the full directory tree
# this causes problems when you do not want to build all files below a
# folder, e.g. in mamda. This emitter does not recurse through directories.
def emit_java_classes_norecurse(target, source, env):
    """Create and return lists of source java files
    and their corresponding target class files.

    Unlike SCons' stock emitter, directory sources are NOT walked
    recursively: only the top level of each source directory is scanned
    (see the ``del dirnames[:]`` below), which lets callers exclude
    subpackages.
    """
    java_suffix = env.get('JAVASUFFIX', '.java')
    class_suffix = env.get('JAVACLASSSUFFIX', '.class')
    target[0].must_be_same(SCons.Node.FS.Dir)
    classdir = target[0]
    # The first source decides the common source directory recorded on
    # every emitted class node.
    s = source[0].rentry().disambiguate()
    if isinstance(s, SCons.Node.FS.File):
        sourcedir = s.dir.rdir()
    elif isinstance(s, SCons.Node.FS.Dir):
        sourcedir = s.rdir()
    else:
        raise SCons.Errors.UserError("Java source must be File or Dir, not '%s'" % s.__class__)
    slist = []
    js = _my_normcase(java_suffix)
    for entry in source:
        entry = entry.rentry().disambiguate()
        if isinstance(entry, SCons.Node.FS.File):
            slist.append(entry)
        elif isinstance(entry, SCons.Node.FS.Dir):
            # Ordered dict used as an ordered set: node -> True.
            result = SCons.Util.OrderedDict()
            dirnode = entry.rdir()
            # Local helper (shadows the module-level find_java_files);
            # captures dirnode and js from this scope.
            def find_java_files(arg, dirpath, filenames):
                java_files = sorted([n for n in filenames
                                     if _my_normcase(n).endswith(js)])
                mydir = dirnode.Dir(dirpath)
                java_paths = [mydir.File(f) for f in java_files]
                for jp in java_paths:
                    arg[jp] = True
            # topdown walk with dirnames cleared => only the top directory
            # is visited; this is what makes the emitter non-recursive.
            for dirpath, dirnames, filenames in os.walk(dirnode.get_abspath(),topdown=True):
                del dirnames[:]
                find_java_files(result, dirpath, filenames)
            # NOTE(review): entry.walk() visits this node via SCons' Node
            # walk as well; duplicates are harmless because 'result' is a
            # dict, but confirm both passes are intentional.
            entry.walk(find_java_files, result)
            slist.extend(list(result.keys()))
        else:
            raise SCons.Errors.UserError("Java source must be File or Dir, not '%s'" % entry.__class__)
    version = env.get('JAVAVERSION', '1.4')
    full_tlist = []
    for f in slist:
        tlist = []
        source_file_based = True
        pkg_dir = None
        if not f.is_derived():
            # Parse the .java file to discover its package and the classes
            # (incl. inner/anonymous) it defines.
            pkg_dir, classes = parse_java_file(f.rfile().get_abspath(), version)
            if classes:
                source_file_based = False
            if pkg_dir:
                d = target[0].Dir(pkg_dir)
                p = pkg_dir + os.sep
            else:
                d = target[0]
                p = ''
            for c in classes:
                t = d.File(c + class_suffix)
                t.attributes.java_classdir = classdir
                t.attributes.java_sourcedir = sourcedir
                t.attributes.java_classname = classname(p + c)
                tlist.append(t)
        if source_file_based:
            # Fallback: derive a single class file name from the source
            # file name (used for derived sources or unparseable files).
            base = f.name[:-len(java_suffix)]
            if pkg_dir:
                t = target[0].Dir(pkg_dir).File(base + class_suffix)
            else:
                t = target[0].File(base + class_suffix)
            t.attributes.java_classdir = classdir
            t.attributes.java_sourcedir = f.dir
            t.attributes.java_classname = classname(base)
            tlist.append(t)
        for t in tlist:
            t.set_specific_source([f])
        full_tlist.extend(tlist)
    return full_tlist, slist
| MattMulhern/OpenMamaCassandra | site_scons/jni_tools.py | Python | lgpl-2.1 | 7,622 |
''' CSA header reader from SPM spec
'''
import numpy as np
from ..py3k import ZEROB, asbytes, asstr
from .structreader import Unpacker
# DICOM VR code to Python type
# Maps the DICOM Value Representation found in each CSA tag to the
# converter used to turn the item's byte string into a number.
_CONVERTERS = {
    'FL': float, # float
    'FD': float, # double
    'DS': float, # decimal string
    'SS': int, # signed short
    'US': int, # unsigned short
    'SL': int, # signed long
    'UL': int, # unsigned long
    'IS': int, # integer string
    }
class CSAError(Exception):
    """Base exception for errors while handling Siemens CSA headers."""
    pass
class CSAReadError(CSAError):
    """Raised when the CSA header bytes cannot be parsed safely."""
    pass
def get_csa_header(dcm_data, csa_type='image'):
    """Return parsed CSA header of type `csa_type` from a DICOM dataset.

    Parameters
    ----------
    dcm_data : dicom.Dataset
        DICOM dataset; needs only to support tag fetch with
        ``dcm_data[group, element]`` syntax.
    csa_type : {'image', 'series'}, optional
        Which CSA field to read; default is 'image'.

    Returns
    -------
    csa_info : None or dict
        Parsed CSA field of `csa_type`, or None when the dataset does not
        carry the requested CSA information.

    Raises
    ------
    ValueError
        If `csa_type` is not one of the known types.
    """
    # Private Siemens tags live in group 0x29; the element number and the
    # tag description differ between the image- and series-level headers.
    known_types = {'image': (0x1010, 'Image'),
                   'series': (0x1020, 'Series')}
    csa_type = csa_type.lower()
    if csa_type not in known_types:
        raise ValueError('Invalid CSA header type "%s"' % csa_type)
    element_no, label = known_types[csa_type]
    try:
        tag = dcm_data[0x29, element_no]
    except KeyError:
        return None
    if tag.name != '[CSA %s Header Info]' % label:
        return None
    return read(tag.value)
def read(csa_str):
    ''' Read CSA header from string `csa_str`

    Parameters
    ----------
    csa_str : str
        byte string containing CSA header information

    Returns
    -------
    header : dict
        header information as dict, where `header` has fields (at least)
        ``type, n_tags, tags``. ``header['tags']`` is also a dictionary
        with one key, value pair for each tag in the header.

    Raises
    ------
    CSAReadError
        If the tag count is implausible or a CSA2 item would overrun the
        buffer.
    '''
    csa_len = len(csa_str)
    csa_dict = {'tags': {}}
    # CSA2 headers begin with the magic bytes 'SV10'; anything else is
    # treated as the older CSA1 layout.
    hdr_id = csa_str[:4]
    up_str = Unpacker(csa_str, endian='<')
    if hdr_id == asbytes('SV10'): # CSA2
        hdr_type = 2
        up_str.ptr = 4 # omit the SV10
        csa_dict['unused0'] = up_str.read(4)
    else: # CSA1
        hdr_type = 1
    csa_dict['type'] = hdr_type
    csa_dict['n_tags'], csa_dict['check'] = up_str.unpack('2I')
    # Sanity limit on the tag count guards against parsing garbage bytes.
    if not 0 < csa_dict['n_tags'] <= 128:
        raise CSAReadError('Number of tags `t` should be '
                           '0 < t <= 128')
    for tag_no in range(csa_dict['n_tags']):
        # Fixed-size tag header: 64-byte null-terminated name, vm (int),
        # 4-byte vr, then three more ints.
        name, vm, vr, syngodt, n_items, last3 = \
            up_str.unpack('64si4s3i')
        vr = nt_str(vr)
        name = nt_str(name)
        tag = {'n_items': n_items,
               'vm': vm, # value multiplicity
               'vr': vr, # value representation
               'syngodt': syngodt,
               'last3': last3,
               'tag_no': tag_no}
        # vm == 0 means "take the number of values from the item count".
        if vm == 0:
            n_values = n_items
        else:
            n_values = vm
        # data converter
        converter = _CONVERTERS.get(vr)
        # CSA1 specific length modifier
        # NOTE(review): tag0_n_items is only bound once tag_no == 1, yet it
        # is read below for every CSA1 item, including the first tag's --
        # confirm against real CSA1 data whether tag 0 can ever have items.
        if tag_no == 1:
            tag0_n_items = n_items
            assert n_items < 100
        items = []
        for item_no in range(n_items):
            # Each item starts with 4 ints; which one encodes the item
            # length depends on the header type.
            x0,x1,x2,x3 = up_str.unpack('4i')
            ptr = up_str.ptr
            if hdr_type == 1:  # CSA1 - odd length calculation
                item_len = x0 - tag0_n_items
                if item_len < 0 or (ptr + item_len) > csa_len:
                    if item_no < vm:
                        items.append('')
                    break
            else: # CSA2
                item_len = x1
                if (ptr + item_len) > csa_len:
                    raise CSAReadError('Item is too long, '
                                       'aborting read')
            if item_no >= n_values:
                assert item_len == 0
                continue
            item = nt_str(up_str.read(item_len))
            if converter:
                # we may have fewer real items than are given in
                # n_items, but we don't know how many - assume that
                # we've reached the end when we hit an empty item
                if item_len == 0:
                    n_values = item_no
                    continue
                item = converter(item)
            items.append(item)
            # go to 4 byte boundary
            plus4 = item_len % 4
            if plus4 != 0:
                up_str.ptr += (4-plus4)
        tag['items'] = items
        csa_dict['tags'][name] = tag
    return csa_dict
def get_scalar(csa_dict, tag_name):
    """Return the first item stored under `tag_name`, or None.

    None is returned when the tag is missing or holds no items.
    """
    try:
        items = csa_dict['tags'][tag_name]['items']
    except KeyError:
        return None
    return items[0] if items else None
def get_vector(csa_dict, tag_name, n):
    """Return items of tag `tag_name` as an array, or None.

    None is returned when the tag is missing or empty; a ValueError is
    raised when the tag holds a number of items other than `n`.
    """
    try:
        items = csa_dict['tags'][tag_name]['items']
    except KeyError:
        return None
    if not items:
        return None
    if len(items) != n:
        raise ValueError('Expecting %d vector' % n)
    return np.array(items)
def is_mosaic(csa_dict):
    ''' Return True if the data is of Mosaic type

    Parameters
    ----------
    csa_dict : dict
        dict containing read CSA data, or None

    Returns
    -------
    tf : bool
        True if the data appears to be of Siemens mosaic type, False
        otherwise
    '''
    if csa_dict is None:
        return False
    # Mosaics carry both an acquisition matrix text and a nonzero count
    # of images in the mosaic.
    if get_acq_mat_txt(csa_dict) is None:
        return False
    n_images = get_n_mosaic(csa_dict)
    return n_images is not None and n_images != 0
def get_n_mosaic(csa_dict):
    """Return the number of images in the mosaic, or None."""
    return get_scalar(csa_dict, 'NumberOfImagesInMosaic')
def get_acq_mat_txt(csa_dict):
    """Return the acquisition matrix text, or None."""
    return get_scalar(csa_dict, 'AcquisitionMatrixText')
def get_slice_normal(csa_dict):
    """Return the 3-element slice normal vector, or None."""
    return get_vector(csa_dict, 'SliceNormalVector', 3)
def get_b_matrix(csa_dict):
    """Return the 3x3 symmetric B matrix, or None if not present."""
    vals = get_vector(csa_dict, 'B_matrix', 6)
    if vals is None:
        return None
    # The 6 stored values are the upper triangle of the symmetric matrix;
    # expand them into a full 3x3 array.
    inds = np.array([0, 1, 2, 1, 3, 4, 2, 4, 5])
    return np.array(vals)[inds].reshape(3, 3)
def get_b_value(csa_dict):
    """Return the diffusion b value, or None."""
    return get_scalar(csa_dict, 'B_value')
def get_g_vector(csa_dict):
    """Return the 3-element diffusion gradient direction, or None."""
    return get_vector(csa_dict, 'DiffusionGradientDirection', 3)
def get_ice_dims(csa_dict):
    """Return the ICE dimension strings as a list, or None."""
    dims = get_scalar(csa_dict, 'ICE_Dims')
    return None if dims is None else dims.split('_')
def nt_str(s):
    ''' Strip string to first null

    Parameters
    ----------
    s : str
        byte string, possibly containing a null terminator

    Returns
    -------
    sdash : str
        `s` truncated at the first occurrence of null (0) and decoded;
        returned unchanged when no null is present
    '''
    null_pos = s.find(ZEROB)
    if null_pos < 0:
        return s
    return asstr(s[:null_pos])
| ME-ICA/me-ica | meica.libs/nibabel/nicom/csareader.py | Python | lgpl-2.1 | 6,816 |
# Orca
#
# Copyright 2005-2008 Sun Microsystems Inc.
# Copyright 2016 Igalia, S.L.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Provides the default implementation for flat review for Orca."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2005-2008 Sun Microsystems Inc." \
"Copyright (c) 2016 Igalia, S.L."
__license__ = "LGPL"
import pyatspi
import re
from . import braille
from . import debug
from . import eventsynthesizer
from . import messages
from . import orca_state
from . import settings
# Unicode object-replacement character; marks an embedded object (e.g. a
# link inside a paragraph) within an accessible's text.
EMBEDDED_OBJECT_CHARACTER = '\ufffc'
class Char:
    """A character's worth of presentable information."""

    def __init__(self, word, index, startOffset, string, x, y, width, height):
        """Creates a new char.

        Arguments:
        - word: the Word instance this belongs to
        - index: position of this char within its word
        - startOffset: the start offset with respect to the accessible
        - string: the actual char
        - x, y, width, height: the extents of this Char on the screen
        """
        self.word, self.index = word, index
        self.startOffset = startOffset
        # A single character always covers exactly one offset.
        self.endOffset = startOffset + 1
        self.string = string
        self.x, self.y = x, y
        self.width, self.height = width, height
class Word:
    """A single chunk (word or object) of presentable information."""

    def __init__(self, zone, index, startOffset, string, x, y, width, height):
        """Creates a new Word.

        Arguments:
        - zone: the Zone instance this belongs to
        - index: the index of this Word in the Zone
        - startOffset: the start offset with respect to the accessible
        - string: the actual string
        - x, y, width, height: the extents of this Word on the screen
        """
        self.zone = zone
        self.index = index
        self.startOffset = startOffset
        self.string = string
        self.length = len(string)
        self.endOffset = startOffset + len(string)
        self.x, self.y = x, y
        self.width, self.height = width, height
        self.chars = []

    def __str__(self):
        return "WORD: '%s' (%i-%i) %s" % \
               (self.string.replace("\n", "\\n"),
                self.startOffset,
                self.endOffset,
                self.zone.accessible)

    def __getattribute__(self, attr):
        # Only "chars" is synthesized on access; every other attribute is
        # a plain lookup.
        if attr != "chars":
            return super().__getattribute__(attr)
        # TODO - JD: For now, don't fake character and word extents.
        # The main goal is to improve reviewability.
        fallback = self.x, self.y, self.width, self.height
        try:
            itext = self.zone.accessible.queryText()
        except:
            itext = None
        result = []
        for offset, ch in enumerate(self.string, start=self.startOffset):
            extents = fallback
            if itext:
                extents = itext.getRangeExtents(offset, offset + 1, pyatspi.DESKTOP_COORDS)
            result.append(Char(self, offset - self.startOffset, offset, ch, *extents))
        return result

    def getRelativeOffset(self, offset):
        """Returns the char offset with respect to this word or -1."""
        relative = offset - self.startOffset
        if 0 <= relative < len(self.string):
            return relative
        return -1
class Zone:
    """Represents text that is a portion of a single horizontal line."""

    # One "word": a run of non-whitespace plus any trailing whitespace.
    WORDS_RE = re.compile(r"(\S+\s*)", re.UNICODE)

    def __init__(self, accessible, string, x, y, width, height, role=None):
        """Creates a new Zone.

        Arguments:
        - accessible: the Accessible associated with this Zone
        - string: the string being displayed for this Zone
        - x, y, width, height: extents in screen coordinates
        - role: Role to override accessible's role.
        """
        self.accessible = accessible
        self.startOffset = 0
        self._string = string
        self.length = len(string)
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        # Only query the accessible when no role override is supplied.
        self.role = role or accessible.getRole()
        self._words = []

    def __str__(self):
        return "ZONE: '%s' %s" % (self._string.replace("\n", "\\n"), self.accessible)

    def __getattribute__(self, attr):
        """To ensure we update the content.

        'string' and 'words' are computed on access; all other attributes
        are plain lookups.
        """
        if attr not in ["words", "string"]:
            return super().__getattribute__(attr)
        if attr == "string":
            return self._string
        if not self._shouldFakeText():
            return self._words
        # TODO - JD: For now, don't fake character and word extents.
        # The main goal is to improve reviewability.
        extents = self.x, self.y, self.width, self.height
        words = []
        for i, word in enumerate(re.finditer(self.WORDS_RE, self._string)):
            words.append(Word(self, i, word.start(), word.group(), *extents))
        self._words = words
        return words

    def _shouldFakeText(self):
        """Returns True if we should try to fake the text interface"""
        textRoles = [pyatspi.ROLE_LABEL,
                     pyatspi.ROLE_MENU,
                     pyatspi.ROLE_MENU_ITEM,
                     pyatspi.ROLE_CHECK_MENU_ITEM,
                     pyatspi.ROLE_RADIO_MENU_ITEM,
                     pyatspi.ROLE_PAGE_TAB,
                     pyatspi.ROLE_PUSH_BUTTON,
                     pyatspi.ROLE_TABLE_CELL]
        if self.role in textRoles:
            return True
        return False

    def _extentsAreOnSameLine(self, zone, pixelDelta=5):
        """Returns True if this Zone is physically on the same line as zone."""
        # A zero-sized zone counts as being on the same line when its y
        # falls within the other zone's vertical span.
        if self.width == 0 and self.height == 0:
            return zone.y <= self.y <= zone.y + zone.height
        # Bug fix: this previously tested self.height instead of
        # zone.height, so a zero-sized argument never took this branch.
        if zone.width == 0 and zone.height == 0:
            return self.y <= zone.y <= self.y + self.height
        highestBottom = min(self.y + self.height, zone.y + zone.height)
        lowestTop = max(self.y, zone.y)
        if lowestTop >= highestBottom:
            return False
        # The vertical overlap must also keep the midpoints close together.
        middle = self.y + self.height / 2
        zoneMiddle = zone.y + zone.height / 2
        if abs(middle - zoneMiddle) > pixelDelta:
            return False
        return True

    def onSameLine(self, zone):
        """Returns True if we treat this Zone and zone as being on one line."""
        # Scroll bars span many lines; only group one with itself.
        if pyatspi.ROLE_SCROLL_BAR in [self.role, zone.role]:
            return self.accessible == zone.accessible
        try:
            thisParentRole = self.accessible.parent.getRole()
            zoneParentRole = zone.accessible.parent.getRole()
        except:
            pass
        else:
            # Menu bar items are grouped by their shared parent.
            if pyatspi.ROLE_MENU_BAR in [thisParentRole, zoneParentRole]:
                return self.accessible.parent == zone.accessible.parent
        return self._extentsAreOnSameLine(zone)

    def getWordAtOffset(self, charOffset):
        """Returns (word, offset-within-word) at charOffset, or (None, -1)."""
        msg = "FLAT REVIEW: Searching for word at offset %i" % charOffset
        debug.println(debug.LEVEL_INFO, msg, True)
        for word in self.words:
            msg = "FLAT REVIEW: Checking %s" % word
            debug.println(debug.LEVEL_INFO, msg, True)
            offset = word.getRelativeOffset(charOffset)
            if offset >= 0:
                return word, offset
        # The offset just past the end belongs to the last word.
        if self.length == charOffset and self.words:
            lastWord = self.words[-1]
            return lastWord, lastWord.length
        return None, -1

    def hasCaret(self):
        """Returns True if this Zone contains the caret."""
        return False

    def wordWithCaret(self):
        """Returns the Word and relative offset with the caret."""
        return None, -1
class TextZone(Zone):
    """A Zone whose purpose is to display text of an object."""
    def __init__(self, accessible, startOffset, string, x, y, width, height, role=None):
        """Creates a new TextZone.

        Arguments:
        - accessible: the Accessible; must implement AccessibleText
        - startOffset: offset of `string` within the accessible's text
        - string: the string being displayed for this Zone
        - x, y, width, height: extents in screen coordinates
        - role: Role to override accessible's role.
        """
        super().__init__(accessible, string, x, y, width, height, role)
        self.startOffset = startOffset
        self.endOffset = self.startOffset + len(string)
        # Cached AccessibleText interface used to refresh string/words.
        self._itext = self.accessible.queryText()
    def __getattribute__(self, attr):
        """To ensure we update the content."""
        if not attr in ["words", "string"]:
            return super().__getattribute__(attr)
        # Re-fetch the text each time so edits to the object are reflected.
        string = self._itext.getText(self.startOffset, self.endOffset)
        words = []
        for i, word in enumerate(re.finditer(self.WORDS_RE, string)):
            start, end = map(lambda x: x + self.startOffset, word.span())
            extents = self._itext.getRangeExtents(start, end, pyatspi.DESKTOP_COORDS)
            words.append(Word(self, i, start, word.group(), *extents))
        self._string = string
        self._words = words
        # Delegate to Zone.__getattribute__ to produce the return value.
        return super().__getattribute__(attr)
    def hasCaret(self):
        """Returns True if this Zone contains the caret."""
        offset = self._itext.caretOffset
        if self.startOffset <= offset < self.endOffset:
            return True
        # The caret at the very end of the text counts as in this zone
        # when the zone ends at the text's last character.
        return self.endOffset == self._itext.characterCount
    def wordWithCaret(self):
        """Returns the Word and relative offset with the caret."""
        if not self.hasCaret():
            return None, -1
        return self.getWordAtOffset(self._itext.caretOffset)
class StateZone(Zone):
    """A Zone whose purpose is to display the state of an object,
    e.g. whether a checkbox is checked."""

    def __init__(self, accessible, x, y, width, height, role=None):
        super().__init__(accessible, "", x, y, width, height, role)

    def __getattribute__(self, attr):
        # "string" and "brailleString" are recomputed on every access so
        # that state changes are always reflected.
        if attr not in ["string", "brailleString"]:
            return super().__getattribute__(attr)
        script = orca_state.activeScript
        if attr == "string":
            generator = script.speechGenerator
        else:
            generator = script.brailleGenerator
        indicator = generator.getStateIndicator(self.accessible, role=self.role)
        return indicator[0] if indicator else ""
class ValueZone(Zone):
    """A Zone whose purpose is to display the value of an object,
    e.g. a slider or progress bar."""

    def __init__(self, accessible, x, y, width, height, role=None):
        super().__init__(accessible, "", x, y, width, height, role)

    def __getattribute__(self, attr):
        # "string" and "brailleString" are recomputed on every access so
        # that value changes are always reflected.
        if attr not in ["string", "brailleString"]:
            return super().__getattribute__(attr)
        script = orca_state.activeScript
        if attr == "string":
            generator = script.speechGenerator
        else:
            generator = script.brailleGenerator
        # TODO - JD: This cobbling together beats what we had, but the
        # generators should also be doing the assembly.
        rolename = generator.getLocalizedRoleName(self.accessible)
        value = generator.getValue(self.accessible)
        if rolename and value:
            return "%s %s" % (rolename, value[0])
        return ""
class Line:
    """A Line is a single line across a window and is composed of Zones."""
    def __init__(self,
                 index,
                 zones):
        """Creates a new Line, which is a horizontal region of text.

        Arguments:
        - index: the index of this Line in the window
        - zones: the Zones that make up this line
        """
        self.index = index
        self.zones = zones
        # Cached braille regions; see getBrailleRegions, which currently
        # always recomputes them anyway.
        self.brailleRegions = None
    def __getattribute__(self, attr):
        # The line's string and extents are derived from its zones.
        if attr == "string":
            return " ".join([zone.string for zone in self.zones])
        if attr == "x":
            return min([zone.x for zone in self.zones])
        if attr == "y":
            return min([zone.y for zone in self.zones])
        if attr == "width":
            return sum([zone.width for zone in self.zones])
        if attr == "height":
            return max([zone.height for zone in self.zones])
        return super().__getattribute__(attr)
    def getBrailleRegions(self):
        """Returns the list of braille regions for this line: one region
        per zone, space padding between them, and (unless disabled) an
        end-of-line indicator."""
        # [[[WDW - We'll always compute the braille regions. This
        # allows us to handle StateZone and ValueZone zones whose
        # states might be changing on us.]]]
        #
        if True or not self.brailleRegions:
            self.brailleRegions = []
            brailleOffset = 0
            for zone in self.zones:
                # The 'isinstance(zone, TextZone)' test is a sanity check
                # to handle problems with Java text. See Bug 435553.
                if isinstance(zone, TextZone) and \
                   ((zone.accessible.getRole() in \
                         (pyatspi.ROLE_TEXT,
                          pyatspi.ROLE_PASSWORD_TEXT,
                          pyatspi.ROLE_TERMINAL)) or \
                    # [[[TODO: Eitan - HACK:
                    # This is just to get FF3 cursor key routing support.
                    # We really should not be determining all this stuff here,
                    # it should be in the scripts.
                    # Same applies to roles above.]]]
                    (zone.accessible.getRole() in \
                         (pyatspi.ROLE_PARAGRAPH,
                          pyatspi.ROLE_HEADING,
                          pyatspi.ROLE_LINK))):
                    region = braille.ReviewText(zone.accessible,
                                                zone.string,
                                                zone.startOffset,
                                                zone)
                else:
                    try:
                        brailleString = zone.brailleString
                    except:
                        brailleString = zone.string
                    region = braille.ReviewComponent(zone.accessible,
                                                     brailleString,
                                                     0, # cursor offset
                                                     zone)
                # Separate adjacent zones with a one-cell space.
                if len(self.brailleRegions):
                    pad = braille.Region(" ")
                    pad.brailleOffset = brailleOffset
                    self.brailleRegions.append(pad)
                    brailleOffset += 1
                zone.brailleRegion = region
                region.brailleOffset = brailleOffset
                self.brailleRegions.append(region)
                regionString = region.string
                brailleOffset += len(regionString)
            if not settings.disableBrailleEOL:
                if len(self.brailleRegions):
                    pad = braille.Region(" ")
                    pad.brailleOffset = brailleOffset
                    self.brailleRegions.append(pad)
                    brailleOffset += 1
                eol = braille.Region("$l")
                eol.brailleOffset = brailleOffset
                self.brailleRegions.append(eol)
        return self.brailleRegions
class Context:
    """Contains the flat review regions for the current top-level object."""
    # Granularities for flat review navigation and presentation.
    ZONE = 0
    CHAR = 1
    WORD = 2
    LINE = 3 # includes all zones on same line
    WINDOW = 4
    # Wrapping behavior flags (bitmask) for navigation.
    WRAP_NONE = 0
    WRAP_LINE = 1 << 0
    WRAP_TOP_BOTTOM = 1 << 1
    WRAP_ALL = (WRAP_LINE | WRAP_TOP_BOTTOM)
    def __init__(self, script):
        """Create a new Context for script.

        Builds the zones and lines for the current top-level object and
        positions the context on the zone containing the locus of focus.
        """
        self.script = script
        self.zones = []
        self.lines = []
        self.lineIndex = 0
        self.zoneIndex = 0
        self.wordIndex = 0
        self.charIndex = 0
        self.targetCharInfo = None
        self.focusZone = None
        self.container = None
        self.focusObj = orca_state.locusOfFocus
        self.topLevel = script.utilities.topLevelObject(self.focusObj)
        self.bounds = 0, 0, 0, 0
        try:
            component = self.topLevel.queryComponent()
            self.bounds = component.getExtents(pyatspi.DESKTOP_COORDS)
        except:
            msg = "ERROR: Exception getting extents of %s" % self.topLevel
            debug.println(debug.LEVEL_INFO, msg, True)
        # When focus is inside a menu, restrict review to that menu rather
        # than the whole top-level window.
        containerRoles = [pyatspi.ROLE_MENU]
        isContainer = lambda x: x and x.getRole() in containerRoles
        container = pyatspi.findAncestor(self.focusObj, isContainer)
        if not container and isContainer(self.focusObj):
            container = self.focusObj
        self.container = container or self.topLevel
        self.zones, self.focusZone = self.getShowingZones(self.container)
        self.lines = self.clusterZonesByLine(self.zones)
        if not (self.lines and self.focusZone):
            return
        # Position the context on the line/zone holding the focus, and on
        # the caret's word/char when there is one.
        for i, line in enumerate(self.lines):
            if self.focusZone in line.zones:
                self.lineIndex = i
                self.zoneIndex = line.zones.index(self.focusZone)
                word, offset = self.focusZone.wordWithCaret()
                if word:
                    self.wordIndex = word.index
                    self.charIndex = offset
                break
        msg = "FLAT REVIEW: On line %i, zone %i, word %i, char %i" % \
              (self.lineIndex, self.zoneIndex, self.wordIndex, self.charIndex)
        debug.println(debug.LEVEL_INFO, msg, True)
def splitTextIntoZones(self, accessible, string, startOffset, cliprect):
"""Traverses the string, splitting it up into separate zones if the
string contains the EMBEDDED_OBJECT_CHARACTER, which is used by apps
such as Firefox to handle containment of things such as links in
paragraphs.
Arguments:
- accessible: the accessible
- string: a substring from the accessible's text specialization
- startOffset: the starting character offset of the string
- cliprect: the extents that the Zones must fit inside.
Returns a list of Zones for the visible text.
"""
zones = []
substrings = [(*m.span(), m.group(0)) for m in re.finditer(r"[^\ufffc]+", string)]
substrings = list(map(lambda x: (x[0] + startOffset, x[1] + startOffset, x[2]), substrings))
for (start, end, substring) in substrings:
extents = accessible.queryText().getRangeExtents(start, end, pyatspi.DESKTOP_COORDS)
if self.script.utilities.containsRegion(extents, cliprect):
clipping = self.script.utilities.intersection(extents, cliprect)
zones.append(TextZone(accessible, start, substring, *clipping))
return zones
def _getLines(self, accessible, startOffset, endOffset):
# TODO - JD: Move this into the script utilities so we can better handle
# app and toolkit quirks and also reuse this (e.g. for SayAll).
try:
text = accessible.queryText()
except NotImplementedError:
return []
lines = []
offset = startOffset
while offset < min(endOffset, text.characterCount):
result = text.getTextAtOffset(offset, pyatspi.TEXT_BOUNDARY_LINE_START)
if result[0] and result not in lines:
lines.append(result)
offset = max(result[2], offset + 1)
return lines
    def getZonesFromText(self, accessible, cliprect):
        """Gets a list of Zones from an object that implements the
        AccessibleText specialization.

        Arguments:
        - accessible: the accessible
        - cliprect: the extents that the Zones must fit inside.

        Returns a list of Zones.
        """
        if not self.script.utilities.hasPresentableText(accessible):
            return []
        zones = []
        text = accessible.queryText()
        # TODO - JD: This is here temporarily whilst I sort out the rest
        # of the text-related mess.
        if "EditableText" in pyatspi.listInterfaces(accessible) \
           and accessible.getState().contains(pyatspi.STATE_SINGLE_LINE):
            extents = accessible.queryComponent().getExtents(0)
            return [TextZone(accessible, 0, text.getText(0, -1), *extents)]
        offset = 0
        # NOTE(review): lastEndOffset appears unused below -- confirm.
        lastEndOffset = -1
        upperMax = lowerMax = text.characterCount
        upperMid = lowerMid = int(upperMax / 2)
        upperMin = lowerMin = 0
        upperY = lowerY = 0
        oldMid = 0
        # performing binary search to locate first line inside clipped area
        while oldMid != upperMid:
            oldMid = upperMid
            [x, y, width, height] = text.getRangeExtents(upperMid,
                                                         upperMid+1,
                                                         0)
            upperY = y
            if y > cliprect.y:
                upperMax = upperMid
            else:
                upperMin = upperMid
            upperMid = int((upperMax - upperMin) / 2) + upperMin
        # performing binary search to locate last line inside clipped area
        oldMid = 0
        limit = cliprect.y+cliprect.height
        while oldMid != lowerMid:
            oldMid = lowerMid
            [x, y, width, height] = text.getRangeExtents(lowerMid,
                                                         lowerMid+1,
                                                         0)
            lowerY = y
            if y > limit:
                lowerMax = lowerMid
            else:
                lowerMin = lowerMid
            lowerMid = int((lowerMax - lowerMin) / 2) + lowerMin
        msg = "FLAT REVIEW: Getting lines for %s offsets %i-%i" % (accessible, upperMin, lowerMax)
        debug.println(debug.LEVEL_INFO, msg, True)
        lines = self._getLines(accessible, upperMin, lowerMax)
        msg = "FLAT REVIEW: %i lines found for %s" % (len(lines), accessible)
        debug.println(debug.LEVEL_INFO, msg, True)
        # Each visible line may still contain embedded objects; split them
        # into separate zones.
        for string, startOffset, endOffset in lines:
            zones.extend(self.splitTextIntoZones(accessible, string, startOffset, cliprect))
        return zones
    def _insertStateZone(self, zones, accessible, extents):
        """If the accessible presents non-textual state, such as a
        checkbox or radio button, insert a StateZone representing
        that state.

        Mutates `zones` in place; inserts at the front when the state
        indicator appears left of the text, appends otherwise.
        """
        # TODO - JD: This whole thing is pretty hacky. Either do it
        # right or nuke it.
        # Start with a 1-px-wide placeholder at the left edge; widened
        # below when we can infer the indicator's actual width.
        indicatorExtents = [extents.x, extents.y, 1, extents.height]
        role = accessible.getRole()
        if role == pyatspi.ROLE_TOGGLE_BUTTON:
            zone = StateZone(accessible, *indicatorExtents, role=role)
            if zone:
                zones.insert(0, zone)
            return
        # Table cells with a meaningful toggle act like check boxes.
        if role == pyatspi.ROLE_TABLE_CELL \
           and self.script.utilities.hasMeaningfulToggleAction(accessible):
            role = pyatspi.ROLE_CHECK_BOX
        if role not in [pyatspi.ROLE_CHECK_BOX,
                        pyatspi.ROLE_CHECK_MENU_ITEM,
                        pyatspi.ROLE_RADIO_BUTTON,
                        pyatspi.ROLE_RADIO_MENU_ITEM]:
            return
        zone = None
        stateOnLeft = True
        if len(zones) == 1 and isinstance(zones[0], TextZone):
            textZone = zones[0]
            textToLeftEdge = textZone.x - extents.x
            textToRightEdge = (extents.x + extents.width) - (textZone.x + textZone.width)
            # A sizable gap left of the text suggests the indicator sits
            # on the left; otherwise assume it is to the right.
            stateOnLeft = textToLeftEdge > 20
            if stateOnLeft:
                indicatorExtents[2] = textToLeftEdge
            else:
                indicatorExtents[0] = textZone.x + textZone.width
                indicatorExtents[2] = textToRightEdge
        zone = StateZone(accessible, *indicatorExtents, role=role)
        if zone:
            if stateOnLeft:
                zones.insert(0, zone)
            else:
                zones.append(zone)
    def getZonesFromAccessible(self, accessible, cliprect):
        """Returns a list of Zones for the given accessible.

        Falls back from text zones to a value zone (for value-bearing
        roles) or a plain Zone built from the object's name/role.
        """
        try:
            component = accessible.queryComponent()
            extents = component.getExtents(pyatspi.DESKTOP_COORDS)
        except:
            return []
        try:
            role = accessible.getRole()
        except:
            return []
        # Prefer zones built from the object's presentable text.
        zones = self.getZonesFromText(accessible, cliprect)
        if not zones and role in [pyatspi.ROLE_SCROLL_BAR,
                                  pyatspi.ROLE_SLIDER,
                                  pyatspi.ROLE_PROGRESS_BAR]:
            zones.append(ValueZone(accessible, *extents))
        elif not zones:
            string = ""
            redundant = [pyatspi.ROLE_TABLE_ROW]
            if role not in redundant:
                string = self.script.speechGenerator.getName(accessible, inFlatReview=True)
            # Fall back to the role name, except for roles whose bare role
            # name adds nothing.
            useless = [pyatspi.ROLE_TABLE_CELL, pyatspi.ROLE_LABEL]
            if not string and role not in useless:
                string = self.script.speechGenerator.getRoleName(accessible)
            if string:
                zones.append(Zone(accessible, string, *extents))
        self._insertStateZone(zones, accessible, extents)
        return zones
def _isOrIsIn(self, child, parent):
if not (child and parent):
return False
if child == parent:
return True
return pyatspi.findAncestor(child, lambda x: x == parent)
    def getShowingZones(self, root, boundingbox=None):
        """Returns an unsorted list of all the zones under root and the focusZone."""
        if boundingbox is None:
            boundingbox = self.bounds
        objs = self.script.utilities.getOnScreenObjects(root, boundingbox)
        msg = "FLAT REVIEW: %i on-screen objects found for %s" % (len(objs), root)
        debug.println(debug.LEVEL_INFO, msg, True)
        allZones, focusZone = [], None
        for o in objs:
            zones = self.getZonesFromAccessible(o, boundingbox)
            # Objects with no zones of their own may have a presentable
            # active descendant (e.g. the selected cell of a table).
            if not zones:
                descendant = self.script.utilities.realActiveDescendant(o)
                if descendant:
                    zones = self.getZonesFromAccessible(descendant, boundingbox)
            if not zones:
                continue
            allZones.extend(zones)
            # First object matching the locus of focus supplies the focus
            # zone; prefer a zone containing the caret when there is one.
            if not focusZone and zones and self.focusObj and self._isOrIsIn(o, self.focusObj):
                zones = list(filter(lambda z: z.hasCaret(), zones)) or zones
                focusZone = zones[0]
        msg = "FLAT REVIEW: %i zones found for %s" % (len(allZones), root)
        debug.println(debug.LEVEL_INFO, msg, True)
        return allZones, focusZone
def clusterZonesByLine(self, zones):
"""Returns a sorted list of Line clusters containing sorted Zones."""
if not zones:
return []
lineClusters = []
sortedZones = sorted(zones, key=lambda z: z.y)
newCluster = [sortedZones.pop(0)]
for zone in sortedZones:
if zone.onSameLine(newCluster[-1]):
newCluster.append(zone)
else:
lineClusters.append(sorted(newCluster, key=lambda z: z.x))
newCluster = [zone]
if newCluster:
lineClusters.append(sorted(newCluster, key=lambda z: z.x))
lines = []
for lineIndex, lineCluster in enumerate(lineClusters):
lines.append(Line(lineIndex, lineCluster))
for zoneIndex, zone in enumerate(lineCluster):
zone.line = lines[lineIndex]
zone.index = zoneIndex
msg = "FLAT REVIEW: Zones clustered into %i lines" % len(lines)
debug.println(debug.LEVEL_INFO, msg, True)
return lines
def getCurrent(self, flatReviewType=ZONE):
"""Returns the current string, offset, and extent information."""
# TODO - JD: This method has not (yet) been renamed. But we have a
# getter and setter which do totally different things....
zone = self._getCurrentZone()
if not zone:
return None, -1, -1, -1, -1
current = zone
if flatReviewType == Context.LINE:
current = zone.line
elif zone.words:
current = zone.words[self.wordIndex]
if flatReviewType == Context.CHAR and current.chars:
try:
current = current.chars[self.charIndex]
except:
return None, -1, -1, -1, -1
return current.string, current.x, current.y, current.width, current.height
def setCurrent(self, lineIndex, zoneIndex, wordIndex, charIndex):
"""Sets the current character of interest.
Arguments:
- lineIndex: index into lines
- zoneIndex: index into lines[lineIndex].zones
- wordIndex: index into lines[lineIndex].zones[zoneIndex].words
- charIndex: index lines[lineIndex].zones[zoneIndex].words[wordIndex].chars
"""
self.lineIndex = lineIndex
self.zoneIndex = zoneIndex
self.wordIndex = wordIndex
self.charIndex = charIndex
self.targetCharInfo = self.getCurrent(Context.CHAR)
def _getClickPoint(self):
string, x, y, width, height = self.getCurrent(Context.CHAR)
if (x < 0 and y < 0) or (width <= 0 and height <=0):
return -1, -1
# Click left of center to position the caret there.
x = int(max(x, x + (width / 2) - 1))
y = int(y + height / 2)
return x, y
def routeToCurrent(self):
"""Routes the mouse pointer to the current accessible."""
x, y = self._getClickPoint()
if x < 0 or y < 0:
return False
return eventsynthesizer.routeToPoint(x, y)
def clickCurrent(self, button=1):
"""Performs a mouse click on the current accessible."""
x, y = self._getClickPoint()
if x >= 0 and y >= 0 and eventsynthesizer.clickPoint(x, y, button):
return True
if eventsynthesizer.clickObject(self.getCurrentAccessible(), button):
return True
return False
def _getCurrentZone(self):
if not (self.lines and 0 <= self.lineIndex < len(self.lines)):
return None
line = self.lines[self.lineIndex]
if not (line and 0 <= self.zoneIndex < len(line.zones)):
return None
return line.zones[self.zoneIndex]
def getCurrentAccessible(self):
"""Returns the current accessible."""
zone = self._getCurrentZone()
if not zone:
return None
return zone.accessible
    def getCurrentBrailleRegions(self):
        """Gets the braille for the entire current line.

        Returns [regions, regionWithFocus]
        """
        if (not self.lines) \
           or (not self.lines[self.lineIndex].zones):
            return [None, None]
        regionWithFocus = None
        line = self.lines[self.lineIndex]
        regions = line.getBrailleRegions()
        # Now find the current region and the current character offset
        # into that region.
        #
        for zone in line.zones:
            if zone.index == self.zoneIndex:
                regionWithFocus = zone.brailleRegion
                # Cursor offset = chars preceding the current word within
                # the zone, plus the offset within the word.
                regionWithFocus.cursorOffset = 0
                if zone.words:
                    regionWithFocus.cursorOffset += zone.words[0].startOffset - zone.startOffset
                for wordIndex in range(0, self.wordIndex):
                    regionWithFocus.cursorOffset += \
                        len(zone.words[wordIndex].string)
                regionWithFocus.cursorOffset += self.charIndex
                regionWithFocus.repositionCursor()
                break
        return [regions, regionWithFocus]
def goBegin(self, flatReviewType=WINDOW):
"""Moves this context's locus of interest to the first char
of the first relevant zone.
Arguments:
- flatReviewType: one of ZONE, LINE or WINDOW
Returns True if the locus of interest actually changed.
"""
if (flatReviewType == Context.LINE) or (flatReviewType == Context.ZONE):
lineIndex = self.lineIndex
elif flatReviewType == Context.WINDOW:
lineIndex = 0
else:
raise Exception("Invalid type: %d" % flatReviewType)
if flatReviewType == Context.ZONE:
zoneIndex = self.zoneIndex
else:
zoneIndex = 0
wordIndex = 0
charIndex = 0
moved = (self.lineIndex != lineIndex) \
or (self.zoneIndex != zoneIndex) \
or (self.wordIndex != wordIndex) \
or (self.charIndex != charIndex) \
if moved:
self.lineIndex = lineIndex
self.zoneIndex = zoneIndex
self.wordIndex = wordIndex
self.charIndex = charIndex
self.targetCharInfo = self.getCurrent(Context.CHAR)
return moved
def goEnd(self, flatReviewType=WINDOW):
"""Moves this context's locus of interest to the last char
of the last relevant zone.
Arguments:
- flatReviewType: one of ZONE, LINE, or WINDOW
Returns True if the locus of interest actually changed.
"""
if (flatReviewType == Context.LINE) or (flatReviewType == Context.ZONE):
lineIndex = self.lineIndex
elif flatReviewType == Context.WINDOW:
lineIndex = len(self.lines) - 1
else:
raise Exception("Invalid type: %d" % flatReviewType)
if flatReviewType == Context.ZONE:
zoneIndex = self.zoneIndex
else:
zoneIndex = len(self.lines[lineIndex].zones) - 1
zone = self.lines[lineIndex].zones[zoneIndex]
if zone.words:
wordIndex = len(zone.words) - 1
chars = zone.words[wordIndex].chars
if chars:
charIndex = len(chars) - 1
else:
charIndex = 0
else:
wordIndex = 0
charIndex = 0
moved = (self.lineIndex != lineIndex) \
or (self.zoneIndex != zoneIndex) \
or (self.wordIndex != wordIndex) \
or (self.charIndex != charIndex) \
if moved:
self.lineIndex = lineIndex
self.zoneIndex = zoneIndex
self.wordIndex = wordIndex
self.charIndex = charIndex
self.targetCharInfo = self.getCurrent(Context.CHAR)
return moved
    def goPrevious(self, flatReviewType=ZONE,
                   wrap=WRAP_ALL, omitWhitespace=True):
        """Moves this context's locus of interest to the first char
        of the previous type.

        Arguments:
        - flatReviewType: one of ZONE, CHAR, WORD, LINE
        - wrap: if True, will cross boundaries, including top and
          bottom; if False, will stop on boundaries.
        - omitWhitespace: if True (WORD moves only), keep moving past
          whitespace-only words where possible.

        Returns True if the locus of interest actually changed.
        """
        if not self.lines:
            debug.println(debug.LEVEL_FINE, 'goPrevious(): no lines in context')
            return False
        moved = False
        if flatReviewType == Context.ZONE:
            if self.zoneIndex > 0:
                # Previous zone on the same line.
                self.zoneIndex -= 1
                self.wordIndex = 0
                self.charIndex = 0
                moved = True
            elif wrap & Context.WRAP_LINE:
                if self.lineIndex > 0:
                    # Wrap to the last zone of the previous line.
                    self.lineIndex -= 1
                    self.zoneIndex = len(self.lines[self.lineIndex].zones) - 1
                    self.wordIndex = 0
                    self.charIndex = 0
                    moved = True
                elif wrap & Context.WRAP_TOP_BOTTOM:
                    # Already at the top: wrap around to the very last zone.
                    self.lineIndex = len(self.lines) - 1
                    self.zoneIndex = len(self.lines[self.lineIndex].zones) - 1
                    self.wordIndex = 0
                    self.charIndex = 0
                    moved = True
        elif flatReviewType == Context.CHAR:
            if self.charIndex > 0:
                self.charIndex -= 1
                moved = True
            else:
                # At the first char of the word: step back one word (without
                # whitespace skipping) and land on its last char.
                moved = self.goPrevious(Context.WORD, wrap, False)
                if moved:
                    zone = self.lines[self.lineIndex].zones[self.zoneIndex]
                    if zone.words:
                        chars = zone.words[self.wordIndex].chars
                        if chars:
                            self.charIndex = len(chars) - 1
        elif flatReviewType == Context.WORD:
            zone = self.lines[self.lineIndex].zones[self.zoneIndex]
            accessible = zone.accessible
            # Save the current position so it can be restored if no real
            # move ends up being possible.
            lineIndex = self.lineIndex
            zoneIndex = self.zoneIndex
            wordIndex = self.wordIndex
            charIndex = self.charIndex
            if self.wordIndex > 0:
                self.wordIndex -= 1
                self.charIndex = 0
                moved = True
            else:
                # First word of the zone: step back a zone and land on its
                # last word.
                moved = self.goPrevious(Context.ZONE, wrap)
                if moved:
                    zone = self.lines[self.lineIndex].zones[self.zoneIndex]
                    if zone.words:
                        self.wordIndex = len(zone.words) - 1
            # If we landed on a whitespace word or something with no words,
            # we might need to move some more.
            #
            zone = self.lines[self.lineIndex].zones[self.zoneIndex]
            if omitWhitespace \
               and moved \
               and ((len(zone.string) == 0) \
                    or (len(zone.words) \
                        and zone.words[self.wordIndex].string.isspace())):
                # hasMoreText: the previous line ends in a zone of the same
                # accessible, i.e. the text continues across the line break.
                hasMoreText = False
                if self.lineIndex > 0 and isinstance(zone, TextZone):
                    prevZone = self.lines[self.lineIndex - 1].zones[-1]
                    if prevZone.accessible == zone.accessible:
                        hasMoreText = True
                # If we're on whitespace in the same zone, then let's
                # try to move on.  If not, we've definitely moved
                # across accessibles.  If that's the case, let's try
                # to find the first 'real' word in the accessible.
                # If we cannot, then we're just stuck on an accessible
                # with no words and we should do our best to announce
                # this to the user (e.g., "whitespace" or "blank").
                #
                if zone.accessible == accessible or hasMoreText:
                    moved = self.goPrevious(Context.WORD, wrap)
                else:
                    # Scan backwards for the nearest non-whitespace word in
                    # this (newly entered) zone.
                    wordIndex = self.wordIndex - 1
                    while wordIndex >= 0:
                        if (not zone.words[wordIndex].string) \
                           or not len(zone.words[wordIndex].string) \
                           or zone.words[wordIndex].string.isspace():
                            wordIndex -= 1
                        else:
                            break
                    if wordIndex >= 0:
                        self.wordIndex = wordIndex
            if not moved:
                # Restore the saved position; nothing changed.
                self.lineIndex = lineIndex
                self.zoneIndex = zoneIndex
                self.wordIndex = wordIndex
                self.charIndex = charIndex
        elif flatReviewType == Context.LINE:
            if wrap & Context.WRAP_LINE:
                if self.lineIndex > 0:
                    self.lineIndex -= 1
                    self.zoneIndex = 0
                    self.wordIndex = 0
                    self.charIndex = 0
                    moved = True
                elif (wrap & Context.WRAP_TOP_BOTTOM) \
                     and (len(self.lines) != 1):
                    # Wrap from the first line around to the last.
                    self.lineIndex = len(self.lines) - 1
                    self.zoneIndex = 0
                    self.wordIndex = 0
                    self.charIndex = 0
                    moved = True
        else:
            raise Exception("Invalid type: %d" % flatReviewType)
        # LINE moves deliberately do not update the horizontal target used
        # by goAbove/goBelow.
        if moved and (flatReviewType != Context.LINE):
            self.targetCharInfo = self.getCurrent(Context.CHAR)
        return moved
    def goNext(self, flatReviewType=ZONE, wrap=WRAP_ALL, omitWhitespace=True):
        """Moves this context's locus of interest to first char of
        the next type.

        Arguments:
        - flatReviewType: one of ZONE, CHAR, WORD, LINE
        - wrap: if True, will cross boundaries, including top and
          bottom; if False, will stop on boundaries.
        - omitWhitespace: if True (WORD moves only), keep moving past
          whitespace-only words where possible.

        Returns True if the locus of interest actually changed.
        """
        if not self.lines:
            debug.println(debug.LEVEL_FINE, 'goNext(): no lines in context')
            return False
        moved = False
        if flatReviewType == Context.ZONE:
            if self.zoneIndex < (len(self.lines[self.lineIndex].zones) - 1):
                # Next zone on the same line.
                self.zoneIndex += 1
                self.wordIndex = 0
                self.charIndex = 0
                moved = True
            elif wrap & Context.WRAP_LINE:
                if self.lineIndex < (len(self.lines) - 1):
                    # Wrap to the first zone of the next line.
                    self.lineIndex += 1
                    self.zoneIndex = 0
                    self.wordIndex = 0
                    self.charIndex = 0
                    moved = True
                    braille.clear()
                elif wrap & Context.WRAP_TOP_BOTTOM:
                    # Already at the bottom: wrap around to the very first zone.
                    self.lineIndex = 0
                    self.zoneIndex = 0
                    self.wordIndex = 0
                    self.charIndex = 0
                    moved = True
                    braille.clear()
        elif flatReviewType == Context.CHAR:
            zone = self.lines[self.lineIndex].zones[self.zoneIndex]
            if zone.words:
                chars = zone.words[self.wordIndex].chars
                if chars:
                    if self.charIndex < (len(chars) - 1):
                        self.charIndex += 1
                        moved = True
                    else:
                        # Last char of the word: advance a word (without
                        # whitespace skipping).
                        moved = self.goNext(Context.WORD, wrap, False)
                else:
                    moved = self.goNext(Context.WORD, wrap)
            else:
                moved = self.goNext(Context.ZONE, wrap)
        elif flatReviewType == Context.WORD:
            zone = self.lines[self.lineIndex].zones[self.zoneIndex]
            accessible = zone.accessible
            # Save the current position so it can be restored if no real
            # move ends up being possible.
            lineIndex = self.lineIndex
            zoneIndex = self.zoneIndex
            wordIndex = self.wordIndex
            charIndex = self.charIndex
            if zone.words:
                if self.wordIndex < (len(zone.words) - 1):
                    self.wordIndex += 1
                    self.charIndex = 0
                    moved = True
                else:
                    moved = self.goNext(Context.ZONE, wrap)
            else:
                moved = self.goNext(Context.ZONE, wrap)
            # If we landed on a whitespace word or something with no words,
            # we might need to move some more.
            #
            # NOTE(review): unlike goPrevious(), there is no 'hasMoreText'
            # check for text continuing onto the next line here -- confirm
            # whether that asymmetry is intentional.
            zone = self.lines[self.lineIndex].zones[self.zoneIndex]
            if omitWhitespace \
               and moved \
               and ((len(zone.string) == 0) \
                    or (len(zone.words) \
                        and zone.words[self.wordIndex].string.isspace())):
                # If we're on whitespace in the same zone, then let's
                # try to move on.  If not, we've definitely moved
                # across accessibles.  If that's the case, let's try
                # to find the first 'real' word in the accessible.
                # If we cannot, then we're just stuck on an accessible
                # with no words and we should do our best to announce
                # this to the user (e.g., "whitespace" or "blank").
                #
                if zone.accessible == accessible:
                    moved = self.goNext(Context.WORD, wrap)
                else:
                    # Scan forwards for the nearest non-whitespace word in
                    # this (newly entered) zone.
                    wordIndex = self.wordIndex + 1
                    while wordIndex < len(zone.words):
                        if (not zone.words[wordIndex].string) \
                           or not len(zone.words[wordIndex].string) \
                           or zone.words[wordIndex].string.isspace():
                            wordIndex += 1
                        else:
                            break
                    if wordIndex < len(zone.words):
                        self.wordIndex = wordIndex
            if not moved:
                # Restore the saved position; nothing changed.
                self.lineIndex = lineIndex
                self.zoneIndex = zoneIndex
                self.wordIndex = wordIndex
                self.charIndex = charIndex
        elif flatReviewType == Context.LINE:
            if wrap & Context.WRAP_LINE:
                if self.lineIndex < (len(self.lines) - 1):
                    self.lineIndex += 1
                    self.zoneIndex = 0
                    self.wordIndex = 0
                    self.charIndex = 0
                    moved = True
                elif (wrap & Context.WRAP_TOP_BOTTOM) \
                     and (self.lineIndex != 0):
                    # Wrap from the last line around to the first.
                    self.lineIndex = 0
                    self.zoneIndex = 0
                    self.wordIndex = 0
                    self.charIndex = 0
                    moved = True
        else:
            raise Exception("Invalid type: %d" % flatReviewType)
        # LINE moves deliberately do not update the horizontal target used
        # by goAbove/goBelow.
        if moved and (flatReviewType != Context.LINE):
            self.targetCharInfo = self.getCurrent(Context.CHAR)
        return moved
    def goAbove(self, flatReviewType=LINE, wrap=WRAP_ALL):
        """Moves this context's locus of interest to first char
        of the type that's closest to and above the current locus of
        interest.

        Arguments:
        - flatReviewType: one of CHAR, LINE
        - wrap: if True, will cross top/bottom boundaries; if False, will
          stop on top/bottom boundaries.

        Returns True if the locus of interest actually changed.
        (The original docstring claimed a list return; the method
        actually returns a boolean.)
        """
        moved = False
        if flatReviewType == Context.CHAR:
            # We want to shoot for the closest character, which we've
            # saved away as self.targetCharInfo, which is the list
            # [string, x, y, width, height].
            #
            if not self.targetCharInfo:
                self.targetCharInfo = self.getCurrent(Context.CHAR)
            target = self.targetCharInfo
            [string, x, y, width, height] = target
            middleTargetX = x + (width / 2)
            moved = self.goPrevious(Context.LINE, wrap)
            if moved:
                # Walk rightwards along the new line until a char reaches
                # the target's horizontal midpoint.
                # NOTE(review): the comparison uses the *target* char's
                # 'width', not the current char's 'bwidth' -- confirm this
                # is intended.
                while True:
                    [string, bx, by, bwidth, bheight] = \
                        self.getCurrent(Context.CHAR)
                    if (bx + width) >= middleTargetX:
                        break
                    elif not self.goNext(Context.CHAR, Context.WRAP_NONE):
                        break
            # Moving around might have reset the current targetCharInfo,
            # so we reset it to our saved value.
            #
            self.targetCharInfo = target
        elif flatReviewType == Context.LINE:
            return self.goPrevious(flatReviewType, wrap)
        else:
            raise Exception("Invalid type: %d" % flatReviewType)
        return moved
    def goBelow(self, flatReviewType=LINE, wrap=WRAP_ALL):
        """Moves this context's locus of interest to the first
        char of the type that's closest to and below the current
        locus of interest.

        Arguments:
        - flatReviewType: one of CHAR, LINE
        - wrap: if True, will cross top/bottom boundaries; if False, will
          stop on top/bottom boundaries.

        Returns True if the locus of interest actually changed.
        (The original docstring claimed a list return; the method
        actually returns a boolean.)
        """
        moved = False
        if flatReviewType == Context.CHAR:
            # We want to shoot for the closest character, which we've
            # saved away as self.targetCharInfo, which is the list
            # [string, x, y, width, height].
            #
            if not self.targetCharInfo:
                self.targetCharInfo = self.getCurrent(Context.CHAR)
            target = self.targetCharInfo
            [string, x, y, width, height] = target
            middleTargetX = x + (width / 2)
            moved = self.goNext(Context.LINE, wrap)
            if moved:
                # Walk rightwards along the new line until a char reaches
                # the target's horizontal midpoint.
                # NOTE(review): the comparison uses the *target* char's
                # 'width', not the current char's 'bwidth' -- confirm this
                # is intended (same pattern as goAbove).
                while True:
                    [string, bx, by, bwidth, bheight] = \
                        self.getCurrent(Context.CHAR)
                    if (bx + width) >= middleTargetX:
                        break
                    elif not self.goNext(Context.CHAR, Context.WRAP_NONE):
                        break
            # Moving around might have reset the current targetCharInfo,
            # so we reset it to our saved value.
            #
            self.targetCharInfo = target
        elif flatReviewType == Context.LINE:
            moved = self.goNext(flatReviewType, wrap)
        else:
            raise Exception("Invalid type: %d" % flatReviewType)
        return moved
| GNOME/orca | src/orca/flat_review.py | Python | lgpl-2.1 | 49,319 |
#!/usr/bin/env python3
import sys
from TimeFileMaker import *
if __name__ == '__main__':
    # Command-line driver: print a usage/help message when asked (or when no
    # input file is given), otherwise format the timing file as a table.
    USAGE = 'Usage: %s FILE_NAME [OUTPUT_FILE_NAME ..]' % sys.argv[0]
    HELP_STRING = r'''Formats timing information from the output of `make TIMED=1` into a sorted table.
The input is expected to contain lines in the format:
FILE_NAME (...user: NUMBER_IN_SECONDS...)
'''
    wants_help = '--help' in sys.argv[1:] or '-h' in sys.argv[1:]
    if len(sys.argv) < 2 or wants_help:
        print(USAGE)
        if wants_help:
            print(HELP_STRING)
        # Exit successfully when only a help flag was passed; otherwise the
        # invocation was malformed.
        sys.exit(0 if len(sys.argv) == 2 else 1)
    else:
        # Read the timings, render them as a table, and print it or write it
        # to each named output file.
        table = make_table_string(get_times(sys.argv[1]))
        print_or_write_table(table, sys.argv[2:])
| Matafou/coq | tools/make-one-time-file.py | Python | lgpl-2.1 | 773 |
# coding=utf8
#
# Copyright 2013 Dreamlab Onet.pl
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation;
# version 3.0.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, visit
#
# http://www.gnu.org/licenses/lgpl.txt
#
class RmockError(Exception):
    """Base class for all errors raised by rmock."""
    pass
class RmockStartError(RmockError):
    # Presumably raised when starting an rmock instance fails
    # (name-based; no call sites visible in this chunk -- confirm).
    pass
class RmockParamsError(RmockError):
    # Presumably raised for invalid rmock parameters
    # (name-based; no call sites visible in this chunk -- confirm).
    pass
class InvalidFunction(RmockError):
    # Presumably raised when an unknown/invalid function is mocked or called
    # (name-based; no call sites visible in this chunk -- confirm).
    pass
| tikan/rmock | src/rmock/errors.py | Python | lgpl-3.0 | 832 |
"""
Unit tests for the enharmony.artist module.
"""
import unittest
from enharmony.artist import Artist
@unittest.skip("not implemented")
class TestParsing(unittest.TestCase):  # pylint: disable=R0904
    """Tests for parsing artists."""

    def test_nominal(self):
        """Verify a normal artist can be parsed."""
        parsed = Artist("The Something")
        self.assertEqual("The Something", parsed.name)
@unittest.skip("not implemented")
class TestFormatting(unittest.TestCase):  # pylint: disable=R0904
    """Tests for formatting artists."""

    def test_nominal(self):
        """Verify a normal artist can be formatted."""
        original = Artist("The Something")
        self.assertEqual("The Something", str(original))
        # repr() must evaluate back to an equal instance.
        round_tripped = eval(repr(original))
        self.assertEqual(original, round_tripped)
class TestEquality(unittest.TestCase): # pylint: disable=R0904
"""Tests for artist equality."""
def test_exact(self):
"""Verify exact artist name matches are equal."""
self.assertEqual(Artist("Artist Name"), Artist("Artist Name"))
def test_case(self):
"""Verify artist name case does not matter."""
self.assertEqual(Artist("Artist Name"), Artist("Artist name"))
def test_ands(self):
"""Verify artist "and" operators do not matter."""
self.assertEqual(Artist("Artist + Others"), Artist("Artist & others"))
def test_and_order(self):
"""Verify order of multiple artists does not matter."""
self.assertEqual(Artist("Artist + Others"), Artist("Others & Artist"))
@unittest.skip("not implemented")
class TestInequality(unittest.TestCase):  # pylint: disable=R0904
    """Tests for artist inequality."""

    def test_types(self):
        """Verify different types are not equal."""
        self.assertNotEqual(Artist("Name"), "Name")

    def test_different(self):
        """Verify different artist names are not equal."""
        first, second = Artist("Artist A"), Artist("Artist B")
        self.assertNotEqual(first, second)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| jacebrowning/enharmony | enharmony/test/test_artist.py | Python | lgpl-3.0 | 2,056 |
from instances import *
from read_instances import *
from lockfile import FileLock
import cPickle as pickle
# Driver script (Python 2 era): solve the first benchmark instance with SNOPT
# and collect solver outputs/statistics.
data = readall()

alldata = []
for d in data[:1]:
    H = d["H"]
    # FIXME(review): the original line was missing a closing parenthesis and
    # did not parse.  The most plausible reading is that 'lift' and 'simple'
    # are createNLP() keyword arguments rather than DMatrix() arguments --
    # confirm against the createNLP() signature in instances.py.
    nlp = createNLP(DMatrix([[1.2,0.3],[0.7,1.3]]),
                    DMatrix([[0.2,0.4],[0.77,0.12]]),
                    lift=True, simple=False)
    log = []
    dists = []
    nlpsolver = SnoptSolver(nlp)
    #nlpsolver.setOption("tol",1e-12)
    nlpsolver.setOption("gather_stats",True)
    nlpsolver.setOption("_feasibility_tolerance",1e-12)
    nlpsolver.setOption("_optimality_tolerance",1e-12)
    nlpsolver.setOption("_major_iteration_limit",3000)
    nlpsolver.setOption("detect_linear",True)
    #nlpsolver.setOption("max_iter",3000)
    nlpsolver.init()
    # Start from a small uniform initial guess; the trailing entries of x0
    # are seeded from the instance's Bs matrix.
    nlpsolver.setInput(1e-5,"x0")
    bs_ = mul(d["problem"]["Bs"][0],1e-5*DMatrix.ones(2,2))
    nlpsolver.input("x0")[-bs_.size():] = vec(bs_)
    nlpsolver.setInput(0,"lbg")
    nlpsolver.setInput(0,"ubg")
    nlpsolver.evaluate()
    # Parenthesized so this single-argument print works under Python 2 and 3.
    print(nlpsolver.getStats())
    alldata.append({"f": nlpsolver.output("f"),"x":nlpsolver.output("x"), "stats": nlpsolver.getStats()})
import numpy as np
import cv2
import random
from multiprocessing.pool import ThreadPool
def adjust_img(img):
    """Per-image augmentation hook; currently returns the image unchanged.

    A random horizontal flip was previously tried here and disabled:
        # a = np.random.randint(2)
        # if a==1:
        #     img = np.flip(img, axis=1)
    """
    return img
def process(batch, max_label):
    """Turn a list of (image path, integer label) pairs into a training batch.

    Returns [float32 image array, float32 one-hot label array]; labels are
    one-hot encoded over max_label+1 classes.
    """
    # add more process here
    paths, labels = zip(*batch)
    images = [adjust_img(cv2.imread(p)) for p in paths]
    one_hot = np.eye(max_label + 1)[np.array(labels)]
    return [np.float32(images), np.float32(one_hot)]
class DataReader():
    """Threaded batch loader: reads an image/label list file, shuffles it, and
    prefetches one batch ahead on a background thread."""

    def __init__(self, listfile, bsize):
        """Read the list file and start prefetching.

        :param listfile: text file with tab-separated lines; column 1 is the
            image path, column 2 the integer label (column 0 is ignored).
        :param bsize: batch size.
        """
        self.data = []
        print('Reading text file...')
        max_label = 0
        # Context manager so the list file is closed (the original leaked
        # the file handle).
        with open(listfile, 'r') as f:
            for line in f:
                line = line.strip().split('\t')
                img = line[1]
                label = int(line[2])
                if label > max_label:
                    max_label = label
                self.data.append([img, label])
        random.shuffle(self.data)
        print('Finished')
        self.pos = 0        # cursor into self.data
        self.epoch = 0      # completed-epoch counter
        self.bsize = bsize
        # BUG FIX: the original stored 'label' (the label of the *last* line)
        # instead of the running maximum, which under-sizes the one-hot
        # encoding in process() whenever the last line does not carry the
        # largest label.
        self.max_label = max_label
        self.iter_per_epoch = len(self.data)//self.bsize
        self.pool = ThreadPool(processes=1)
        self.prefetch()
        print('max_label:', max_label)

    def prefetch(self):
        """Schedule asynchronous preparation of the next batch."""
        if self.pos + self.bsize > len(self.data):
            # Not enough items left: start a new, reshuffled epoch.
            self.pos = 0
            self.epoch += 1
            random.shuffle(self.data)
        batch = self.data[self.pos: self.pos+self.bsize]
        self.p = self.pool.apply_async(process, args=(batch, self.max_label))
        self.pos += self.bsize

    def get_next(self):
        """Return the prefetched batch and immediately schedule the next one."""
        batch = self.p.get()
        self.prefetch()
        return batch
| ddddwee1/SULsT | example/FaceResNet/datareader.py | Python | lgpl-3.0 | 1,419 |
#
# MuPIF: Multi-Physics Integration Framework
# Copyright (C) 2010-2015 Borek Patzak
#
# Czech Technical University, Faculty of Civil Engineering,
# Department of Structural Mechanics, 166 29 Prague, Czech Republic
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA
#
import os
import Pyro5.api
from . import apierror
from . import mupifobject
from .dataid import DataID
from . import property
from . import field
from . import function
from . import timestep
from . import pyroutil
from . import pyrofile
from typing import Optional, Any
import time
from pydantic.dataclasses import dataclass
import logging
log = logging.getLogger()
prefix = "mupif."
# Fully-qualified names of all DataID members, e.g. "mupif.FID_Temperature";
# used below as the enum of allowed Type_ID values in the schema.
type_ids = [prefix + s for s in map(str, DataID)]
# Schema for metadata for Model and further passed to Workflow
# BUG FIX applied below: the JSON Schema applicator keyword is "anyOf",
# not "anyof".  Unknown keywords are silently ignored by validators, so the
# original Obj_ID constraints in Inputs/Outputs were never enforced.
ModelSchema = {
    "type": "object",  # Object supplies a dictionary
    "properties": {
        # Name: e.g. Non-stationary thermal problem, obtained automatically from getApplicationSignature()
        # Name of the model (or workflow), e.g. "stationary thermal model", "steel buckling workflow"
        "Name": {"type": "string"},
        # ID: Unique ID of model (workflow), e.g. "Lammps", "CalculiX", "MFEM", "Buckling workflow 1"
        "ID": {"type": ["string", "integer"]},
        "Description": {"type": "string"},
        "Version_date": {"type": "string"},
        "Material": {"type": "string"},  # What material is simulated
        "Manuf_process": {"type": "string"},  # Manufacturing process or in-service conditions
        "Geometry": {"type": "string"},  # e.g. nanometers, 3D periodic box
        "Physics": {  # Corresponds to MODA Generic Physics
            "type": "object",
            "properties": {
                # Type: MODA model type
                "Type": {"type": "string", "enum": ["Electronic", "Atomistic", "Molecular", "Mesoscopic", "Continuum", "Other"]},
                "Entity": {"type": "string", "enum": ["Atom", "Electron", "Grains", "Finite volume", "Other"]},
                # Entity_description: E.g. Atoms are treated as spherical entities in space with the radius and mass
                # determined by the element type
                "Entity_description": {"type": "string"},
                # Equation: List of equations' description such as Equation of motion, heat balance, mass conservation.
                # MODA PHYSICS EQUATIONS
                "Equation": {"type": "array"},
                # Equation_quantities: e.g. Force, mass, potential, energy, stress, heat, temperature.
                "Equation_quantities": {"type": "array"},
                # Relation_description: Describes equilibrium of forces on an infinitesimal element, etc.
                "Relation_description": {"type": "array"},
                # Relation_formulation: Constitutive equation (material relation), e.g. force field, stress-strain,
                # flow-gradient. MODA MATERIAL RELATIONS
                "Relation_formulation": {"type": "array"}
            },
            "required": ["Type", "Entity"]
        },
        "Solver": {
            "properties": {
                # Software: Name of the software (e.g.openFOAM). Corresponds to MODA SOFTWARE TOOL
                "Software": {"type": "string"},
                "Language": {"type": "string"},
                "License": {"type": "string"},
                "Creator": {"type": "string"},
                "Version_date": {"type": "string"},
                # Type: Type e.g. finite difference method for Ordinary Differential Equations (ODEs)
                # Corresponds to MODA Solver Specification NUMERICAL SOLVER attribute.
                "Type": {"type": "string"},
                # Solver_additional_params: Additional parameters of numerical solver, e.g. time integration scheme
                "Solver_additional_params": {"type": "string"},
                "Documentation": {"type": "string"},  # Where published/documented
                "Estim_time_step_s": {"type": "number"},  # Seconds
                "Estim_comp_time_s": {"type": "number"},  # Seconds
                "Estim_execution_cost_EUR": {"type": "number"},  # EUR
                "Estim_personnel_cost_EUR": {"type": "number"},  # EUR
                "Required_expertise": {"type": "string", "enum": ["None", "User", "Expert"]},
                "Accuracy": {"type": "string", "enum": ["Low", "Medium", "High", "Unknown"]},
                "Sensitivity": {"type": "string", "enum": ["Low", "Medium", "High", "Unknown"]},
                "Complexity": {"type": "string", "enum": ["Low", "Medium", "High", "Unknown"]},
                "Robustness": {"type": "string", "enum": ["Low", "Medium", "High", "Unknown"]}
            },
            "required": [
                "Software", "Language", "License", "Creator", "Version_date", "Type", "Documentation",
                "Estim_time_step_s", "Estim_comp_time_s", "Estim_execution_cost_EUR", "Estim_personnel_cost_EUR",
                "Required_expertise", "Accuracy", "Sensitivity", "Complexity", "Robustness"
            ]
        },
        "Execution": {
            "properties": {
                "ID": {"type": ["string", "integer"]},  # Optional application execution ID (typically set by workflow)
                # Use_case_ID: user case ID (e.g. thermo-mechanical simulation coded as 1_1)
                "Use_case_ID": {"type": ["string", "integer"]},
                # Task_ID: user task ID (e.g. variant of user case ID such as model with higher accuracy)
                "Task_ID": {"type": "string"},
                "Status": {"type": "string", "enum": ["Instantiated", "Initialized", "Running", "Finished", "Failed"]},
                "Progress": {"type": "number"},  # Progress in %
                "Date_time_start": {"type": "string"},  # automatically set in Workflow
                "Date_time_end": {"type": "string"},  # automatically set in Workflow
                "Username": {"type": "string"},  # automatically set in Model and Workflow
                "Hostname": {"type": "string"}  # automatically set in Model and Workflow
            },
            "required": ["ID"]
        },
        "Inputs": {
            "type": "array",  # List
            "items": {
                "type": "object",  # Object supplies a dictionary
                "properties": {
                    "Type": {"type": "string", "enum": ["mupif.Property", "mupif.Field", "mupif.ParticleSet", "mupif.GrainState", "mupif.PyroFile"]},
                    "Type_ID": {"type": "string", "enum": type_ids},  # e.g. PID_Concentration
                    "Obj_ID": {  # optional parameter for additional info, string or list of string
                        "anyOf": [{"type": "string"}, {"type": "array", "items": {"type": "string"}}]
                    },
                    "Name": {"type": "string"},
                    "ValueType": {"type": "string", "enum": ["Scalar", "Vector", "Tensor", "ScalarArray", "VectorArray", "TensorArray"]},
                    "Description": {"type": "string"},
                    "Units": {"type": "string"},
                    "Required": {"type": "boolean"},
                    "Set_at": {"type": "string", "enum": ["initialization", "timestep"]}
                },
                "required": ["Type", "Type_ID", "Name", "Units", "Required", "Set_at"],
                # ValueType is mandatory for mupif.Property inputs.
                "allOf": [
                    {
                        "anyOf": [
                            {
                                "not": {
                                    "properties": {
                                        "Type": {"const": "mupif.Property"}
                                    }
                                }
                            },
                            {"required": ["ValueType"]}
                        ]
                    }
                ]
            }
        },
        "Outputs": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "Type": {"type": "string", "enum": ["mupif.Property", "mupif.Field", "mupif.ParticleSet", "mupif.GrainState"]},
                    "Type_ID": {"type": "string", "enum": type_ids},  # e.g. mupif.DataID.FID_Temperature
                    "Obj_ID": {  # optional parameter for additional info, string or list of string
                        "anyOf": [{"type": "string"}, {"type": "array", "items": {"type": "string"}}]
                    },
                    "Name": {"type": "string"},
                    "ValueType": {"type": "string", "enum": ["Scalar", "Vector", "Tensor", "ScalarArray", "VectorArray", "TensorArray"]},
                    "Description": {"type": "string"},
                    "Units": {"type": "string"}
                },
                "required": ["Type", "Type_ID", "Name", "Units"],
                # ValueType is mandatory for mupif.Property outputs.
                "allOf": [
                    {
                        "anyOf": [
                            {
                                "not": {
                                    "properties": {
                                        "Type": {"const": "mupif.Property"}
                                    }
                                }
                            },
                            {"required": ["ValueType"]}
                        ]
                    }
                ]
            }
        }
    },
    "required": [
        "Name", "ID", "Description", "Physics", "Solver", "Execution", "Inputs", "Outputs"
    ]
}
@Pyro5.api.expose
class Model(mupifobject.MupifObject):
    """
    An abstract class representing an application and its interface (API).

    The purpose of this class is to define abstract services for data exchange and steering.
    This interface has to be implemented/provided by any application.
    The data exchange is performed by the means of new data types introduced in the framework,
    namely properties and fields.
    New abstract data types (properties, fields) allow to hide all implementation details
    related to discretization and data storage.

    .. automethod:: __init__
    """

    # Pydantic-declared attributes (MupifObject is a pydantic model).
    pyroDaemon: Optional[Any] = None
    externalDaemon: bool = False
    pyroNS: Optional[str] = None
    pyroURI: Optional[str] = None
    appName: str = None
    workDir: str = ''
    _jobID: str = None

    def __init__(self, *, metadata=None, **kw):
        # BUG FIX: the original signature used the mutable default
        # 'metadata={}' and wrote the default entries into that dict below,
        # so every Model constructed without an explicit 'metadata' argument
        # shared (and polluted) a single dictionary.  Copying also avoids
        # mutating the caller's dict.
        metadata = {} if metadata is None else dict(metadata)
        (username, hostname) = pyroutil.getUserInfo()
        defaults = dict([
            ('Username', username),
            ('Hostname', hostname),
            ('Status', 'Initialized'),
            ('Date_time_start', time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())),
            ('Execution', {}),
            ('Solver', {})
        ])
        # use defaults for metadata, unless given explicitly
        for k, v in defaults.items():
            if k not in metadata:
                metadata[k] = v
        super().__init__(metadata=metadata, **kw)

    def initialize(self, workdir='', metadata=None, validateMetaData=True, **kwargs):
        """
        Initializes application, i.e. all functions after constructor and before run.

        :param str workdir: Optional parameter for working directory
        :param dict metadata: Optional dictionary used to set up metadata (can be also set by setMetadata() ).
        :param bool validateMetaData: Defines if the metadata validation will be called
        :param named_arguments kwargs: Arbitrary further parameters
        """
        # BUG FIX: 'metadata={}' was a mutable default argument; normalized
        # to None-sentinel (behavior unchanged for callers).
        if metadata is None:
            metadata = {}
        self.updateMetadata(metadata)
        self.setMetadata('Name', self.getApplicationSignature())
        self.setMetadata('Status', 'Initialized')
        if workdir == '':
            self.workDir = os.getcwd()
        else:
            self.workDir = workdir
        if validateMetaData:
            self.validateMetadata(ModelSchema)
            # log.info('Metadata successfully validated')

    def registerPyro(self, pyroDaemon, pyroNS, pyroURI, appName=None, externalDaemon=False):
        """
        Register the Pyro daemon and nameserver. Required by several services

        :param Pyro5.api.Daemon pyroDaemon: Optional pyro daemon
        :param Pyro5.naming.Nameserver pyroNS: Optional nameserver
        :param string pyroURI: Optional URI of receiver
        :param string appName: Optional application name. Used for removing from pyroNS
        :param bool externalDaemon: Optional parameter when daemon was allocated externally.
        """
        self.pyroDaemon = pyroDaemon
        self.pyroNS = pyroNS
        self.pyroURI = pyroURI
        self.appName = appName
        self.externalDaemon = externalDaemon

    def get(self, objectTypeID, time=None, objectID=""):
        """
        Returns the requested object at given time. Object is identified by id.
        Abstract here; concrete models override it.

        :param DataID objectTypeID: Identifier of the object
        :param Physics.PhysicalQuantity time: Target time
        :param int objectID: Identifies object with objectID (optional, default 0)

        :return: Returns requested object.
        """

    def set(self, obj, objectID=""):
        """
        Registers the given (remote) object in application.
        Abstract here; concrete models override it.

        :param property.Property or field.Field or function.Function or pyrofile.PyroFile or heavydata.HeavyDataHandle obj: Remote object to be registered by the application
        :param int or str objectID: Identifies object with objectID (optional, default 0)
        """

    def getFieldURI(self, fieldID, time, objectID=""):
        """
        Returns the uri of requested field at given time. Field is identified by fieldID.

        :param DataID fieldID: Identifier of the field
        :param Physics.PhysicalQuantity time: Target time
        :param int objectID: Identifies field with objectID (optional, default 0)

        :return: Requested field uri
        :rtype: Pyro5.api.URI
        """
        if self.pyroDaemon is None:
            raise apierror.APIError('Error: getFieldURI requires to register pyroDaemon in application')
        try:
            var_field = self.get(fieldID, time, objectID=objectID)
        except Exception:
            self.setMetadata('Status', 'Failed')
            raise apierror.APIError('Error: can not obtain field')
        if hasattr(var_field, '_PyroURI'):
            return var_field._PyroURI
        else:
            uri = self.pyroDaemon.register(var_field)
            # inject uri into var_field attributes, note: _PyroURI is avoided
            # for deepcopy operation
            var_field._PyroURI = uri
            # self.pyroNS.register("MUPIF."+self.pyroName+"."+str(fieldID), uri)
            return uri

    def solveStep(self, tstep, stageID=0, runInBackground=False):
        """
        Solves the problem for given time step.

        Proceeds the solution from actual state to given time.
        The actual state should not be updated at the end, as this method could be
        called multiple times for the same solution step until the global convergence
        is reached. When global convergence is reached, finishStep is called and then
        the actual state has to be updated.
        Solution can be split into individual stages identified by optional stageID parameter.
        In between the stages the additional data exchange can be performed.
        See also wait and isSolved services.

        :param timestep.TimeStep tstep: Solution step
        :param int stageID: optional argument identifying solution stage (default 0)
        :param bool runInBackground: optional argument, defualt False. If True, the solution will run in background (in separate thread or remotely).
        """
        self.setMetadata('Status', 'Running')
        self.setMetadata('Progress', 0.)

    def wait(self):
        """
        Wait until solve is completed when executed in background.
        """

    def isSolved(self):
        """
        Check whether solve has completed.

        :return: Returns true or false depending whether solve has completed when executed in background.
        :rtype: bool
        """

    def finishStep(self, tstep):
        """
        Called after a global convergence within a time step is achieved.

        :param timestep.TimeStep tstep: Solution step
        """

    def getCriticalTimeStep(self):
        """
        Returns a critical time step for an application.

        :return: Returns the actual (related to current state) critical time step increment
        :rtype: Physics.PhysicalQuantity
        """

    def getAssemblyTime(self, tstep):
        """
        Returns the assembly time related to given time step.
        The registered fields (inputs) should be evaluated in this time.

        :param timestep.TimeStep tstep: Solution step

        :return: Assembly time
        :rtype: Physics.PhysicalQuantity, timestep.TimeStep
        """

    def storeState(self, tstep):
        """
        Store the solution state of an application.

        :param timestep.TimeStep tstep: Solution step
        """

    def restoreState(self, tstep):
        """
        Restore the saved state of an application.

        :param timestep.TimeStep tstep: Solution step
        """

    def getAPIVersion(self):
        """
        :return: Returns the supported API version
        :rtype: str, int
        """

    def getApplicationSignature(self):
        """
        Get application signature.

        :return: Returns the application identification
        :rtype: str
        """
        return "Model"

    def removeApp(self, nameServer=None, appName=None):
        """
        Removes (unregisters) application from the name server.

        :param Pyro5.naming.Nameserver nameServer: Optional instance of a nameServer
        :param str appName: Optional name of the application to be removed
        """
        if nameServer is None:
            nameServer = self.pyroNS
        if appName is None:
            appName = self.appName
        if nameServer is not None:  # local application can run without a nameServer
            try:
                log.debug("Removing application %s from a nameServer %s" % (appName, nameServer))
                nameServer._pyroClaimOwnership()
                nameServer.remove(appName)
            except Exception as e:
                # log.warning("Cannot remove application %s from nameServer %s" % (appName, nameServer))
                log.exception(f"Cannot remove {appName} from {nameServer}?")
                # print("".join(Pyro5.errors.get_pyro_traceback()))
                self.setMetadata('Status', 'Failed')
                raise

    @Pyro5.api.oneway  # in case call returns much later than daemon.shutdown
    def terminate(self):
        """
        Terminates the application. Shutdowns daemons if created internally.
        """
        self.setMetadata('Status', 'Finished')
        self.setMetadata('Date_time_end', time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()))

        # Remove application from nameServer
        # print("Removing")
        if self.pyroNS is not None:
            self.removeApp()

        if self.pyroDaemon:
            self.pyroDaemon.unregister(self)
            log.info("Unregistering daemon %s" % self.pyroDaemon)
            # log.info(self.pyroDaemon)
            # Only shut the daemon down if it was created by us.
            if not self.externalDaemon:
                self.pyroDaemon.shutdown()
            self.pyroDaemon = None
        else:
            log.info("Terminating model")

    def getURI(self):
        """
        :return: Returns the application URI or None if application not registered in Pyro
        :rtype: str
        """
        return self.pyroURI

    def printMetadata(self, nonEmpty=False):
        """
        Print all metadata

        :param bool nonEmpty: Optionally print only non-empty values
        :return: None
        :rtype: None
        """
        if self.hasMetadata('Name'):
            print('AppName:\'%s\':' % self.getMetadata('Name'))
        super().printMetadata(nonEmpty)

    def setJobID(self, jobid):
        # Store the job manager's job identifier for this running instance.
        self._jobID = jobid

    def getJobID(self):
        return self._jobID
@Pyro5.api.expose
class RemoteModel (object):
    """
    Remote Application instances are normally represented by auto generated pyro proxy.
    However, when application is allocated using JobManager or ssh tunnel, the proper termination of the tunnel or
    job manager task is required.

    This class is a decorator around a pyro proxy object representing the application, storing the reference to the
    job manager and related jobID or/and ssh tunnel.
    These external attributes could not be injected into the Application instance, as it is a remote instance (using
    proxy) and the termination of job and tunnel has to be done from the local computer, which has the necessary
    communication link established (ssh tunnel in particular, when port translation takes place).
    """
    def __init__(self, decoratee, jobMan=None, jobID=None, appTunnel=None):
        """
        :param decoratee: Pyro proxy of the remote application
        :param jobMan: optional job manager that allocated the application
        :param jobID: optional job identifier at the job manager
        :param appTunnel: optional ssh tunnel to the application, or the string
            "manual" when the tunnel is managed outside this class
        """
        self._decoratee = decoratee
        self._jobMan = jobMan
        self._jobID = jobID
        self._appTunnel = appTunnel
    def __getattr__(self, name):
        """
        Catch all attribute access and pass it to self._decoratee, see python data model, __getattr__ method
        """
        return getattr(self._decoratee, name)
    def getJobID(self):
        """
        :return: job identifier at the job manager, or None
        """
        return self._jobID
    @Pyro5.api.oneway  # in case call returns much later than daemon.shutdown
    def terminate(self):
        """
        Terminates the application. Terminates the allocated job at jobManager
        """
        if self._decoratee is not None:
            self._decoratee.terminate()
            self._decoratee = None
        if self._jobMan and self._jobID:
            try:
                log.info("RemoteApplication: Terminating jobManager job %s on %s" % (
                    str(self._jobID), self._jobMan.getNSName()))
                self._jobMan.terminateJob(self._jobID)
            except Exception as e:
                # Best-effort shutdown: log and continue so the ssh tunnel below
                # still gets closed. (The decoratee is already gone at this
                # point, so no remote metadata can be recorded here.)
                log.exception("RemoteApplication: cannot terminate job %s" % str(self._jobID))
                print(e)
            finally:
                # Forget the job in all cases so a repeated terminate() call
                # (e.g. from __del__) does not try to terminate it again.
                self._jobID = None
        # close tunnel as the last step so an application is still reachable
        if self._appTunnel:
            if self._appTunnel != "manual":
                self._appTunnel.terminate()
    def __del__(self):
        """
        Destructor, calls terminate if not done before.
        """
        self.terminate()
| mupif/mupif | mupif/model.py | Python | lgpl-3.0 | 23,354 |
from time import sleep
from daemonize import Daemonize
# PID file used by daemonize to track the running daemon process.
pid = "/tmp/test.pid"
def main():
    # Daemon payload: idle forever (placeholder for real work).
    while True:
        sleep(5)
# Detach from the terminal and run main() as a daemonized process.
daemon = Daemonize(app="test_app", pid=pid, action=main)
daemon.start()
| chrys87/fenrir | play zone/daemon.py | Python | lgpl-3.0 | 198 |
#!/usr/bin/env python
# Produce a side-by-side animation of a control run, an experiment run, and
# their relative difference, one movie per tracer found in both files.
# Molar-mass-based scale factors applied to each tracer field; presumably
# converts mass mixing ratio to mole fraction (28.97 = molar mass of air) —
# TODO confirm units against the input data.
conversion_factor = {'CO2':1E-3*28.97/12.01, 'CH4':28.97/16.04, 'TCO':28.97/28.010}
# Tracers renamed for display purposes (variable name -> plot label).
rename = {'TCO':'CO'}
from argparse import ArgumentParser
parser = ArgumentParser(description='Produces an animation of the relative difference between two experiments.')
parser.add_argument('control', help='Control data (netCDF).')
parser.add_argument('experiment', help='Experiment data (netCDF).')
parser.add_argument('outname', help='Name of the movie file to generate.')
parser.add_argument('--fps', default=25, type=int, help='Frames per second for the movie. Default is %(default)s.')
# NOTE(review): help text below says "Frames per second" but this option is the
# bitrate — looks like a copy/paste slip in the help string.
parser.add_argument('--bitrate', default=8000, type=int, help='Frames per second for the movie. Default is %(default)s.')
args = parser.parse_args()
# Add paths to locally installed packages.
# Only needed for running on gpsc-vis nodes.
import sys
sys.path.append('/fs/ssm/eccc/crd/ccmr/EC-CAS/master/basemap_1.0.7rel_ubuntu-14.04-amd64-64/lib/python')
try:
    import xarray
    import dask
    import fstd2nc
except ImportError:
    parser.error("You need to run the following command before using the script:\n\n. ssmuse-sh -p eccc/crd/ccmr/EC-CAS/master/fstd2nc_0.20180821.0\n")
import numpy as np
from matplotlib import pyplot as pl
from matplotlib import animation as an
from mpl_toolkits.basemap import Basemap
# Ignore numpy warnings about things like invalid values (such as NaN).
import warnings
warnings.simplefilter("ignore")
# Lazily open both datasets, one chunk per timestep.
control = xarray.open_dataset(args.control,chunks={'time':1})
experiment = xarray.open_dataset(args.experiment,chunks={'time':1})
# Get a minimal label for the control and experiment
# (part of filename that is unique).
from os.path import basename, splitext
control_name = splitext(basename(args.control))[0].split('_')
experiment_name = splitext(basename(args.experiment))[0].split('_')
# Strip the common trailing '_'-separated tokens (the diagnostic type) from
# both names; what remains is the unique run label.
# NOTE(review): this loop assumes the two names differ somewhere — identical
# filenames (or one a suffix of the other) would run past the start of the
# shorter list.
i = -1
while control_name[i] == experiment_name[i]:
    i = i - 1
diag_type = ' '.join(control_name[i+1:])
control_name = '_'.join(control_name[:i+1])
experiment_name = '_'.join(experiment_name[:i+1])
# Find common timesteps between the experiments.
times = np.intersect1d(control.coords['time'], experiment.coords['time'])
control = control.sel(time=times)
experiment = experiment.sel(time=times)
# Loop over each tracer, produce a movie.
for tracer in control.data_vars.keys():
    if tracer not in experiment.data_vars: continue
    conversion = conversion_factor.get(tracer,1.0)
    data1 = control.data_vars[tracer] * conversion
    data2 = experiment.data_vars[tracer] * conversion
    reldiff = (data2-data1)/data1 * 100
    rect = (0,0,1,1)  # Rectangle of visible area (for tight_layout)
    # Lat/lon data: three stacked map panels. Otherwise (e.g. zonal means):
    # three panels side by side.
    if 'lat' in control.dims and 'lon' in control.dims:
        fig = pl.figure(figsize=(8,10))
        # Closure used inside the animation loop to retitle only the top panel.
        def set_title(title):
            frame1.axes.set_title(rename.get(tracer,tracer)+' '+diag_type+' - '+title)
            frame2.axes.set_title('')
            frame3.axes.set_title('')
        # Set up frames, with map background.
        ax = pl.subplot(311)
        m = Basemap(projection='cyl', llcrnrlat=-90, urcrnrlat=90, llcrnrlon=0, urcrnrlon=360, ax=ax)
        m.drawcoastlines()
        m.drawparallels(np.arange(-60.,61.,30.),labels=[True,False,False,False])
        m.drawmeridians(np.arange(0,361.,60.))
        frame1 = data1.isel(time=-1).plot(ax=ax,robust=True)
        fig.axes[-1].set_ylabel('\n'+control_name)  # Add label to colorbar.
        ax = pl.subplot(312)
        m = Basemap(projection='cyl', llcrnrlat=-90, urcrnrlat=90, llcrnrlon=0, urcrnrlon=360, ax=ax)
        m.drawcoastlines()
        m.drawparallels(np.arange(-60.,61.,30.),labels=[True,False,False,False])
        m.drawmeridians(np.arange(0,361.,60.))
        frame2 = data2.isel(time=-1).plot(ax=ax,robust=True)
        fig.axes[-1].set_ylabel('\n'+experiment_name)  # Add label to colorbar.
        ax = pl.subplot(313)
        m = Basemap(projection='cyl', llcrnrlat=-90, urcrnrlat=90, llcrnrlon=0, urcrnrlon=360, ax=ax)
        m.drawcoastlines()
        m.drawparallels(np.arange(-60.,61.,30.),labels=[True,False,False,False])
        m.drawmeridians(np.arange(0,361.,60.),labels=[False,False,False,True])
        frame3 = reldiff.isel(time=-1).plot(ax=ax,robust=True,cmap='RdBu_r',center=0.0)
        fig.axes[-1].set_ylabel('\nrelative diff (%)')  # Add label to colorbar.
        # Remove some labels that were generated by xarray.
        frame1.axes.set_xlabel('')
        frame2.axes.set_xlabel('')
        frame3.axes.set_xlabel('')
        frame1.axes.set_ylabel('')
        frame2.axes.set_ylabel('')
        frame3.axes.set_ylabel('')
        # Adjust layout to avoid cutting off lat/lon labels.
        rect=(0.02,0.02,1.02,1)
    else:
        fig = pl.figure(figsize=(12,6))
        pl.suptitle(rename.get(tracer,tracer)+' '+diag_type,fontsize=16)
        # Closure used inside the animation loop; here only the third panel
        # carries the (time) title, the overall title is the suptitle above.
        def set_title(title):
            frame1.axes.set_title('')
            frame2.axes.set_title('')
            frame3.axes.set_title(title)
        frame1 = data1.isel(time=-1).plot(ax=pl.subplot(131),robust=True)
        fig.axes[-1].set_ylabel('')  # Remove label on colorbar
        frame2 = data2.isel(time=-1).plot(ax=pl.subplot(132),robust=True)
        fig.axes[-1].set_ylabel('')  # Remove label on colorbar
        frame3 = reldiff.isel(time=-1).plot(ax=pl.subplot(133),robust=True,cmap='RdBu_r',center=0.0)
        fig.axes[-1].set_ylabel('')  # Remove label on colorbar
        # Remove some labels to save space.
        frame2.axes.get_yaxis().set_visible(False)
        frame3.axes.get_yaxis().set_visible(False)
        # Label the frames.
        frame1.axes.set_xlabel(control_name)
        frame2.axes.set_xlabel(experiment_name)
        frame3.axes.set_xlabel('relative diff (%)')
    # Use same colorbar range for control and experiment.
    frame2.set_clim(frame1.get_clim())
    # Adjust vertical scale for pressure levels.
    if 'pres' in control.dims:
        for frame in frame1, frame2, frame3:
            # Pressure decreases with height, so flip the axis; switch to a
            # log scale when the data reaches into the upper atmosphere.
            frame.axes.set_ylim(frame.axes.get_ylim()[::-1])
            if min(control.coords['pres']) <= 100:
                frame.axes.set_yscale('log')
    # Set a dummy title to reserve space in the layout.
    set_title('title')
    # Remove excess whitespace.
    pl.tight_layout(rect=rect)
    try:
        # gpsc-vis nodes
        movie = an.writers['avconv'](fps=args.fps, bitrate=args.bitrate, metadata={'comment':str(args)})
    except KeyError:
        # ppp4
        movie = an.writers['ffmpeg'](fps=args.fps, bitrate=args.bitrate, metadata={'comment':str(args)})
    outfile = args.outname+'_'+tracer+'.avi'
    print ("Saving "+outfile)
    # 72 here is the dpi handed to the movie writer.
    with movie.saving(fig, outfile, 72):
        for i in fstd2nc.mixins._ProgressBar("",suffix='%(percent)d%% [%(myeta)s]').iter(range(control.dims['time'])):
            # Get date and time as formatted string.
            time = str(control.coords['time'].values[i])
            time = time[:10] + ' ' + time[11:16]
            set_title(time)
            # Get the values for this frame.
            d1 = data1.isel(time=i).values.flatten()
            d1 = np.ma.masked_invalid(d1)
            frame1.set_array(d1)
            d2 = data2.isel(time=i).values.flatten()
            d2 = np.ma.masked_invalid(d2)
            frame2.set_array(d2)
            d3 = reldiff.isel(time=i).values.flatten()
            d3 = np.ma.masked_invalid(d3)
            frame3.set_array(d3)
            # Write this frame to the movie.
            movie.grab_frame()
| neishm/EC-CAS-diags | reldiff_movie.py | Python | lgpl-3.0 | 7,014 |
# (C) British Crown Copyright 2011 - 2019, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
"""
The crs module defines Coordinate Reference Systems and the transformations
between them.
"""
from __future__ import (absolute_import, division, print_function)
from abc import ABCMeta, abstractproperty
import math
import warnings
import numpy as np
import shapely.geometry as sgeom
from shapely.prepared import prep
import six
from cartopy._crs import (CRS, Geodetic, Globe, PROJ4_VERSION,
WGS84_SEMIMAJOR_AXIS, WGS84_SEMIMINOR_AXIS)
from cartopy._crs import Geocentric # noqa: F401 (flake8 = unused import)
import cartopy.trace
__document_these__ = ['CRS', 'Geocentric', 'Geodetic', 'Globe']
class RotatedGeodetic(CRS):
    """
    A rotated latitude/longitude coordinate system with spherical topology
    and geographical distance, measured in degrees.

    Implemented via proj's ``ob_tran`` operation: ``pole_longitude`` sets
    ``lon_0``, after which two rotations are applied based on
    ``pole_latitude`` and ``central_rotated_longitude``. Equivalently, the
    pole of the GeogCRS defined by ``globe`` is moved to
    (``pole_latitude``, ``pole_longitude``) and the resulting CRS is then
    rotated about its own pole by ``central_rotated_longitude``.
    """
    def __init__(self, pole_longitude, pole_latitude,
                 central_rotated_longitude=0.0, globe=None):
        """
        Parameters
        ----------
        pole_longitude
            Pole longitude position, in unrotated degrees.
        pole_latitude
            Pole latitude position, in unrotated degrees.
        central_rotated_longitude: optional
            Longitude rotation about the new pole, in degrees. Defaults to 0.
        globe: optional
            A :class:`cartopy.crs.Globe`. Defaults to a "WGS84" datum.
        """
        params = [
            ('proj', 'ob_tran'),
            ('o_proj', 'latlon'),
            ('o_lon_p', central_rotated_longitude),
            ('o_lat_p', pole_latitude),
            ('lon_0', 180 + pole_longitude),
            # ob_tran output is in radians; report coordinates in degrees.
            ('to_meter', math.radians(1)),
        ]
        super(RotatedGeodetic, self).__init__(params,
                                              globe=globe or Globe(datum='WGS84'))
class Projection(six.with_metaclass(ABCMeta, CRS)):
    """
    Define a projected coordinate system with flat topology and Euclidean
    distance.
    """
    # Dispatch table: shapely geometry type name -> name of the method that
    # projects that geometry type.
    _method_map = {
        'Point': '_project_point',
        'LineString': '_project_line_string',
        'LinearRing': '_project_linear_ring',
        'Polygon': '_project_polygon',
        'MultiPoint': '_project_multipoint',
        'MultiLineString': '_project_multiline',
        'MultiPolygon': '_project_multipolygon',
    }
    @abstractproperty
    def boundary(self):
        """The outline of the valid projection domain (a shapely geometry)."""
        pass
    @abstractproperty
    def threshold(self):
        """Interpolation resolution in projected units, used when tracing
        lines — presumably consumed by :mod:`cartopy.trace`; confirm there."""
        pass
    @abstractproperty
    def x_limits(self):
        """(min, max) extent of the domain in projected x."""
        pass
    @abstractproperty
    def y_limits(self):
        """(min, max) extent of the domain in projected y."""
        pass
    @property
    def cw_boundary(self):
        # Lazily built and cached clockwise boundary ring.
        try:
            boundary = self._cw_boundary
        except AttributeError:
            boundary = sgeom.LinearRing(self.boundary)
            self._cw_boundary = boundary
        return boundary
    @property
    def ccw_boundary(self):
        # Lazily built and cached counter-clockwise boundary ring
        # (the boundary traversed in reverse).
        try:
            boundary = self._ccw_boundary
        except AttributeError:
            boundary = sgeom.LinearRing(self.boundary.coords[::-1])
            self._ccw_boundary = boundary
        return boundary
    @property
    def domain(self):
        # Lazily built and cached polygon covering the projection domain.
        try:
            domain = self._domain
        except AttributeError:
            domain = self._domain = sgeom.Polygon(self.boundary)
        return domain
    def _determine_longitude_bounds(self, central_longitude):
        # In new proj, using exact limits will wrap-around, so subtract a
        # small epsilon:
        epsilon = 1e-10
        minlon = -180 + central_longitude
        maxlon = 180 + central_longitude
        if central_longitude > 0:
            maxlon -= epsilon
        elif central_longitude < 0:
            minlon += epsilon
        return minlon, maxlon
    def _repr_html_(self):
        """Rich (SVG) repr for Jupyter: a small global map in this projection."""
        if not six.PY2:
            from html import escape
        else:
            from cgi import escape
        try:
            # As matplotlib is not a core cartopy dependency, don't error
            # if it's not available.
            import matplotlib.pyplot as plt
        except ImportError:
            # We can't return an SVG of the CRS, so let Jupyter fall back to
            # a default repr by returning None.
            return None
        # Produce a visual repr of the Projection instance.
        fig, ax = plt.subplots(figsize=(5, 3),
                               subplot_kw={'projection': self})
        ax.set_global()
        ax.coastlines('auto')
        ax.gridlines()
        buf = six.StringIO()
        fig.savefig(buf, format='svg', bbox_inches='tight')
        plt.close(fig)
        # "Rewind" the buffer to the start and return it as an svg string.
        buf.seek(0)
        svg = buf.read()
        return '{}<pre>{}</pre>'.format(svg, escape(repr(self)))
    def _as_mpl_axes(self):
        # Matplotlib protocol hook: lets this projection be passed directly
        # as the `projection` keyword of plt.axes()/subplot().
        import cartopy.mpl.geoaxes as geoaxes
        return geoaxes.GeoAxes, {'map_projection': self}
    def project_geometry(self, geometry, src_crs=None):
        """
        Project the given geometry into this projection.

        Parameters
        ----------
        geometry
            The geometry to (re-)project.
        src_crs: optional
            The source CRS.  Defaults to None.

            If src_crs is None, the source CRS is assumed to be a geodetic
            version of the target CRS.

        Returns
        -------
        geometry
            The projected result (a shapely geometry).

        """
        if src_crs is None:
            src_crs = self.as_geodetic()
        elif not isinstance(src_crs, CRS):
            raise TypeError('Source CRS must be an instance of CRS'
                            ' or one of its subclasses, or None.')
        geom_type = geometry.geom_type
        method_name = self._method_map.get(geom_type)
        if not method_name:
            raise ValueError('Unsupported geometry '
                             'type {!r}'.format(geom_type))
        return getattr(self, method_name)(geometry, src_crs)
    def _project_point(self, point, src_crs):
        """Project a single Point from src_crs into this CRS."""
        return sgeom.Point(*self.transform_point(point.x, point.y, src_crs))
    def _project_line_string(self, geometry, src_crs):
        """Project a LineString; may be split at the domain boundary."""
        return cartopy.trace.project_linear(geometry, src_crs, self)
    def _project_linear_ring(self, linear_ring, src_crs):
        """
        Project the given LinearRing from the src_crs into this CRS and
        returns a list of LinearRings and a single MultiLineString.

        """
        debug = False
        # 1) Resolve the initial lines into projected segments
        # 1abc
        # def23ghi
        # jkl41
        multi_line_string = cartopy.trace.project_linear(linear_ring,
                                                         src_crs, self)
        # Threshold for whether a point is close enough to be the same
        # point as another.
        threshold = max(np.abs(self.x_limits + self.y_limits)) * 1e-5
        # 2) Simplify the segments where appropriate.
        if len(multi_line_string) > 1:
            # Stitch together segments which are close to continuous.
            # This is important when:
            # 1) The first source point projects into the map and the
            # ring has been cut by the boundary.
            # Continuing the example from above this gives:
            #   def23ghi
            #   jkl41abc
            # 2) The cut ends of segments are too close to reliably
            # place into an order along the boundary.
            line_strings = list(multi_line_string)
            any_modified = False
            i = 0
            if debug:
                first_coord = np.array([ls.coords[0] for ls in line_strings])
                last_coord = np.array([ls.coords[-1] for ls in line_strings])
                print('Distance matrix:')
                np.set_printoptions(precision=2)
                x = first_coord[:, np.newaxis, :]
                y = last_coord[np.newaxis, :, :]
                print(np.abs(x - y).max(axis=-1))
            # Repeatedly join segment j's end to segment i's start until no
            # more joins are possible.
            while i < len(line_strings):
                modified = False
                j = 0
                while j < len(line_strings):
                    if i != j and np.allclose(line_strings[i].coords[0],
                                              line_strings[j].coords[-1],
                                              atol=threshold):
                        if debug:
                            print('Joining together {} and {}.'.format(i, j))
                        last_coords = list(line_strings[j].coords)
                        first_coords = list(line_strings[i].coords)[1:]
                        combo = sgeom.LineString(last_coords + first_coords)
                        if j < i:
                            i, j = j, i
                        del line_strings[j], line_strings[i]
                        line_strings.append(combo)
                        modified = True
                        any_modified = True
                        break
                    else:
                        j += 1
                if not modified:
                    i += 1
            if any_modified:
                multi_line_string = sgeom.MultiLineString(line_strings)
        # 3) Check for rings that have been created by the projection stage.
        rings = []
        line_strings = []
        for line in multi_line_string:
            if len(line.coords) > 3 and np.allclose(line.coords[0],
                                                    line.coords[-1],
                                                    atol=threshold):
                result_geometry = sgeom.LinearRing(line.coords[:-1])
                rings.append(result_geometry)
            else:
                line_strings.append(line)
        # If we found any rings, then we should re-create the multi-line str.
        if rings:
            multi_line_string = sgeom.MultiLineString(line_strings)
        return rings, multi_line_string
    def _project_multipoint(self, geometry, src_crs):
        """Project each member point; always returns a MultiPoint."""
        geoms = []
        for geom in geometry.geoms:
            geoms.append(self._project_point(geom, src_crs))
        if geoms:
            return sgeom.MultiPoint(geoms)
        else:
            return sgeom.MultiPoint()
    def _project_multiline(self, geometry, src_crs):
        # NOTE(review): the empty case returns a plain list, unlike
        # _project_multipoint which returns an empty geometry — both are
        # falsy, and callers here only test truthiness.
        geoms = []
        for geom in geometry.geoms:
            r = self._project_line_string(geom, src_crs)
            if r:
                geoms.extend(r.geoms)
        if geoms:
            return sgeom.MultiLineString(geoms)
        else:
            return []
    def _project_multipolygon(self, geometry, src_crs):
        """Project each member polygon and merge into one MultiPolygon."""
        geoms = []
        for geom in geometry.geoms:
            r = self._project_polygon(geom, src_crs)
            if r:
                geoms.extend(r.geoms)
        if geoms:
            result = sgeom.MultiPolygon(geoms)
        else:
            result = sgeom.MultiPolygon()
        return result
    def _project_polygon(self, polygon, src_crs):
        """
        Return the projected polygon(s) derived from the given polygon.

        """
        # Determine orientation of polygon.
        # TODO: Consider checking the internal rings have the opposite
        # orientation to the external rings?
        if src_crs.is_geodetic():
            is_ccw = True
        else:
            is_ccw = polygon.exterior.is_ccw
        # Project the polygon exterior/interior rings.
        # Each source ring will result in either a ring, or one or more
        # lines.
        rings = []
        multi_lines = []
        for src_ring in [polygon.exterior] + list(polygon.interiors):
            p_rings, p_mline = self._project_linear_ring(src_ring, src_crs)
            if p_rings:
                rings.extend(p_rings)
            if len(p_mline) > 0:
                multi_lines.append(p_mline)
        # Convert any lines to rings by attaching them to the boundary.
        if multi_lines:
            rings.extend(self._attach_lines_to_boundary(multi_lines, is_ccw))
        # Resolve all the inside vs. outside rings, and convert to the
        # final MultiPolygon.
        return self._rings_to_multi_polygon(rings, is_ccw)
    def _attach_lines_to_boundary(self, multi_line_strings, is_ccw):
        """
        Return a list of LinearRings by attaching the ends of the given lines
        to the boundary, paying attention to the traversal directions of the
        lines and boundary.

        """
        debug = False
        debug_plot_edges = False
        # Accumulate all the boundary and segment end points, along with
        # their distance along the boundary.
        edge_things = []
        # Get the boundary as a LineString of the correct orientation
        # so we can compute distances along it.
        if is_ccw:
            boundary = self.ccw_boundary
        else:
            boundary = self.cw_boundary
        def boundary_distance(xy):
            return boundary.project(sgeom.Point(*xy))
        # Squash all the LineStrings into a single list.
        line_strings = []
        for multi_line_string in multi_line_strings:
            line_strings.extend(multi_line_string)
        # Record the positions of all the segment ends
        for i, line_string in enumerate(line_strings):
            first_dist = boundary_distance(line_string.coords[0])
            thing = _BoundaryPoint(first_dist, False,
                                   (i, 'first', line_string.coords[0]))
            edge_things.append(thing)
            last_dist = boundary_distance(line_string.coords[-1])
            thing = _BoundaryPoint(last_dist, False,
                                   (i, 'last', line_string.coords[-1]))
            edge_things.append(thing)
        # Record the positions of all the boundary vertices
        for xy in boundary.coords[:-1]:
            point = sgeom.Point(*xy)
            dist = boundary.project(point)
            thing = _BoundaryPoint(dist, True, point)
            edge_things.append(thing)
        if debug_plot_edges:
            import matplotlib.pyplot as plt
            current_fig = plt.gcf()
            fig = plt.figure()
            # Reset the current figure so we don't upset anything.
            plt.figure(current_fig.number)
            ax = fig.add_subplot(1, 1, 1)
        # Order everything as if walking around the boundary.
        # NB. We make line end-points take precedence over boundary points
        # to ensure that end-points are still found and followed when they
        # coincide.
        edge_things.sort(key=lambda thing: (thing.distance, thing.kind))
        remaining_ls = dict(enumerate(line_strings))
        prev_thing = None
        for edge_thing in edge_things[:]:
            if (prev_thing is not None and
                    not edge_thing.kind and
                    not prev_thing.kind and
                    edge_thing.data[0] == prev_thing.data[0]):
                # NOTE(review): 'j' below is assigned but never used.
                j = edge_thing.data[0]
                # Insert a edge boundary point in between this geometry.
                mid_dist = (edge_thing.distance + prev_thing.distance) * 0.5
                mid_point = boundary.interpolate(mid_dist)
                new_thing = _BoundaryPoint(mid_dist, True, mid_point)
                if debug:
                    print('Artificially insert boundary: {}'.format(new_thing))
                ind = edge_things.index(edge_thing)
                edge_things.insert(ind, new_thing)
                prev_thing = None
            else:
                prev_thing = edge_thing
        if debug:
            print()
            print('Edge things')
            for thing in edge_things:
                print('   ', thing)
        if debug_plot_edges:
            for thing in edge_things:
                if isinstance(thing.data, sgeom.Point):
                    ax.plot(*thing.data.xy, marker='o')
                else:
                    ax.plot(*thing.data[2], marker='o')
                    ls = line_strings[thing.data[0]]
                    coords = np.array(ls.coords)
                    ax.plot(coords[:, 0], coords[:, 1])
                    ax.text(coords[0, 0], coords[0, 1], thing.data[0])
                    ax.text(coords[-1, 0], coords[-1, 1],
                            '{}.'.format(thing.data[0]))
        def filter_last(t):
            return t.kind or t.data[1] == 'first'
        edge_things = list(filter(filter_last, edge_things))
        processed_ls = []
        while remaining_ls:
            # Rename line_string to current_ls
            i, current_ls = remaining_ls.popitem()
            if debug:
                import sys
                sys.stdout.write('+')
                sys.stdout.flush()
                print()
                print('Processing: %s, %s' % (i, current_ls))
            added_linestring = set()
            while True:
                # Find out how far around this linestring's last
                # point is on the boundary. We will use this to find
                # the next point on the boundary.
                d_last = boundary_distance(current_ls.coords[-1])
                if debug:
                    print('   d_last: {!r}'.format(d_last))
                next_thing = _find_first_ge(edge_things, d_last)
                # Remove this boundary point from the edge.
                edge_things.remove(next_thing)
                if debug:
                    print('   next_thing:', next_thing)
                if next_thing.kind:
                    # We've just got a boundary point, add it, and keep going.
                    if debug:
                        print('   adding boundary point')
                    boundary_point = next_thing.data
                    combined_coords = (list(current_ls.coords) +
                                       [(boundary_point.x, boundary_point.y)])
                    current_ls = sgeom.LineString(combined_coords)
                elif next_thing.data[0] == i:
                    # We've gone all the way around and are now back at the
                    # first boundary thing.
                    if debug:
                        print('   close loop')
                    processed_ls.append(current_ls)
                    if debug_plot_edges:
                        coords = np.array(current_ls.coords)
                        ax.plot(coords[:, 0], coords[:, 1], color='black',
                                linestyle='--')
                    break
                else:
                    if debug:
                        print('   adding line')
                    j = next_thing.data[0]
                    line_to_append = line_strings[j]
                    if j in remaining_ls:
                        remaining_ls.pop(j)
                    coords_to_append = list(line_to_append.coords)
                    # Build up the linestring.
                    current_ls = sgeom.LineString((list(current_ls.coords) +
                                                   coords_to_append))
                    # Catch getting stuck in an infinite loop by checking that
                    # linestring only added once.
                    if j not in added_linestring:
                        added_linestring.add(j)
                    else:
                        if debug_plot_edges:
                            plt.show()
                        raise RuntimeError('Unidentified problem with '
                                           'geometry, linestring being '
                                           're-added. Please raise an issue.')
        # filter out any non-valid linear rings
        linear_rings = [
            sgeom.LinearRing(linear_ring)
            for linear_ring in processed_ls
            if len(linear_ring.coords) > 2 and linear_ring.is_valid]
        if debug:
            print('   DONE')
        return linear_rings
    def _rings_to_multi_polygon(self, rings, is_ccw):
        """Sort rings into exteriors/holes and assemble a MultiPolygon."""
        exterior_rings = []
        interior_rings = []
        # A ring whose winding opposes the expected orientation is a hole.
        for ring in rings:
            if ring.is_ccw != is_ccw:
                interior_rings.append(ring)
            else:
                exterior_rings.append(ring)
        polygon_bits = []
        # Turn all the exterior rings into polygon definitions,
        # "slurping up" any interior rings they contain.
        for exterior_ring in exterior_rings:
            polygon = sgeom.Polygon(exterior_ring)
            prep_polygon = prep(polygon)
            holes = []
            for interior_ring in interior_rings[:]:
                if prep_polygon.contains(interior_ring):
                    holes.append(interior_ring)
                    interior_rings.remove(interior_ring)
                elif polygon.crosses(interior_ring):
                    # Likely that we have an invalid geometry such as
                    # that from #509 or #537.
                    holes.append(interior_ring)
                    interior_rings.remove(interior_ring)
            polygon_bits.append((exterior_ring.coords,
                                 [ring.coords for ring in holes]))
        # Any left over "interior" rings need "inverting" with respect
        # to the boundary.
        if interior_rings:
            boundary_poly = self.domain
            x3, y3, x4, y4 = boundary_poly.bounds
            bx = (x4 - x3) * 0.1
            by = (y4 - y3) * 0.1
            x3 -= bx
            y3 -= by
            x4 += bx
            y4 += by
            for ring in interior_rings:
                # Use shapely buffer in an attempt to fix invalid geometries
                polygon = sgeom.Polygon(ring).buffer(0)
                if not polygon.is_empty and polygon.is_valid:
                    x1, y1, x2, y2 = polygon.bounds
                    bx = (x2 - x1) * 0.1
                    by = (y2 - y1) * 0.1
                    x1 -= bx
                    y1 -= by
                    x2 += bx
                    y2 += by
                    box = sgeom.box(min(x1, x3), min(y1, y3),
                                    max(x2, x4), max(y2, y4))
                    # Invert the polygon
                    polygon = box.difference(polygon)
                    # Intersect the inverted polygon with the boundary
                    polygon = boundary_poly.intersection(polygon)
                    if not polygon.is_empty:
                        polygon_bits.append(polygon)
        if polygon_bits:
            multi_poly = sgeom.MultiPolygon(polygon_bits)
        else:
            multi_poly = sgeom.MultiPolygon()
        return multi_poly
    def quick_vertices_transform(self, vertices, src_crs):
        """
        Where possible, return a vertices array transformed to this CRS from
        the given vertices array of shape ``(n, 2)`` and the source CRS.

        Note
        ----
            This method may return None to indicate that the vertices cannot
            be transformed quickly, and a more complex geometry transformation
            is required (see :meth:`cartopy.crs.Projection.project_geometry`).

        """
        return_value = None
        if self == src_crs:
            x = vertices[:, 0]
            y = vertices[:, 1]
            # Extend the limits a tiny amount to allow for precision mistakes
            epsilon = 1.e-10
            x_limits = (self.x_limits[0] - epsilon, self.x_limits[1] + epsilon)
            y_limits = (self.y_limits[0] - epsilon, self.y_limits[1] + epsilon)
            if (x.min() >= x_limits[0] and x.max() <= x_limits[1] and
                    y.min() >= y_limits[0] and y.max() <= y_limits[1]):
                return_value = vertices
        return return_value
class _RectangularProjection(six.with_metaclass(ABCMeta, Projection)):
    """
    The abstract superclass of projections whose domain is a rectangle
    symmetric about the origin.
    """
    def __init__(self, proj4_params, half_width, half_height, globe=None):
        # Half extents of the rectangular domain, in projected units.
        self._half_width = half_width
        self._half_height = half_height
        super(_RectangularProjection, self).__init__(proj4_params, globe=globe)
    @property
    def boundary(self):
        """Closed rectangle outlining the projection domain."""
        x0, x1 = self.x_limits
        y0, y1 = self.y_limits
        return sgeom.LinearRing([(x0, y0), (x0, y1), (x1, y1), (x1, y0),
                                 (x0, y0)])
    @property
    def x_limits(self):
        return (-self._half_width, self._half_width)
    @property
    def y_limits(self):
        return (-self._half_height, self._half_height)
class _CylindricalProjection(six.with_metaclass(ABCMeta,
                                                _RectangularProjection)):
    """
    The abstract class which denotes cylindrical projections where we
    want to allow x values to wrap around.

    Marker base class only: it adds no behaviour of its own.
    """
def _ellipse_boundary(semimajor=2, semiminor=1, easting=0, northing=0, n=201):
"""
Define a projection boundary using an ellipse.
This type of boundary is used by several projections.
"""
t = np.linspace(0, -2 * np.pi, n) # Clockwise boundary.
coords = np.vstack([semimajor * np.cos(t), semiminor * np.sin(t)])
coords += ([easting], [northing])
return coords
class PlateCarree(_CylindricalProjection):
    """Equidistant cylindrical (proj 'eqc') projection."""
    def __init__(self, central_longitude=0.0, globe=None):
        proj4_params = [('proj', 'eqc'), ('lon_0', central_longitude)]
        if globe is None:
            # Radius chosen so one radian maps to one degree, giving
            # x limits of +/-180 and y limits of +/-90 below.
            globe = Globe(semimajor_axis=math.degrees(1))
        a_rad = math.radians(globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
        x_max = a_rad * 180
        y_max = a_rad * 90
        # Set the threshold around 0.5 if the x max is 180.
        self._threshold = x_max / 360.
        super(PlateCarree, self).__init__(proj4_params, x_max, y_max,
                                          globe=globe)
    @property
    def threshold(self):
        return self._threshold
    def _bbox_and_offset(self, other_plate_carree):
        """
        Return a pair of (xmin, xmax) pairs and an offset which can be used
        for identification of whether data in ``other_plate_carree`` needs
        to be transformed to wrap appropriately.

        >>> import cartopy.crs as ccrs
        >>> src = ccrs.PlateCarree(central_longitude=10)
        >>> bboxes, offset = ccrs.PlateCarree()._bbox_and_offset(src)
        >>> print(bboxes)
        [[-180.0, -170.0], [-170.0, 180.0]]
        >>> print(offset)
        10.0

        The returned values are longitudes in ``other_plate_carree``'s
        coordinate system.

        Warning
        -------
            The two CRSs must be identical in every way, other than their
            central longitudes. No checking of this is done.

        """
        self_lon_0 = self.proj4_params['lon_0']
        other_lon_0 = other_plate_carree.proj4_params['lon_0']
        lon_0_offset = other_lon_0 - self_lon_0
        lon_lower_bound_0 = self.x_limits[0]
        lon_lower_bound_1 = (other_plate_carree.x_limits[0] + lon_0_offset)
        if lon_lower_bound_1 < self.x_limits[0]:
            lon_lower_bound_1 += np.diff(self.x_limits)[0]
        lon_lower_bound_0, lon_lower_bound_1 = sorted(
            [lon_lower_bound_0, lon_lower_bound_1])
        bbox = [[lon_lower_bound_0, lon_lower_bound_1],
                [lon_lower_bound_1, lon_lower_bound_0]]
        # The second interval wraps past the upper x limit.
        bbox[1][1] += np.diff(self.x_limits)[0]
        return bbox, lon_0_offset
    def quick_vertices_transform(self, vertices, src_crs):
        return_value = super(PlateCarree,
                             self).quick_vertices_transform(vertices, src_crs)
        # Optimise the PlateCarree -> PlateCarree case where no
        # wrapping or interpolation needs to take place.
        if return_value is None and isinstance(src_crs, PlateCarree):
            self_params = self.proj4_params.copy()
            src_params = src_crs.proj4_params.copy()
            # Compare everything except the central longitudes.
            self_params.pop('lon_0'), src_params.pop('lon_0')
            xs, ys = vertices[:, 0], vertices[:, 1]
            potential = (self_params == src_params and
                         self.y_limits[0] <= ys.min() and
                         self.y_limits[1] >= ys.max())
            if potential:
                mod = np.diff(src_crs.x_limits)[0]
                bboxes, proj_offset = self._bbox_and_offset(src_crs)
                x_lim = xs.min(), xs.max()
                for poly in bboxes:
                    # Arbitrarily choose the number of moduli to look
                    # above and below the -180->180 range. If data is beyond
                    # this range, we're not going to transform it quickly.
                    for i in [-1, 0, 1, 2]:
                        offset = mod * i - proj_offset
                        if ((poly[0] + offset) <= x_lim[0] and
                                (poly[1] + offset) >= x_lim[1]):
                            return_value = vertices + [[-offset, 0]]
                            break
                    if return_value is not None:
                        break
        return return_value
class TransverseMercator(Projection):
    """
    A Transverse Mercator projection.

    """
    def __init__(self, central_longitude=0.0, central_latitude=0.0,
                 false_easting=0.0, false_northing=0.0,
                 scale_factor=1.0, globe=None, approx=None):
        """
        Parameters
        ----------
        central_longitude: optional
            The true longitude of the central meridian in degrees.
            Defaults to 0.
        central_latitude: optional
            The true latitude of the planar origin in degrees. Defaults to 0.
        false_easting: optional
            X offset from the planar origin in metres. Defaults to 0.
        false_northing: optional
            Y offset from the planar origin in metres. Defaults to 0.
        scale_factor: optional
            Scale factor at the central meridian. Defaults to 1.
        globe: optional
            An instance of :class:`cartopy.crs.Globe`. If omitted, a default
            globe is created.
        approx: optional
            Whether to use Proj's approximate projection (True), or the new
            Extended Transverse Mercator code (False). Defaults to True, but
            will change to False in the next release.

        """
        if approx is None:
            warnings.warn('The default value for the *approx* keyword '
                          'argument to TransverseMercator will change '
                          'from True to False after 0.18.',
                          stacklevel=2)
            approx = True
        proj4_params = [('proj', 'tmerc'), ('lon_0', central_longitude),
                        ('lat_0', central_latitude), ('k', scale_factor),
                        ('x_0', false_easting), ('y_0', false_northing),
                        ('units', 'm')]
        # PROJ < 6 selects the exact algorithm by the projection name
        # ('etmerc'); PROJ >= 6 uses the 'approx' flag on 'tmerc' instead.
        if PROJ4_VERSION < (6, 0, 0):
            if not approx:
                proj4_params[0] = ('proj', 'etmerc')
        else:
            if approx:
                proj4_params += [('approx', None)]
        super(TransverseMercator, self).__init__(proj4_params, globe=globe)
    @property
    def threshold(self):
        return 1e4
    @property
    def boundary(self):
        # Rectangular domain built from the x/y limits below.
        x0, x1 = self.x_limits
        y0, y1 = self.y_limits
        return sgeom.LinearRing([(x0, y0), (x0, y1),
                                 (x1, y1), (x1, y0),
                                 (x0, y0)])
    @property
    def x_limits(self):
        return (-2e7, 2e7)
    @property
    def y_limits(self):
        return (-1e7, 1e7)
class OSGB(TransverseMercator):
    """A Transverse Mercator projection for the OSGB grid."""

    def __init__(self, approx=None):
        if approx is None:
            warnings.warn('The default value for the *approx* keyword '
                          'argument to OSGB will change from True to '
                          'False after 0.18.',
                          stacklevel=2)
            approx = True
        # Fixed national-grid parameters on the Airy ellipsoid / OSGB36 datum.
        super(OSGB, self).__init__(central_longitude=-2, central_latitude=49,
                                   scale_factor=0.9996012717,
                                   false_easting=400000,
                                   false_northing=-100000,
                                   globe=Globe(datum='OSGB36', ellipse='airy'),
                                   approx=approx)

    @property
    def boundary(self):
        # Rectangle anchored at the origin, sized from the x/y extents.
        x0, x1 = self.x_limits
        y0, y1 = self.y_limits
        width = x1 - x0
        height = y1 - y0
        corners = [(0, 0), (0, height), (width, height), (width, 0), (0, 0)]
        return sgeom.LinearRing(corners)

    @property
    def x_limits(self):
        return (0, 7e5)

    @property
    def y_limits(self):
        return (0, 13e5)
class OSNI(TransverseMercator):
    """A Transverse Mercator projection for the OSNI grid."""

    def __init__(self, approx=None):
        if approx is None:
            warnings.warn('The default value for the *approx* keyword '
                          'argument to OSNI will change from True to '
                          'False after 0.18.',
                          stacklevel=2)
            approx = True
        # Fixed grid parameters on a custom (modified Airy) ellipsoid.
        super(OSNI, self).__init__(
            central_longitude=-8,
            central_latitude=53.5,
            scale_factor=1.000035,
            false_easting=200000,
            false_northing=250000,
            globe=Globe(semimajor_axis=6377340.189,
                        semiminor_axis=6356034.447938534),
            approx=approx)

    @property
    def boundary(self):
        # Rectangle anchored at the origin, sized from the x/y extents.
        x0, x1 = self.x_limits
        y0, y1 = self.y_limits
        width = x1 - x0
        height = y1 - y0
        return sgeom.LinearRing(
            [(0, 0), (0, height), (width, height), (width, 0), (0, 0)])

    @property
    def x_limits(self):
        return (18814.9667, 386062.3293)

    @property
    def y_limits(self):
        return (11764.8481, 464720.9559)
class UTM(Projection):
    """
    Universal Transverse Mercator projection.
    """

    def __init__(self, zone, southern_hemisphere=False, globe=None):
        """
        Parameters
        ----------
        zone
            The numeric zone of the UTM required.
        southern_hemisphere: optional
            Set to True if the zone is in the southern hemisphere. Defaults to
            False.
        globe: optional
            An instance of :class:`cartopy.crs.Globe`. If omitted, a default
            globe is created.
        """
        params = [('proj', 'utm'), ('units', 'm'), ('zone', zone)]
        if southern_hemisphere:
            params.append(('south', None))
        super(UTM, self).__init__(params, globe=globe)

    @property
    def boundary(self):
        # Rectangular boundary spanning the full x/y limits.
        xmin, xmax = self.x_limits
        ymin, ymax = self.y_limits
        return sgeom.LinearRing([(xmin, ymin), (xmin, ymax),
                                 (xmax, ymax), (xmax, ymin),
                                 (xmin, ymin)])

    @property
    def threshold(self):
        return 1e2

    @property
    def x_limits(self):
        # Nominal easting span is [0, 1e6]; allow 50% overflow either side.
        easting = 5e5
        half = easting / 2
        return (-half, 2 * easting + half)

    @property
    def y_limits(self):
        # Nominal northing span is [0, 2e7]; allow 50% overflow either side.
        northing = 1e7
        return (-northing, 2 * northing + northing / 2)
class EuroPP(UTM):
    """
    UTM Zone 32 projection for EuroPP domain.

    Ellipsoid is International 1924, Datum is ED50.
    """

    def __init__(self):
        super(EuroPP, self).__init__(32, globe=Globe(ellipse='intl'))

    @property
    def x_limits(self):
        # Widened beyond the plain UTM zone to cover the EuroPP domain.
        return (-1.4e6, 2e6)

    @property
    def y_limits(self):
        return (4e6, 7.9e6)
class Mercator(Projection):
    """
    A Mercator projection.
    """

    def __init__(self, central_longitude=0.0,
                 min_latitude=-80.0, max_latitude=84.0,
                 globe=None, latitude_true_scale=None,
                 false_easting=0.0, false_northing=0.0, scale_factor=None):
        """
        Parameters
        ----------
        central_longitude: optional
            The central longitude. Defaults to 0.
        min_latitude: optional
            The maximum southerly extent of the projection. Defaults
            to -80 degrees.
        max_latitude: optional
            The maximum northerly extent of the projection. Defaults
            to 84 degrees.
        globe: A :class:`cartopy.crs.Globe`, optional
            If omitted, a default globe is created.
        latitude_true_scale: optional
            The latitude where the scale is 1. Defaults to 0 degrees.
        false_easting: optional
            X offset from the planar origin in metres. Defaults to 0.
        false_northing: optional
            Y offset from the planar origin in metres. Defaults to 0.
        scale_factor: optional
            Scale factor at natural origin. Defaults to unused.

        Notes
        -----
        Only one of ``latitude_true_scale`` and ``scale_factor`` should
        be included.
        """
        proj4_params = [('proj', 'merc'),
                        ('lon_0', central_longitude),
                        ('x_0', false_easting),
                        ('y_0', false_northing),
                        ('units', 'm')]
        # If it's None, we don't pass it to Proj4, in which case its default
        # of 0.0 will be used.
        if latitude_true_scale is not None:
            proj4_params.append(('lat_ts', latitude_true_scale))
        if scale_factor is not None:
            if latitude_true_scale is not None:
                raise ValueError('It does not make sense to provide both '
                                 '"scale_factor" and "latitude_true_scale". ')
            else:
                proj4_params.append(('k_0', scale_factor))
        super(Mercator, self).__init__(proj4_params, globe=globe)
        # Calculate limits.
        # Project the lon/lat corners of the requested extent to obtain the
        # rectangular x/y limits of this particular instance.
        minlon, maxlon = self._determine_longitude_bounds(central_longitude)
        limits = self.transform_points(Geodetic(),
                                       np.array([minlon, maxlon]),
                                       np.array([min_latitude, max_latitude]))
        self._x_limits = tuple(limits[..., 0])
        self._y_limits = tuple(limits[..., 1])
        # Resolution threshold scaled to the projected extent.
        self._threshold = min(np.diff(self.x_limits)[0] / 720,
                              np.diff(self.y_limits)[0] / 360)

    def __eq__(self, other):
        # Extents must match too: instances with identical proj4 parameters
        # can still differ in their min/max latitude clipping.
        res = super(Mercator, self).__eq__(other)
        if hasattr(other, "_y_limits") and hasattr(other, "_x_limits"):
            res = res and self._y_limits == other._y_limits and \
                self._x_limits == other._x_limits
        return res

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        # Kept consistent with __eq__: hash covers proj4 string and extents.
        return hash((self.proj4_init, self._x_limits, self._y_limits))

    @property
    def threshold(self):
        return self._threshold

    @property
    def boundary(self):
        x0, x1 = self.x_limits
        y0, y1 = self.y_limits
        return sgeom.LinearRing([(x0, y0), (x0, y1),
                                 (x1, y1), (x1, y0),
                                 (x0, y0)])

    @property
    def x_limits(self):
        return self._x_limits

    @property
    def y_limits(self):
        return self._y_limits
# Define a specific instance of a Mercator projection, the Google mercator.
# The globe is spherical (semiminor axis set equal to the semimajor axis)
# and 'nadgrids' is nulled so no datum shift is applied.
Mercator.GOOGLE = Mercator(min_latitude=-85.0511287798066,
                           max_latitude=85.0511287798066,
                           globe=Globe(ellipse=None,
                                       semimajor_axis=WGS84_SEMIMAJOR_AXIS,
                                       semiminor_axis=WGS84_SEMIMAJOR_AXIS,
                                       nadgrids='@null'))
# Deprecated form, kept for backward compatibility.
GOOGLE_MERCATOR = Mercator.GOOGLE
class LambertCylindrical(_RectangularProjection):
    """A Lambert Cylindrical projection (Proj ``cea``)."""

    def __init__(self, central_longitude=0.0):
        # Unit sphere expressed in degrees, matching the rectangular limits.
        radius = math.degrees(1)
        params = [('proj', 'cea'), ('lon_0', central_longitude)]
        super(LambertCylindrical, self).__init__(
            params, 180, radius, globe=Globe(semimajor_axis=radius))

    @property
    def threshold(self):
        return 0.5
class LambertConformal(Projection):
    """
    A Lambert Conformal conic projection.
    """

    def __init__(self, central_longitude=-96.0, central_latitude=39.0,
                 false_easting=0.0, false_northing=0.0,
                 secant_latitudes=None, standard_parallels=None,
                 globe=None, cutoff=-30):
        """
        Parameters
        ----------
        central_longitude: optional
            The central longitude. Defaults to -96.
        central_latitude: optional
            The central latitude. Defaults to 39.
        false_easting: optional
            X offset from planar origin in metres. Defaults to 0.
        false_northing: optional
            Y offset from planar origin in metres. Defaults to 0.
        secant_latitudes: optional
            Secant latitudes. This keyword is deprecated in v0.12 and directly
            replaced by ``standard parallels``. Defaults to None.
        standard_parallels: optional
            Standard parallel latitude(s). Defaults to (33, 45).
        globe: optional
            A :class:`cartopy.crs.Globe`. If omitted, a default globe is
            created.
        cutoff: optional
            Latitude of map cutoff.
            The map extends to infinity opposite the central pole
            so we must cut off the map drawing before then.
            A value of 0 will draw half the globe. Defaults to -30.
        """
        proj4_params = [('proj', 'lcc'),
                        ('lon_0', central_longitude),
                        ('lat_0', central_latitude),
                        ('x_0', false_easting),
                        ('y_0', false_northing)]
        # Reconcile the deprecated secant_latitudes keyword with its
        # replacement, standard_parallels; supplying both is an error.
        if secant_latitudes and standard_parallels:
            raise TypeError('standard_parallels replaces secant_latitudes.')
        elif secant_latitudes is not None:
            warnings.warn('secant_latitudes has been deprecated in v0.12. '
                          'The standard_parallels keyword can be used as a '
                          'direct replacement.',
                          DeprecationWarning,
                          stacklevel=2)
            standard_parallels = secant_latitudes
        elif standard_parallels is None:
            # The default. Put this as a keyword arg default once
            # secant_latitudes is removed completely.
            standard_parallels = (33, 45)
        n_parallels = len(standard_parallels)
        if not 1 <= n_parallels <= 2:
            raise ValueError('1 or 2 standard parallels must be specified. '
                             'Got {} ({})'.format(n_parallels,
                                                  standard_parallels))
        proj4_params.append(('lat_1', standard_parallels[0]))
        if n_parallels == 2:
            proj4_params.append(('lat_2', standard_parallels[1]))
        super(LambertConformal, self).__init__(proj4_params, globe=globe)
        # Compute whether this projection is at the "north pole" or the
        # "south pole" (after the central lon/lat have been taken into
        # account).
        if n_parallels == 1:
            plat = 90 if standard_parallels[0] > 0 else -90
        else:
            # Which pole are the parallels closest to? That is the direction
            # that the cone converges.
            if abs(standard_parallels[0]) > abs(standard_parallels[1]):
                poliest_sec = standard_parallels[0]
            else:
                poliest_sec = standard_parallels[1]
            plat = 90 if poliest_sec > 0 else -90
        self.cutoff = cutoff
        # Build the boundary ring: n points along the cutoff parallel,
        # bracketed by two copies of the converging pole point to close
        # the ring.
        n = 91
        lons = np.empty(n + 2)
        lats = np.full(n + 2, float(cutoff))
        lons[0] = lons[-1] = 0
        lats[0] = lats[-1] = plat
        if plat == 90:
            # Ensure clockwise
            lons[1:-1] = np.linspace(central_longitude + 180 - 0.001,
                                     central_longitude - 180 + 0.001, n)
        else:
            lons[1:-1] = np.linspace(central_longitude - 180 + 0.001,
                                     central_longitude + 180 - 0.001, n)
        points = self.transform_points(PlateCarree(), lons, lats)
        self._boundary = sgeom.LinearRing(points)
        # Cache the axis-aligned extents of the boundary ring.
        mins = np.min(points, axis=0)
        maxs = np.max(points, axis=0)
        self._x_limits = mins[0], maxs[0]
        self._y_limits = mins[1], maxs[1]

    def __eq__(self, other):
        # The cutoff latitude affects the boundary, so it participates in
        # equality alongside the proj4 parameters.
        res = super(LambertConformal, self).__eq__(other)
        if hasattr(other, "cutoff"):
            res = res and self.cutoff == other.cutoff
        return res

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash((self.proj4_init, self.cutoff))

    @property
    def boundary(self):
        return self._boundary

    @property
    def threshold(self):
        return 1e5

    @property
    def x_limits(self):
        return self._x_limits

    @property
    def y_limits(self):
        return self._y_limits
class LambertAzimuthalEqualArea(Projection):
    """
    A Lambert Azimuthal Equal-Area projection.
    """

    def __init__(self, central_longitude=0.0, central_latitude=0.0,
                 false_easting=0.0, false_northing=0.0,
                 globe=None):
        """
        Parameters
        ----------
        central_longitude: optional
            The central longitude. Defaults to 0.
        central_latitude: optional
            The central latitude. Defaults to 0.
        false_easting: optional
            X offset from planar origin in metres. Defaults to 0.
        false_northing: optional
            Y offset from planar origin in metres. Defaults to 0.
        globe: optional
            A :class:`cartopy.crs.Globe`. If omitted, a default globe is
            created.
        """
        proj4_params = [('proj', 'laea'),
                        ('lon_0', central_longitude),
                        ('lat_0', central_latitude),
                        ('x_0', false_easting),
                        ('y_0', false_northing)]
        super(LambertAzimuthalEqualArea, self).__init__(proj4_params,
                                                        globe=globe)
        # np.float was deprecated in NumPy 1.20 and removed in 1.24; the
        # builtin float is its exact equivalent.
        a = float(self.globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
        # Find the antipode, and shift it a small amount in latitude to
        # approximate the extent of the projection:
        lon = central_longitude + 180
        sign = np.sign(central_latitude) or 1
        lat = -central_latitude + sign * 0.01
        x, max_y = self.transform_point(lon, lat, PlateCarree())
        # Elliptical boundary slightly inside the full disc, centred on the
        # false origin.
        coords = _ellipse_boundary(a * 1.9999, max_y - false_northing,
                                   false_easting, false_northing, 61)
        self._boundary = sgeom.polygon.LinearRing(coords.T)
        mins = np.min(coords, axis=1)
        maxs = np.max(coords, axis=1)
        self._x_limits = mins[0], maxs[0]
        self._y_limits = mins[1], maxs[1]
        self._threshold = np.diff(self._x_limits)[0] * 1e-3

    @property
    def boundary(self):
        return self._boundary

    @property
    def threshold(self):
        return self._threshold

    @property
    def x_limits(self):
        return self._x_limits

    @property
    def y_limits(self):
        return self._y_limits
class Miller(_RectangularProjection):
    """A Miller cylindrical projection."""

    _handles_ellipses = False

    def __init__(self, central_longitude=0.0, globe=None):
        """
        Parameters
        ----------
        central_longitude: optional
            The central longitude. Defaults to 0.
        globe: optional
            A :class:`cartopy.crs.Globe`. If omitted, a spherical globe with
            a semimajor axis of ``math.degrees(1)`` is created.
        """
        if globe is None:
            globe = Globe(semimajor_axis=math.degrees(1), ellipse=None)
        # TODO: Let the globe return the semimajor axis always.
        # np.float was deprecated in NumPy 1.20 and removed in 1.24; the
        # builtin float is its exact equivalent.
        a = float(globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
        proj4_params = [('proj', 'mill'), ('lon_0', central_longitude)]
        # See Snyder, 1987. Eqs (11-1) and (11-2) substituting maximums of
        # (lambda-lambda0)=180 and phi=90 to get limits.
        super(Miller, self).__init__(proj4_params,
                                     a * np.pi, a * 2.303412543376391,
                                     globe=globe)

    @property
    def threshold(self):
        return 0.5
class RotatedPole(_CylindricalProjection):
    """
    A rotated latitude/longitude projected coordinate system
    with cylindrical topology and projected distance.

    Coordinates are measured in projection metres.

    The class uses proj to perform an ob_tran operation, using the
    pole_longitude to set a lon_0 then performing two rotations based on
    pole_latitude and central_rotated_longitude.
    This is equivalent to setting the new pole to a location defined by
    the pole_latitude and pole_longitude values in the GeogCRS defined by
    globe, then rotating this new CRS about it's pole using the
    central_rotated_longitude value.
    """

    def __init__(self, pole_longitude=0.0, pole_latitude=90.0,
                 central_rotated_longitude=0.0, globe=None):
        """
        Parameters
        ----------
        pole_longitude: optional
            Pole longitude position, in unrotated degrees. Defaults to 0.
        pole_latitude: optional
            Pole latitude position, in unrotated degrees. Defaults to 90.
        central_rotated_longitude: optional
            Longitude rotation about the new pole, in degrees. Defaults to 0.
        globe: optional
            An optional :class:`cartopy.crs.Globe`. Defaults to a "WGS84"
            datum.
        """
        # 'to_meter' of one radian-in-degrees keeps the projected units
        # aligned with the 180/90 rectangular limits passed to the base
        # class below.
        proj4_params = [('proj', 'ob_tran'), ('o_proj', 'latlon'),
                        ('o_lon_p', central_rotated_longitude),
                        ('o_lat_p', pole_latitude),
                        ('lon_0', 180 + pole_longitude),
                        ('to_meter', math.radians(1))]
        super(RotatedPole, self).__init__(proj4_params, 180, 90, globe=globe)

    @property
    def threshold(self):
        return 0.5
class Gnomonic(Projection):
    """A Gnomonic projection."""

    _handles_ellipses = False

    def __init__(self, central_latitude=0.0,
                 central_longitude=0.0, globe=None):
        params = [('proj', 'gnom'), ('lat_0', central_latitude),
                  ('lon_0', central_longitude)]
        super(Gnomonic, self).__init__(params, globe=globe)
        # Truncation radius for the (unbounded) projection plane.
        self._max = 5e7

    @property
    def boundary(self):
        # Circular boundary of the truncation radius about the origin.
        return sgeom.Point(0, 0).buffer(self._max).exterior

    @property
    def threshold(self):
        return 1e5

    @property
    def x_limits(self):
        return (-self._max, self._max)

    @property
    def y_limits(self):
        return (-self._max, self._max)
class Stereographic(Projection):
    """
    A Stereographic projection.
    """

    def __init__(self, central_latitude=0.0, central_longitude=0.0,
                 false_easting=0.0, false_northing=0.0,
                 true_scale_latitude=None,
                 scale_factor=None, globe=None):
        """
        Parameters
        ----------
        central_latitude: optional
            The central latitude. Defaults to 0.
        central_longitude: optional
            The central longitude. Defaults to 0.
        false_easting: optional
            X offset from planar origin in metres. Defaults to 0.
        false_northing: optional
            Y offset from planar origin in metres. Defaults to 0.
        true_scale_latitude: optional
            The latitude of true scale; only used for polar aspects
            (``central_latitude`` of +/-90). Defaults to None.
        scale_factor: optional
            Scale factor at natural origin. Mutually exclusive with
            ``true_scale_latitude``. Defaults to None.
        globe: optional
            A :class:`cartopy.crs.Globe`. If omitted, a default globe is
            created.
        """
        # Warn when using Stereographic with proj < 5.0.0 due to
        # incorrect transformation with lon_0=0 (see
        # https://github.com/OSGeo/proj.4/issues/194).
        if central_latitude == 0:
            if PROJ4_VERSION != ():
                if PROJ4_VERSION < (5, 0, 0):
                    warnings.warn(
                        'The Stereographic projection in Proj older than '
                        '5.0.0 incorrectly transforms points when '
                        'central_latitude=0. Use this projection with '
                        'caution.',
                        stacklevel=2)
            else:
                warnings.warn(
                    'Cannot determine Proj version. The Stereographic '
                    'projection may be unreliable and should be used with '
                    'caution.',
                    stacklevel=2)
        proj4_params = [('proj', 'stere'), ('lat_0', central_latitude),
                        ('lon_0', central_longitude),
                        ('x_0', false_easting), ('y_0', false_northing)]
        if true_scale_latitude is not None:
            if central_latitude not in (-90., 90.):
                warnings.warn('"true_scale_latitude" parameter is only used '
                              'for polar stereographic projections. Consider '
                              'the use of "scale_factor" instead.',
                              stacklevel=2)
            proj4_params.append(('lat_ts', true_scale_latitude))
        if scale_factor is not None:
            if true_scale_latitude is not None:
                raise ValueError('It does not make sense to provide both '
                                 '"scale_factor" and "true_scale_latitude". '
                                 'Ignoring "scale_factor".')
            else:
                proj4_params.append(('k_0', scale_factor))
        super(Stereographic, self).__init__(proj4_params, globe=globe)
        # TODO: Let the globe return the semimajor axis always.
        # np.float was deprecated in NumPy 1.20 and removed in 1.24; the
        # builtin float is its exact equivalent.
        a = float(self.globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
        b = float(self.globe.semiminor_axis or WGS84_SEMIMINOR_AXIS)
        # Note: The magic number has been picked to maintain consistent
        # behaviour with a wgs84 globe. There is no guarantee that the scaling
        # should even be linear.
        x_axis_offset = 5e7 / WGS84_SEMIMAJOR_AXIS
        y_axis_offset = 5e7 / WGS84_SEMIMINOR_AXIS
        self._x_limits = (-a * x_axis_offset + false_easting,
                          a * x_axis_offset + false_easting)
        self._y_limits = (-b * y_axis_offset + false_northing,
                          b * y_axis_offset + false_northing)
        coords = _ellipse_boundary(self._x_limits[1], self._y_limits[1],
                                   false_easting, false_northing, 91)
        self._boundary = sgeom.LinearRing(coords.T)
        self._threshold = np.diff(self._x_limits)[0] * 1e-3

    @property
    def boundary(self):
        return self._boundary

    @property
    def threshold(self):
        return self._threshold

    @property
    def x_limits(self):
        return self._x_limits

    @property
    def y_limits(self):
        return self._y_limits
class NorthPolarStereo(Stereographic):
    """A Stereographic projection centred on the north pole."""

    def __init__(self, central_longitude=0.0, true_scale_latitude=None,
                 globe=None):
        # true_scale_latitude of None lets the +90 polar default apply.
        kwargs = dict(central_latitude=90,
                      central_longitude=central_longitude,
                      true_scale_latitude=true_scale_latitude,
                      globe=globe)
        super(NorthPolarStereo, self).__init__(**kwargs)
class SouthPolarStereo(Stereographic):
    """A Stereographic projection centred on the south pole."""

    def __init__(self, central_longitude=0.0, true_scale_latitude=None,
                 globe=None):
        # true_scale_latitude of None lets the -90 polar default apply.
        kwargs = dict(central_latitude=-90,
                      central_longitude=central_longitude,
                      true_scale_latitude=true_scale_latitude,
                      globe=globe)
        super(SouthPolarStereo, self).__init__(**kwargs)
class Orthographic(Projection):
    """
    An Orthographic projection.
    """

    _handles_ellipses = False

    def __init__(self, central_longitude=0.0, central_latitude=0.0,
                 globe=None):
        """
        Parameters
        ----------
        central_longitude: optional
            The central longitude. Defaults to 0.
        central_latitude: optional
            The central latitude. Defaults to 0.
        globe: optional
            A :class:`cartopy.crs.Globe`. If omitted, a default globe is
            created.
        """
        if PROJ4_VERSION != ():
            if (5, 0, 0) <= PROJ4_VERSION < (5, 1, 0):
                warnings.warn(
                    'The Orthographic projection in the v5.0.x series of Proj '
                    'incorrectly transforms points. Use this projection with '
                    'caution.',
                    stacklevel=2)
        else:
            warnings.warn(
                'Cannot determine Proj version. The Orthographic projection '
                'may be unreliable and should be used with caution.',
                stacklevel=2)
        proj4_params = [('proj', 'ortho'), ('lon_0', central_longitude),
                        ('lat_0', central_latitude)]
        super(Orthographic, self).__init__(proj4_params, globe=globe)
        # TODO: Let the globe return the semimajor axis always.
        # np.float was deprecated in NumPy 1.20 and removed in 1.24; the
        # builtin float is its exact equivalent.
        a = float(self.globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
        # To stabilise the projection of geometries, we reduce the boundary by
        # a tiny fraction at the cost of the extreme edges.
        coords = _ellipse_boundary(a * 0.99999, a * 0.99999, n=61)
        self._boundary = sgeom.polygon.LinearRing(coords.T)
        mins = np.min(coords, axis=1)
        maxs = np.max(coords, axis=1)
        self._x_limits = mins[0], maxs[0]
        self._y_limits = mins[1], maxs[1]
        self._threshold = np.diff(self._x_limits)[0] * 0.02

    @property
    def boundary(self):
        return self._boundary

    @property
    def threshold(self):
        return self._threshold

    @property
    def x_limits(self):
        return self._x_limits

    @property
    def y_limits(self):
        return self._y_limits
class _WarpedRectangularProjection(six.with_metaclass(ABCMeta, Projection)):
    """
    Base class for projections whose boundary is obtained by projecting
    the edges of the geodetic longitude/latitude rectangle.
    """

    def __init__(self, proj4_params, central_longitude,
                 false_easting=None, false_northing=None, globe=None):
        if false_easting is not None:
            proj4_params += [('x_0', false_easting)]
        if false_northing is not None:
            proj4_params += [('y_0', false_northing)]
        super(_WarpedRectangularProjection, self).__init__(proj4_params,
                                                           globe=globe)
        # Obtain boundary points
        minlon, maxlon = self._determine_longitude_bounds(central_longitude)
        n = 91
        lon = np.empty(2 * n + 1)
        lat = np.empty(2 * n + 1)
        # Trace the west edge south-to-north, the east edge north-to-south,
        # then close the ring back at the starting (minlon, -90) corner.
        lon[:n] = minlon
        lat[:n] = np.linspace(-90, 90, n)
        lon[n:2 * n] = maxlon
        lat[n:2 * n] = np.linspace(90, -90, n)
        lon[-1] = minlon
        lat[-1] = -90
        points = self.transform_points(self.as_geodetic(), lon, lat)
        self._boundary = sgeom.LinearRing(points)
        # Cache axis-aligned extents of the projected boundary.
        mins = np.min(points, axis=0)
        maxs = np.max(points, axis=0)
        self._x_limits = mins[0], maxs[0]
        self._y_limits = mins[1], maxs[1]

    @property
    def boundary(self):
        return self._boundary

    @property
    def x_limits(self):
        return self._x_limits

    @property
    def y_limits(self):
        return self._y_limits
class _Eckert(six.with_metaclass(ABCMeta, _WarpedRectangularProjection)):
    """
    An Eckert projection.

    This class implements all the methods common to the Eckert family of
    projections. Concrete subclasses supply ``_proj_name``.
    """

    _handles_ellipses = False

    def __init__(self, central_longitude=0, false_easting=None,
                 false_northing=None, globe=None):
        """
        Parameters
        ----------
        central_longitude: float, optional
            The central longitude. Defaults to 0.
        false_easting: float, optional
            X offset from planar origin in metres. Defaults to 0.
        false_northing: float, optional
            Y offset from planar origin in metres. Defaults to 0.
        globe: :class:`cartopy.crs.Globe`, optional
            If omitted, a default globe is created.

        .. note::
            This projection does not handle elliptical globes.
        """
        super(_Eckert, self).__init__(
            [('proj', self._proj_name), ('lon_0', central_longitude)],
            central_longitude,
            false_easting=false_easting,
            false_northing=false_northing,
            globe=globe)

    @property
    def threshold(self):
        return 1e5
class EckertI(_Eckert):
    """
    An Eckert I projection.

    This projection is pseudocylindrical, but not equal-area. Both meridians
    and parallels are straight lines. Its equal-area pair is :class:`EckertII`.
    """
    # Proj identifier consumed by the shared _Eckert constructor.
    _proj_name = 'eck1'
class EckertII(_Eckert):
    """
    An Eckert II projection.

    This projection is pseudocylindrical, and equal-area. Both meridians and
    parallels are straight lines. Its non-equal-area pair with equally-spaced
    parallels is :class:`EckertI`.
    """
    # Proj identifier consumed by the shared _Eckert constructor.
    _proj_name = 'eck2'
class EckertIII(_Eckert):
    """
    An Eckert III projection.

    This projection is pseudocylindrical, but not equal-area. Parallels are
    equally-spaced straight lines, while meridians are elliptical arcs up to
    semicircles on the edges. Its equal-area pair is :class:`EckertIV`.
    """
    # Proj identifier consumed by the shared _Eckert constructor.
    _proj_name = 'eck3'
class EckertIV(_Eckert):
    """
    An Eckert IV projection.

    This projection is pseudocylindrical, and equal-area. Parallels are
    unequally-spaced straight lines, while meridians are elliptical arcs up to
    semicircles on the edges. Its non-equal-area pair with equally-spaced
    parallels is :class:`EckertIII`.

    It is commonly used for world maps.
    """
    # Proj identifier consumed by the shared _Eckert constructor.
    _proj_name = 'eck4'
class EckertV(_Eckert):
    """
    An Eckert V projection.

    This projection is pseudocylindrical, but not equal-area. Parallels are
    equally-spaced straight lines, while meridians are sinusoidal arcs. Its
    equal-area pair is :class:`EckertVI`.
    """
    # Proj identifier consumed by the shared _Eckert constructor.
    _proj_name = 'eck5'
class EckertVI(_Eckert):
    """
    An Eckert VI projection.

    This projection is pseudocylindrical, and equal-area. Parallels are
    unequally-spaced straight lines, while meridians are sinusoidal arcs. Its
    non-equal-area pair with equally-spaced parallels is :class:`EckertV`.

    It is commonly used for world maps.
    """
    # Proj identifier consumed by the shared _Eckert constructor.
    _proj_name = 'eck6'
class EqualEarth(_WarpedRectangularProjection):
    u"""
    An Equal Earth projection.

    This projection is pseudocylindrical, and equal area. Parallels are
    unequally-spaced straight lines, while meridians are equally-spaced arcs.

    It is intended for world maps.

    Note
    ----
    To use this projection, you must be using Proj 5.2.0 or newer.

    References
    ----------
    Bojan \u0160avri\u010d, Tom Patterson & Bernhard Jenny (2018) The Equal
    Earth map projection, International Journal of Geographical Information
    Science, DOI: 10.1080/13658816.2018.1504949
    """

    def __init__(self, central_longitude=0, false_easting=None,
                 false_northing=None, globe=None):
        """
        Parameters
        ----------
        central_longitude: float, optional
            The central longitude. Defaults to 0.
        false_easting: float, optional
            X offset from planar origin in metres. Defaults to 0.
        false_northing: float, optional
            Y offset from planar origin in metres. Defaults to 0.
        globe: :class:`cartopy.crs.Globe`, optional
            If omitted, a default globe is created.
        """
        # The 'eqearth' projection only exists in Proj 5.2.0 and later.
        if PROJ4_VERSION < (5, 2, 0):
            raise ValueError('The EqualEarth projection requires Proj version '
                             '5.2.0, but you are using {}.'
                             .format('.'.join(str(v) for v in PROJ4_VERSION)))
        super(EqualEarth, self).__init__(
            [('proj', 'eqearth'), ('lon_0', central_longitude)],
            central_longitude,
            false_easting=false_easting,
            false_northing=false_northing,
            globe=globe)

    @property
    def threshold(self):
        return 1e5
class Mollweide(_WarpedRectangularProjection):
    """
    A Mollweide projection.

    This projection is pseudocylindrical, and equal area. Parallels are
    unequally-spaced straight lines, while meridians are elliptical arcs up to
    semicircles on the edges. Poles are points.

    It is commonly used for world maps, or interrupted with several central
    meridians.
    """

    _handles_ellipses = False

    def __init__(self, central_longitude=0, globe=None,
                 false_easting=None, false_northing=None):
        """
        Parameters
        ----------
        central_longitude: float, optional
            The central longitude. Defaults to 0.
        false_easting: float, optional
            X offset from planar origin in metres. Defaults to 0.
        false_northing: float, optional
            Y offset from planar origin in metres. Defaults to 0.
        globe: :class:`cartopy.crs.Globe`, optional
            If omitted, a default globe is created.

        .. note::
            This projection does not handle elliptical globes.
        """
        params = [('proj', 'moll'), ('lon_0', central_longitude)]
        super(Mollweide, self).__init__(params, central_longitude,
                                        false_easting=false_easting,
                                        false_northing=false_northing,
                                        globe=globe)

    @property
    def threshold(self):
        return 1e5
class Robinson(_WarpedRectangularProjection):
    """
    A Robinson projection.

    This projection is pseudocylindrical, and a compromise that is neither
    equal-area nor conformal. Parallels are unequally-spaced straight lines,
    and meridians are curved lines of no particular form.

    It is commonly used for "visually-appealing" world maps.
    """

    _handles_ellipses = False

    def __init__(self, central_longitude=0, globe=None,
                 false_easting=None, false_northing=None):
        """
        Parameters
        ----------
        central_longitude: float, optional
            The central longitude. Defaults to 0.
        false_easting: float, optional
            X offset from planar origin in metres. Defaults to 0.
        false_northing: float, optional
            Y offset from planar origin in metres. Defaults to 0.
        globe: :class:`cartopy.crs.Globe`, optional
            If omitted, a default globe is created.

        .. note::
            This projection does not handle elliptical globes.
        """
        # Warn when using Robinson with proj 4.8 due to discontinuity at
        # 40 deg N introduced by incomplete fix to issue #113 (see
        # https://github.com/OSGeo/proj.4/issues/113).
        if PROJ4_VERSION != ():
            if (4, 8) <= PROJ4_VERSION < (4, 9):
                warnings.warn('The Robinson projection in the v4.8.x series '
                              'of Proj contains a discontinuity at '
                              '40 deg latitude. Use this projection with '
                              'caution.',
                              stacklevel=2)
        else:
            warnings.warn('Cannot determine Proj version. The Robinson '
                          'projection may be unreliable and should be used '
                          'with caution.',
                          stacklevel=2)
        proj4_params = [('proj', 'robin'), ('lon_0', central_longitude)]
        super(Robinson, self).__init__(proj4_params, central_longitude,
                                       false_easting=false_easting,
                                       false_northing=false_northing,
                                       globe=globe)

    @property
    def threshold(self):
        return 1e4

    def transform_point(self, x, y, src_crs):
        """
        Capture and handle any input NaNs, else invoke parent function,
        :meth:`_WarpedRectangularProjection.transform_point`.

        Needed because input NaNs can trigger a fatal error in the underlying
        implementation of the Robinson projection.

        Note
        ----
        Although the original can in fact translate (nan, lat) into
        (nan, y-value), this patched version doesn't support that.
        """
        if np.isnan(x) or np.isnan(y):
            result = (np.nan, np.nan)
        else:
            result = super(Robinson, self).transform_point(x, y, src_crs)
        return result

    def transform_points(self, src_crs, x, y, z=None):
        """
        Capture and handle NaNs in input points -- else as parent function,
        :meth:`_WarpedRectangularProjection.transform_points`.

        Needed because input NaNs can trigger a fatal error in the underlying
        implementation of the Robinson projection.

        Note
        ----
        Although the original can in fact translate (nan, lat) into
        (nan, y-value), this patched version doesn't support that.
        Instead, we invalidate any of the points that contain a NaN.
        """
        input_point_nans = np.isnan(x) | np.isnan(y)
        if z is not None:
            input_point_nans |= np.isnan(z)
        handle_nans = np.any(input_point_nans)
        if handle_nans:
            # Substitute 0.0 at the NaN positions in *copies* of the inputs,
            # so the caller's arrays are never mutated. (Previously the
            # inputs were zeroed in place and never restored.)
            x = np.where(input_point_nans, 0.0, x)
            y = np.where(input_point_nans, 0.0, y)
            if z is not None:
                z = np.where(input_point_nans, 0.0, z)
        result = super(Robinson, self).transform_points(src_crs, x, y, z)
        if handle_nans:
            # Result always has shape (N, 3).
            # Blank out each (whole) point where we had a NaN in the input.
            result[input_point_nans] = np.nan
        return result
class InterruptedGoodeHomolosine(Projection):
    """
    An Interrupted Goode Homolosine projection (Proj ``igh``).
    """

    def __init__(self, central_longitude=0, globe=None):
        """
        Parameters
        ----------
        central_longitude: optional
            The central longitude. Defaults to 0.
        globe: optional
            A :class:`cartopy.crs.Globe`. If omitted, a default globe is
            created.
        """
        proj4_params = [('proj', 'igh'), ('lon_0', central_longitude)]
        super(InterruptedGoodeHomolosine, self).__init__(proj4_params,
                                                         globe=globe)
        minlon, maxlon = self._determine_longitude_bounds(central_longitude)
        # Tiny longitude offset used to place boundary points just either
        # side of each interruption meridian.
        epsilon = 1e-10
        # Obtain boundary points
        n = 31
        # Longitudes (relative to the central longitude) at which the top
        # and bottom edges of the map are interrupted.
        top_interrupted_lons = (-40.0,)
        bottom_interrupted_lons = (80.0, -20.0, -100.0)
        # Two edges plus two n-point segments per interruption, plus the
        # final closing point.
        lons = np.empty(
            (2 + 2 * len(top_interrupted_lons + bottom_interrupted_lons)) * n +
            1)
        lats = np.empty(
            (2 + 2 * len(top_interrupted_lons + bottom_interrupted_lons)) * n +
            1)
        end = 0
        # Left boundary
        lons[end:end + n] = minlon
        lats[end:end + n] = np.linspace(-90, 90, n)
        end += n
        # Top boundary: descend to the equator just west of each
        # interruption, then climb back up just east of it.
        for lon in top_interrupted_lons:
            lons[end:end + n] = lon - epsilon + central_longitude
            lats[end:end + n] = np.linspace(90, 0, n)
            end += n
            lons[end:end + n] = lon + epsilon + central_longitude
            lats[end:end + n] = np.linspace(0, 90, n)
            end += n
        # Right boundary
        lons[end:end + n] = maxlon
        lats[end:end + n] = np.linspace(90, -90, n)
        end += n
        # Bottom boundary: mirror of the top, traversed east to west.
        for lon in bottom_interrupted_lons:
            lons[end:end + n] = lon + epsilon + central_longitude
            lats[end:end + n] = np.linspace(-90, 0, n)
            end += n
            lons[end:end + n] = lon - epsilon + central_longitude
            lats[end:end + n] = np.linspace(0, -90, n)
            end += n
        # Close loop
        lons[-1] = minlon
        lats[-1] = -90
        points = self.transform_points(self.as_geodetic(), lons, lats)
        self._boundary = sgeom.LinearRing(points)
        # Cache axis-aligned extents of the projected boundary.
        mins = np.min(points, axis=0)
        maxs = np.max(points, axis=0)
        self._x_limits = mins[0], maxs[0]
        self._y_limits = mins[1], maxs[1]

    @property
    def boundary(self):
        return self._boundary

    @property
    def threshold(self):
        return 2e4

    @property
    def x_limits(self):
        return self._x_limits

    @property
    def y_limits(self):
        return self._y_limits
class _Satellite(Projection):
    """Shared base for satellite-perspective projections."""

    def __init__(self, projection, satellite_height=35785831,
                 central_longitude=0.0, central_latitude=0.0,
                 false_easting=0, false_northing=0, globe=None,
                 sweep_axis=None):
        params = [('proj', projection), ('lon_0', central_longitude),
                  ('lat_0', central_latitude), ('h', satellite_height),
                  ('x_0', false_easting), ('y_0', false_northing),
                  ('units', 'm')]
        if sweep_axis:
            params.append(('sweep', sweep_axis))
        super(_Satellite, self).__init__(params, globe=globe)

    def _set_boundary(self, coords):
        # coords is a 2xN array of boundary x/y vertices; cache the ring,
        # its axis-aligned extents and a matching resolution threshold.
        self._boundary = sgeom.LinearRing(coords.T)
        lo = np.min(coords, axis=1)
        hi = np.max(coords, axis=1)
        self._x_limits = lo[0], hi[0]
        self._y_limits = lo[1], hi[1]
        self._threshold = np.diff(self._x_limits)[0] * 0.02

    @property
    def boundary(self):
        return self._boundary

    @property
    def threshold(self):
        return self._threshold

    @property
    def x_limits(self):
        return self._x_limits

    @property
    def y_limits(self):
        return self._y_limits
class Geostationary(_Satellite):
    """
    A view appropriate for satellites in Geostationary Earth orbit.

    Perspective view looking directly down from above a point on the equator.

    In this projection, the projected coordinates are scanning angles measured
    from the satellite looking directly downward, multiplied by the height of
    the satellite.
    """

    def __init__(self, central_longitude=0.0, satellite_height=35785831,
                 false_easting=0, false_northing=0, globe=None,
                 sweep_axis='y'):
        """
        Parameters
        ----------
        central_longitude: float, optional
            The central longitude. Defaults to 0.
        satellite_height: float, optional
            The height of the satellite. Defaults to 35785831 meters
            (true geostationary orbit).
        false_easting:
            X offset from planar origin in metres. Defaults to 0.
        false_northing:
            Y offset from planar origin in metres. Defaults to 0.
        globe: :class:`cartopy.crs.Globe`, optional
            If omitted, a default globe is created.
        sweep_axis: 'x' or 'y', optional. Defaults to 'y'.
            Controls which axis is scanned first, and thus which angle is
            applied first. The default is appropriate for Meteosat, while
            'x' should be used for GOES.
        """
        super(Geostationary, self).__init__(
            projection='geos',
            satellite_height=satellite_height,
            central_longitude=central_longitude,
            central_latitude=0.0,
            false_easting=false_easting,
            false_northing=false_northing,
            globe=globe,
            sweep_axis=sweep_axis)
        # TODO: Let the globe return the semimajor axis always.
        # np.float was deprecated in NumPy 1.20 and removed in 1.24; the
        # builtin float is its exact equivalent.
        a = float(self.globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
        h = float(satellite_height)
        # These are only exact for a spherical Earth, owing to assuming a is
        # constant. Handling elliptical would be much harder for this.
        sin_max_th = a / (a + h)
        tan_max_th = a / np.sqrt((a + h) ** 2 - a ** 2)
        # Using Napier's rules for right spherical triangles
        # See R2 and R6 (x and y coords are h * b and h * a, respectively):
        # https://en.wikipedia.org/wiki/Spherical_trigonometry
        t = np.linspace(0, -2 * np.pi, 61)  # Clockwise boundary.
        coords = np.vstack([np.arctan(tan_max_th * np.cos(t)),
                            np.arcsin(sin_max_th * np.sin(t))])
        coords *= h
        coords += np.array([[false_easting], [false_northing]])
        self._set_boundary(coords)
class NearsidePerspective(_Satellite):
    """
    Perspective view looking directly down from above a point on the globe.
    In this projection, the projected coordinates are x and y measured from
    the origin of a plane tangent to the Earth directly below the perspective
    point (e.g. a satellite).
    """
    _handles_ellipses = False
    def __init__(self, central_longitude=0.0, central_latitude=0.0,
                 satellite_height=35785831,
                 false_easting=0, false_northing=0, globe=None):
        """
        Parameters
        ----------
        central_longitude: float, optional
            The central longitude. Defaults to 0.
        central_latitude: float, optional
            The central latitude. Defaults to 0.
        satellite_height: float, optional
            The height of the satellite. Defaults to 35785831 meters
            (true geostationary orbit).
        false_easting:
            X offset from planar origin in metres. Defaults to 0.
        false_northing:
            Y offset from planar origin in metres. Defaults to 0.
        globe: :class:`cartopy.crs.Globe`, optional
            If omitted, a default globe is created.
        .. note::
            This projection does not handle elliptical globes.
        """
        super(NearsidePerspective, self).__init__(
            projection='nsper',
            satellite_height=satellite_height,
            central_longitude=central_longitude,
            central_latitude=central_latitude,
            false_easting=false_easting,
            false_northing=false_northing,
            globe=globe)
        # TODO: Let the globe return the semimajor axis always.
        a = self.globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS
        # Builtin float is used here: the np.float alias was deprecated in
        # NumPy 1.20 and removed in NumPy 1.24.
        h = float(satellite_height)
        # Apparent radius of the visible disc on the tangent plane.
        max_x = a * np.sqrt(h / (2 * a + h))
        coords = _ellipse_boundary(max_x, max_x,
                                   false_easting, false_northing, 61)
        self._set_boundary(coords)
class AlbersEqualArea(Projection):
    """
    An Albers Equal Area projection
    This projection is conic and equal-area, and is commonly used for maps of
    the conterminous United States.
    """
    def __init__(self, central_longitude=0.0, central_latitude=0.0,
                 false_easting=0.0, false_northing=0.0,
                 standard_parallels=(20.0, 50.0), globe=None):
        """
        Parameters
        ----------
        central_longitude: optional
            The central longitude. Defaults to 0.
        central_latitude: optional
            The central latitude. Defaults to 0.
        false_easting: optional
            X offset from planar origin in metres. Defaults to 0.
        false_northing: optional
            Y offset from planar origin in metres. Defaults to 0.
        standard_parallels: optional
            The one or two latitudes of correct scale. Defaults to (20, 50).
        globe: optional
            A :class:`cartopy.crs.Globe`. If omitted, a default globe is
            created.
        """
        proj4_params = [('proj', 'aea'),
                        ('lon_0', central_longitude),
                        ('lat_0', central_latitude),
                        ('x_0', false_easting),
                        ('y_0', false_northing)]
        if standard_parallels is not None:
            # standard_parallels may be a scalar or a 1- or 2-element
            # sequence.  The nested try/except handles all three:
            # TypeError means it was a scalar (not indexable), IndexError
            # means only one parallel was given.
            try:
                proj4_params.append(('lat_1', standard_parallels[0]))
                try:
                    proj4_params.append(('lat_2', standard_parallels[1]))
                except IndexError:
                    pass
            except TypeError:
                proj4_params.append(('lat_1', standard_parallels))
        super(AlbersEqualArea, self).__init__(proj4_params, globe=globe)
        # bounds: sample the top (lat 90) edge west-to-east, then the
        # bottom (lat -90) edge east-to-west, and close the ring.
        minlon, maxlon = self._determine_longitude_bounds(central_longitude)
        n = 103
        lons = np.empty(2 * n + 1)
        lats = np.empty(2 * n + 1)
        tmp = np.linspace(minlon, maxlon, n)
        lons[:n] = tmp
        lats[:n] = 90
        lons[n:-1] = tmp[::-1]
        lats[n:-1] = -90
        lons[-1] = lons[0]
        lats[-1] = lats[0]
        points = self.transform_points(self.as_geodetic(), lons, lats)
        self._boundary = sgeom.LinearRing(points)
        mins = np.min(points, axis=0)
        maxs = np.max(points, axis=0)
        self._x_limits = mins[0], maxs[0]
        self._y_limits = mins[1], maxs[1]
    @property
    def boundary(self):
        # Projection-domain outline computed in __init__.
        return self._boundary
    @property
    def threshold(self):
        # Fixed interpolation threshold for this projection.
        return 1e5
    @property
    def x_limits(self):
        return self._x_limits
    @property
    def y_limits(self):
        return self._y_limits
class AzimuthalEquidistant(Projection):
    """
    An Azimuthal Equidistant projection
    This projection provides accurate angles about and distances through the
    central position. Other angles, distances, or areas may be distorted.
    """
    def __init__(self, central_longitude=0.0, central_latitude=0.0,
                 false_easting=0.0, false_northing=0.0,
                 globe=None):
        """
        Parameters
        ----------
        central_longitude: optional
            The true longitude of the central meridian in degrees.
            Defaults to 0.
        central_latitude: optional
            The true latitude of the planar origin in degrees.
            Defaults to 0.
        false_easting: optional
            X offset from the planar origin in metres. Defaults to 0.
        false_northing: optional
            Y offset from the planar origin in metres. Defaults to 0.
        globe: optional
            An instance of :class:`cartopy.crs.Globe`. If omitted, a default
            globe is created.
        """
        # Warn when using Azimuthal Equidistant with proj < 4.9.2 due to
        # incorrect transformation past 90 deg distance (see
        # https://github.com/OSGeo/proj.4/issues/246).
        if PROJ4_VERSION != ():
            if PROJ4_VERSION < (4, 9, 2):
                warnings.warn('The Azimuthal Equidistant projection in Proj '
                              'older than 4.9.2 incorrectly transforms points '
                              'farther than 90 deg from the origin. Use this '
                              'projection with caution.',
                              stacklevel=2)
        else:
            warnings.warn('Cannot determine Proj version. The Azimuthal '
                          'Equidistant projection may be unreliable and '
                          'should be used with caution.',
                          stacklevel=2)
        proj4_params = [('proj', 'aeqd'), ('lon_0', central_longitude),
                        ('lat_0', central_latitude),
                        ('x_0', false_easting), ('y_0', false_northing)]
        super(AzimuthalEquidistant, self).__init__(proj4_params, globe=globe)
        # TODO: Let the globe return the semimajor axis always.
        # Builtin float is used here: the np.float alias was deprecated in
        # NumPy 1.20 and removed in NumPy 1.24.
        a = float(self.globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
        b = float(self.globe.semiminor_axis or a)
        coords = _ellipse_boundary(a * np.pi, b * np.pi,
                                   false_easting, false_northing, 61)
        self._boundary = sgeom.LinearRing(coords.T)
        mins = np.min(coords, axis=1)
        maxs = np.max(coords, axis=1)
        self._x_limits = mins[0], maxs[0]
        self._y_limits = mins[1], maxs[1]
    @property
    def boundary(self):
        # Elliptical outline computed in __init__.
        return self._boundary
    @property
    def threshold(self):
        # Fixed interpolation threshold for this projection.
        return 1e5
    @property
    def x_limits(self):
        return self._x_limits
    @property
    def y_limits(self):
        return self._y_limits
class Sinusoidal(Projection):
    """
    A Sinusoidal projection.
    This projection is equal-area.
    """
    def __init__(self, central_longitude=0.0, false_easting=0.0,
                 false_northing=0.0, globe=None):
        """
        Parameters
        ----------
        central_longitude: optional
            The central longitude. Defaults to 0.
        false_easting: optional
            X offset from planar origin in metres. Defaults to 0.
        false_northing: optional
            Y offset from planar origin in metres. Defaults to 0.
        globe: optional
            A :class:`cartopy.crs.Globe`. If omitted, a default globe is
            created.
        """
        proj4_params = [('proj', 'sinu'),
                        ('lon_0', central_longitude),
                        ('x_0', false_easting),
                        ('y_0', false_northing)]
        super(Sinusoidal, self).__init__(proj4_params, globe=globe)
        # Obtain boundary points: walk up the western limb, down the
        # eastern limb, then close the ring at the south pole.
        # (A dead "points = []" assignment was removed here; it was
        # immediately overwritten by transform_points below.)
        minlon, maxlon = self._determine_longitude_bounds(central_longitude)
        n = 91
        lon = np.empty(2 * n + 1)
        lat = np.empty(2 * n + 1)
        lon[:n] = minlon
        lat[:n] = np.linspace(-90, 90, n)
        lon[n:2 * n] = maxlon
        lat[n:2 * n] = np.linspace(90, -90, n)
        lon[-1] = minlon
        lat[-1] = -90
        points = self.transform_points(self.as_geodetic(), lon, lat)
        self._boundary = sgeom.LinearRing(points)
        mins = np.min(points, axis=0)
        maxs = np.max(points, axis=0)
        self._x_limits = mins[0], maxs[0]
        self._y_limits = mins[1], maxs[1]
        # Scale the threshold with the overall extent of the projection.
        self._threshold = max(np.abs(self.x_limits + self.y_limits)) * 1e-5
    @property
    def boundary(self):
        return self._boundary
    @property
    def threshold(self):
        return self._threshold
    @property
    def x_limits(self):
        return self._x_limits
    @property
    def y_limits(self):
        return self._y_limits
# MODIS data products use a Sinusoidal projection of a spherical Earth
# https://modis-land.gsfc.nasa.gov/GCTP.html
# Exposed as a class-level convenience instance: equal semi-major and
# semi-minor axes with ellipse=None give a sphere of radius 6371007.181 m.
Sinusoidal.MODIS = Sinusoidal(globe=Globe(ellipse=None,
                                          semimajor_axis=6371007.181,
                                          semiminor_axis=6371007.181))
class EquidistantConic(Projection):
    """
    An Equidistant Conic projection.
    This projection is conic and equidistant, and the scale is true along all
    meridians and along one or two specified standard parallels.
    """
    def __init__(self, central_longitude=0.0, central_latitude=0.0,
                 false_easting=0.0, false_northing=0.0,
                 standard_parallels=(20.0, 50.0), globe=None):
        """
        Parameters
        ----------
        central_longitude: optional
            The central longitude. Defaults to 0.
        central_latitude: optional
            The true latitude of the planar origin in degrees. Defaults to 0.
        false_easting: optional
            X offset from planar origin in metres. Defaults to 0.
        false_northing: optional
            Y offset from planar origin in metres. Defaults to 0.
        standard_parallels: optional
            The one or two latitudes of correct scale. Defaults to (20, 50).
        globe: optional
            A :class:`cartopy.crs.Globe`. If omitted, a default globe is
            created.
        """
        proj4_params = [('proj', 'eqdc'),
                        ('lon_0', central_longitude),
                        ('lat_0', central_latitude),
                        ('x_0', false_easting),
                        ('y_0', false_northing)]
        if standard_parallels is not None:
            # standard_parallels may be a scalar or a 1- or 2-element
            # sequence; TypeError catches the scalar case (not indexable),
            # IndexError the single-parallel case.
            try:
                proj4_params.append(('lat_1', standard_parallels[0]))
                try:
                    proj4_params.append(('lat_2', standard_parallels[1]))
                except IndexError:
                    pass
            except TypeError:
                proj4_params.append(('lat_1', standard_parallels))
        super(EquidistantConic, self).__init__(proj4_params, globe=globe)
        # bounds: sample the lat 90 edge west-to-east, then the lat -90
        # edge east-to-west, and close the ring.
        n = 103
        lons = np.empty(2 * n + 1)
        lats = np.empty(2 * n + 1)
        minlon, maxlon = self._determine_longitude_bounds(central_longitude)
        tmp = np.linspace(minlon, maxlon, n)
        lons[:n] = tmp
        lats[:n] = 90
        lons[n:-1] = tmp[::-1]
        lats[n:-1] = -90
        lons[-1] = lons[0]
        lats[-1] = lats[0]
        points = self.transform_points(self.as_geodetic(), lons, lats)
        self._boundary = sgeom.LinearRing(points)
        mins = np.min(points, axis=0)
        maxs = np.max(points, axis=0)
        self._x_limits = mins[0], maxs[0]
        self._y_limits = mins[1], maxs[1]
    @property
    def boundary(self):
        # Projection-domain outline computed in __init__.
        return self._boundary
    @property
    def threshold(self):
        # Fixed interpolation threshold for this projection.
        return 1e5
    @property
    def x_limits(self):
        return self._x_limits
    @property
    def y_limits(self):
        return self._y_limits
class _BoundaryPoint(object):
def __init__(self, distance, kind, data):
"""
A representation for a geometric object which is
connected to the boundary.
Parameters
----------
distance: float
The distance along the boundary that this object
can be found.
kind: bool
Whether this object represents a point from the
pre-computed boundary.
data: point or namedtuple
The actual data that this boundary object represents.
"""
self.distance = distance
self.kind = kind
self.data = data
def __repr__(self):
return '_BoundaryPoint(%r, %r, %s)' % (self.distance, self.kind,
self.data)
def _find_first_ge(a, x):
for v in a:
if v.distance >= x:
return v
# We've gone all the way around, so pick the first point again.
return a[0]
def epsg(code):
    """
    Return the projection which corresponds to the given EPSG code.
    The EPSG code must correspond to a "projected coordinate system",
    so EPSG codes such as 4326 (WGS-84) which define a "geodetic coordinate
    system" will not work.
    Note
    ----
    The conversion is performed by querying https://epsg.io/ so a
    live internet connection is required.
    """
    # Imported lazily so the network-backed _epsg module is only loaded
    # when this helper is actually called.
    import cartopy._epsg
    return cartopy._epsg._EPSGProjection(code)
| ocefpaf/cartopy | lib/cartopy/crs.py | Python | lgpl-3.0 | 90,497 |
# Copyright (c) 2017 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
from collections import deque
class PropertyEvaluationContext:
    """Context for evaluating a property value

    It contains:
    1. a stack of containers during the evaluation in the function call stack fashion
    2. a context dictionary which contains all the current context
    """

    def __init__(self, source_stack = None):
        self.stack_of_containers = deque()
        if source_stack is not None:
            self.stack_of_containers.append(source_stack)
        self.context = {}

    def rootStack(self):
        """Return the container stack the evaluation started from.

        Returns None when no container has been pushed.
        """
        if self.stack_of_containers:
            return self.stack_of_containers[0]
        return None

    def pushContainer(self, container):
        """Push a container for the current evaluation frame."""
        self.stack_of_containers.append(container)

    def popContainer(self):
        """Pop and return the most recently pushed container.

        Fixed: the previous ``-> None`` annotation was wrong — this method
        returns the popped container, not None.
        """
        return self.stack_of_containers.pop()
| Ultimaker/Uranium | UM/Settings/PropertyEvaluationContext.py | Python | lgpl-3.0 | 887 |
#!/usr/bin/env python
##Copyright 2008-2017 Thomas Paviot (tpaviot@gmail.com)
##
##This file is part of pythonOCC.
##
##pythonOCC is free software: you can redistribute it and/or modify
##it under the terms of the GNU Lesser General Public License as published by
##the Free Software Foundation, either version 3 of the License, or
##(at your option) any later version.
##
##pythonOCC is distributed in the hope that it will be useful,
##but WITHOUT ANY WARRANTY; without even the implied warranty of
##MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
##GNU Lesser General Public License for more details.
##
##You should have received a copy of the GNU Lesser General Public License
##along with pythonOCC. If not, see <http://www.gnu.org/licenses/>.
import itertools
import math
import os
import sys
import time
import OCC
from OCC.Core.Aspect import Aspect_GFM_VER
from OCC.Core.AIS import (
AIS_Shape,
AIS_Shaded,
AIS_TexturedShape,
AIS_WireFrame,
AIS_Shape_SelectionMode,
)
from OCC.Core.gp import gp_Dir, gp_Pnt, gp_Pnt2d, gp_Vec
from OCC.Core.BRepBuilderAPI import (
BRepBuilderAPI_MakeVertex,
BRepBuilderAPI_MakeEdge,
BRepBuilderAPI_MakeEdge2d,
BRepBuilderAPI_MakeFace,
)
from OCC.Core.TopAbs import (
TopAbs_FACE,
TopAbs_EDGE,
TopAbs_VERTEX,
TopAbs_SHELL,
TopAbs_SOLID,
)
from OCC.Core.Geom import Geom_Curve, Geom_Surface
from OCC.Core.Geom2d import Geom2d_Curve
from OCC.Core.Visualization import Display3d
from OCC.Core.V3d import (
V3d_ZBUFFER,
V3d_Zpos,
V3d_Zneg,
V3d_Xpos,
V3d_Xneg,
V3d_Ypos,
V3d_Yneg,
V3d_XposYnegZpos,
)
from OCC.Core.TCollection import TCollection_ExtendedString, TCollection_AsciiString
from OCC.Core.Quantity import (
Quantity_Color,
Quantity_TOC_RGB,
Quantity_NOC_WHITE,
Quantity_NOC_BLACK,
Quantity_NOC_BLUE1,
Quantity_NOC_CYAN1,
Quantity_NOC_RED,
Quantity_NOC_GREEN,
Quantity_NOC_ORANGE,
Quantity_NOC_YELLOW,
)
from OCC.Core.Prs3d import Prs3d_Arrow, Prs3d_Text, Prs3d_TextAspect
from OCC.Core.Graphic3d import (
Graphic3d_NOM_NEON_GNC,
Graphic3d_NOT_ENV_CLOUDS,
Handle_Graphic3d_TextureEnv_Create,
Graphic3d_TextureEnv,
Graphic3d_Camera,
Graphic3d_RM_RAYTRACING,
Graphic3d_RM_RASTERIZATION,
Graphic3d_StereoMode_QuadBuffer,
Graphic3d_RenderingParams,
Graphic3d_MaterialAspect,
Graphic3d_TOSM_FRAGMENT,
Graphic3d_Structure,
Graphic3d_GraduatedTrihedron,
Graphic3d_NameOfMaterial,
)
from OCC.Core.Aspect import Aspect_TOTP_RIGHT_LOWER, Aspect_FM_STRETCH, Aspect_FM_NONE
# Shaders and Units definition must be found by occ
# the fastest way to get done is to set the CASROOT env variable
# it must point to the /share folder.
if sys.platform == "win32":
    # do the same for Units
    if "CASROOT" in os.environ:
        casroot_path = os.environ["CASROOT"]
        # raise an error, force the user to correctly set the variable
        err_msg = "Please set the CASROOT env variable (%s is not ok)" % casroot_path
        if not os.path.isdir(casroot_path):
            raise AssertionError(err_msg)
    else: # on miniconda or anaconda or whatever conda
        # CASROOT not set: derive it from the installed OCC package
        # location inside the conda environment layout.
        occ_package_path = os.path.dirname(OCC.__file__)
        casroot_path = os.path.join(
            occ_package_path, "..", "..", "..", "Library", "share", "oce"
        )
        # we check that all required files are at the right place
        shaders_dict_found = os.path.isdir(os.path.join(casroot_path, "src", "Shaders"))
        unitlexicon_found = os.path.isfile(
            os.path.join(casroot_path, "src", "UnitsAPI", "Lexi_Expr.dat")
        )
        unitsdefinition_found = os.path.isfile(
            os.path.join(casroot_path, "src", "UnitsAPI", "Units.dat")
        )
        # only export CASROOT when the derived path looks complete;
        # otherwise it is silently left unset.
        if shaders_dict_found and unitlexicon_found and unitsdefinition_found:
            os.environ["CASROOT"] = casroot_path
def rgb_color(r, g, b):
    """Build a Quantity_Color from r, g, b floats in the 0-1 range."""
    return Quantity_Color(r, g, b, Quantity_TOC_RGB)
def get_color_from_name(color_name):
    """Resolve a color name (case insensitive) to a Quantity_Color.

    E.g. 'white' resolves via the Quantity_NOC_WHITE enum; names that
    only exist with a '1' suffix (e.g. BLUE1) are matched too, and
    unknown names fall back to white.
    """
    module_names = globals()
    base = "Quantity_NOC_%s" % color_name.upper()
    if base in module_names:
        color_num = module_names[base]
    elif base + "1" in module_names:
        color_num = module_names[base + "1"]
        print("Many colors for color name %s, using first." % color_name)
    else:
        color_num = Quantity_NOC_WHITE
        print("Color name not defined. Use White by default")
    return Quantity_Color(color_num)
def to_string(_string):
    """Wrap a Python string in an OCC TCollection_ExtendedString."""
    return TCollection_ExtendedString(_string)
# Topology levels cycled through by Viewer3d.SetSelectionMode() when no
# explicit mode is given: face -> edge -> vertex -> shell -> solid.
modes = itertools.cycle(
    [TopAbs_FACE, TopAbs_EDGE, TopAbs_VERTEX, TopAbs_SHELL, TopAbs_SOLID]
)
class Viewer3d(Display3d):
def __init__(self):
Display3d.__init__(self)
self._parent = None # the parent opengl GUI container
self._inited = False
self._local_context_opened = False
self.Context = self.GetContext()
self.Viewer = self.GetViewer()
self.View = self.GetView()
self.default_drawer = None
self._struc_mgr = None
self._is_offscreen = None
self.selected_shapes = []
self._select_callbacks = []
self._overlay_items = []
def get_parent(self):
return self._parent
def register_overlay_item(self, overlay_item):
self._overlay_items.append(overlay_item)
self.View.MustBeResized()
self.View.Redraw()
def register_select_callback(self, callback):
"""Adds a callback that will be called each time a shape s selected"""
if not callable(callback):
raise AssertionError("You must provide a callable to register the callback")
self._select_callbacks.append(callback)
def unregister_callback(self, callback):
"""Remove a callback from the callback list"""
if not callback in self._select_callbacks:
raise AssertionError("This callback is not registered")
self._select_callbacks.remove(callback)
def MoveTo(self, X, Y):
self.Context.MoveTo(X, Y, self.View, True)
def FitAll(self):
self.View.ZFitAll()
self.View.FitAll()
def Create(
self,
window_handle=None,
parent=None,
create_default_lights=True,
draw_face_boundaries=True,
phong_shading=True,
display_glinfo=True,
):
self._window_handle = window_handle
self._parent = parent
if self._window_handle is None:
self.InitOffscreen(640, 480)
self._is_offscreen = True
else:
self.Init(self._window_handle)
self._is_offscreen = False
# display OpenGl Information
if display_glinfo:
self.GlInfo()
if create_default_lights:
self.Viewer.SetDefaultLights()
self.Viewer.SetLightOn()
self.camera = self.View.Camera()
self.default_drawer = self.Context.DefaultDrawer()
# draw black contour edges, like other famous CAD packages
if draw_face_boundaries:
self.default_drawer.SetFaceBoundaryDraw(True)
# turn up tesselation defaults, which are too conversative...
chord_dev = self.default_drawer.MaximalChordialDeviation() / 10.0
self.default_drawer.SetMaximalChordialDeviation(chord_dev)
if phong_shading:
# gouraud shading by default, prefer phong instead
self.View.SetShadingModel(Graphic3d_TOSM_FRAGMENT)
# necessary for text rendering
self._struc_mgr = self.Context.MainPrsMgr().StructureManager()
# turn self._inited flag to True
self._inited = True
def OnResize(self):
self.View.MustBeResized()
def ResetView(self):
self.View.Reset()
def Repaint(self):
self.Viewer.Redraw()
def SetModeWireFrame(self):
self.View.SetComputedMode(False)
self.Context.SetDisplayMode(AIS_WireFrame, True)
def SetModeShaded(self):
self.View.SetComputedMode(False)
self.Context.SetDisplayMode(AIS_Shaded, True)
def SetModeHLR(self):
self.View.SetComputedMode(True)
def SetOrthographicProjection(self):
self.camera.SetProjectionType(Graphic3d_Camera.Projection_Orthographic)
def SetPerspectiveProjection(self):
self.camera.SetProjectionType(Graphic3d_Camera.Projection_Perspective)
def View_Top(self):
self.View.SetProj(V3d_Zpos)
def View_Bottom(self):
self.View.SetProj(V3d_Zneg)
def View_Left(self):
self.View.SetProj(V3d_Xneg)
def View_Right(self):
self.View.SetProj(V3d_Xpos)
def View_Front(self):
self.View.SetProj(V3d_Yneg)
def View_Rear(self):
self.View.SetProj(V3d_Ypos)
def View_Iso(self):
self.View.SetProj(V3d_XposYnegZpos)
def EnableTextureEnv(self, name_of_texture=Graphic3d_NOT_ENV_CLOUDS):
"""enable environment mapping. Possible modes are
Graphic3d_NOT_ENV_CLOUDS
Graphic3d_NOT_ENV_CV
Graphic3d_NOT_ENV_MEDIT
Graphic3d_NOT_ENV_PEARL
Graphic3d_NOT_ENV_SKY1
Graphic3d_NOT_ENV_SKY2
Graphic3d_NOT_ENV_LINES
Graphic3d_NOT_ENV_ROAD
Graphic3d_NOT_ENV_UNKNOWN
"""
texture_env = Graphic3d_TextureEnv(name_of_texture)
self.View.SetTextureEnv(texture_env)
self.View.Redraw()
def DisableTextureEnv(self):
a_null_texture = Handle_Graphic3d_TextureEnv_Create()
self.View.SetTextureEnv(
a_null_texture
) # Passing null handle to clear the texture data
self.View.Redraw()
def SetRenderingParams(
self,
Method=Graphic3d_RM_RASTERIZATION,
RaytracingDepth=3,
IsShadowEnabled=True,
IsReflectionEnabled=False,
IsAntialiasingEnabled=False,
IsTransparentShadowEnabled=False,
StereoMode=Graphic3d_StereoMode_QuadBuffer,
AnaglyphFilter=Graphic3d_RenderingParams.Anaglyph_RedCyan_Optimized,
ToReverseStereo=False,
):
"""Default values are :
Method=Graphic3d_RM_RASTERIZATION,
RaytracingDepth=3,
IsShadowEnabled=True,
IsReflectionEnabled=False,
IsAntialiasingEnabled=False,
IsTransparentShadowEnabled=False,
StereoMode=Graphic3d_StereoMode_QuadBuffer,
AnaglyphFilter=Graphic3d_RenderingParams.Anaglyph_RedCyan_Optimized,
ToReverseStereo=False)
"""
self.ChangeRenderingParams(
Method,
RaytracingDepth,
IsShadowEnabled,
IsReflectionEnabled,
IsAntialiasingEnabled,
IsTransparentShadowEnabled,
StereoMode,
AnaglyphFilter,
ToReverseStereo,
)
def SetRasterizationMode(self):
"""to enable rasterization mode, just call the SetRenderingParams
with default values
"""
self.SetRenderingParams()
def SetRaytracingMode(self, depth=3):
"""enables the raytracing mode"""
self.SetRenderingParams(
Method=Graphic3d_RM_RAYTRACING,
RaytracingDepth=depth,
IsAntialiasingEnabled=True,
IsShadowEnabled=True,
IsReflectionEnabled=True,
IsTransparentShadowEnabled=True,
)
def ExportToImage(self, image_filename):
self.View.Dump(image_filename)
def display_graduated_trihedron(self):
a_trihedron_data = Graphic3d_GraduatedTrihedron()
self.View.GraduatedTrihedronDisplay(a_trihedron_data)
def display_triedron(self):
"""Show a black triedron in lower right corner"""
self.View.TriedronDisplay(
Aspect_TOTP_RIGHT_LOWER,
Quantity_Color(Quantity_NOC_BLACK),
0.1,
V3d_ZBUFFER,
)
def hide_triedron(self):
"""Show a black triedron in lower right corner"""
self.View.TriedronErase()
def set_bg_gradient_color(self, color1, color2, fill_method=Aspect_GFM_VER):
"""set a bg vertical gradient color.
color1 is [R1, G1, B1], each being bytes or an instance of Quantity_Color
color2 is [R2, G2, B2], each being bytes or an instance of Quantity_Color
fill_method is one of Aspect_GFM_VER value Aspect_GFM_NONE, Aspect_GFM_HOR,
Aspect_GFM_VER, Aspect_GFM_DIAG1, Aspect_GFM_DIAG2, Aspect_GFM_CORNER1, Aspect_GFM_CORNER2,
Aspect_GFM_CORNER3, Aspect_GFM_CORNER4
"""
if isinstance(color1, list) and isinstance(color2, list):
R1, G1, B1 = color1
R2, G2, B2 = color2
color1 = rgb_color(float(R1) / 255.0, float(G1) / 255.0, float(B1) / 255.0)
color2 = rgb_color(float(R2) / 255.0, float(G2) / 255.0, float(B2) / 255.0)
elif not isinstance(color1, Quantity_Color) and isinstance(
color2, Quantity_Color
):
raise AssertionError(
"color1 and color2 mmust be either [R, G, B] lists or a Quantity_Color"
)
self.View.SetBgGradientColors(color1, color2, fill_method, True)
def SetBackgroundImage(self, image_filename, stretch=True):
"""displays a background image (jpg, png etc.)"""
if not os.path.isfile(image_filename):
raise IOError("image file %s not found." % image_filename)
if stretch:
self.View.SetBackgroundImage(image_filename, Aspect_FM_STRETCH, True)
else:
self.View.SetBackgroundImage(image_filename, Aspect_FM_NONE, True)
def DisplayVector(self, vec, pnt, update=False):
"""displays a vector as an arrow"""
if self._inited:
aStructure = Graphic3d_Structure(self._struc_mgr)
pnt_as_vec = gp_Vec(pnt.X(), pnt.Y(), pnt.Z())
start = pnt_as_vec + vec
pnt_start = gp_Pnt(start.X(), start.Y(), start.Z())
Prs3d_Arrow.Draw(
aStructure, pnt_start, gp_Dir(vec), math.radians(20), vec.Magnitude()
)
aStructure.Display()
# it would be more coherent if a AIS_InteractiveObject
# would be returned
if update:
self.Repaint()
return aStructure
def DisplayMessage(
self, point, text_to_write, height=None, message_color=None, update=False
):
"""
:point: a gp_Pnt or gp_Pnt2d instance
:text_to_write: a string
:message_color: triple with the range 0-1
"""
aStructure = Graphic3d_Structure(self._struc_mgr)
text_aspect = Prs3d_TextAspect()
if message_color is not None:
text_aspect.SetColor(rgb_color(*message_color))
if height is not None:
text_aspect.SetHeight(height)
if isinstance(point, gp_Pnt2d):
point = gp_Pnt(point.X(), point.Y(), 0)
Prs3d_Text.Draw(aStructure, text_aspect, to_string(text_to_write), point)
aStructure.Display()
# @TODO: it would be more coherent if a AIS_InteractiveObject
# is be returned
if update:
self.Repaint()
return aStructure
def DisplayShape(
self,
shapes,
material=None,
texture=None,
color=None,
transparency=None,
update=False,
):
"""display one or a set of displayable objects"""
ais_shapes = [] # the list of all displayed shapes
if issubclass(shapes.__class__, gp_Pnt):
# if a gp_Pnt is passed, first convert to vertex
vertex = BRepBuilderAPI_MakeVertex(shapes)
shapes = [vertex.Shape()]
elif isinstance(shapes, gp_Pnt2d):
vertex = BRepBuilderAPI_MakeVertex(gp_Pnt(shapes.X(), shapes.Y(), 0))
shapes = [vertex.Shape()]
elif isinstance(shapes, Geom_Surface):
bounds = True
toldegen = 1e-6
face = BRepBuilderAPI_MakeFace()
face.Init(shapes, bounds, toldegen)
face.Build()
shapes = [face.Shape()]
elif isinstance(shapes, Geom_Curve):
edge = BRepBuilderAPI_MakeEdge(shapes)
shapes = [edge.Shape()]
elif isinstance(shapes, Geom2d_Curve):
edge2d = BRepBuilderAPI_MakeEdge2d(shapes)
shapes = [edge2d.Shape()]
# if only one shapes, create a list with a single shape
if not isinstance(shapes, list):
shapes = [shapes]
# build AIS_Shapes list
for shape in shapes:
if material or texture:
if texture:
shape_to_display = AIS_TexturedShape(shape)
(
filename,
toScaleU,
toScaleV,
toRepeatU,
toRepeatV,
originU,
originV,
) = texture.GetProperties()
shape_to_display.SetTextureFileName(
TCollection_AsciiString(filename)
)
shape_to_display.SetTextureMapOn()
shape_to_display.SetTextureScale(True, toScaleU, toScaleV)
shape_to_display.SetTextureRepeat(True, toRepeatU, toRepeatV)
shape_to_display.SetTextureOrigin(True, originU, originV)
shape_to_display.SetDisplayMode(3)
elif material:
shape_to_display = AIS_Shape(shape)
if isinstance(material, Graphic3d_NameOfMaterial):
shape_to_display.SetMaterial(Graphic3d_MaterialAspect(material))
else:
shape_to_display.SetMaterial(material)
else:
# TODO: can we use .Set to attach all TopoDS_Shapes
# to this AIS_Shape instance?
shape_to_display = AIS_Shape(shape)
ais_shapes.append(shape_to_display)
# if not SOLO:
# # computing graphic properties is expensive
# # if an iterable is found, so cluster all TopoDS_Shape under
# # an AIS_MultipleConnectedInteractive
# #shape_to_display = AIS_MultipleConnectedInteractive()
# for ais_shp in ais_shapes:
# # TODO : following line crashes with oce-0.18
# # why ? fix ?
# #shape_to_display.Connect(i)
# self.Context.Display(ais_shp, False)
# set the graphic properties
if material is None:
# The default material is too shiny to show the object
# color well, so I set it to something less reflective
for shape_to_display in ais_shapes:
shape_to_display.SetMaterial(
Graphic3d_MaterialAspect(Graphic3d_NOM_NEON_GNC)
)
if color:
if isinstance(color, str):
color = get_color_from_name(color)
elif isinstance(color, int):
color = Quantity_Color(color)
for shp in ais_shapes:
self.Context.SetColor(shp, color, False)
if transparency:
for shape_to_display in ais_shapes:
shape_to_display.SetTransparency(transparency)
# display the shapes
for shape_to_display in ais_shapes:
self.Context.Display(shape_to_display, False)
if update:
# especially this call takes up a lot of time...
self.FitAll()
self.Repaint()
return ais_shapes
def DisplayColoredShape(
self,
shapes,
color="YELLOW",
update=False,
):
if isinstance(color, str):
dict_color = {
"WHITE": Quantity_NOC_WHITE,
"BLUE": Quantity_NOC_BLUE1,
"RED": Quantity_NOC_RED,
"GREEN": Quantity_NOC_GREEN,
"YELLOW": Quantity_NOC_YELLOW,
"CYAN": Quantity_NOC_CYAN1,
"BLACK": Quantity_NOC_BLACK,
"ORANGE": Quantity_NOC_ORANGE,
}
clr = dict_color[color]
elif isinstance(color, Quantity_Color):
clr = color
else:
raise ValueError(
'color should either be a string ( "BLUE" ) or a Quantity_Color(0.1, 0.8, 0.1) got %s'
% color
)
return self.DisplayShape(shapes, color=clr, update=update)
def EnableAntiAliasing(self):
self.SetNbMsaaSample(4)
def DisableAntiAliasing(self):
self.SetNbMsaaSample(0)
def EraseAll(self):
self.Context.EraseAll(True)
def Tumble(self, num_images, animation=True):
self.View.Tumble(num_images, animation)
def Pan(self, dx, dy):
self.View.Pan(dx, dy)
def SetSelectionMode(self, mode=None):
topo_level = next(modes)
if mode is None:
self.Context.Activate(AIS_Shape_SelectionMode(topo_level), True)
else:
self.Context.Activate(AIS_Shape_SelectionMode(mode), True)
self.Context.UpdateSelected(True)
def SetSelectionModeVertex(self):
self.SetSelectionMode(TopAbs_VERTEX)
def SetSelectionModeEdge(self):
self.SetSelectionMode(TopAbs_EDGE)
def SetSelectionModeFace(self):
self.SetSelectionMode(TopAbs_FACE)
def SetSelectionModeShape(self):
self.Context.Deactivate()
def SetSelectionModeNeutral(self):
self.Context.Deactivate()
def GetSelectedShapes(self):
return self.selected_shapes
def GetSelectedShape(self):
"""
Returns the current selected shape
"""
return self.selected_shape
def SelectArea(self, Xmin, Ymin, Xmax, Ymax):
self.Context.Select(Xmin, Ymin, Xmax, Ymax, self.View, True)
self.Context.InitSelected()
# reinit the selected_shapes list
self.selected_shapes = []
while self.Context.MoreSelected():
if self.Context.HasSelectedShape():
self.selected_shapes.append(self.Context.SelectedShape())
self.Context.NextSelected()
# callbacks
for callback in self._select_callbacks:
callback(self.selected_shapes, Xmin, Ymin, Xmax, Ymax)
def Select(self, X, Y):
self.Context.Select(True)
self.Context.InitSelected()
self.selected_shapes = []
if self.Context.MoreSelected():
if self.Context.HasSelectedShape():
self.selected_shapes.append(self.Context.SelectedShape())
# callbacks
for callback in self._select_callbacks:
callback(self.selected_shapes, X, Y)
def ShiftSelect(self, X, Y):
self.Context.ShiftSelect(True)
self.Context.InitSelected()
self.selected_shapes = []
while self.Context.MoreSelected():
if self.Context.HasSelectedShape():
self.selected_shapes.append(self.Context.SelectedShape())
self.Context.NextSelected()
# hilight newly selected unhighlight those no longer selected
self.Context.UpdateSelected(True)
# callbacks
for callback in self._select_callbacks:
callback(self.selected_shapes, X, Y)
    def Rotation(self, X, Y):
        """Continue an interactive rotation begun with StartRotation."""
        self.View.Rotation(X, Y)
    def DynamicZoom(self, X1, Y1, X2, Y2):
        """Zoom proportionally to the pointer drag from (X1, Y1) to (X2, Y2)."""
        self.View.Zoom(X1, Y1, X2, Y2)
    def ZoomFactor(self, zoom_factor):
        """Apply an absolute zoom factor to the view."""
        self.View.SetZoom(zoom_factor)
    def ZoomArea(self, X1, Y1, X2, Y2):
        """Fit the view to the screen rectangle (X1, Y1)-(X2, Y2)."""
        self.View.WindowFit(X1, Y1, X2, Y2)
    def Zoom(self, X, Y):
        """Zoom the view using the (X, Y) pointer coordinates."""
        self.View.Zoom(X, Y)
    def StartRotation(self, X, Y):
        """Begin an interactive rotation anchored at screen position (X, Y)."""
        self.View.StartRotation(X, Y)
class OffscreenRenderer(Viewer3d):
    """Viewer3d variant that renders without opening a window.

    DisplayShape is overridden so that, when the environment variable
    PYTHONOCC_OFFSCREEN_RENDERER_DUMP_IMAGE is set to "1", every call also
    dumps the rendered frame to a JPEG file.
    """

    def __init__(self, screen_size=(640, 480)):
        Viewer3d.__init__(self)
        # create the renderer
        self.Create()
        self.SetSize(screen_size[0], screen_size[1])
        self.SetModeShaded()
        self.set_bg_gradient_color([206, 215, 222], [128, 128, 128])
        self.display_triedron()
        self.capture_number = 0

    def DisplayShape(
        self,
        shapes,
        material=None,
        texture=None,
        color=None,
        transparency=None,
        update=True,
    ):
        """Display *shapes* through the base class, then optionally dump the
        rendered frame to a JPEG file (see class docstring)."""
        # call the "original" DisplayShape method (always update)
        result = super(OffscreenRenderer, self).DisplayShape(
            shapes, material, texture, color, transparency, update
        )
        if os.getenv("PYTHONOCC_OFFSCREEN_RENDERER_DUMP_IMAGE") == "1":
            self._dump_current_view()
        return result

    def _dump_current_view(self):
        """Write the current frame to capture-<n>-<timestamp>.jpeg in the
        directory named by PYTHONOCC_OFFSCREEN_RENDERER_DUMP_IMAGE_PATH
        (default: the current working directory)."""
        # seconds-since-epoch, truncated to the integer part
        timestamp = ("%f" % time.time()).split(".")[0]
        self.capture_number += 1
        image_filename = "capture-%i-%s.jpeg" % (
            self.capture_number,
            timestamp.replace(" ", "-"),
        )
        path = os.getenv("PYTHONOCC_OFFSCREEN_RENDERER_DUMP_IMAGE_PATH")
        if path:
            if not os.path.isdir(path):
                raise IOError("%s is not a valid path" % path)
        else:
            path = os.getcwd()
        image_full_name = os.path.join(path, image_filename)
        self.View.Dump(image_full_name)
        if not os.path.isfile(image_full_name):
            raise IOError("OffscreenRenderer failed to render image to file")
        print("OffscreenRenderer content dumped to %s" % image_full_name)
| tpaviot/pythonocc-core | src/Display/OCCViewer.py | Python | lgpl-3.0 | 25,906 |
import datetime
from .base import ReportGenerator
from geraldo.base import cm, TA_CENTER, TA_RIGHT
from geraldo.utils import get_attr_value, calculate_size
from geraldo.widgets import Widget, Label, SystemField
from geraldo.graphics import Graphic, RoundRect, Rect, Line, Circle, Arc,\
Ellipse, Image
from geraldo.exceptions import AbortEvent
# In development
# Geometry of one matrix-printer character cell; used to convert report
# coordinates (cm) into row/column counts.
DEFAULT_ROW_HEIGHT = 0.65*cm
DEFAULT_CHAR_WIDTH = 0.261*cm
# Default is Epson ESC/P2 standard (escape-code equivalence table; other
# printer families can supply their own dict via TextGenerator.escape_set).
DEFAULT_ESCAPE_SET = {
    'line-feed': chr(10),
    'form-feed': chr(12),
    'carriage-return': chr(13),
    'condensed': chr(15),
    'cancel-condensed': chr(18),
    'line-spacing-big': chr(27)+chr(48),
    'line-spacing-normal': chr(27)+chr(49),
    'line-spacing-short': chr(27)+chr(50),
    'italic': chr(27)+chr(52),
    'cancel-italic': chr(27)+chr(53),
    }
class Paragraph(object):
    """Minimal stand-in for a ReportLab-like paragraph used by the text
    generator: it just stores its text/style and records the wrap box."""

    text = ''
    style = None
    height = None
    width = None

    def __init__(self, text, style=None):
        self.text = text
        self.style = style

    def wrapOn(self, page_size, width, height):
        """Record the (width, height) box the paragraph is wrapped into.

        TODO: this should be more efficient with multiple lines.
        """
        self.width = width
        self.height = height
class TextGenerator(ReportGenerator):
    """This is a generator to output data in text/plain format.

    Attributes:

    * 'row_height' - should be the equivalent height of a row plus the space
      between rows. This is important to calculate how many rows has a page.
    * 'character_width' - should be the equivalent width of a character. This
      is important to calculate how many columns has a page.
    * 'to_printer' - is a boolean variable you can inform to generate a text
      to matrix printer or not. This means escape characters will be in output
      or not.
    * 'escape_set' - is a dictionary with equivalence table to escape codes.
      As far as we know, escape codes can vary depending of model or printer
      manufacturer (i.e. Epson, Lexmark, HP, etc.). This attribute is useful
      to support this. Default is ESC/P2 standard (Epson matrix printers)
    * 'filename' - is the file path you can inform optionally to save text to.
    * 'encode_to' - you can inform the coding identifier to force Geraldo to
      encode the output string on it. Example: 'latin-1'
    * 'manual_escape_codes' - a boolean variable that sets escape codes are
      manually informed or not.
    """
    row_height = DEFAULT_ROW_HEIGHT
    character_width = DEFAULT_CHAR_WIDTH
    _to_printer = True
    _escape_set = DEFAULT_ESCAPE_SET
    encode_to = None
    manual_escape_codes = False
    escapes_report_start = ''
    escapes_report_end = ''
    escapes_page_start = ''
    escapes_page_end = ''
    mimetype = 'text/plain'
    def __init__(self, report, cache_enabled=None, **kwargs):
        """Store the report, resolve the cache flag and apply any extra
        keyword attributes before computing the escape characters."""
        super(TextGenerator, self).__init__(report, **kwargs)
        # Cache enabled
        if cache_enabled is not None:
            self.cache_enabled = cache_enabled
        elif self.cache_enabled is None:
            self.cache_enabled = bool(self.report.cache_status)
        # Specific attributes (may override the class-level defaults above)
        for k,v in kwargs.items():
            setattr(self, k, v)
        self.update_escape_chars()
    def execute(self):
        """Render the report and return the generated text (or write it to
        'filename' when that attribute is set)."""
        super(TextGenerator, self).execute()
        # Calls the before_print event
        self.report.do_before_print(generator=self)
        # Render pages
        self.render_bands()
        # Returns rendered pages
        if self.return_pages:
            return self._rendered_pages
        # Calls the after_render event
        self.report.do_before_generate(generator=self)
        # Generate the pages
        text = self.generate_pages()
        # Encode
        if self.encode_to:
            text = text.encode(self.encode_to)
        # Calls the after_print event
        self.report.do_after_print(generator=self)
        # Saves to file or just returns the text
        if hasattr(self, 'filename'):
            # 'open' replaces the deprecated 'file' builtin (removed in
            # Python 3); the context manager guarantees the handle is closed
            # even if the write fails.
            with open(self.filename, 'w') as fp:
                fp.write(text)
        else:
            return text
    def get_hash_key(self, objects):
        """Appends txt extension to the hash_key"""
        return super(TextGenerator, self).get_hash_key(objects) + '.txt'
    def calculate_size(self, size):
        """Uses the module-level function 'calculate_size' to calculate a
        size, with additional support for the text-specific units
        '<n>*cols'/'<n>*col' and '<n>*rows'/'<n>*row'."""
        if isinstance(size, str):
            if size.endswith('*cols') or size.endswith('*col'):
                return int(size.split('*')[0]) * self.character_width
            elif size.endswith('*rows') or size.endswith('*row'):
                return int(size.split('*')[0]) * self.row_height
        # falls back to the imported geraldo.utils.calculate_size helper
        return calculate_size(size)
    def make_paragraph(self, text, style=None): # TODO: make style with basic functions, like alignment, bold, emphasis (italic), etc
        """Uses the Paragraph class to return a new paragraph object"""
        return Paragraph(text, style)
    def wrap_paragraph_on(self, paragraph, width, height):
        """Wraps the paragraph on the height/width informed"""
        paragraph.wrapOn(self.report.page_size, width, height)
    def wrap_barcode_on(self, barcode, width, height):
        """Do nothing with a barcode"""
        pass
    def make_paragraph_style(self, band, style=None):
        """Merge report default_style + band default_style + widget style"""
        d_style = self.report.default_style.copy()
        if band.default_style:
            for k,v in band.default_style.items():
                d_style[k] = v
        if style:
            for k,v in style.items():
                d_style[k] = v
        # NOTE(review): '%H%m%s' mixes hour/month and the platform-dependent
        # '%s'; it only needs to yield a quasi-unique style name -- confirm
        # the intent before "fixing" it to '%H%M%S'.
        return dict(name=datetime.datetime.now().strftime('%H%m%s'), **d_style)
    def keep_in_frame(self, widget, width, height, paragraphs, mode):
        # Does nothing for a while: TODO
        pass
    # METHODS THAT ARE TOTALLY SPECIFIC TO THIS GENERATOR AND MUST
    # OVERRIDE THE SUPERCLASS EQUIVALENT ONES
    def generate_pages(self):
        """Specific method that generates the pages"""
        self._generation_datetime = datetime.datetime.now()
        self._output = u''
        # Escapes
        self.add_escapes_report_start()
        for num, page in enumerate([page for page in self._rendered_pages if page.elements]):
            # Escapes
            self.add_escapes_page_start(num)
            # One string of spaces per printable row; widgets overwrite slices
            _page_output = [u' ' * self.page_columns_count] * self.page_rows_count
            self._current_page_number = num + 1
            # Loop at band widgets
            for element in page.elements:
                # Widget element
                if isinstance(element, Widget):
                    self.generate_widget(element, _page_output, num)
            # Adds the page output to output string
            self._output = ''.join([self._output, u'\n'.join(_page_output)])
            # Escapes
            self.add_escapes_page_end(num)
        # Escapes
        self.add_escapes_report_end()
        return self._output
    def generate_widget(self, widget, page_output, page_number=0):
        """Renders a widget element on canvas"""
        # Calls the before_print event
        try:
            widget.do_before_print(generator=self)
        except AbortEvent:
            return
        # Exits if is not visible
        if not widget.visible:
            return
        text = widget.text
        # Aligment
        if widget.style.get('alignment', None) == TA_CENTER:
            text = text.center(int(self.calculate_size(widget.width) / self.character_width))
        elif widget.style.get('alignment', None) == TA_RIGHT:
            text = text.rjust(int(self.calculate_size(widget.width) / self.character_width))
        self.print_in_page_output(page_output, text, widget.rect)
        # Calls the after_print event
        widget.do_after_print(generator=self)
    def generate_graphic(self, graphic, page_output):
        """Renders a graphic element"""
        # TODO: horizontal and vertical lines, rectangles and borders should work
        pass
        # Calls the before_print event - UNCOMMENT IF IMPLEMENT THIS METHOD
        #graphic.do_before_print(generator=self)
        # Calls the after_print event - UNCOMMENT IF IMPLEMENT THIS METHOD
        #graphic.do_after_print(generator=self)
    def print_in_page_output(self, page_output, text, rect):
        """Changes the array page_output (a matrix with rows and cols equivalent
        to rows and cols in a matrix printer page) inserting the text value in
        the left/top coordinates."""
        # Make the real rect for this text
        text_rect = {
            'top': int(round(self.calculate_size(rect['top']) / self.row_height)),
            'left': int(round(self.calculate_size(rect['left']) / self.character_width)),
            'height': int(round(self.calculate_size(rect['height']) / self.row_height)),
            'width': int(round(self.calculate_size(rect['width']) / self.character_width)),
            'bottom': int(round(self.calculate_size(rect['bottom']) / self.row_height)),
            'right': int(round(self.calculate_size(rect['right']) / self.character_width)),
            }
        # Default height and width
        text_rect['height'] = text_rect['height'] or 1
        text_rect['width'] = text_rect['width'] or len(text)
        if text_rect['height'] and text_rect['width']:
            # Make a text with the exact width
            text = text.ljust(text_rect['width'])[:text_rect['width']] # Align to left - TODO: should have center and right justifying also
            # Inserts the text into the page output buffer
            _temp = page_output[text_rect['top']]
            _temp = _temp[:text_rect['left']] + text + _temp[text_rect['right']:]
            page_output[text_rect['top']] = _temp[:self.get_page_columns_count()]
    def add_escapes_report_start(self):
        """Adds the escape commands to the output variable"""
        self._output = ''.join([self._output, self.escapes_report_start])
    def add_escapes_report_end(self):
        """Adds the escape commands to the output variable"""
        self._output = ''.join([self._output, self.escapes_report_end])
    def add_escapes_page_start(self, num):
        """Adds the escape commands to the output variable"""
        self._output = ''.join([self._output, self.escapes_page_start])
    def add_escapes_page_end(self, num):
        """Adds the escape commands to the output variable"""
        self._output = ''.join([self._output, self.escapes_page_end])
    def update_escape_chars(self):
        """Sets the escape chars to be ran for some events on report generation"""
        if self.manual_escape_codes:
            return
        if self.to_printer:
            self.escapes_report_start = ''
            self.escapes_report_end = ''
            self.escapes_page_start = ''
            self.escapes_page_end = self.escape_set['form-feed']
        else:
            self.escapes_report_start = ''
            self.escapes_report_end = ''
            self.escapes_page_start = ''
            self.escapes_page_end = ''
    def get_escape_set(self):
        """Getter for the 'escape_set' property."""
        return self._escape_set
    def set_escape_set(self, val):
        """Setter for 'escape_set'; recomputes the escape characters."""
        self._escape_set = val
        self.update_escape_chars()
    escape_set = property(get_escape_set, set_escape_set)
    def get_to_printer(self):
        """Getter for the 'to_printer' property."""
        return self._to_printer
    def set_to_printer(self, val):
        """Setter for 'to_printer'; recomputes the escape characters."""
        self._to_printer = val
        self.update_escape_chars()
    to_printer = property(get_to_printer, set_to_printer)
    def get_page_rows_count(self):
        """Number of printable rows on a page (page height / row height)."""
        if not hasattr(self, '_page_rows_count'):
            height = self.calculate_size(self.report.page_size[1]) / self.row_height
            self._page_rows_count = int(round(height))
        return self._page_rows_count
    page_rows_count = property(get_page_rows_count)
    def get_page_columns_count(self):
        """Number of printable columns on a page (page width / char width)."""
        if not hasattr(self, '_page_columns_count'):
            width = self.calculate_size(self.report.page_size[0]) / self.character_width
            self._page_columns_count = int(round(width))
        return self._page_columns_count
    page_columns_count = property(get_page_columns_count)
| HireIQ/geraldo | geraldo/generators/text.py | Python | lgpl-3.0 | 12,316 |
# -*- coding: utf-8 -*-
# Python stdlib
import unittest
# py.test
import pytest
# Python tfstate
from tfstate.base import Resource
from tfstate.provider.aws import AwsResource
# Unit tests
from unit_tests.base import BaseResourceUnitTest
@pytest.mark.provider_aws
class AwsResourceUnitTest(BaseResourceUnitTest):
    """Checks AwsResource construction against the bundled aws_eip fixture."""

    def test_object_constructor(self):
        self.load_example_json('aws/aws_eip/aws_eip_example.json')
        name, data = self.example_data.popitem()
        resource = AwsResource(name, data)
        self.assertIsInstance(resource, Resource, "AwsResource object does not inherit from Resource")
        self.assertEqual(resource.terraform_provider, "aws", "AwsResource provider is not aws")
        self.assertEqual(resource.resource_name, name, "AwsResource name does not match")
        self.assertEqual(resource.native_data, data, "AwsResource native data does not match")
        self.assertEqual(resource.id, resource.primary_data['id'], "Resource ID does not match")
def suite():
    """Assemble and return the unittest suite for this module."""
    test_suite = unittest.TestSuite()
    test_suite.addTest(
        unittest.TestLoader().loadTestsFromTestCase(AwsResourceUnitTest))
    return test_suite
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite())
| rodynnz/python-tfstate | unit_tests/test_tfstate/test_provider/test_aws/test_base.py | Python | lgpl-3.0 | 1,331 |
# Copyright (C) 2015-2022 by the RBniCS authors
#
# This file is part of RBniCS.
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from rbnics.backends.abstract.tensors_list import TensorsList
from rbnics.utils.decorators import AbstractBackend
@AbstractBackend
class TensorSnapshotsList(TensorsList):
    # Abstract-backend marker: a list of snapshot tensors. The whole
    # interface is inherited from TensorsList; concrete backends register
    # their own implementation through the AbstractBackend decorator.
    pass
| mathLab/RBniCS | rbnics/backends/abstract/tensor_snapshots_list.py | Python | lgpl-3.0 | 311 |
from __future__ import print_function
import os
import unittest
from codecs import open as codec_open
from six import iteritems
import numpy as np
#import pyNastran
#from pyNastran.bdf.bdf import BDF
#root_path = pyNastran.__path__[0]
#test_path = os.path.join(root_path, 'bdf', 'test', 'unit')
import pyNastran
from pyNastran.bdf.bdf import BDF, read_bdf
from pyNastran.bdf.mesh_utils.bdf_equivalence import bdf_equivalence_nodes
from pyNastran.bdf.mesh_utils.collapse_bad_quads import convert_bad_quads_to_tris
from pyNastran.bdf.mesh_utils.delete_bad_elements import get_bad_shells
from pyNastran.utils.log import SimpleLogger
# testing these imports are up to date
from pyNastran.bdf.mesh_utils.utils import *
# Root of the installed pyNastran package; model files live next to it.
pkg_path = pyNastran.__path__[0]
# Keep numpy array reprs compact and deterministic for assertion messages.
np.set_printoptions(edgeitems=3, infstr='inf',
                    linewidth=75, nanstr='nan', precision=3,
                    suppress=True, threshold=1000, formatter=None)
# Quiet logger shared by all tests below.
log = SimpleLogger(level='error')
class TestMeshUtils(unittest.TestCase):
    """Tests for pyNastran.bdf.mesh_utils: bad-shell detection, node
    equivalencing, quad collapsing, renumbering and deck merging.

    Each test writes a small BDF deck to disk, runs the utility under test,
    and checks the resulting model.
    """
    def test_quad_180_01(self):
        r"""
        Identify a 180+ degree quad

        y
        ^          4
        |        / |
        |      /   |
        |    /     |
        |  /       |
        |/         |
        1------2   |  ----> x
                \  |
                 \ |
                  \|
                   3
        """
        msg = (
            'CEND\n'
            'BEGIN BULK\n'
            'GRID,1,,0.,0.,0.\n'
            'GRID,2,,1.,0.,0.\n'
            'GRID,3,,2.,-1.,0.\n'
            'GRID,4,,2., 1.,0.\n'
            'CQUAD4,100,1, 1,2,3,4\n'
            'PSHELL,1,1,0.1\n'
            'MAT1,1,3.0,, 0.3\n'
            'ENDDATA'
        )
        bdf_filename = 'cquad4.bdf'
        with codec_open(bdf_filename, 'w') as bdf_file:
            bdf_file.write(msg)
        model = read_bdf(bdf_filename, log=log, xref=True)
        xyz_cid0 = model.get_xyz_in_coord(cid=0, dtype='float32')
        nid_map = {}
        for i, (nid, node) in enumerate(sorted(iteritems(model.nodes))):
            #xyz = node.get_position()
            #xyz_cid0[i, :] = xyz
            nid_map[nid] = i
        # only the max interior angle is restrictive here (180 degrees)
        eids_to_delete = get_bad_shells(model, xyz_cid0, nid_map, max_theta=180.,
                                        max_skew=1000., max_aspect_ratio=1000.)
        assert eids_to_delete == [100], eids_to_delete
        os.remove(bdf_filename)
    def test_eq1(self):
        """
        Collapse nodes 2 and 3; consider 1-3
        """
        msg = (
            'CEND\n'
            'BEGIN BULK\n'
            'GRID,1,,0.,0.,0.\n'
            'GRID,2,,0.,0.,0.5\n'
            'GRID,3,,0.,0.,0.51\n'
            'GRID,10,,0.,0.,1.\n'
            'GRID,11,,0.,0.,1.\n'
            'CTRIA3,1,1,1,2,11\n'
            'CTRIA3,3,1,2,3,11\n'
            'CTRIA3,4,1,1,2,10\n'
            'PSHELL,1,1,0.1\n'
            'MAT1,1,3.0,, 0.3\n'
            'ENDDATA'
        )
        bdf_filename = 'nonunique.bdf'
        bdf_filename_out = 'unique.bdf'
        with codec_open(bdf_filename, 'w') as bdf_file:
            bdf_file.write(msg)
        tol = 0.2
        bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
                              renumber_nodes=False, neq_max=4, xref=True,
                              node_set=None, crash_on_collapse=False,
                              log=log, debug=False)
        # model = BDF(debug=False)
        # model.read_bdf(bdf_filename_out)
        # assert len(model.nodes) == 3, len(model.nodes)
        os.remove(bdf_filename)
        os.remove(bdf_filename_out)
    def test_eq2(self):
        r"""
          5
        6 *-------* 40
          | \     |
          |   \   |
          |     \ |
          *-------* 3
          1       20
        """
        msg = (
            'CEND\n'
            'BEGIN BULK\n'
            'GRID,1, , 0., 0., 0.\n'
            'GRID,20,, 1., 0., 0.\n'
            'GRID,3, , 1.01, 0., 0.\n'
            'GRID,40,, 1., 1., 0.\n'
            'GRID,5, , 0., 1., 0.\n'
            'GRID,6, , 0., 1.01, 0.\n'
            'CTRIA3,1, 100,1,20,6\n'
            'CTRIA3,10,100,3,40,5\n'
            'PSHELL,100,1000,0.1\n'
            'MAT1,1000,3.0,, 0.3\n'
            'ENDDATA'
        )
        bdf_filename = 'nonunique.bdf'
        bdf_filename_out = 'unique.bdf'
        with codec_open(bdf_filename, 'w') as bdf_file:
            bdf_file.write(msg)
        tol = 0.2
        # Collapse 5/6 and 20/3; Put a 40 and 20 to test non-sequential IDs
        bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
                              renumber_nodes=False, neq_max=4, xref=True,
                              node_set=None, crash_on_collapse=False,
                              log=log, debug=False)
        model = BDF(log=log, debug=False)
        model.read_bdf(bdf_filename_out)
        msg = 'nnodes=%s\n' % len(model.nodes)
        for nid, node in sorted(iteritems(model.nodes)):
            msg += 'nid=%s xyz=%s\n' % (nid, node.xyz)
        assert len(model.nodes) == 4, msg
        # os.remove(bdf_filename)
        os.remove(bdf_filename_out)
        tol = 0.009
        # Don't collapse anything because the tolerance is too small
        bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
                              renumber_nodes=False, neq_max=4, xref=True,
                              node_set=None, crash_on_collapse=False,
                              log=log, debug=False)
        model = BDF(log=log, debug=False)
        model.read_bdf(bdf_filename_out)
        assert len(model.nodes) == 6, len(model.nodes)
        os.remove(bdf_filename_out)
        tol = 0.2
        node_set = [2, 3]
        # Node 2 is not defined, so crash
        with self.assertRaises(RuntimeError):
            # node 2 is not defined because it should be node 20
            bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
                                  renumber_nodes=False, neq_max=4, xref=True,
                                  node_set=node_set, crash_on_collapse=False,
                                  log=log, debug=False)
        tol = 0.2
        node_list = [20, 3]
        # Only collapse 2 nodes
        bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
                              renumber_nodes=False, neq_max=4, xref=True,
                              node_set=node_list, crash_on_collapse=False,
                              log=log, debug=False)
        model = BDF(log=log, debug=False)
        model.read_bdf(bdf_filename_out)
        assert len(model.nodes) == 5, len(model.nodes)
        os.remove(bdf_filename_out)
        tol = 0.2
        node_set = set([20, 3])
        # Only collapse 2 nodes
        bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
                              renumber_nodes=False, neq_max=4, xref=True,
                              node_set=node_set, crash_on_collapse=False,
                              log=log, debug=False)
        model = BDF(log=log, debug=False)
        model.read_bdf(bdf_filename_out)
        assert len(model.nodes) == 5, len(model.nodes)
        os.remove(bdf_filename_out)
        tol = 0.2
        aset = np.array([20, 3, 4], dtype='int32')
        bset = np.array([20, 3], dtype='int32')
        node_set = np.intersect1d(aset, bset)
        assert len(node_set) > 0, node_set
        # Only collapse 2 nodes
        bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
                              renumber_nodes=False, neq_max=4, xref=True,
                              node_set=node_set, crash_on_collapse=False, debug=False)
        model = BDF(debug=False)
        model.read_bdf(bdf_filename_out)
        assert len(model.nodes) == 5, len(model.nodes)
        os.remove(bdf_filename_out)
    def test_eq3(self):
        """node_set=None"""
        lines = [
            '$pyNastran: version=msc',
            '$pyNastran: punch=True',
            '$pyNastran: encoding=ascii',
            '$NODES',
            '$ Nodes to merge:',
            '$ 5987 10478',
            '$ GRID 5987 35.46 -6. 0.',
            '$ GRID 10478 35.46 -6. 0.',
            '$ 5971 10479',
            '$ GRID 5971 34.92 -6. 0.',
            '$ GRID 10479 34.92 -6. 0.',
            '$ 6003 10477',
            '$ GRID 6003 36. -6. 0.',
            '$ GRID 10477 36. -6. 0.',
            'GRID 5971 34.92 -6. 0.',
            'GRID 5972 34.92-5.73333 0.',
            'GRID 5973 34.92-5.46667 0.',
            'GRID 5987 35.46 -6. 0.',
            'GRID 5988 35.46-5.73333 0.',
            'GRID 5989 35.46-5.46667 0.',
            'GRID 6003 36. -6. 0.',
            'GRID 6004 36.-5.73333 0.',
            'GRID 6005 36.-5.46667 0.',
            'GRID 10476 36. -6. -1.5',
            'GRID 10477 36. -6. 0.',
            'GRID 10478 35.46 -6. 0.',
            'GRID 10479 34.92 -6. 0.',
            'GRID 10561 34.92 -6. -.54',
            '$ELEMENTS_WITH_PROPERTIES',
            'PSHELL 1 1 .1',
            'CQUAD4 5471 1 5971 5987 5988 5972',
            'CQUAD4 5472 1 5972 5988 5989 5973',
            'CQUAD4 5486 1 5987 6003 6004 5988',
            'CQUAD4 5487 1 5988 6004 6005 5989',
            'PSHELL 11 1 .1',
            'CTRIA3 9429 11 10561 10476 10478',
            'CTRIA3 9439 11 10478 10479 10561',
            'CTRIA3 9466 11 10476 10477 10478',
            '$MATERIALS',
            'MAT1 1 3. .3',
        ]
        bdf_filename = 'nonunique2.bdf'
        bdf_filename_out = 'unique2.bdf'
        with codec_open(bdf_filename, 'w') as bdf_file:
            bdf_file.write('\n'.join(lines))
        tol = 0.01
        bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
                              renumber_nodes=False, neq_max=4, xref=True,
                              node_set=None, crash_on_collapse=False,
                              log=log, debug=False)
        model = BDF(debug=False)
        model.read_bdf(bdf_filename_out)
        assert len(model.nodes) == 11, len(model.nodes)
        os.remove(bdf_filename)
        os.remove(bdf_filename_out)
    def test_eq4(self):
        r"""
          5
        6 *-------* 40
          | \     |
          |   \   |
          |     \ |
          *-------* 3
          1       20
        """
        msg = 'CEND\n'
        msg += 'BEGIN BULK\n'
        msg += 'GRID,1, , 0., 0., 0.\n'
        msg += 'GRID,20,, 1., 0., 0.\n'
        msg += 'GRID,3, , 1.01, 0., 0.\n'
        msg += 'GRID,41,, 1., 1., 0.\n'  # eq
        msg += 'GRID,4,, 1., 1., 0.\n'  # eq
        msg += 'GRID,40,, 1., 1., 0.\n'  # eq
        msg += 'GRID,4,, 1., 1., 0.\n'  # eq
        msg += 'GRID,5, , 0., 1., 0.\n'
        msg += 'GRID,6, , 0., 1.01, 0.\n'
        msg += 'CTRIA3,1, 100,1,20,6\n'
        msg += 'CTRIA3,10,100,3,40,5\n'
        msg += 'PSHELL,100,1000,0.1\n'
        msg += 'MAT1,1000,3.0,, 0.3\n'
        msg += 'ENDDATA'
        bdf_filename = 'nonunique.bdf'
        bdf_filename_out = 'unique.bdf'
        with codec_open(bdf_filename, 'w') as bdf_file:
            bdf_file.write(msg)
        tol = 0.2
        node_set = [4, 40, 41]
        # Collapse 5/6 and 20/3; Put a 40 and 20 to test non-sequential IDs
        bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
                              renumber_nodes=False, neq_max=4, xref=True,
                              node_set=node_set, crash_on_collapse=False,
                              log=log, debug=False)
        model = BDF(log=log, debug=False)
        model.read_bdf(bdf_filename_out)
        nids = model.nodes.keys()
        assert len(model.nodes) == 6, 'nnodes=%s nodes=%s' % (len(model.nodes), nids)
        assert 1 in nids, nids
        assert 20 in nids, nids
        assert 3 in nids, nids
        assert 4 in nids, nids
        assert 5 in nids, nids
        assert 6 in nids, nids
        assert 40 not in nids, nids
        assert 41 not in nids, nids
        #print(nids)
        os.remove(bdf_filename)
        os.remove(bdf_filename_out)
    def test_fix_bad_quads(self):
        """split high interior angle quads"""
        msg = [
            'SOL 101',
            'CEND',
            'BEGIN BULK',
            'GRID,1,,0.,0.,0.',
            'GRID,2,,1.,0.,0.',
            'GRID,3,,1.,1.,0.',
            'GRID,4,,0.,1.,0.',
            'GRID,5,,1.,1.,0.00001',
            'GRID,6,,0.,0.,0.00001',
            'CQUAD4,1, 2, 1,2,3,4',
            'CQUAD4,2, 2, 1,2,3,5',
            'CQUAD4,3, 2, 1,6,3,5',
        ]
        bdf_filename = 'fix_bad_quads.bdf'
        with open(bdf_filename, 'w') as bdf_file:
            bdf_file.write('\n'.join(msg))
        model = read_bdf(bdf_filename, log=log, xref=False, debug=False)
        model.cross_reference(xref=True, xref_elements=False,
                              xref_nodes_with_elements=False,
                              xref_properties=False,
                              xref_masses=False,
                              xref_materials=False,
                              xref_loads=False,
                              xref_constraints=False,
                              xref_aero=False,
                              xref_sets=False,
                              xref_optimization=False)
        convert_bad_quads_to_tris(model, min_edge_length=0.01)
        #for eid, elem in sorted(iteritems(model.elements)):
            #print(elem)
        assert model.card_count['CQUAD4'] == 2, model.card_count
        assert model.card_count['CTRIA3'] == 1, model.card_count
        os.remove(bdf_filename)
    def test_renumber_01(self):
        """renumbers a deck in a couple ways"""
        bdf_filename = os.path.abspath(
            os.path.join(pkg_path, '..', 'models', 'bwb', 'BWB_saero.bdf'))
        bdf_filename_out1 = os.path.abspath(
            os.path.join(pkg_path, '..', 'models', 'bwb', 'BWB_saero1.out'))
        bdf_filename_out2 = os.path.abspath(
            os.path.join(pkg_path, '..', 'models', 'bwb', 'BWB_saero2.out'))
        bdf_filename_out3 = os.path.abspath(
            os.path.join(pkg_path, '..', 'models', 'bwb', 'BWB_saero3.out'))
        model = bdf_renumber(bdf_filename, bdf_filename_out1, size=8,
                             is_double=False, starting_id_dict=None,
                             round_ids=False, cards_to_skip=None)
        model = read_bdf(bdf_filename, log=log)
        bdf_renumber(model, bdf_filename_out2, size=16, is_double=False,
                     starting_id_dict={
                         'eid' : 1000, 'pid':2000, 'mid':3000,
                         'spc_id' : 4000,},
                     round_ids=False, cards_to_skip=None)
        bdf_renumber(bdf_filename, bdf_filename_out3, size=8,
                     is_double=False, starting_id_dict=None,
                     round_ids=True, cards_to_skip=None)
        # the renumbered decks must still be readable
        read_bdf(bdf_filename_out1, log=log)
        read_bdf(bdf_filename_out2, log=log)
        read_bdf(bdf_filename_out3, log=log)
    def test_merge_01(self):
        """merges multiple bdfs into a single deck"""
        #log = SimpleLogger(level='info')
        bdf_filename1 = os.path.abspath(os.path.join(
            pkg_path, '..', 'models', 'bwb', 'BWB_saero.bdf'))
        bdf_filename2 = os.path.abspath(os.path.join(
            pkg_path, '..', 'models', 'sol_101_elements', 'static_solid_shell_bar.bdf'))
        bdf_filename3 = os.path.abspath(os.path.join(
            pkg_path, '..', 'models', 'solid_bending', 'solid_bending.bdf'))
        bdf_filename4 = os.path.abspath(os.path.join(
            pkg_path, '..', 'models', 'iSat', 'ISat_Dploy_Sm.dat'))
        bdf_filename_out1 = os.path.abspath(
            os.path.join(pkg_path, '..', 'models', 'bwb', 'BWBsaero_staticbar_8.out'))
        bdf_filename_out2 = os.path.abspath(
            os.path.join(pkg_path, '..', 'models', 'bwb', 'BWBsaero_static_bar_16.out'))
        bdf_filename_out3 = os.path.abspath(
            os.path.join(pkg_path, '..', 'models', 'bwb', 'BWBsaero_staticbar_isat.out'))
        bdf_filenames1 = [bdf_filename1, bdf_filename2]
        bdf_filenames2 = [bdf_filename1, bdf_filename2, bdf_filename3, bdf_filename4]
        bdf_merge(bdf_filenames1, bdf_filename_out=bdf_filename_out1,
                  renumber=True, encoding=None, size=8, is_double=False,
                  cards_to_skip=None, log=log)
        bdf_merge(bdf_filenames1, bdf_filename_out=bdf_filename_out2,
                  renumber=False, encoding=None, size=16, is_double=False,
                  cards_to_skip=None, log=log)
        bdf_merge(bdf_filenames2, bdf_filename_out=bdf_filename_out3,
                  renumber=False, encoding=None, size=16, is_double=False,
                  cards_to_skip=None, log=log)
        # the merged decks must still be readable
        read_bdf(bdf_filename_out1, log=log)
        read_bdf(bdf_filename_out2, log=log)
        read_bdf(bdf_filename_out3, log=log)
# Allow running this test module directly.
if __name__ == '__main__':  # pragma: no cover
    unittest.main()
| saullocastro/pyNastran | pyNastran/bdf/mesh_utils/test/test_mesh_utils.py | Python | lgpl-3.0 | 17,642 |
#!/usr/bin/env python
'''
fit best estimate of magnetometer offsets using the algorithm from
Bill Premerlani
'''
import sys, time, os, math
# allow import from the parent directory, where mavlink.py is
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
# command line option handling
from optparse import OptionParser
parser = OptionParser("magfit_delta.py [options]")
parser.add_option("--no-timestamps",dest="notimestamps", action='store_true', help="Log doesn't have timestamps")
parser.add_option("--condition",dest="condition", default=None, help="select packets by condition")
parser.add_option("--mav10", action='store_true', default=False, help="Use MAVLink protocol 1.0")
parser.add_option("--verbose", action='store_true', default=False, help="verbose offset output")
parser.add_option("--gain", type='float', default=0.01, help="algorithm gain")
parser.add_option("--noise", type='float', default=0, help="noise to add")
parser.add_option("--max-change", type='float', default=10, help="max step change")
parser.add_option("--min-diff", type='float', default=50, help="min mag vector delta")
parser.add_option("--history", type='int', default=20, help="how many points to keep")
parser.add_option("--repeat", type='int', default=1, help="number of repeats through the data")
(opts, args) = parser.parse_args()
# MAVLink 1.0 must be selected via the environment *before* mavutil is imported
if opts.mav10:
    os.environ['MAVLINK10'] = '1'
import mavutil
from rotmat import Vector3, Matrix3
# at least one log file is required on the command line
if len(args) < 1:
    print("Usage: magfit_delta.py [options] <LOGFILE...>")
    sys.exit(1)
def noise():
    """Return a random vector of length opts.noise (uniformly random
    direction via a normalized Gaussian sample)."""
    from random import gauss
    direction = Vector3(gauss(0, 1), gauss(0, 1), gauss(0, 1))
    direction.normalize()
    return direction * opts.noise
def find_offsets(data, ofs):
    '''find mag offsets by applying Bills "offsets revisited" algorithm
       on the data

       This is an implementation of the algorithm from:
          http://gentlenav.googlecode.com/files/MagnetometerOffsetNullingRevisited.pdf

       data is a list of raw mag vectors; ofs is the starting offset estimate.
       Returns the refined offset vector.
    '''

    # a limit on the maximum change in each step
    max_change = opts.max_change

    # the gain factor for the algorithm
    gain = opts.gain

    # quantize the readings to integer counts and add optional synthetic noise
    data2 = []
    for d in data:
        d = d.copy() + noise()
        d.x = float(int(d.x + 0.5))
        d.y = float(int(d.y + 0.5))
        d.z = float(int(d.z + 0.5))
        data2.append(d)
    data = data2

    history_idx = 0
    mag_history = data[0:opts.history]

    for i in range(opts.history, len(data)):
        B1 = mag_history[history_idx] + ofs
        B2 = data[i] + ofs

        diff = B2 - B1
        diff_length = diff.length()
        if diff_length <= opts.min_diff:
            # the mag vector hasn't changed enough - we don't get any
            # information from this
            history_idx = (history_idx+1) % opts.history
            continue

        mag_history[history_idx] = data[i]
        history_idx = (history_idx+1) % opts.history

        # equation 6 of Bills paper
        delta = diff * (gain * (B2.length() - B1.length()) / diff_length)

        # limit the change from any one reading. This is to prevent
        # single crazy readings from throwing off the offsets for a long
        # time
        delta_length = delta.length()
        if delta_length > max_change:
            delta *= max_change / delta_length

        # set the new offsets
        ofs = ofs - delta

        if opts.verbose:
            # was a Python 2 'print ofs' statement, which is a syntax error
            # on Python 3; the rest of this script already uses print(...)
            print(ofs)
    return ofs
def magfit(logfile):
    '''find best magnetometer offset fit to a log file'''
    # BUG FIX: the body previously ignored the 'logfile' parameter and read
    # the module-level 'filename' loop variable instead; it only worked
    # because the caller happened to use that global name.
    print("Processing log %s" % logfile)

    # open the log file
    mlog = mavutil.mavlink_connection(logfile, notimestamps=opts.notimestamps)

    data = []
    mag = None
    offsets = Vector3(0, 0, 0)

    # now gather all the data
    while True:
        # get the next MAVLink message in the log
        m = mlog.recv_match(condition=opts.condition)
        if m is None:
            break
        if m.get_type() == "SENSOR_OFFSETS":
            # update offsets that were used during this flight
            offsets = Vector3(m.mag_ofs_x, m.mag_ofs_y, m.mag_ofs_z)
        if m.get_type() == "RAW_IMU" and offsets is not None:
            # extract one mag vector, removing the offsets that were
            # used during that flight to get the raw sensor values
            mag = Vector3(m.xmag, m.ymag, m.zmag) - offsets
            data.append(mag)

    print("Extracted %u data points" % len(data))
    print("Current offsets: %s" % offsets)

    # run the fitting algorithm from a zero initial guess (the previous
    # 'ofs = offsets' assignment was a dead store, immediately overwritten)
    ofs = Vector3(0, 0, 0)
    for r in range(opts.repeat):
        ofs = find_offsets(data, ofs)
        print('Loop %u offsets %s' % (r, ofs))
        sys.stdout.flush()
    print("New offsets: %s" % ofs)
# Process every log given on the command line.  (A never-read
# 'total = 0.0' accumulator was removed: nothing in this script used it.)
for filename in args:
    magfit(filename)
| meee1/pymavlink | pymavlink/examples/magfit_delta.py | Python | lgpl-3.0 | 4,788 |
# -*- coding: utf-8 -*-
# @Author: Claire-Eleuthèriane Gerrer
# @Date: 2021-10-26 09:44:54
# @Last Modified by: Claire-Eleuthèriane Gerrer
# @Last Modified time: 2021-11-19 09:56:57
"""
Set FMU simulation parameters
=============================
"""
# %%
# ``FMUFunction`` is an OpenTURNS-friendly overlay of the class
# ``ÒpenTURNSFMUFunction``, closer to the underlying PyFMI implementation.
# Some FMU simulation parameters can be given to ``FMUFunction``, yet most of
# them can only be passed to an ``OpenTURNSFMUFunction``.
# %%
# ------------
# %%
# First, retrieve the path to the FMU *deviation.fmu*.
# Recall the deviation model is static, i.e. its output does not evolve over
# time.
import otfmi.example.utility
import openturns as ot
path_fmu = otfmi.example.utility.get_path_fmu("deviation")
# %%
# The FMU simulation final time is the only simulation-related parameter that
# can be passed to ``FMUFunction``. This parameter is useless if the FMU is
# really time-independent (like this example); yet it can come in handy if the
# FMU requires time to converge.
function = otfmi.FMUFunction(
path_fmu,
inputs_fmu=["E", "I"],
outputs_fmu=["y"],
final_time=50.)
inputPoint = ot.Point([2e9, 7e7])
outputPoint = function(inputPoint)
print(outputPoint)
# %%
# To set more parameters for the FMU simulation, ``OpenTURNSFMUFunction`` can be
# employed. Below, we set the start time for the simulation, the PyFMI algorithm
# running the simulation, and require simulation silent mode.
midlevel_function = otfmi.OpenTURNSFMUFunction(
path_fmu,
inputs_fmu=["E", "I"],
outputs_fmu=["y"])
outputPoint = midlevel_function.simulate(
inputPoint,
start_time=10.,
algorithm="FMICSAlg",
options={"silent_mode": True})
# %%
# For advanced users, the middle-level class ``OpenTURNSFMUFunction`` also gives
# access to the PyFMI model. We can hence access all PyFMI's object methods:
pyfmi_model = midlevel_function.model
print(dir(pyfmi_model))
# %%
# .. note::
# Otfmi' classes ``FMUFunction`` and ``OpenTURNSFMUFunction`` are designed to
# highlight the most useful PyFMI's methods and simplify their use!
| openturns/otfmi | doc/example/static/plot_set.py | Python | lgpl-3.0 | 2,177 |
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: LGPL-3.0-only
import sys
import unittest
import packet
class obj(object):
    """Lightweight attribute bag: exposes the keys of *dict_* as attributes."""
    def __init__(self, dict_):
        # vars(self) is the instance __dict__, so this mirrors the mapping
        # directly onto attributes.
        vars(self).update(dict_)
class ErrorTest(unittest.TestCase):
    """Checks that packet.Error keeps the cause it was constructed with."""

    def test_cause(self):
        cause = "cause"
        err = packet.Error("boom", cause)
        # assertIn(a, b) asserts ``a in b`` -- here, containment in *cause*.
        self.assertIn(err.cause, cause)
class BaseAPITest(unittest.TestCase):
    """Construction and request-method validation of packet.baseapi.BaseAPI."""

    def setUp(self):
        self.auth_token = "fake_auth_token"
        self.consumer_token = "fake_consumer_token"
        self.end_point = "api.packet.net"
        self._user_agent_prefix = "fake_user_agent"

    def _build(self):
        # Shared constructor call used by every test below.
        return packet.baseapi.BaseAPI(
            self.auth_token, self.consumer_token, self._user_agent_prefix
        )

    def test_init_all(self):
        api = self._build()
        self.assertEqual(api.end_point, self.end_point)
        self.assertEqual(api.auth_token, self.auth_token)
        self.assertEqual(api.consumer_token, self.consumer_token)
        self.assertEqual(api._user_agent_prefix, self._user_agent_prefix)

    def test_call_api_with_end_point(self):
        api = self._build()
        # assertRaisesRegex only exists on Python 3.
        if sys.version_info[0] == 3:
            self.assertRaisesRegex(
                packet.Error,
                "method type not recognized as one of",
                api.call_api,
                "fake_path",
                "bad_method",
            )
class ResponseErrorTest(unittest.TestCase):
    """String rendering and attribute wiring of packet.ResponseError."""

    def setUp(self):
        # Fake HTTP response plus the two error-payload shapes the API uses.
        self.resp500 = obj({"status_code": 500})
        self.errBoom = {"error": "boom"}
        self.errBangBoom = {"errors": ["bang", "boom"]}
        self.exception = Exception("x")

    def test_init_empty(self):
        err = packet.ResponseError(self.resp500, None, None)
        self.assertIn("empty", str(err))

    def test_init_string(self):
        err = packet.ResponseError(self.resp500, "whoops", None)
        self.assertIn("whoops", str(err))

    def test_init_error(self):
        err = packet.ResponseError(self.resp500, self.errBoom, self.exception)
        self.assertIn("Error 500: boom", str(err))
        self.assertEqual(500, err.response.status_code)
        self.assertEqual(self.exception, err.cause)

    def test_init_errors(self):
        err = packet.ResponseError(self.resp500, self.errBangBoom, self.exception)
        self.assertIn("Error 500: bang, boom", str(err))
        self.assertEqual(500, err.response.status_code)
        self.assertEqual(self.exception, err.cause)
# Run the test suite when executed directly; unittest.main() discovers the
# TestCase classes above and its exit status is propagated to the shell.
if __name__ == "__main__":
    sys.exit(unittest.main())
| packethost/packet-python | test/test_baseapi.py | Python | lgpl-3.0 | 2,643 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import src.isa
class Parser(object):
    """Two-pass assembler for the toy ISA defined in ``src.isa``.

    Pass 1 computes the address of every jumpmark (``name:`` pseudo-token),
    pass 2 substitutes ``@name`` operand references by those addresses, and
    pass 3 encodes each token into one (or, for memory ops, two) 4-byte
    tuples ``(op_code, flags, op1, op2)``.
    """

    def __init__(self):
        # instruction table: mnemonic -> (op_code, argc, memc)
        self.__instr = src.isa.instructions

    def parse(self, tokens):
        """Encode *tokens* (list of {"instr","op1","op2"} dicts, modified
        in place: jumpmark tokens are dropped and ``@`` operands rewritten)
        and return the list of encoded 4-tuples."""
        marks = {}
        offset = 0
        # Pass 1: a token whose mnemonic contains ":" is a jumpmark; record
        # the address it labels (the running instruction-size counter).
        for tok in tokens:
            mnemonic = tok["instr"]
            if ":" in mnemonic:
                marks[mnemonic.replace(":", "")] = offset
            else:
                offset = offset + self.__instr[mnemonic][2]
        # Drop the jumpmark pseudo-tokens (in place, callers see the change).
        tokens[:] = [tok for tok in tokens if ":" not in tok["instr"]]
        # Pass 2: rewrite "@name" operands with the recorded address.
        for tok in tokens:
            for key in ("op1", "op2"):
                raw = tok[key] if tok[key] is not None else "0"
                if "@" in raw:
                    tok[key] = str(marks[raw.replace("@", "")])
        # Pass 3: encode every remaining token.
        encoded = []
        for tok in tokens:
            (op_code, argc, memc) = self.__instr[tok["instr"]]
            raw1 = tok["op1"] if tok["op1"] is not None else "0"
            raw2 = tok["op2"] if tok["op2"] is not None else "0"
            reg1 = int(raw1.replace("r", ""))
            val2 = raw2.replace("$", "")
            try:
                val2 = int(val2.replace("r", ""))
            except ValueError:
                # not decimal -- fall back to hexadecimal (e.g. "$ff")
                val2 = int(val2.replace("r", ""), 16)
            flags = 0
            mnemonic = tok["instr"]
            if mnemonic in ("ld", "st"):
                # memory access: flag bit 0 set for register-indirect operands
                if "$" not in raw2:
                    flags = flags | (1 << 0)
                encoded.append((op_code, flags, reg1, 0))  # instruction word
                # little-endian byte decomposition of the 32-bit address
                encoded.append(((val2 >> 0) & 0xff,
                                (val2 >> 8) & 0xff,
                                (val2 >> 16) & 0xff,
                                (val2 >> 24) & 0xff))
            elif mnemonic in ("jmp", "breq", "brne", "brp", "brn", "call", "ret"):
                # branch/jump: flag bit 0 set for immediate (non-register) target
                if "r" not in raw1:
                    flags = flags | (1 << 0)
                encoded.append((op_code, flags, reg1, val2))
            else:
                # ALU-style op: flag bit 0 set for immediate second operand
                if "r" not in raw2:
                    flags = flags | (1 << 0)
                encoded.append((op_code, flags, reg1, val2))
        return encoded
| m-wichmann/pyArch | asm/src/parser.py | Python | lgpl-3.0 | 2,559 |
"""
WSGI config for bookmarks project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before building the handler.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bookmarks.settings')
# Module-level WSGI callable that application servers import and invoke.
application = get_wsgi_application()
| ch1huizong/dj | bookmarks/bookmarks/wsgi.py | Python | unlicense | 395 |
#-*- coding: utf8 -*-
from __future__ import division
import numpy as n, pylab as p, networkx as x, random as r, collections as c, string
__doc__="""Este arquivo possui a classe Sistem, base para todas as animações
G=x.read_gml("1-400cpp.gml") # digrafo com peso
S=Sistem(G)
S.draw("grafo1.png")
S.add_msgs([msg1,msg2...])
S.rm_msgs([msg1,msg2...])
S.order()
S.draw("grafo1.png")
# nadds[i] tem as msgs adicionadas para o frame i+1.
nadds=[[msgs..,..,],[msgs..],[]]
# nrms[i] tem as msgs removidas para o frame i+1.
nrms=[[msgs..,..,],[msgs..],[]]
i=0
for a,r in zip(nadds,nrms):
S.add_msgs(a)
S.rm_msgs(b)
S.order()
S.draw("grafo%i.png"%(i,))
i+=1
0) fazer classe com parametros minimos. Fazer com que plote pelo pygraphviz sem layout.
1) fazer classe com parametros minimos. Fazer com que plote pelo pygraphviz com layout pre-determinado.
"""
class Sistem:
    """Render a (mailing-list interaction) digraph as a sequence of images.

    Nodes are ranked (by degree or alphabetically), pinned onto a
    sinusoid-shaped layout and drawn through pygraphviz with node sizes and
    colors derived from weighted degree and clustering coefficient.

    NOTE(review): draw() and util() read the module-level globals ``S`` and
    ``SSi`` defined after this class -- the class only works inside this
    script's global context; confirm whether ``self`` was intended there.
    """
    def __init__(self,G="lad3100_3200.gml",positionMethod="sinusoid",rankCriteria="degree",fixedSize=40):
        """G: networkx digraph, or path of a GML file to load.
        positionMethod: "random" or "sinusoid"
        rankCriteria: "alphabetic" or "degree" """
        self.rank=None
        self.positions=None
        self.ecolors=[]
        if type(G) == type("string"):
            G=x.read_gml(G) # DiGraph
        self.g=G
        self.positionMethod=positionMethod
        self.rankCriteria=rankCriteria
        self.fixedSize=fixedSize
        N=1. # edge length of the square considered (unused local)
        # ranks nodes in an exact order
        rank=self.rankNodes() # implement
        # gets (x,y) pairs for each ranked node:
        positions=self.positionRanks()
        ruido=self.computeNoise()
    def update(self):
        # Re-rank and re-position after the graph changed.
        self.rankNodes()
        self.positionRanks()
    def addNode(self,nodeName):
        # Add a node with unit weight and refresh the layout.
        self.g.add_node(nodeName,weight=1.0)
        self.update()
    def addEdge(self,pre,pos):
        # Add a directed edge pre->pos with unit weight and refresh the layout.
        self.g.add_edge(pre,pos,weight=1.0)
        self.update()
    def rankNodes(self,force=False):
        """Order nodes in convenient ways to visualisation"""
        # order by degree, than by force, than by clustering coefficient, than alphabetically
        if self.rank==None or force:
            print("realizando ordenacao por %s" % (self.rankCriteria,))
            if self.rankCriteria=="alphabetic":
                self.rank=self.g.nodes()
                self.rank.sort()
            elif self.rankCriteria=="degree":
                # highest weighted degree first; siglas are "G<degree>" labels
                ordenacao= c.OrderedDict(sorted(self.g.degree().items(), key=lambda x: -x[1]))
                self.ordenacao=ordenacao
                self.rank=ordenacao.keys()
                self.siglas=["G%i"%(i,) for i in ordenacao.values()]
        else:
            print(u"ordenação encontrada. Consulte Sistem.rank.")
        return self.rank
    def positionRanks(self,force=False):
        """Get a position for each of the self.order[i] node
        Default is random.
        set self.positionMethod to:
           "random",
           "sinusoid" """
        if self.positions!=None and not force:
            print("posicoes encontradas, não atualizando posições")
        else:
            print("posicoes nao encontradas, realizando posicionamento para o grafo considerado")
            if self.positionMethod=="random":
                nn=self.g.number_of_nodes()
                loc=n.random.random(nn*2)
                locxy=n.reshape(loc,(nn,2))
            elif self.positionMethod=="sinusoid":
                if self.fixedSize:
                    nn=self.fixedSize
                else:
                    nn=self.g.number_of_nodes()
                xx=n.linspace(0.0,1,nn) # widen the first intervals
                parte1=nn/4
                parte2=nn-parte1
                # denser sampling on the first quarter of the period
                xx=n.hstack( (n.linspace(0.0,0.5,parte1,endpoint=False),n.linspace(0.5,1,parte2)) ) # widen the first intervals
                yy=n.sin(xx*2*n.pi) # change the number of turns
                locxy=n.vstack((xx*4,yy)).T
                # NOTE(review): locxy.shape[1] is 2 (columns), so "sobra"
                # compares rank length against 2, not against nn -- confirm
                # whether shape[0] was intended.
                sobra = len(self.rank)-locxy.shape[1]
                if sobra < 0:
                    pass
                    #locxy=locxy[:sobra]
                else:
                    # overflow nodes get a descending diagonal off the curve
                    poss=n.zeros((2,sobra))
                    #poss[0]+=3.5-n.linspace(0,2,sobra)
                    #poss[1]+=.2+n.linspace(0,2,sobra)
                    parte1=int(sobra/4)
                    parte2=sobra-parte1
                    poss[0]+=n.hstack( (3.5-n.linspace(0,1,parte1), 3.5-n.linspace(1,2,parte2)) )
                    poss[1]+=n.hstack( (.2+n.linspace(0,1,parte1),.2+n.linspace(1,2,parte2)) )
                    locxy=n.hstack( ( locxy.T, poss ) ).T
            self.positions=locxy
            #positions={}
            #i=0
            #for rank in self.rank:
            #    if i > self.g.number_of_nodes()-self.fixedSize:
            #        positions[rank]=locxy[i -self.g.number_of_nodes()+self.fixedSize ]
            #    else:
            #        positions[rank]=n.array((100,100))
            #    i+=1
            #self.positions=positions
        return self.positions
    def util(self,which="plotpos"):
        # Debug helper: scatter-plot the layout positions.
        # NOTE(review): reads the module-level SSi, not self -- works only in
        # this script's global context; confirm whether self was intended.
        if which is "plotpos":
            p.plot(SSi.positions[:,0],SSi.positions[:,1],"bo")
            p.show()
    def draw(self,nome="sistema.png",numero_serie=0):
        """Render the current graph into image file *nome* via pygraphviz.

        numero_serie is the frame index of the animation; it controls when
        node labels are shown and appears in the figure caption.
        """
        p.clf()
        A=x.to_agraph(self.g)
        A.node_attr['style']='filled'
        #A.node_attr['fillcolor']='red'
        #A.graph_attr["bgcolor"]="forestgreen"
        A.graph_attr["bgcolor"]="black"
        #A.graph_attr["page"]="31.5,17"
        #A.graph_attr["margin"]="1.7"
        A.graph_attr["pad"]=.1
        A.graph_attr["size"]="9.5,12"
        #A.edge_attr["style"]="solid"
        #A.graph_attr["size"]="55.0,55000.0"
        #A.graph_attr['arrowhead']="diamond"
        #A.graph_attr['dir']="both"
        #A.graph_attr['rankdir']="LR"
        #A.graph_attr['splines']="filled"
        #A.edge_attr.update(arrowType="vec")
        #A.graph_attr['splines']="compound"
        #A.graph_attr['overlap']="true"
        #A.graph_attr['forcelabels']=True
        #A.graph_attr["center"]=1
        #A.layout()
        TTABELACORES=2**10 # size of the color table
        cm=p.cm.Reds(range(TTABELACORES)) # color table
        #cm=p.cm.hot(range(TTABELACORES)) # color table
        self.cm=cm
        nodes=A.nodes()
        colors=[]
        loopWeights=[]
        loopnodes=[i[0] for i in self.g.selfloop_edges()]
        self.loopnodes=loopnodes
        # measures used in the visualisation of each vertex
        # for width and height
        ind=self.g.in_degree(weight='weight'); mind=max(ind.values())/3+0.1
        oud=self.g.out_degree(weight='weight'); moud=max(oud.values())/3+.1
        miod=max(mind,moud)
        #miod=self.g.number_of_edges()+1.
        s=self.g.degree()
        # for coloring
        cc=x.clustering(self.g.to_undirected()) # for coloring
        self.cc=cc
        ii=0
        for node in nodes:
            n_=A.get_node(node)
            # agraph node names are strings; recover the original node key
            if node.isdigit():
                foo=int(node)
            else:
                foo=node
            ifoo=self.rank.index(foo)
            #print ifoo, self.siglas[ifoo]
            # fill color from the clustering coefficient (0..1 -> color table)
            n_.attr['fillcolor']= '#%02x%02x%02x' % tuple([255*i for i in cm[int(cc[foo]*255)][:-1]])
            if node in loopnodes:
                # self-loop weight is subtracted from the in/out degrees below
                loopW=self.g[node][node]["weight"]
                loopWeights.append(loopW)
            else:
                loopW=0
            n_.attr['fixedsize']=True
            #n_.attr['width']= abs(20*((ind[foo]-loopW)/mind+0.5))
            #n_.attr['height']= abs(20*((oud[foo]-loopW)/moud+0.5))
            # width ~ weighted in-degree, height ~ weighted out-degree
            n_.attr['width']= abs(.07*((ind[foo]-loopW)/miod+0.5))
            n_.attr['height']= abs(.07*((oud[foo]-loopW)/miod+0.5))
            #n_.attr['width']= 10*max((ind[int(node)]-loopW)/mind,0.5)
            #n_.attr['height']= 10*max((oud[int(node)]-loopW)/moud,0.5)
            #print("largura, altura: ", n_.attr['width'],n_.attr['height'])
            I=self.rank.index(foo)
            #pos="%f,%f"%tuple(self.positions[ifoo]*300+100); ii+=1
            # pin the node at its ranked layout position
            pos="%f,%f"%tuple(self.positions[ifoo]); ii+=1
            #n_.attr['label']="Q%i"%(ii+1,)
            n_.attr["pos"]=pos
            n_.attr["pin"]=True
            #n_.attr["fontsize"]=1700
            n_.attr["fontsize"]=15
            n_.attr["fontcolor"]="white"
            #n_.attr['label']="%s"%(self.siglas[ifoo],)
            if numero_serie%100<20: # show labels on 20 of every 100 frames
                n_.attr['label']="%s"%(self.siglas[ifoo],)
            else:
                n_.attr['label']=""
            #print(pos)
            colors.append('#%02x%02x%02x' % tuple([255*i for i in cm[int(cc[foo]*255)][:-1]]))
        # (stray no-op string literal below kept as-is)
        """
        """
        edges=A.edges()
        # NOTE(review): reads the module-level S, not self -- the edge-weight
        # scale comes from the first Sistem instance created in this script;
        # confirm whether self.g was intended.
        pesos=[s[2]["weight"] for s in S.g.edges(data=True)]
        self.pesos=pesos
        self.pesos_=[]
        pesosMax=max(pesos)
        self.pesosMax=pesosMax
        for e in edges:
            factor=float(e.attr['weight'])
            self.pesos_.append(factor)
            #e.attr['penwidth']=195*factor
            e.attr['penwidth']=.2*factor
            #e.attr['arrowhead']="diamond"
            #e.attr["arrowsize"]=20
            e.attr["arrowsize"]=.5
            #e.attr['dir']="both"
            #e.attr['dir']="forward"
            #e.attr['rankdir']="LR"
            #e.attr['splines']="curved"
            #e.attr['splines']="compound"
            #e.attr["fontsize"]=4000
            #e.attr['headlabel']="A"
            #e.attr['headlabel']=r"."
            #e.attr["fontcolor"]="red"
            #e.attr["arrowhead"]="both"
            #e.attr["arrowhead"]="vee"
            #e.attr["arrowhead"]="tee"
            #e.attr["arrowhead"]="curve"
            e.attr["arrowhead"]="lteeoldiamond"
            #e.attr["style"]="solid"
            #e.attr["arrowhead"]="diamond"
            #e.attr["arrowtail"]="dot"
            #e.attr["alpha"]=0.2
            w=factor/pesosMax # factor in [0-1]
            #cor=p.cm.hot(int(w*255))
            cor=p.cm.Reds(int(w*255))
            cor=p.cm.Spectral(int(w*255))
            self.cor=cor
            # convert the 0..1 RGB triple to a "#rrggbb" hex string by hand
            cor256=255*n.array(cor[:-1])
            r0=int(cor256[0]/16)
            r1=int(cor256[0]-r0*16)
            r=hex(r0)[-1]+hex(r1)[-1]
            g0=int(cor256[1]/16)
            g1=int(cor256[1]-g0*16)
            g=hex(g0)[-1]+hex(g1)[-1]
            b0=int(cor256[2]/16)
            b1=int(cor256[2]-b0*16)
            b=hex(b0)[-1]+hex(b1)[-1]
            #corRGB="#"+r+g+b+":#"+r+g+b
            corRGB="#"+r+g+b
            e.attr["color"]=corRGB
            self.ecolors.append(e.attr["color"])
            #e.attr["color"]="white"
            #e.attr["color"]="#0000ff:#ff0000"
        #A.layout(prog="fdp") # fdp or neato
        label="imagem: %i, |g|= %i, |e|= %i"%(numero_serie,A.number_of_nodes(),A.number_of_edges())
        A.graph_attr["label"]=label
        #A.graph_attr["fontsize"]="1400"
        #### add marker nodes at the positions of the layout (rank ruler)
        rank=1
        for pos in self.positions:
            A.add_node(rank)
            n_=A.get_node(rank)
            n_.attr['fixedsize']=True
            n_.attr['width']= .05
            n_.attr['height']= .05
            n_.attr["pos"]="%f,%f"%tuple(pos);
            if rank < 41:
                # first 40 markers sit on a baseline below the curve
                n_.attr["pos"]="%f,%f"%(pos[0], -1.2)
            else:
                n_.attr["pos"]="%f,%f"%(pos[0]+.2, pos[1]+.2)
            n_.attr["pin"]=True
            n_.attr["fontsize"]=8.700
            n_.attr["fontcolor"]="white"
            if rank < 41:
                if rank%5==0:
                    n_.attr['label']=str(rank)
                else:
                    n_.attr['label']=""
            else:
                n_.attr['width']= .03
                n_.attr['height']= .02
                if rank%20==0:
                    n_.attr['label']=str(rank)
                    n_.attr['fontsize']=8
                else:
                    n_.attr['label']=""
            rank+=1
        # add an x-axis widener:
        #amin=self.positions[:,0].min()
        #amax=self.positions[:,0].max()
        #ambito=amax-amin
        #A.add_node(1000000)
        #n_=A.get_node(1000000)
        #n_.attr['fixedsize']=True
        #n_.attr['width']= .03
        #n_.attr['height']= .03
        #n_.attr["pos"]="%f,%f"%(amin-.2,-1.1)
        #print n_.attr["pos"]
        #n_.attr['label']=""
        #A.add_node(1000001)
        #n_=A.get_node(1000001)
        #n_.attr['fixedsize']=True
        #n_.attr['width']=.03
        #n_.attr['height']=.03
        #n_.attr["pos"]="%f,%f"%(amax+ambito*.5,-1.1)
        #n_.attr['label']=""
        #A.layout(prog="neato") # fdp or neato
        #A.draw('%s' % (nome,), prog="fdp") # twopi or circo
        #A.graph_attr["size"]="15.0,55.0"
        #A.graph_attr["longLabel"]=True
        #A.graph_attr["color"]="gray90"
        A.graph_attr["fontcolor"]="white"
        #A.draw('%s' % (nome,)) # twopi or circo
        A.draw('%s' % (nome,), prog="neato") # twopi or circo
        print('scrita figura: %s' % (nome,)) # printing the file name
        ################
        # removal of all auxiliary vertices
        self.A=A
        #A.draw('%s' % (nome,),prog="fdp") # twopi or circo
    def computeNoise(self):
        """Count empty messages, empty addressess, swapped messages in time.."""
        # Not implemented yet -- returns None.
        pass
#
# Smoke test: build the default system (loads the default GML from disk)
# and render one figure. NOTE(review): this module-level S is also read
# inside Sistem.draw() for edge-weight scaling -- it must exist before any
# other instance calls draw().
S=Sistem()
S.draw()
print("escrita figura teste")
#####################
## Roda no ipython
# : run sistema.py
# : S.draw() # cria figura sistema.png
# : run fazRedeInteracao.py # cria g, mm, aa, ids, etc
# : SS=Sistem(g)
# : SS.draw("sistema2.png") # salva no sistema2.png
# : g_=x.DiGraph()
# : SS_=Sistem(g_)
# : SS_.addMsgs([msg1,msg2...])
# : SS_.draw("sistema_.png") # salva no sistema2.png
# : SS_.addMsgs([msg1,msg2...])
# : SS_.rmMsgs([msg1,msg2...])
# : SS_.draw("sistema_2.png") # salva no sistema2.png
#######################################3
######################################3
from dateutil import parser
import mailbox, pytz
utc=pytz.UTC
figs=1
#figs=False
if figs:
    import pylab as p
#caminho="/home/rfabbri/repos/FIM/python/cppStdLib/"
#caminho="/home/rfabbri/repos/FIM/python/lau/"
caminho="/home/rfabbri/repos/FIM/python/lad/"
#caminho="/home/rfabbri/repos/FIM/python/metarec/"
# --- Parse the mbox files (one message per file) into lookup structures ---
mm={} # dict with the needed info of each msg, keyed by message-id
ids=[] # order in which the message-ids appeared
vz=[] # empty msgs, for verification
aa={} # dict keyed by author; values are that author's msgs
ids_r={} # dict keyed by the id of the msg being replied to
for i in xrange(1,5001): # only the first msgs (original note said 500; range is 5000)
    mbox = mailbox.mbox(caminho+str(i))
    if mbox.keys(): # if msg is not empty
        m=mbox[0]
        # normalize the author to the bare e-mail address
        au=m['from']
        au=au.replace('"','')
        au=au.split("<")[-1][:-1]
        if " " in au:
            au=au.split(" ")[0]
        if au not in aa:
            aa[au]=[]
        # clean up exotic timezone spellings so dateutil can parse the date
        date=m['date']
        date=date.replace("METDST","MEST")
        date=date.replace("MET DST","MEST")
        #date=date.replace(" CST"," (CST)")
        date=date.replace("(GMT Standard Time)","")
        date=date.replace(" CDT"," (CDT)")
        date=date.replace(" GMT","")
        date=date.replace("(WET DST)","")
        date=date.replace("-0600 CST","-0600")
        #print date
        if "GMT-" in date:
            index=date[::-1].index("-")
            date=date[:-index-1]+")"
        if 'added' in date: date = date.split(" (")[0]
        # the last entry of the References header is the direct parent msg
        if m['references']:
            id_ant=m['references'].split('\t')[-1]
            id_ant=id_ant.split(' ')[-1]
        else:
            id_ant=None
        if id_ant not in ids_r.keys():
            ids_r[id_ant]=[]
        date=parser.parse(date)
        try: # attach a tz locator where missing, so dates are comparable
            date=utc.localize(date)
        except:
            pass
        ids_r[id_ant].append( (au,m["message-id"],date) )
        mm[m["message-id"]]=(au,id_ant,date)
        aa[au].append( (m["message-id"], id_ant, date) )
        ids.append(m['message-id'])
    else:
        vz.append(i)
print("criados aa, mm, vz, ids")
# --- Build the full author-interaction digraph g from all messages ---
ends=aa.keys()
g=x.DiGraph()
resposta_perdida=[] # ids of msgs whose replied-to msg is missing
respondido_antes=[]
imgi=0
for i in ids:
    m=mm[i]
    if m[0] in g.nodes():
        if "weight" in g.node[m[0]].keys():
            g.node[m[0]]["weight"]+=1
        else:
            g.add_node(m[0],weight=1.)
            respondido_antes.append(i)
    else:
        g.add_node(m[0],weight=1.)
    if m[1]:
        if m[1] in mm.keys():
            m0=mm[m[1]]
            if g.has_edge(m0[0],m[0]):
                g[m0[0]][m[0]]["weight"]+=1
            else:
                g.add_edge(m0[0], m[0], weight=1.)
        else:
            resposta_perdida.append(i)
print("criado digrafo: g com todas as mensagens")
print("obtendo lista de vertices e suas siglas")
d=g.degree()
# Vertices ordered from the highest degree to the lowest
sequencia=sorted(d, key=d.get, reverse=True)
siglas=["%s" % (d[s],) for s in sequencia]
Si=Sistem(g)
Si.rank=sequencia
Si.siglas=siglas
Si.positionRanks(True)
#Si.positionRanks()
Si.draw("este.png")
G=x.copy.deepcopy(g)
##############################################
# --- Sliding-window animation: one frame per message, window of JANELA ---
print("iniciando animacao")
gg=x.DiGraph()
SSi=Sistem(gg)
SSi.rank=sequencia # keep the ordering and labels of the full graph
SSi.siglas=siglas
SSi.positionRanks(True)
JANELA=100
resposta_perdida=[] # ids of msgs whose replied-to msg is missing
respondido_antes=[]
imgi=0
j=0
m_passadas=[]
for i in ids:
    m=mm[i] ; m_passadas.append(m)
    # add the incoming message to the windowed graph gg
    if m[0] in gg.nodes():
        if "weight" in gg.node[m[0]].keys():
            gg.node[m[0]]["weight"]+=1
        else:
            gg.add_node(m[0],weight=1.)
            respondido_antes.append(i)
    else:
        gg.add_node(m[0],weight=1.)
    if m[1]:
        if m[1] in mm.keys():
            m0=mm[m[1]]
            if gg.has_edge(m0[0],m[0]):
                gg[m0[0]][m[0]]["weight"]+=1
            else:
                gg.add_edge(m0[0], m[0], weight=1.)
        else:
            resposta_perdida.append(i)
    if j>=JANELA:
        # delete old msgs falling out of the window
        m=m_passadas.pop(0)
        if m[0] in gg.nodes():
            if "weight" in gg.node[m[0]].keys():
                if gg.node[m[0]]["weight"]>1:
                    gg.node[m[0]]["weight"]-=1.
                else:
                    if gg.degree()[m[0]]>0:
                        print("deixando vertice permanecer devido aa aresta")
                        gg.node[m[0]]["weight"]-=1.
                    else:
                        print("removendo vertice")
                        gg.remove_node(m[0])
            else:
                print("vertice sem peso, iniciando com peso 0. Msg removida: %i Vertice reinicializado: %s"%(j-JANELA,m[0]))
                gg.node[m[0]]["weight"]=0.
        else:
            print(u"vértice não existente quando procurado para diminuicao de peso: %s"%(m[0],))
        if m[1]: # if it is a reply to someone
            if m[1] in mm.keys():
                m0=mm[m[1]] # original message
                if gg[m0[0]][m[0]]["weight"]>1:
                    gg[m0[0]][m[0]]["weight"]-=1
                else:
                    gg.remove_edge(m0[0], m[0])
                    if gg.degree(m0[0])==0:
                        gg.remove_node(m0[0])
            else:
                resposta_perdida.append(i)
        print("andando com a janela")
    else:
        print("formando janela")
    j+=1
    SSi.update()
    SSi.draw("./v1lad/%05d.png"%(imgi,),imgi); imgi+=1
print("criado digrafo: gg mensagens")
| ttm/gmaneToolkit | visualization/sistemaMinimo_5000.py | Python | unlicense | 19,581 |
# rdiff.py: diff against remote repositories
#
# Copyright 2007-10 Brendan Cully <brendan@kublai.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.
'''diff against remote repositories
When this extension is loaded, if the first argument to the diff command
is a remote repository URL, the diff will be performed against that
URL. If revision arguments are given, the first revision is the revision
in the source repository, and the second revision is looked up in the
destination.
'''
from mercurial.i18n import _
from mercurial import changegroup, cmdutil, scmutil, commands, hg, patch
from mercurial import util
from mercurial import localrepo
import os
#!#*@#$()* continual unfriendly API changes!
def findincomingfn(repo):
    """Return a callable computing the changesets missing from *repo*.

    Compatibility shim: Mercurial >= 1.6 exposes
    ``discovery.findcommonincoming`` (whose return shape changed over
    versions), while older versions had ``repo.findincoming``.
    """
    try:
        from mercurial import discovery
        # Probe that the modern API exists.
        # NOTE(review): only ImportError is caught below; an AttributeError
        # raised by this probe would propagate -- confirm intended.
        discovery.findcommonincoming
        def fi(*args, **opts):
            ret = discovery.findcommonincoming(repo, *args, **opts)
            #print ret
            # ret[1] is either the heads list or a bool ("anything incoming?"),
            # depending on the Mercurial version.
            if type(ret[1]) != bool:
                return ret[1]
            if not ret[1]:
                return []
            return ret[2]
        return fi
    except ImportError:
        # pre-1.6 API
        return repo.findincoming
def getpeer(ui, opts, source):
    """Open a peer repository for *source*, across Mercurial API versions.

    Uses ``hg.peer`` when available (>= 2.3), downgrading a local peer to
    the legacy peer so the old changegroup methods used below still exist;
    falls back to ``hg.repository`` on older versions.
    """
    try:
        peer = hg.peer(ui, opts, source)
        # ewwww, life without an API is messy
        if isinstance(peer, localrepo.localpeer):
            peer = localrepo.locallegacypeer(peer._repo)
        return peer
    except AttributeError:
        # hg.peer does not exist on old versions
        return hg.repository(ui, source)
def capable(repo, name):
    """Return whether peer *repo* advertises the capability *name*.

    Modern peers expose a ``capable()`` method; pre-2.3 peers only carry a
    ``capabilities`` collection of capability names.
    """
    probe = getattr(repo, 'capable', None)
    if probe is None:
        # pre-2.3 fallback
        return name in repo.capabilities
    return probe(name)
def rdiff(ui, repo, url, lrev=None, rrev=None, *pats, **opts):
    """Diff local revision *lrev* against revision *rrev* of the remote
    repository at *url*, writing the patch chunks to *ui*.

    The incoming changesets are pulled into a temporary bundle so the
    remote revision can be resolved locally; the bundle file is removed in
    the ``finally`` block. Raises util.Abort when the remote peer lacks a
    required capability.
    """
    def rui():
        # remoteui moved from cmdutil to hg in 1.6
        try:
            return hg.remoteui(repo, opts)
        except AttributeError:
            # pre 1.6
            return cmdutil.remoteui(repo, opts)
    try:
        other = getpeer(rui(), {}, url)
    except AttributeError:
        # pre-1.3
        other = hg.repository(ui, url)
        cmdutil.setremoteconfig(ui, opts)
    ui.status(_('comparing with %s\n') % url)
    if rrev:
        if capable(other, 'lookup'):
            rrev = other.lookup(rrev)
        else:
            error = _("Other repository doesn't support revision lookup, so a rev cannot be specified.")
            raise util.Abort(error)
    incoming = findincomingfn(repo)(other, heads=rrev and [rrev] or [])
    if not incoming:
        # remote is a subset of local
        if not rrev:
            if capable(other, 'lookup'):
                rrev = other.lookup('tip')
            else:
                raise util.Abort(_('cannot determine remote tip'))
        other = repo
    bundle = None
    try:
        if incoming:
            # create a bundle (uncompressed if other repo is not local)
            if not rrev:
                cg = other.changegroup(incoming, "incoming")
            else:
                if not capable(other, 'changegroupsubset'):
                    raise util.Abort(_("Partial incoming cannot be done because other repository doesn't support changegroupsubset."))
                cg = other.changegroupsubset(incoming, rrev and [rrev] or [],
                                             'incoming')
            bundle = changegroup.writebundle(cg, '', 'HG10UN')
            other = hg.repository(ui, bundle)
        if lrev:
            lrev = repo.changectx(lrev).node()
        rrev = other.changectx(rrev or 'tip').node()
        if opts['reverse']:
            # swap the two sides of the diff
            lrev, rrev = rrev, lrev
        if not lrev:
            # bundle dirstate removed prior to hg 1.1
            lrev = repo.dirstate.parents()[0]
        try:
            try:
                # scmutil.match expects a context not a repo
                m = scmutil.match(repo[None], pats, opts)
            except (ImportError, AttributeError):
                m = cmdutil.match(repo, pats, opts)
            chunks = patch.diff(other, lrev, rrev, match=m,
                                opts=patch.diffopts(ui, opts))
            for chunk in chunks:
                ui.write(chunk)
        except AttributeError:
            # 1.0 compatibility
            fns, matchfn, anypats = cmdutil.matchpats(repo, pats, opts)
            patch.diff(other, lrev, rrev, fns, match=matchfn,
                       opts=patch.diffopts(ui, opts))
    finally:
        # release the peer and clean up the temporary bundle file
        if hasattr(other, 'close'):
            other.close()
        if bundle:
            os.unlink(bundle)
def diff(orig, ui, repo, *pats, **opts):
    """
    [rdiff]
    If the first argument to the diff command is a remote repository URL,
    the diff will be performed against that URL. If revision arguments are
    given, the first revision is the revision in the source repository,
    and the second revision is looked up in the destination.
    The --reverse flag causes the direction of the diff to be reversed.
    """
    # Wrapper installed around the stock "diff" command; *orig* is the
    # original command function. Dispatches to rdiff() only when the first
    # pattern argument looks like a repository URL or local repo path.
    url = None
    rrev = None
    if pats:
        path = ui.expandpath(pats[0])
        if hasattr(hg, 'parseurl'):
            args = hg.parseurl(ui.expandpath(pats[0]), [])
            # parseurl changed from returning two args to three
            path, rrev = args[0], args[-1]
            # 1.6 (3d6915f5a2bb): parseurl returns (url, (branch, branches))
            if type(rrev) == tuple:
                rrev = rrev[0]
        if '://' in path or os.path.isdir(os.path.join(path, '.hg')):
            url = path
            pats = pats[1:]
    if url:
        lrev = None
        # at most two -r revisions; only one when the URL already carried one
        if len(opts['rev']) > 2 or rrev and len(opts['rev']) > 1:
            raise util.Abort(_('too many revisions'))
        if opts['rev']:
            lrev = opts['rev'][0]
            if len(opts['rev']) > 1:
                rrev = opts['rev'][1]
        return rdiff(ui, repo, url, lrev, rrev, *pats, **opts)
    else:
        # no remote URL: defer to the original diff command
        return orig(ui, repo, *pats, **opts)
def wrapcommand(table, command, wrapper):
    """Monkey-patch *command* in the command *table* so that *wrapper* runs
    around the original function.

    *wrapper* is called as ``wrapper(origfn, *args, **kwargs)``. Returns the
    new entry (as a list) after installing it in the table.
    """
    aliases, entry = cmdutil.findcmd(command, table)
    # Find the table key whose entry is the one findcmd resolved.
    for name, candidate in table.iteritems():
        if candidate is entry:
            key = name
            break
    origfn = entry[0]

    def wrapped(*args, **kwargs):
        return wrapper(origfn, *args, **kwargs)

    # Preserve help text and module of the wrapped command.
    wrapped.__doc__ = getattr(origfn, '__doc__')
    wrapped.__module__ = getattr(origfn, '__module__')
    newentry = list(entry)
    newentry[0] = wrapped
    table[key] = tuple(newentry)
    return newentry
def uisetup(ui):
    """Extension entry point: wrap the stock ``diff`` command.

    Appends this extension's help text to the command's docstring and adds
    the ``--reverse`` flag plus the standard remote options.
    """
    rdiffopts = [('', 'reverse', None, _('reverse patch direction'))] + \
        commands.remoteopts
    odoc = diff.__doc__
    entry = wrapcommand(commands.table, 'diff', diff)
    entry[0].__doc__ += odoc
    entry[1].extend(rdiffopts)
| atweiden/dotfiles | .hgext/hg_rdiff.py | Python | unlicense | 6,711 |
#!/usr/bin/env python
import sys
import os
import json
class ClustJson:
    """Read-only view over a domain-clustering JSON report.

    The report associates each resolved-host IP of an investigated domain
    with the domains clustered around it, per-IP metadata (country, ASN,
    BGP prefix) and an overall combined cluster.
    """

    def __init__(self, jsonFile):
        # Load the report and index the per-IP cluster data.
        handle = open(jsonFile)
        self.clusts = {}    # ip -> list of clustered domain dicts
        self.data = json.load(handle)
        self.ipcounts = {}  # ip -> (total_dom_count, bl_count)
        for entry in self.data['ip_clust']:
            ip = entry['ip']
            self.ipcounts[ip] = (
                int(entry['total_dom_count']), int(entry['bl_count']))
            self.clusts[ip] = entry['cluster_doms']
        handle.close()

    @property
    def inspectDomName(self):
        """Name of the domain under investigation."""
        return self.data['dom_name']

    @property
    def inspectDomRHIP(self):
        """Resolved-host IPs of the investigated domain."""
        return self.data['rhip']

    def ipCC(self, ip):
        """Country code of *ip*, or 'ZZ' when unknown."""
        return self.data['ip_info'][ip]['cc'] if ip in self.data['ip_info'] else 'ZZ'

    def ipASN(self, ip):
        """ASN of *ip*, or 'NA' when unknown."""
        return self.data['ip_info'][ip]['asn'] if ip in self.data['ip_info'] else 'NA'

    def ipBGP(self, ip):
        """BGP prefix of *ip*, or 'NA' when unknown."""
        return self.data['ip_info'][ip]['bgp'] if ip in self.data['ip_info'] else 'NA'

    def ipDomCount(self, ip):
        """Total number of domains observed on *ip* (0 when unknown)."""
        return self.ipcounts.get(ip, (0, 0))[0]

    def ipBLDomCount(self, ip):
        """Number of blacklisted domains observed on *ip* (0 when unknown)."""
        return self.ipcounts.get(ip, (0, 0))[1]

    def ipClusterDoms(self, ip):
        """Cluster around *ip*, or None when the IP is not in the report."""
        return self.clusts.get(ip)

    def allClustDoms(self):
        """Domains of the combined (all-IP) cluster."""
        return self.data['all_clust']['doms']

    def allClustBLCount(self):
        """Blacklist count of the combined cluster."""
        return self.data['all_clust']['bl_count']

    def allClustDomCount(self):
        """Total domain count of the combined cluster."""
        return self.data['all_clust']['total']
def main():
if len(sys.argv) != 2:
print sys.argv[0], "json"
sys.exit(2)
cj = ClustJson(sys.argv[1])
print "\nInvestigating Dom: {0} on the follwing RHIPs:", cj.inspectDomName
for ip in cj.inspectDomRHIP:
print "\t{0} (CC: {1} - ASN: {2} - BGP: {3}) -- Total Doms: {4} - BlackList: {5}".format(
ip, cj.ipCC(ip), cj.ipASN(ip), cj.ipBGP(ip), cj.ipDomCount(ip), cj.ipBLDomCount(ip))
for ip in cj.inspectDomRHIP:
print '\nIP {0} Cluster ({1} out of {2} - Blacklist: {3}):'.format(ip, len(cj.ipClusterDoms(ip)), cj.ipDomCount(ip), cj.ipBLDomCount(ip))
for dom in cj.ipClusterDoms(ip):
print "\t{0} {1} {2}".format(dom['name'], dom['dist'], "BlackList" if dom['bl'] else "")
print "\nCombined Cluster ({0} out of {1} - Blacklist: {2}):".format(len(cj.allClustDoms()), cj.allClustDomCount(), cj.allClustBLCount())
for dom in cj.allClustDoms():
print "\t{0} {1} {2}".format(dom['name'], dom['dist'], "BlackList" if dom['bl'] else "")
print ""
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == '__main__':
    sys.exit(main())
| kulsoom-abdullah/Draviz | ClustJson.py | Python | unlicense | 2,834 |
#!/usr/bin/env python
from bitmovin.utils import Serializable
class LiveDashManifest(Serializable):
def __init__(self, manifest_id: str, time_shift: float = None, live_edge_offset: float = None):
super().__init__()
self.manifestId = manifest_id
self.timeshift = time_shift
self.liveEdgeOffset = live_edge_offset
| bitmovin/bitmovin-python | bitmovin/resources/models/encodings/live/live_dash_manifest.py | Python | unlicense | 351 |
def number(n, f=None):
    """Return *n* itself, or f(n) when a pending operator *f* is supplied.

    Backbone of the "calculating with functions" kata: digit words call
    this with their value and an optional right-hand operation.
    """
    if f:
        return f(n)
    return n

def zero(f=None):
    """Digit word 0: bare value, or f(0) with a pending operator."""
    return number(0, f)

def one(f=None):
    """Digit word 1: bare value, or f(1) with a pending operator."""
    return number(1, f)

def two(f=None):
    """Digit word 2: bare value, or f(2) with a pending operator."""
    return number(2, f)

def three(f=None):
    """Digit word 3: bare value, or f(3) with a pending operator."""
    return number(3, f)

def four(f=None):
    """Digit word 4: bare value, or f(4) with a pending operator."""
    return number(4, f)

def five(f=None):
    """Digit word 5: bare value, or f(5) with a pending operator."""
    return number(5, f)

def six(f=None):
    """Digit word 6: bare value, or f(6) with a pending operator."""
    return number(6, f)

def seven(f=None):
    """Digit word 7: bare value, or f(7) with a pending operator."""
    return number(7, f)

def eight(f=None):
    """Digit word 8: bare value, or f(8) with a pending operator."""
    return number(8, f)

def nine(f=None):
    """Digit word 9: bare value, or f(9) with a pending operator."""
    return number(9, f)
def plus(arg2):
    """Return a function that adds *arg2* to its left-hand operand."""
    def op(arg1):
        return arg1 + arg2
    return op

def minus(arg2):
    """Return a function that subtracts *arg2* from its left-hand operand."""
    def op(arg1):
        return arg1 - arg2
    return op

def times(arg2):
    """Return a function that multiplies its left-hand operand by *arg2*."""
    def op(arg1):
        return arg1 * arg2
    return op
def divided_by(arg2):
return lambda x: x // arg2 # based on examples, integer division | SelvorWhim/competitive | Codewars/CalculatingWithFunctions.py | Python | unlicense | 703 |