| query (string, 1-46.9k chars) | pos (string, 75-104k chars) | neg (list, length 12) | scores (list, length 12) |
|---|---|---|---|
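Each row of the dump pairs a natural-language query with a matching code snippet (`pos`), a list of twelve non-matching snippets (`neg`), and a list of twelve relevance scores. Below is a minimal sketch of reading rows of this shape with the Hugging Face `datasets` library; the file name `train.jsonl` is a hypothetical placeholder, since the dump does not name its source.

```python
# Sketch only: "train.jsonl" is a hypothetical local file holding one
# JSON object per line with the keys "query", "pos", "neg", "scores".
from datasets import load_dataset

ds = load_dataset("json", data_files="train.jsonl", split="train")

row = ds[0]
print(row["query"])               # natural-language query (1 to ~46.9k chars)
print(row["pos"][:80])            # the matching code snippet
assert len(row["neg"]) == 12      # twelve hard negatives per row
assert len(row["scores"]) == 12   # one score per negative
```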
Function enhance
Enhance the object with new item or enhanced items
|
def enhance(self):
""" Function enhance
Enhance the object with new item or enhanced items
"""
self.update({'parameters':
SubDict(self.api, self.objName,
self.payloadObj, self.key,
SubItemParameter)})
self.update({'interfaces':
SubDict(self.api, self.objName,
self.payloadObj, self.key,
SubItemInterface)})
self.update({'subnets':
SubDict(self.api, self.objName,
self.payloadObj, self.key,
SubItemSubnet)})
|
[
"def enhance(self):\n \"\"\" Function enhance\n Enhance the object with new item or enhanced items\n \"\"\"\n self.update({'os_default_templates':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemOsDefaultTemplate)})\n self.update({'config_templates':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemConfigTemplate)})\n self.update({'ptables':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemPTable)})\n self.update({'media':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemMedia)})\n self.update({'architectures':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemArchitecture)})",
"def enhance(self):\n \"\"\" Function enhance\n Enhance the object with new item or enhanced items\n \"\"\"\n self.update({'images':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemImages)})",
"def enhance(self):\n \"\"\" Function enhance\n Enhance the object with new item or enhanced items\n \"\"\"\n self.update({'os_default_templates':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemOsDefaultTemplate)})\n self.update({'operatingsystems':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemOperatingSystem)})",
"def enhance(self):\n \"\"\" Function enhance\n Enhance the object with new item or enhanced items\n \"\"\"\n self.update({'puppetclasses':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemPuppetClasses)})\n self.update({'parameters':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemParameter)})\n self.update({'interfaces':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemInterface)})\n self.update({'smart_class_parameters':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemSmartClassParameter)})",
"def enhance(self):\n \"\"\" Function enhance\n Enhance the object with new item or enhanced items\n \"\"\"\n self.update({'puppetclasses':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemPuppetClasses)})\n self.update({'parameters':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemParameter)})\n self.update({'smart_class_parameters':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n ItemSmartClassParameter)})",
"def enhance(self):\n \"\"\" Function enhance\n Enhance the object with new item or enhanced items\n \"\"\"\n if self.objName in ['hosts', 'hostgroups',\n 'puppet_classes']:\n from foreman.itemSmartClassParameter\\\n import ItemSmartClassParameter\n self.update({'smart_class_parameters':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n ItemSmartClassParameter)})",
"public T enhance(T t) {\n if (!needsEnhancement(t)) {\n return t;\n }\n\n try {\n return getEnhancedClass().getConstructor(baseClass).newInstance(t);\n } catch (Exception e) {\n throw new RuntimeException(String.format(\"Could not enhance object %s (%s)\", t, t.getClass()), e);\n }\n }",
"private static void doEnhancement(CtClass cc, Version modelVersion) throws CannotCompileException,\n NotFoundException, ClassNotFoundException {\n CtClass inter = cp.get(OpenEngSBModel.class.getName());\n cc.addInterface(inter);\n addFields(cc);\n addGetOpenEngSBModelTail(cc);\n addSetOpenEngSBModelTail(cc);\n addRetrieveModelName(cc);\n addRetrieveModelVersion(cc, modelVersion);\n addOpenEngSBModelEntryMethod(cc);\n addRemoveOpenEngSBModelEntryMethod(cc);\n addRetrieveInternalModelId(cc);\n addRetrieveInternalModelTimestamp(cc);\n addRetrieveInternalModelVersion(cc);\n addToOpenEngSBModelValues(cc);\n addToOpenEngSBModelEntries(cc);\n cc.setModifiers(cc.getModifiers() & ~Modifier.ABSTRACT);\n }",
"def enhance(self, inverse=False, gamma=1.0, stretch=\"no\",\n stretch_parameters=None, **kwargs):\n \"\"\"Image enhancement function. It applies **in this order** inversion,\n gamma correction, and stretching to the current image, with parameters\n *inverse* (see :meth:`Image.invert`), *gamma* (see\n :meth:`Image.gamma`), and *stretch* (see :meth:`Image.stretch`).\n \"\"\"\n self.invert(inverse)\n if stretch_parameters is None:\n stretch_parameters = {}\n\n stretch_parameters.update(kwargs)\n self.stretch(stretch, **stretch_parameters)\n self.gamma(gamma)",
"def load(self, data):\n \"\"\" Function load\n Store the object data\n \"\"\"\n self.clear()\n self.update(data)\n self.enhance()",
"def enhance2dataset(dset):\n \"\"\"Apply enhancements to dataset *dset* and return the resulting data\n array of the image.\"\"\"\n attrs = dset.attrs\n img = get_enhanced_image(dset)\n # Clip image data to interval [0.0, 1.0]\n data = img.data.clip(0.0, 1.0)\n data.attrs = attrs\n\n return data",
"function enhancedEcommerceProductAction(track, action, data) {\n enhancedEcommerceTrackProduct(track);\n window.ga('ec:setAction', action, data || {});\n}"
] |
[
0.8752948045730591,
0.8696154356002808,
0.8695272207260132,
0.8679497838020325,
0.8630505800247192,
0.8407313823699951,
0.7447202205657959,
0.7079142928123474,
0.707258939743042,
0.6839107275009155,
0.6810131072998047,
0.6799886226654053
] |
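The scores above fall in roughly 0.68-0.88 and arrive sorted in descending order, which is consistent with cosine similarities between embeddings of the query and of each `neg` snippet. The dump does not say which model produced them, so the sketch below is an assumption: `all-MiniLM-L6-v2` is an arbitrary stand-in, not the actual scorer.

```python
# Sketch: producing scores of this shape with a bi-encoder.
# Assumption: the scores column holds query-vs-negative cosine
# similarities; "all-MiniLM-L6-v2" is an illustrative model choice.
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("all-MiniLM-L6-v2")

query = "Function enhance Enhance the object with new item or enhanced items"
candidates = [
    "def enhance(self):\n    ...",       # stand-ins for the neg snippets
    "public T enhance(T t) { ... }",
]

q_emb = model.encode(query, convert_to_tensor=True)
c_emb = model.encode(candidates, convert_to_tensor=True)

# One cosine score per candidate, printed highest first, mirroring
# the descending order of the scores column.
scores = util.cos_sim(q_emb, c_emb)[0].tolist()
for cand, score in sorted(zip(candidates, scores), key=lambda t: -t[1]):
    print(f"{score:.4f}  {cand[:40]!r}")
```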
Update the kong configuration
|
def kong(ctx, namespace, yes):
"""Update the kong configuration
"""
m = KongManager(ctx.obj['agile'], namespace=namespace)
click.echo(utils.niceJson(m.create_kong(yes)))
|
[
"def kong_requests\n kong_config = Octo.get_config :kong\n if kong_config[:enabled]\n url = '/consumers/'\n payload = {\n username: self.username,\n custom_id: self.enterprise_id\n }\n\n process_kong_request(url, :PUT, payload)\n create_keyauth( self.username, self.apikey)\n end\n end",
"def do_upgrade():\n \"\"\"Carry out the upgrade.\"\"\"\n op.alter_column(\n table_name='knwKBRVAL',\n column_name='id_knwKB',\n type_=db.MediumInteger(8, unsigned=True),\n existing_nullable=False,\n existing_server_default='0'\n )",
"def kitchen_config(backend, kitchen, add, get, unset, listall):\n \"\"\"\n Get and Set Kitchen variable overrides\n \"\"\"\n err_str, use_kitchen = Backend.get_kitchen_from_user(kitchen)\n if use_kitchen is None:\n raise click.ClickException(err_str)\n check_and_print(DKCloudCommandRunner.config_kitchen(backend.dki, use_kitchen, add, get, unset, listall))",
"static void updateConfiguration(DbConn cnx)\n {\n // Default queue\n Queue q = null;\n long i = cnx.runSelectSingle(\"q_select_count_all\", Integer.class);\n if (i == 0L)\n {\n Queue.create(cnx, \"DEFAULT\", \"default queue\", true);\n jqmlogger.info(\"A default queue was created in the configuration\");\n }\n else\n {\n try\n {\n jqmlogger.info(\"Default queue is named \" + cnx.runSelectSingle(\"q_select_default\", 4, String.class));\n }\n catch (NonUniqueResultException e)\n {\n // Faulty configuration, but why not\n q = Queue.select(cnx, \"q_select_all\").get(0);\n cnx.runUpdate(\"q_update_default_none\");\n cnx.runUpdate(\"q_update_default_by_id\", q.getId());\n jqmlogger.info(\"Queue \" + q.getName() + \" was modified to become the default queue as there were multiple default queues\");\n }\n catch (NoResultException e)\n {\n // Faulty configuration, but why not\n q = Queue.select(cnx, \"q_select_all\").get(0);\n cnx.runUpdate(\"q_update_default_none\");\n cnx.runUpdate(\"q_update_default_by_id\", q.getId());\n jqmlogger.info(\"Queue \" + q.getName() + \" was modified to become the default queue as there were multiple default queues\");\n }\n }\n\n // Global parameters\n initSingleParam(\"mavenRepo\", \"http://repo1.maven.org/maven2/\", cnx);\n initSingleParam(Constants.GP_DEFAULT_CONNECTION_KEY, Constants.GP_JQM_CONNECTION_ALIAS, cnx);\n initSingleParam(\"logFilePerLaunch\", \"true\", cnx);\n initSingleParam(\"internalPollingPeriodMs\", \"60000\", cnx);\n initSingleParam(\"disableWsApi\", \"false\", cnx);\n initSingleParam(\"enableWsApiSsl\", \"false\", cnx);\n initSingleParam(\"enableWsApiAuth\", \"true\", cnx);\n initSingleParam(\"enableInternalPki\", \"true\", cnx);\n\n // Roles\n RRole adminr = createRoleIfMissing(cnx, \"administrator\", \"all permissions without exception\", \"*:*\");\n createRoleIfMissing(cnx, \"config admin\", \"can read and write all configuration, except security configuration\", \"node:*\", \"queue:*\",\n \"qmapping:*\", \"jndi:*\", \"prm:*\", \"jd:*\");\n createRoleIfMissing(cnx, \"config viewer\", \"can read all configuration except for security configuration\", \"node:read\", \"queue:read\",\n \"qmapping:read\", \"jndi:read\", \"prm:read\", \"jd:read\");\n createRoleIfMissing(cnx, \"client\", \"can use the full client API except reading logs, files and altering position\", \"node:read\",\n \"queue:read\", \"job_instance:*\", \"jd:read\");\n createRoleIfMissing(cnx, \"client power user\", \"can use the full client API\", \"node:read\", \"queue:read\", \"job_instance:*\", \"jd:read\",\n \"logs:read\", \"queue_position:create\", \"files:read\");\n createRoleIfMissing(cnx, \"client read only\", \"can query job instances and get their files\", \"queue:read\", \"job_instance:read\",\n \"logs:read\", \"files:read\");\n\n // Users\n createUserIfMissing(cnx, \"root\", new SecureRandomNumberGenerator().nextBytes().toHex(), \"all powerful user\", adminr.getName());\n\n // Mail session\n i = cnx.runSelectSingle(\"jndi_select_count_for_key\", Integer.class, \"mail/default\");\n if (i == 0)\n {\n Map<String, String> prms = new HashMap<>();\n prms.put(\"smtpServerHost\", \"smtp.gmail.com\");\n\n JndiObjectResource.create(cnx, \"mail/default\", \"javax.mail.Session\", \"com.enioka.jqm.providers.MailSessionFactory\",\n \"default parameters used to send e-mails\", true, prms);\n }\n\n // Done\n cnx.commit();\n }",
"def config(config_dict: typing.Mapping) -> Config:\n \"\"\"Configures the konch shell. This function should be called in a\n .konchrc file.\n\n :param dict config_dict: Dict that may contain 'context', 'banner', and/or\n 'shell' (default shell class to use).\n \"\"\"\n logger.debug(f\"Updating with {config_dict}\")\n _cfg.update(config_dict)\n return _cfg",
"def update_conf(self):\n \"\"\"Update configuration values from database.\n\n This method should be called when there is an update notification.\n \"\"\"\n parsed = self.parse_conf()\n\n if not parsed:\n return None\n\n # Update app config\n self.app.config.update(parsed)",
"def config(self, kw=None, **kwargs):\n \"\"\"configure redirect to support additional options\"\"\"\n themebg = kwargs.pop(\"themebg\", self._themebg)\n toplevel = kwargs.pop(\"toplevel\", self._toplevel)\n theme = kwargs.pop(\"theme\", self.current_theme)\n color = self._get_bg_color()\n if themebg != self._themebg:\n if themebg is False:\n self.configure(bg=\"white\")\n else:\n self.configure(bg=color)\n self._themebg = themebg\n if toplevel != self._toplevel:\n if toplevel is True:\n self._setup_toplevel_hook(color)\n else:\n tk.Toplevel.__init__ = self.__init__toplevel\n self._toplevel = toplevel\n if theme != self.current_theme:\n self.set_theme(theme)\n return tk.Tk.config(self, kw, **kwargs)",
"def apply_config(self, config):\n \"\"\"\n Takes the given config dictionary and sets the hosts and base_path\n attributes.\n\n If the kazoo client connection is established, its hosts list is\n updated to the newly configured value.\n \"\"\"\n self.hosts = config[\"hosts\"]\n old_base_path = self.base_path\n self.base_path = config[\"path\"]\n if not self.connected.is_set():\n return\n\n logger.debug(\"Setting ZK hosts to %s\", self.hosts)\n self.client.set_hosts(\",\".join(self.hosts))\n\n if old_base_path and old_base_path != self.base_path:\n logger.critical(\n \"ZNode base path changed!\" +\n \" Lighthouse will need to be restarted\" +\n \" to watch the right znodes\"\n )",
"def update(self):\n \"\"\" updates the configuration settings \"\"\"\n with open(os.path.join(self.config_dir, CONFIG_FILE_NAME), 'w') as config_file:\n self.config.write(config_file)",
"def _update(self, kwargs):\n \"\"\"Update the config with the given dictionary\"\"\"\n from pygal.util import merge\n dir_self_set = set(dir(self))\n merge(\n self.__dict__,\n dict([(k, v) for (k, v) in kwargs.items()\n if not k.startswith('_') and k in dir_self_set])\n )",
"def update(self, conf_dict):\n '''\n Updates this configuration with a dictionary.\n\n :param conf_dict: A python dictionary to update this configuration\n with.\n '''\n\n if isinstance(conf_dict, dict):\n iterator = six.iteritems(conf_dict)\n else:\n iterator = iter(conf_dict)\n\n for k, v in iterator:\n if not IDENTIFIER.match(k):\n raise ValueError('\\'%s\\' is not a valid indentifier' % k)\n\n cur_val = self.__values__.get(k)\n\n if isinstance(cur_val, Config):\n cur_val.update(conf_dict[k])\n else:\n self[k] = conf_dict[k]",
"def update_ckan_ini(self, skin=True):\n \"\"\"\n Use config-tool to update development.ini with our environment settings\n\n :param skin: use environment template skin plugin True/False\n \"\"\"\n command = [\n '/usr/lib/ckan/bin/paster', '--plugin=ckan', 'config-tool',\n '/project/development.ini', '-e',\n 'sqlalchemy.url = postgresql://<hidden>',\n 'ckan.datastore.read_url = postgresql://<hidden>',\n 'ckan.datastore.write_url = postgresql://<hidden>',\n 'ckan.datapusher.url = http://datapusher:8800',\n 'solr_url = http://solr:8080/solr',\n 'ckan.storage_path = /var/www/storage',\n 'ckan.plugins = datastore resource_proxy text_view ' +\n ('datapusher ' if exists(self.target + '/datapusher') else '')\n + 'recline_grid_view recline_graph_view'\n + (' {0}_theme'.format(self.name) if skin else ''),\n 'ckan.site_title = ' + self.name,\n 'ckan.site_logo =',\n 'ckan.auth.create_user_via_web = false',\n ]\n self.run_command(command=command, rw_project=True)"
] |
[
0.7508227229118347,
0.6796973943710327,
0.6773249506950378,
0.6770706176757812,
0.6703119874000549,
0.6700146794319153,
0.6699696779251099,
0.6675214767456055,
0.6635774374008179,
0.6632673740386963,
0.662484884262085,
0.6613070964813232
] |
Schedules this publish action as a Celery task.
|
def schedule_task(self):
"""
Schedules this publish action as a Celery task.
"""
from .tasks import publish_task
publish_task.apply_async(kwargs={'pk': self.pk}, eta=self.scheduled_time)
|
[
"def schedule(self, when=None, action=None, **kwargs):\n \"\"\"\n Schedule this item to be published.\n\n :param when: Date/time when this item should go live. None means now.\n \"\"\"\n action = '_publish'\n super(BaseVersionedModel, self).schedule(when=when, action=action,\n **kwargs)",
"def push_actions(self, actions, scheduler_instance_id):\n \"\"\"Post the actions to execute to the satellite.\n Indeed, a scheduler post its checks to a poller and its actions to a reactionner.\n\n :param actions: Action list to send\n :type actions: list\n :param scheduler_instance_id: Scheduler instance identifier\n :type scheduler_instance_id: uuid\n :return: True on success, False on failure\n :rtype: bool\n \"\"\"\n logger.debug(\"Pushing %d actions from %s\", len(actions), scheduler_instance_id)\n return self.con.post('_push_actions', {'actions': actions,\n 'scheduler_instance_id': scheduler_instance_id},\n wait=True)",
"def schedule():\n \"\"\"HTTP endpoint for scheduling tasks\n\n If a task with the same code already exists, the one with the shorter\n interval will be made active.\n \"\"\"\n code = request.form['code']\n interval = int(request.form['interval'])\n\n task_id = binascii.b2a_hex(os.urandom(5))\n new_task = Task(id=task_id)\n new_task.active = True\n new_task.code = code\n new_task.interval = interval\n\n # TODO(derek): Assert there is only one other_task\n other_task = Task.query.filter_by(code=code, active=True).first()\n\n if other_task:\n if other_task.interval <= new_task.interval:\n new_task.active = False\n else:\n other_task.active = False\n other_task.save()\n current_app.scheduler.cancel(other_task.id)\n\n if new_task.active:\n print current_app.scheduler.schedule\n current_app.scheduler.schedule({\n 'id': task_id,\n 'code': new_task.code,\n 'interval': new_task.interval\n })\n\n new_task.save()\n\n return json.dumps({\n 'status': 'success',\n 'id': task_id,\n })",
"def schedule(self, when=None, action=None, **kwargs):\n \"\"\"\n Schedule an update of this object.\n\n when: The date for the update.\n\n action: if provided it will be looked up\n on the implementing class and called with\n **kwargs. If action is not provided each k/v pair\n in kwargs will be set on self and then self\n is saved.\n\n kwargs: any other arguments you would like passed\n for this change. Saved as a json object so must cleanly\n serialize.\n \"\"\"\n\n # when is empty or passed, just save it now.\n if not when or when <= timezone.now():\n self.do_scheduled_update(action, **kwargs)\n else:\n ctype = ContentType.objects.get_for_model(self.__class__)\n Schedule(\n content_type=ctype,\n object_args=self.get_scheduled_filter_args(),\n when=when,\n action=action,\n json_args=kwargs\n ).save()",
"def publish(self):\n \"\"\"\n Iterate over the scheduler collections and apply any actions found\n \"\"\"\n\n try:\n for collection in self.settings.get(\"scheduler\").get(\"collections\"):\n yield self.publish_for_collection(collection)\n except Exception as ex:\n self.logger.error(ex)",
"def schedule_saved(sender, instance, **kwargs):\n \"\"\"\n Fires off the celery task to ensure that this schedule is in the scheduler\n\n Arguments:\n sender {class} -- The model class, always Schedule\n instance {Schedule} --\n The instance of the Schedule that we want to sync\n \"\"\"\n from contentstore.tasks import sync_schedule\n\n sync_schedule.delay(str(instance.id))",
"def scheduler(broker=None):\n \"\"\"\n Creates a task from a schedule at the scheduled time and schedules next run\n \"\"\"\n if not broker:\n broker = get_broker()\n db.close_old_connections()\n try:\n for s in Schedule.objects.exclude(repeats=0).filter(next_run__lt=timezone.now()):\n args = ()\n kwargs = {}\n # get args, kwargs and hook\n if s.kwargs:\n try:\n # eval should be safe here because dict()\n kwargs = eval('dict({})'.format(s.kwargs))\n except SyntaxError:\n kwargs = {}\n if s.args:\n args = ast.literal_eval(s.args)\n # single value won't eval to tuple, so:\n if type(args) != tuple:\n args = (args,)\n q_options = kwargs.get('q_options', {})\n if s.hook:\n q_options['hook'] = s.hook\n # set up the next run time\n if not s.schedule_type == s.ONCE:\n next_run = arrow.get(s.next_run)\n while True:\n if s.schedule_type == s.MINUTES:\n next_run = next_run.replace(minutes=+(s.minutes or 1))\n elif s.schedule_type == s.HOURLY:\n next_run = next_run.replace(hours=+1)\n elif s.schedule_type == s.DAILY:\n next_run = next_run.replace(days=+1)\n elif s.schedule_type == s.WEEKLY:\n next_run = next_run.replace(weeks=+1)\n elif s.schedule_type == s.MONTHLY:\n next_run = next_run.replace(months=+1)\n elif s.schedule_type == s.QUARTERLY:\n next_run = next_run.replace(months=+3)\n elif s.schedule_type == s.YEARLY:\n next_run = next_run.replace(years=+1)\n if Conf.CATCH_UP or next_run > arrow.utcnow():\n break\n s.next_run = next_run.datetime\n s.repeats += -1\n # send it to the cluster\n q_options['broker'] = broker\n q_options['group'] = q_options.get('group', s.name or s.id)\n kwargs['q_options'] = q_options\n s.task = django_q.tasks.async_task(s.func, *args, **kwargs)\n # log it\n if not s.task:\n logger.error(\n _('{} failed to create a task from schedule [{}]').format(current_process().name,\n s.name or s.id))\n else:\n logger.info(\n _('{} created a task from schedule [{}]').format(current_process().name, s.name or s.id))\n # default behavior is to delete a ONCE schedule\n if s.schedule_type == s.ONCE:\n if s.repeats < 0:\n s.delete()\n continue\n # but not if it has a positive repeats\n s.repeats = 0\n # save the schedule\n s.save()\n except Exception as e:\n logger.error(e)",
"async def run(self, kwargs=None):\n \"\"\"\n Wraps the action in a :class:`asyncio.Task` and schedules its\n execution.\n\n *kwargs* is an (optional) dictionnary of additional arguments to pass\n to the Action function.\n \"\"\"\n task = self._prepare(kwargs)\n\n try:\n await task\n except Exception as e:\n # FIXME: write a better Exception handler.\n raise e",
"public function scheduleNowAction()\n {\n $codes = $this->getMassActionCodes();\n foreach ($codes as $key) {\n Mage::getModel('cron/schedule')\n ->setJobCode($key)\n ->setScheduledReason(Aoe_Scheduler_Model_Schedule::REASON_SCHEDULENOW_WEB)\n ->schedule()\n ->save();\n\n $this->_getSession()->addSuccess($this->__('Job \"%s\" has been scheduled.', $key));\n }\n $this->_redirect('*/*/index');\n }",
"def publish(\n self,\n action='get',\n resource=None,\n camera_id=None,\n mode=None,\n publish_response=None,\n properties=None):\n \"\"\"Run action.\n\n :param method: Specify the method GET, POST or PUT. Default is GET.\n :param resource: Specify one of the resources to fetch from arlo.\n :param camera_id: Specify the camera ID involved with this action\n :param mode: Specify the mode to set, else None for GET operations\n :param publish_response: Set to True for SETs. Default False\n \"\"\"\n url = NOTIFY_ENDPOINT.format(self.device_id)\n\n body = ACTION_BODY.copy()\n\n if properties is None:\n properties = {}\n\n if resource:\n body['resource'] = resource\n\n if action == 'get':\n body['properties'] = None\n else:\n # consider moving this logic up a layer\n if resource == 'schedule':\n properties.update({'active': True})\n elif resource == 'subscribe':\n body['resource'] = \"subscriptions/\" + \\\n \"{0}_web\".format(self.user_id)\n dev = []\n dev.append(self.device_id)\n properties.update({'devices': dev})\n elif resource == 'modes':\n available_modes = self.available_modes_with_ids\n properties.update({'active': available_modes.get(mode)})\n elif resource == 'privacy':\n properties.update({'privacyActive': not mode})\n body['resource'] = \"cameras/{0}\".format(camera_id)\n\n body['action'] = action\n body['properties'] = properties\n body['publishResponse'] = publish_response\n\n body['from'] = \"{0}_web\".format(self.user_id)\n body['to'] = self.device_id\n body['transId'] = \"web!{0}\".format(self.xcloud_id)\n\n _LOGGER.debug(\"Action body: %s\", body)\n\n ret = \\\n self._session.query(url, method='POST', extra_params=body,\n extra_headers={\"xCloudId\": self.xcloud_id})\n\n if ret and ret.get('success'):\n return 'success'\n\n return None",
"def schedule(code, interval, secret_key=None, url=None):\n \"\"\"Schedule a string of `code` to be executed every `interval`\n\n Specificying an `interval` of 0 indicates the event should only be run\n one time and will not be rescheduled.\n \"\"\"\n if not secret_key:\n secret_key = default_key()\n if not url:\n url = default_url()\n\n url = '%s/schedule' % url\n values = {\n 'interval': interval,\n 'code': code,\n }\n return _send_with_auth(values, secret_key, url)",
"def run(self, **kwargs):\n \"\"\"\n Runs an instance of a scheduled task\n \"\"\"\n log = self.get_logger(**kwargs)\n failures = ScheduleFailure.objects\n log.info(\"Attempting to requeue <%s> failed schedules\" % failures.count())\n for failure in failures.iterator():\n schedule = Schedule.objects.values(\n \"id\", \"auth_token\", \"endpoint\", \"payload\"\n )\n schedule = schedule.get(id=failure.schedule_id)\n schedule[\"schedule_id\"] = str(schedule.pop(\"id\"))\n # Cleanup the failure before requeueing it.\n failure.delete()\n DeliverTask.apply_async(kwargs=schedule)"
] |
[
0.78624427318573,
0.7138023376464844,
0.7120329737663269,
0.7091947197914124,
0.7091774940490723,
0.7052851915359497,
0.7049896121025085,
0.6995416283607483,
0.6987134218215942,
0.6980477571487427,
0.6970206499099731,
0.6968892216682434
] |
Get the version object for the related object.
|
def get_version(self):
"""
Get the version object for the related object.
"""
return Version.objects.get(
content_type=self.content_type,
object_id=self.object_id,
version_number=self.publish_version,
)
|
[
"def previous_version(self, object, relations_as_of='end'):\n \"\"\"\n Return the previous version of the given object.\n\n In case there is no previous object existing, meaning the given object\n is the first version of the object, then the function returns this\n version.\n\n ``relations_as_of`` is used to fix the point in time for the version;\n this affects which related objects are returned when querying for\n object relations. See ``VersionManager.version_as_of`` for details on\n valid ``relations_as_of`` values.\n\n :param Versionable object: object whose previous version will be\n returned.\n :param mixed relations_as_of: determines point in time used to access\n relations. 'start'|'end'|datetime|None\n :return: Versionable\n \"\"\"\n if object.version_birth_date == object.version_start_date:\n previous = object\n else:\n previous = self.filter(\n Q(identity=object.identity),\n Q(version_end_date__lte=object.version_start_date)\n ).order_by('-version_end_date').first()\n\n if not previous:\n raise ObjectDoesNotExist(\n \"previous_version couldn't find a previous version of \"\n \"object \" + str(object.identity))\n\n return self.adjust_version_as_of(previous, relations_as_of)",
"def get_version(self, version_id=None):\n \"\"\"Return specific version ``ObjectVersion`` instance or HEAD.\n\n :param version_id: Version ID of the object.\n :returns: :class:`~invenio_files_rest.models.ObjectVersion` instance or\n HEAD of the stored object.\n \"\"\"\n return ObjectVersion.get(bucket=self.obj.bucket, key=self.obj.key,\n version_id=version_id)",
"def retrieve_version(self, obj, version):\n \"\"\"Retrieve the version of the object\n \"\"\"\n current_version = getattr(obj, VERSION_ID, None)\n\n if current_version is None:\n # No initial version\n return obj\n\n if str(current_version) == str(version):\n # Same version\n return obj\n\n # Retrieve the object from the repository\n pr = api.get_tool(\"portal_repository\")\n # bypass permission check to AccessPreviousVersions\n result = pr._retrieve(\n obj, selector=version, preserve=(), countPurged=True)\n return result.object",
"def next_version(self, object, relations_as_of='end'):\n \"\"\"\n Return the next version of the given object.\n\n In case there is no next object existing, meaning the given\n object is the current version, the function returns this version.\n\n Note that if object's version_end_date is None, this does not check\n the database to see if there is a newer version (perhaps created by\n some other code), it simply returns the passed object.\n\n ``relations_as_of`` is used to fix the point in time for the version;\n this affects which related objects are returned when querying for\n object relations. See ``VersionManager.version_as_of`` for details\n on valid ``relations_as_of`` values.\n\n :param Versionable object: object whose next version will be returned.\n :param mixed relations_as_of: determines point in time used to access\n relations. 'start'|'end'|datetime|None\n :return: Versionable\n \"\"\"\n if object.version_end_date is None:\n next = object\n else:\n next = self.filter(\n Q(identity=object.identity),\n Q(version_start_date__gte=object.version_end_date)\n ).order_by('version_start_date').first()\n\n if not next:\n raise ObjectDoesNotExist(\n \"next_version couldn't find a next version of object \" +\n str(object.identity))\n\n return self.adjust_version_as_of(next, relations_as_of)",
"def current_version(self, object, relations_as_of=None, check_db=False):\n \"\"\"\n Return the current version of the given object.\n\n The current version is the one having its version_end_date set to NULL.\n If there is not such a version then it means the object has been\n 'deleted' and so there is no current version available. In this case\n the function returns None.\n\n Note that if check_db is False and object's version_end_date is None,\n this does not check the database to see if there is a newer version\n (perhaps created by some other code), it simply returns the passed\n object.\n\n ``relations_as_of`` is used to fix the point in time for the version;\n this affects which related objects are returned when querying for\n object relations. See ``VersionManager.version_as_of`` for details on\n valid ``relations_as_of`` values.\n\n :param Versionable object: object whose current version will be\n returned.\n :param mixed relations_as_of: determines point in time used to access\n relations. 'start'|'end'|datetime|None\n :param bool check_db: Whether or not to look in the database for a\n more recent version\n :return: Versionable\n \"\"\"\n if object.version_end_date is None and not check_db:\n current = object\n else:\n current = self.current.filter(identity=object.identity).first()\n\n return self.adjust_version_as_of(current, relations_as_of)",
"def get(cls, object_version, key):\n \"\"\"Get the tag object.\"\"\"\n return cls.query.filter_by(\n version_id=as_object_version_id(object_version),\n key=key,\n ).one_or_none()",
"def related_objects(self, related, objs):\n \"\"\"\n Gets a QuerySet of current objects related to ``objs`` via the\n relation ``related``.\n \"\"\"\n from versions.models import Versionable\n\n related_model = related.related_model\n if issubclass(related_model, Versionable):\n qs = related_model.objects.current\n else:\n qs = related_model._base_manager.all()\n return qs.using(self.using).filter(\n **{\"%s__in\" % related.field.name: objs}\n )",
"def get_version(brain_or_object):\n \"\"\"Get the version of the current object\n\n :param brain_or_object: A single catalog brain or content object\n :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain\n :returns: The current version of the object, or None if not available\n :rtype: int or None\n \"\"\"\n obj = get_object(brain_or_object)\n if not is_versionable(obj):\n return None\n return getattr(aq_base(obj), \"version_id\", 0)",
"def get_object(cls, bucket, key, version_id):\n \"\"\"Retrieve object and abort if it doesn't exists.\n\n If the file is not found, the connection is aborted and the 404\n error is returned.\n\n :param bucket: The bucket (instance or id) to get the object from.\n :param key: The file key.\n :param version_id: The version ID.\n :returns: A :class:`invenio_files_rest.models.ObjectVersion` instance.\n \"\"\"\n obj = ObjectVersion.get(bucket, key, version_id=version_id)\n if not obj:\n abort(404, 'Object does not exists.')\n\n cls.check_object_permission(obj)\n\n return obj",
"def get_object(self, id, **args):\n \"\"\"Fetches the given object from the graph.\"\"\"\n return self.request(\"{0}/{1}\".format(self.version, id), args)",
"def get_version(self, version_id, expand=[]):\n \"\"\"\n Get a specific version of this layer\n \"\"\"\n target_url = self._client.get_url('VERSION', 'GET', 'single', {'layer_id': self.id, 'version_id': version_id})\n return self._manager._get(target_url, expand=expand)",
"def as_object_version(value):\n \"\"\"Get an object version object from an object version ID or an object version.\n\n :param value: A :class:`invenio_files_rest.models.ObjectVersion` or an\n object version ID.\n :returns: A :class:`invenio_files_rest.models.ObjectVersion` instance.\n \"\"\"\n return value if isinstance(value, ObjectVersion) \\\n else ObjectVersion.query.filter_by(version_id=value).one_or_none()"
] |
[
0.76417475938797,
0.7552536129951477,
0.7453399896621704,
0.7332892417907715,
0.7306791543960571,
0.7263893485069275,
0.7225692272186279,
0.7194266319274902,
0.718664824962616,
0.716795802116394,
0.7164691090583801,
0.7163046598434448
] |
Process a publish action on the related object, returns a boolean if a change is made.
Only objects where a version change is needed will be updated.
|
def _publish(self):
"""
Process a publish action on the related object, returns a boolean if a change is made.
Only objects where a version change is needed will be updated.
"""
obj = self.content_object
version = self.get_version()
actioned = False
# Only update if needed
if obj.current_version != version:
version = self.get_version()
obj.current_version = version
obj.save(update_fields=['current_version'])
actioned = True
return actioned
|
[
"def process_action(self):\n \"\"\"\n Process the action and update the related object, returns a boolean if a change is made.\n \"\"\"\n if self.publish_version == self.UNPUBLISH_CHOICE:\n actioned = self._unpublish()\n else:\n actioned = self._publish()\n\n # Only log if an action was actually taken\n if actioned:\n self._log_action()\n\n return actioned",
"def _unpublish(self):\n \"\"\"\n Process an unpublish action on the related object, returns a boolean if a change is made.\n\n Only objects with a current active version will be updated.\n \"\"\"\n obj = self.content_object\n actioned = False\n\n # Only update if needed\n if obj.current_version is not None:\n obj.current_version = None\n obj.save(update_fields=['current_version'])\n actioned = True\n\n return actioned",
"def publish(self, ar):\n \"\"\"Set status to prepublished/published/republished\n \"\"\"\n wf = api.get_tool(\"portal_workflow\")\n status = wf.getInfoFor(ar, \"review_state\")\n transitions = {\"verified\": \"publish\",\n \"published\": \"republish\"}\n transition = transitions.get(status, \"prepublish\")\n logger.info(\"AR Transition: {} -> {}\".format(status, transition))\n try:\n wf.doActionFor(ar, transition)\n return True\n except WorkflowException as e:\n logger.debug(e)\n return False",
"def process_action(self, request, queryset):\n \"\"\"\n Publishes the selected objects by passing the value of \\\n 'when' to the object's publish method. The object's \\\n `purge_archives` method is also called to limit the number \\\n of old items that we keep around. The action is logged as \\\n either 'published' or 'scheduled' depending on the value of \\\n 'when', and the user is notified with a message.\n\n Returns a 'render redirect' to the result of the \\\n `get_done_url` method.\n \"\"\"\n form = self.form(request.POST)\n if form.is_valid():\n when = form.cleaned_data.get('when')\n count = 0\n for obj in queryset:\n count += 1\n obj.publish(user=request.user, when=when)\n obj.purge_archives()\n object_url = self.get_object_url(obj)\n if obj.state == obj.PUBLISHED:\n self.log_action(\n obj, CMSLog.PUBLISH, url=object_url)\n else:\n self.log_action(\n obj, CMSLog.SCHEDULE, url=object_url)\n message = \"%s objects published.\" % count\n self.write_message(message=message)\n\n return self.render(request, redirect_url= self.get_done_url(),\n message=message,\n collect_render_data=False)\n return self.render(request, queryset=queryset, publish_form=form, action='Publish')",
"public function onPublish($objectAttribute, $object, $publishedNodes)\n {\n $currentVersion = $object->currentVersion();\n\n // We find all translations present in the current version. We calculate\n // this from the language mask already present in the fetched version,\n // so no further round-trip to the DB is required.\n $languageList = eZContentLanguage::decodeLanguageMask(\n $currentVersion->attribute('language_mask'),\n true\n );\n\n // We want to have the class attribute identifier of the attribute\n // containing the current ezrichtext, as we then can use the more efficient\n // eZContentObject->fetchAttributesByIdentifier() to get the data\n $identifier = $objectAttribute->attribute('contentclass_attribute_identifier');\n\n $attributes = $object->fetchAttributesByIdentifier(\n array($identifier),\n $currentVersion->attribute('version'),\n $languageList['language_list']\n );\n\n foreach ($attributes as $attribute) {\n $relations = $this->fieldType->getRelations($attribute->content());\n\n $linkedObjectIds = array_merge(\n $relations[Relation::LINK]['contentIds'],\n $this->getObjectIdsForNodeIds($relations[Relation::LINK]['locationIds'])\n );\n\n $embeddedObjectIds = array_merge(\n $relations[Relation::EMBED]['contentIds'],\n $this->getObjectIdsForNodeIds($relations[Relation::EMBED]['locationIds'])\n );\n\n if (!empty($linkedObjectIds)) {\n $object->appendInputRelationList(\n array_unique($linkedObjectIds),\n eZContentObject::RELATION_LINK\n );\n }\n\n if (!empty($embeddedObjectIds)) {\n $object->appendInputRelationList(\n array_unique($embeddedObjectIds),\n eZContentObject::RELATION_EMBED\n );\n }\n\n if (!empty($linkedObjectIds) || !empty($embeddedObjectIds)) {\n $object->commitInputRelations($currentVersion->attribute('version'));\n\n // Apparently, eZ kernel does not know how to work with composite relations\n // so we remove those and create the non composite ones.\n $this->fixInputRelations($object, $currentVersion->attribute('version'));\n }\n }\n\n return true;\n }",
"def _modify(self, **patch):\n '''Modify only draft or legacy policies\n\n Published policies cannot be modified\n :raises: OperationNotSupportedOnPublishedPolicy\n '''\n\n legacy = patch.pop('legacy', False)\n tmos_ver = self._meta_data['bigip']._meta_data['tmos_version']\n self._filter_version_specific_options(tmos_ver, **patch)\n if 'Drafts' not in self._meta_data['uri'] and \\\n LooseVersion(tmos_ver) >= LooseVersion('12.1.0') and \\\n not legacy:\n msg = 'Modify operation not allowed on a published policy.'\n raise OperationNotSupportedOnPublishedPolicy(msg)\n super(Policy, self)._modify(**patch)",
"def publish(self, version_id=None):\n \"\"\"\n Creates a publish task just for this version, which publishes as soon as any import is complete.\n\n :return: the publish task\n :rtype: Publish\n :raises Conflict: If the version is already published, or already has a publish job.\n \"\"\"\n if not version_id:\n version_id = self.version.id\n\n target_url = self._client.get_url('VERSION', 'POST', 'publish', {'layer_id': self.id, 'version_id': version_id})\n r = self._client.request('POST', target_url, json={})\n return self._client.get_manager(Publish).create_from_result(r.json())",
"def process_action(self, request, queryset):\n \"\"\"\n Unpublishes the selected objects by calling the object's \\\n unpublish method. The action is logged and the user is \\\n notified with a message.\n\n Returns a 'render redirect' to the result of the \\\n `get_done_url` method.\n \"\"\"\n count = 0\n for obj in queryset:\n count += 1\n obj.unpublish()\n object_url = self.get_object_url(obj)\n self.log_action(obj, CMSLog.UNPUBLISH, url=object_url)\n url = self.get_done_url()\n msg = self.write_message(message=\"%s objects unpublished.\" % count)\n return self.render(request, redirect_url=url,\n message=msg,\n collect_render_data=False)",
"function onPublish( $contentObjectAttribute, $contentObject, $publishedNodes )\n {\n eZDebug::writeDebug( 'Start', __METHOD__ );\n $paex = $contentObjectAttribute->content();\n if ( !$paex instanceof eZPaEx )\n {\n return true;\n }\n\n // Update empty paex data from parent paex\n // NOTE: if the current user don't have permission to edit paex data, and is\n // creating a new object (publishing version 1), force paex object update\n // to get values set in parent\n if ( !$paex->canEdit() && $contentObject->attribute( 'current_version' ) == 1 )\n $paex->updateFromParent( true );\n else\n $paex->updateFromParent();\n\n eZDebug::writeDebug( 'End', __METHOD__ );\n return true;\n }",
"def _update(self, **kwargs):\n '''Update only draft or legacy policies\n\n Published policies cannot be updated\n :raises: OperationNotSupportedOnPublishedPolicy\n '''\n\n legacy = kwargs.pop('legacy', False)\n tmos_ver = self._meta_data['bigip']._meta_data['tmos_version']\n self._filter_version_specific_options(tmos_ver, **kwargs)\n if 'Drafts' not in self._meta_data['uri'] and \\\n LooseVersion(tmos_ver) >= LooseVersion('12.1.0') and \\\n not legacy:\n msg = 'Update operation not allowed on a published policy.'\n raise OperationNotSupportedOnPublishedPolicy(msg)\n super(Policy, self)._update(**kwargs)",
"def save_related(self, request, form, *args, **kwargs):\n \"\"\"\n Send the signal `publishing_post_save_related` when a draft copy is\n saved and all its relationships have also been created.\n \"\"\"\n result = super(PublishingAdmin, self) \\\n .save_related(request, form, *args, **kwargs)\n # Send signal that draft has been saved and all relationships created\n if form.instance:\n publishing_signals.publishing_post_save_related.send(\n sender=type(self), instance=form.instance)\n return result",
"def process_actions(action_ids=None):\n \"\"\"\n Process actions in the publishing schedule.\n\n Returns the number of actions processed.\n \"\"\"\n actions_taken = 0\n action_list = PublishAction.objects.prefetch_related(\n 'content_object',\n ).filter(\n scheduled_time__lte=timezone.now(),\n )\n\n if action_ids is not None:\n action_list = action_list.filter(id__in=action_ids)\n\n for action in action_list:\n action.process_action()\n action.delete()\n actions_taken += 1\n\n return actions_taken"
] |
[
0.8715091943740845,
0.8080272674560547,
0.7380136847496033,
0.7335137128829956,
0.7129730582237244,
0.6991931200027466,
0.6968812942504883,
0.6956944465637207,
0.6950230002403259,
0.6946202516555786,
0.6925586462020874,
0.6898219585418701
] |
Process an unpublish action on the related object, returns a boolean if a change is made.
Only objects with a current active version will be updated.
|
def _unpublish(self):
"""
Process an unpublish action on the related object, returns a boolean if a change is made.
Only objects with a current active version will be updated.
"""
obj = self.content_object
actioned = False
# Only update if needed
if obj.current_version is not None:
obj.current_version = None
obj.save(update_fields=['current_version'])
actioned = True
return actioned
|
[
"def process_action(self):\n \"\"\"\n Process the action and update the related object, returns a boolean if a change is made.\n \"\"\"\n if self.publish_version == self.UNPUBLISH_CHOICE:\n actioned = self._unpublish()\n else:\n actioned = self._publish()\n\n # Only log if an action was actually taken\n if actioned:\n self._log_action()\n\n return actioned",
"def _publish(self):\n \"\"\"\n Process a publish action on the related object, returns a boolean if a change is made.\n\n Only objects where a version change is needed will be updated.\n \"\"\"\n obj = self.content_object\n version = self.get_version()\n actioned = False\n\n # Only update if needed\n if obj.current_version != version:\n version = self.get_version()\n obj.current_version = version\n obj.save(update_fields=['current_version'])\n actioned = True\n\n return actioned",
"def process_action(self, request, queryset):\n \"\"\"\n Unpublishes the selected objects by calling the object's \\\n unpublish method. The action is logged and the user is \\\n notified with a message.\n\n Returns a 'render redirect' to the result of the \\\n `get_done_url` method.\n \"\"\"\n count = 0\n for obj in queryset:\n count += 1\n obj.unpublish()\n object_url = self.get_object_url(obj)\n self.log_action(obj, CMSLog.UNPUBLISH, url=object_url)\n url = self.get_done_url()\n msg = self.write_message(message=\"%s objects unpublished.\" % count)\n return self.render(request, redirect_url=url,\n message=msg,\n collect_render_data=False)",
"public function doUnpublish()\n {\n $owner = $this->owner;\n // Skip if this record isn't saved\n if (!$owner->isInDB()) {\n return false;\n }\n\n // Skip if this record isn't on live\n if (!$owner->isPublished()) {\n return false;\n }\n\n $owner->invokeWithExtensions('onBeforeUnpublish');\n\n // Modify in isolated mode\n static::withVersionedMode(function () use ($owner) {\n static::set_stage(static::LIVE);\n\n // This way our ID won't be unset\n $clone = clone $owner;\n $clone->delete();\n });\n\n $owner->invokeWithExtensions('onAfterUnpublish');\n return true;\n }",
"public function onAfterUnpublish()\n {\n $member = Security::getCurrentUser();\n if (!$member || !$member->exists()) {\n return false;\n }\n\n $this->getAuditLogger()->info(sprintf(\n '\"%s\" (ID: %s) unpublished %s \"%s\" (ID: %s)',\n $member->Email ?: $member->Title,\n $member->ID,\n $this->owner->singular_name(),\n $this->owner->Title,\n $this->owner->ID\n ));\n }",
"def publish(self, ar):\n \"\"\"Set status to prepublished/published/republished\n \"\"\"\n wf = api.get_tool(\"portal_workflow\")\n status = wf.getInfoFor(ar, \"review_state\")\n transitions = {\"verified\": \"publish\",\n \"published\": \"republish\"}\n transition = transitions.get(status, \"prepublish\")\n logger.info(\"AR Transition: {} -> {}\".format(status, transition))\n try:\n wf.doActionFor(ar, transition)\n return True\n except WorkflowException as e:\n logger.debug(e)\n return False",
"def post_event_unpublish(self, id, **data):\n \"\"\"\n POST /events/:id/unpublish/\n Unpublishes an event. In order for a free event to be unpublished, it must not have any pending or completed orders,\n even if the event is in the past. In order for a paid event to be unpublished, it must not have any pending or completed\n orders, unless the event has been completed and paid out. Returns a boolean indicating success or failure of the\n unpublish.\n \"\"\"\n \n return self.post(\"/events/{0}/unpublish/\".format(id), data=data)",
"def unpublish(self):\n \"\"\"\n Un-publish the current object.\n \"\"\"\n if self.is_draft and self.publishing_linked:\n publishing_signals.publishing_pre_unpublish.send(\n sender=type(self), instance=self)\n # Unlink draft and published copies then delete published.\n # NOTE: This indirect dance is necessary to avoid triggering\n # unwanted MPTT tree structure updates via `delete`.\n type(self.publishing_linked).objects \\\n .filter(pk=self.publishing_linked.pk) \\\n .delete() # Instead of self.publishing_linked.delete()\n # NOTE: We update and save the object *after* deleting the\n # published version, in case the `save()` method does some\n # validation that breaks when unlinked published objects exist.\n self.publishing_linked = None\n self.publishing_published_at = None\n\n # Save the draft to remove its relationship with the published copy\n publishing_signals.publishing_unpublish_save_draft.send(\n sender=type(self), instance=self)\n\n publishing_signals.publishing_post_unpublish.send(\n sender=type(self), instance=self)",
"def unpublish_view(self, request, object_id):\n \"\"\"\n Instantiates a class-based view that redirects to Wagtail's 'unpublish'\n view for models that extend 'Page' (if the user has sufficient\n permissions). We do this via our own view so that we can reliably\n control redirection of the user back to the index_view once the action\n is completed. The view class used can be overridden by changing the\n 'unpublish_view_class' attribute.\n \"\"\"\n kwargs = {'model_admin': self, 'object_id': object_id}\n view_class = self.unpublish_view_class\n return view_class.as_view(**kwargs)(request)",
"function unpublish()\n {\n if ( $this->attribute( 'status' ) == eZContentObjectVersion::STATUS_PUBLISHED )\n {\n $this->setAttribute( 'status', eZContentObjectVersion::STATUS_ARCHIVED );\n $parentNodeList = $this->attribute( 'parent_nodes' );\n $parentNodeIDList = array();\n foreach( $parentNodeList as $parentNode )\n {\n $parentNodeIDList[] = $parentNode->attribute( 'parent_node' );\n }\n if ( count( $parentNodeIDList ) == 0 )\n {\n eZDebug::writeWarning( $this, \"unable to get parent nodes for version\" );\n return;\n }\n $parentNodeIDString = implode( ',' , $parentNodeIDList );\n $contentObjectID = $this->attribute( 'contentobject_id' );\n $version = $this->attribute( 'version' );\n $db = eZDB::instance();\n $query = \"update ezcontentobject_tree\n set contentobject_is_published = '0'\n where parent_node_id in ( $parentNodeIDString ) and\n contentobject_id = $contentObjectID and\n contentobject_version = $version\" ;\n $db->query( $query );\n }\n else\n {\n eZDebug::writeWarning( $this, \"trying to unpublish non published version\" );\n }\n\n }",
"public function onAfterUnpublish()\n {\n foreach ($this->owner->Fields() as $field) {\n $field->deleteFromStage(Versioned::LIVE);\n }\n }",
"def unpublish(self, request, article_id, language):\n \"\"\"\n Publish or unpublish a language of a article\n \"\"\"\n article = get_object_or_404(self.model, pk=article_id)\n if not article.has_publish_permission(request):\n return HttpResponseForbidden(force_text(_('You do not have permission to unpublish this article')))\n if not article.publisher_public_id:\n return HttpResponseForbidden(force_text(_('This article was never published')))\n try:\n article.unpublish(language)\n message = _('The %(language)s article \"%(article)s\" was successfully unpublished') % {\n 'language': get_language_object(language)['name'], 'article': article}\n messages.info(request, message)\n LogEntry.objects.log_action(\n user_id=request.user.id,\n content_type_id=ContentType.objects.get_for_model(Article).pk,\n object_id=article_id,\n object_repr=article.get_title(),\n action_flag=CHANGE,\n change_message=message,\n )\n except RuntimeError:\n exc = sys.exc_info()[1]\n messages.error(request, exc.message)\n except ValidationError:\n exc = sys.exc_info()[1]\n messages.error(request, exc.message)\n path = admin_reverse('cms_articles_article_changelist')\n if request.GET.get('redirect_language'):\n path = '%s?language=%s&article_id=%s' % (\n path,\n request.GET.get('redirect_language'),\n request.GET.get('redirect_article_id')\n )\n return HttpResponseRedirect(path)"
] |
[
0.8640452027320862,
0.8454350829124451,
0.7525824904441833,
0.7485924363136292,
0.7259954214096069,
0.7147408723831177,
0.7138216495513916,
0.7106776833534241,
0.7017576098442078,
0.7005733847618103,
0.6970431208610535,
0.6896604299545288
] |
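A (query, one positive, many scored hard negatives) layout like the rows above is the usual input for contrastive training of a retrieval bi-encoder. The sketch below shows one conventional way such rows are consumed; the loss and model name are illustrative assumptions, not something the dump specifies.

```python
# Sketch: contrastive training on rows shaped like this dump.
# The model name and MultipleNegativesRankingLoss are assumptions.
from sentence_transformers import InputExample, SentenceTransformer, losses
from torch.utils.data import DataLoader

model = SentenceTransformer("all-MiniLM-L6-v2")

row = {  # a toy row in the dump's schema
    "query": "Process an unpublish action on the related object",
    "pos": "def _unpublish(self):\n    ...",
    "neg": ["def process_action(self):\n    ...",
            "def publish(self, ar):\n    ..."],
}

# One (anchor, positive, hard-negative) triplet per negative in the row.
examples = [InputExample(texts=[row["query"], row["pos"], neg])
            for neg in row["neg"]]

loader = DataLoader(examples, shuffle=True, batch_size=2)
loss = losses.MultipleNegativesRankingLoss(model)
model.fit(train_objectives=[(loader, loss)], epochs=1, warmup_steps=0)
```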
Adds a log entry for this action to the object history in the Django admin.
|
def _log_action(self):
"""
Adds a log entry for this action to the object history in the Django admin.
"""
if self.publish_version == self.UNPUBLISH_CHOICE:
message = 'Unpublished page (scheduled)'
else:
message = 'Published version {} (scheduled)'.format(self.publish_version)
LogEntry.objects.log_action(
user_id=self.user.pk,
content_type_id=self.content_type.pk,
object_id=self.object_id,
object_repr=force_text(self.content_object),
action_flag=CHANGE,
change_message=message
)
|
[
"def _add_history(self, entry_type, entry):\n \"\"\" Generic method to add entry as entry_type to the history \"\"\"\n meta_string = \"{time} - {etype} - {entry}\".format(\n time=self._time(),\n etype=entry_type.upper(),\n entry=entry)\n\n self._content['history'].insert(0, meta_string)\n self.logger(meta_string)",
"def history(self, message):\n \"\"\" Records an Action-level log message\n\n Uses the log path defined by ``Debug.setUserLogFile()``. If no log file is\n defined, sends to STDOUT\n \"\"\"\n if Settings.ActionLogs:\n self._write_log(\"action\", Settings.LogTime, message)",
"def log_action(self, instance, action, action_date=None, url=\"\",\n update_parent=True):\n \"\"\"\n Store an action in the database using the CMSLog model.\n The following attributes are calculated and set on the log entry:\n\n * **model_repr** - A unicode representation of the instance.\n * **object_repr** - The verbose_name of the instance model class.\n * **section** - The name of ancestor bundle that is directly \\\n attached to the admin site.\n\n :param instance: The instance that this action was performed \\\n on.\n :param action: The action type. Must be one of the options \\\n in CMSLog.ACTIONS.\n :param action_date: The datetime the action occurred.\n :param url: The url that the log entry should point to, \\\n Defaults to an empty string.\n :param update_parent: If true this will update the last saved time \\\n on the object pointed to by this bundle's object_view. \\\n Defaults to True.\n \"\"\"\n\n section = None\n if self.bundle:\n bundle = self.bundle\n while bundle.parent:\n bundle = bundle.parent\n section = bundle.name\n\n # if we have a object view that comes from somewhere else\n # save it too to update it.\n changed_object = instance\n bundle = self.bundle\n while bundle.object_view == bundle.parent_attr:\n bundle = bundle.parent\n\n if update_parent and changed_object.__class__ != bundle._meta.model:\n object_view, name = bundle.get_initialized_view_and_name(\n bundle.object_view, kwargs=self.kwargs)\n\n changed_object = object_view.get_object()\n changed_object.save()\n\n if not section:\n section = \"\"\n\n if url:\n url = urlparse.urlparse(url).path\n\n rep = unicode(instance)\n if rep:\n rep = rep[:255]\n\n log = CMSLog(action=action, url=url, section=section,\n model_repr=instance._meta.verbose_name,\n object_repr=rep,\n user_name=self.request.user.username,\n action_date=action_date)\n log.save()",
"def _add_history(self, redo_func, redo_kwargs, undo_func, undo_kwargs,\n **kwargs):\n \"\"\"\n Add a new log (undo/redoable) to this history context\n\n :parameter str redo_func: function to redo the action, must be a\n method of :class:`Bundle`\n :parameter dict redo_kwargs: kwargs to pass to the redo_func. Each\n item must be serializable (float or str, not objects)\n :parameter str undo_func: function to undo the action, must be a\n method of :class:`Bundle`\n :parameter dict undo_kwargs: kwargs to pass to the undo_func. Each\n item must be serializable (float or str, not objects)\n :parameter str history: label of the history parameter\n :raises ValueError: if the label for this history item is forbidden or\n already exists\n \"\"\"\n if not self.history_enabled:\n return\n\n param = HistoryParameter(self, redo_func, redo_kwargs,\n undo_func, undo_kwargs)\n\n metawargs = {'context': 'history',\n 'history': kwargs.get('history', self._default_label('hist', **{'context': 'history'}))}\n\n self._check_label(metawargs['history'])\n\n self._attach_params([param], **metawargs)",
"def history_view(self, request, object_id, extra_context=None):\n \"The 'history' admin view for this model.\"\n from django.contrib.admin.models import LogEntry\n # First check if the user can see this history.\n model = self.model\n obj = get_object_or_404(self.get_queryset(request),\n pk=unquote(object_id))\n if not self.has_change_permission(request, obj):\n raise PermissionDenied\n\n # Then get the history for this object.\n opts = model._meta\n app_label = opts.app_label\n action_list = LogEntry.objects.filter(\n object_id=unquote(str(obj.identity)),\n # this is the change for our override;\n content_type=get_content_type_for_model(model)\n ).select_related().order_by('action_time')\n ctx = self.admin_site.each_context(request)\n\n context = dict(ctx,\n title=('Change history: %s') % force_text(obj),\n action_list=action_list,\n module_name=capfirst(\n force_text(opts.verbose_name_plural)),\n object=obj,\n opts=opts,\n preserved_filters=self.get_preserved_filters(request),\n )\n context.update(extra_context or {})\n return TemplateResponse(request, self.object_history_template or [\n \"admin/%s/%s/object_history.html\" % (app_label, opts.model_name),\n \"admin/%s/object_history.html\" % app_label,\n \"admin/object_history.html\"\n ], context)",
"def save_admin_log(build, **kwargs):\n \"\"\"Saves an action to the admin log.\"\"\"\n message = kwargs.pop('message', None)\n release = kwargs.pop('release', None)\n run = kwargs.pop('run', None)\n\n if not len(kwargs) == 1:\n raise TypeError('Must specify a LOG_TYPE argument')\n\n log_enum = kwargs.keys()[0]\n log_type = getattr(models.AdminLog, log_enum.upper(), None)\n if not log_type:\n raise TypeError('Bad log_type argument: %s' % log_enum)\n\n if current_user.is_anonymous():\n user_id = None\n else:\n user_id = current_user.get_id()\n\n log = models.AdminLog(\n build_id=build.id,\n log_type=log_type,\n message=message,\n user_id=user_id)\n\n if release:\n log.release_id = release.id\n\n if run:\n log.run_id = run.id\n log.release_id = run.release_id\n\n db.session.add(log)",
"def history_view(self, request, object_id, extra_context=None):\n from django.template.response import TemplateResponse\n from django.contrib.admin.options import get_content_type_for_model\n from django.contrib.admin.utils import unquote\n from django.core.exceptions import PermissionDenied\n from django.utils.text import capfirst\n from django.utils.encoding import force_text\n from django.utils.translation import ugettext as _\n\n \"The 'history' admin view for this model.\"\n from django.contrib.admin.models import LogEntry\n # First check if the user can see this history.\n model = self.model\n obj = self.get_object(request, unquote(object_id))\n if obj is None:\n return self._get_obj_does_not_exist_redirect(request, model._meta, object_id)\n\n if not self.has_change_permission(request, obj):\n raise PermissionDenied\n\n # Then get the history for this object.\n opts = model._meta\n app_label = opts.app_label\n action_list = LogEntry.objects.filter(\n object_id=unquote(object_id),\n content_type=get_content_type_for_model(model)\n ).select_related().order_by('-action_time')[:self.max_history_length]\n\n context = dict(\n self.admin_site.each_context(request),\n title=_('Change history: %s') % force_text(obj),\n action_list=action_list,\n module_name=capfirst(force_text(opts.verbose_name_plural)),\n object=obj,\n opts=opts,\n preserved_filters=self.get_preserved_filters(request),\n )\n context.update(extra_context or {})\n\n request.current_app = self.admin_site.name\n\n return TemplateResponse(request, self.object_history_template or [\n \"admin/%s/%s/object_history.html\" % (app_label, opts.model_name),\n \"admin/%s/object_history.html\" % app_label,\n \"admin/object_history.html\"\n ], context)",
"def admin_log(instances, msg: str, who: User=None, **kw):\n \"\"\"\n Logs an entry to admin logs of model(s).\n :param instances: Model instance or list of instances\n :param msg: Message to log\n :param who: Who did the change\n :param kw: Optional key-value attributes to append to message\n :return: None\n \"\"\"\n\n from django.contrib.admin.models import LogEntry, CHANGE\n from django.contrib.admin.options import get_content_type_for_model\n from django.utils.encoding import force_text\n\n # use system user if 'who' is missing\n if not who:\n username = settings.DJANGO_SYSTEM_USER if hasattr(settings, 'DJANGO_SYSTEM_USER') else 'system'\n who, created = User.objects.get_or_create(username=username)\n\n # append extra keyword attributes if any\n att_str = ''\n for k, v in kw.items():\n if hasattr(v, 'pk'): # log only primary key for model instances, not whole str representation\n v = v.pk\n att_str += '{}={}'.format(k, v) if not att_str else ', {}={}'.format(k, v)\n if att_str:\n att_str = ' [{}]'.format(att_str)\n msg = str(msg) + att_str\n\n if not isinstance(instances, list) and not isinstance(instances, tuple):\n instances = [instances]\n for instance in instances:\n if instance:\n LogEntry.objects.log_action(\n user_id=who.pk,\n content_type_id=get_content_type_for_model(instance).pk,\n object_id=instance.pk,\n object_repr=force_text(instance),\n action_flag=CHANGE,\n change_message=msg,\n )",
"def action_log_create(sender, instance, created, **kwargs):\n \"\"\"\n Signal receiver that creates a log entry when a model instance is first saved to the database.\n\n Direct use is discouraged, connect your model through :py:func:`actionslog.registry.register` instead.\n \"\"\"\n if created:\n changes = model_instance_diff(None, instance)\n\n log_entry = LogAction.objects.create_log_action(\n instance=instance,\n action=LogAction.CREATE,\n changes=json.dumps(changes),\n )",
"def add(self, **kwargs):\n '''\n in infor.\n '''\n\n post_data = {}\n\n for key in self.request.arguments:\n post_data[key] = self.get_arguments(key)[0]\n\n MLog.add(post_data)\n kwargs.pop('uid', None) # delete `uid` if exists in kwargs\n\n self.redirect('/log/')",
"def history_view(self, request, object_id, extra_context=None):\n \"\"\"The 'history' admin view for this model.\"\"\"\n request.current_app = self.admin_site.name\n model = self.model\n opts = model._meta\n app_label = opts.app_label\n pk_name = opts.pk.attname\n history = getattr(model, model._meta.simple_history_manager_attribute)\n object_id = unquote(object_id)\n action_list = history.filter(**{pk_name: object_id})\n if not isinstance(history.model.history_user, property):\n # Only select_related when history_user is a ForeignKey (not a property)\n action_list = action_list.select_related(\"history_user\")\n history_list_display = getattr(self, \"history_list_display\", [])\n # If no history was found, see whether this object even exists.\n try:\n obj = self.get_queryset(request).get(**{pk_name: object_id})\n except model.DoesNotExist:\n try:\n obj = action_list.latest(\"history_date\").instance\n except action_list.model.DoesNotExist:\n raise http.Http404\n\n if not self.has_change_permission(request, obj):\n raise PermissionDenied\n\n # Set attribute on each action_list entry from admin methods\n for history_list_entry in history_list_display:\n value_for_entry = getattr(self, history_list_entry, None)\n if value_for_entry and callable(value_for_entry):\n for list_entry in action_list:\n setattr(list_entry, history_list_entry, value_for_entry(list_entry))\n\n content_type = ContentType.objects.get_by_natural_key(*USER_NATURAL_KEY)\n admin_user_view = \"admin:%s_%s_change\" % (\n content_type.app_label,\n content_type.model,\n )\n context = {\n \"title\": _(\"Change history: %s\") % force_text(obj),\n \"action_list\": action_list,\n \"module_name\": capfirst(force_text(opts.verbose_name_plural)),\n \"object\": obj,\n \"root_path\": getattr(self.admin_site, \"root_path\", None),\n \"app_label\": app_label,\n \"opts\": opts,\n \"admin_user_view\": admin_user_view,\n \"history_list_display\": history_list_display,\n }\n context.update(self.admin_site.each_context(request))\n context.update(extra_context or {})\n extra_kwargs = {}\n return render(request, self.object_history_template, context, **extra_kwargs)",
"def add_log_entry(self, entry):\n \"\"\" :db.model.job record holds event log, that can be accessed by MX\n this method adds a record and removes oldest one if necessary \"\"\"\n event_log = self.job_record.event_log\n if len(event_log) > job.MAX_NUMBER_OF_EVENTS:\n del event_log[-1]\n event_log.insert(0, entry)"
] |
[
0.7889394760131836,
0.7844164371490479,
0.7813898324966431,
0.7616400718688965,
0.7595550417900085,
0.7529367804527283,
0.7482128739356995,
0.7426053881645203,
0.7355849742889404,
0.7336613535881042,
0.7288952469825745,
0.7254387140274048
] |
Process the action and update the related object; returns a boolean indicating whether a change was made.
|
def process_action(self):
"""
    Process the action and update the related object; returns a boolean indicating whether a change was made.
"""
if self.publish_version == self.UNPUBLISH_CHOICE:
actioned = self._unpublish()
else:
actioned = self._publish()
# Only log if an action was actually taken
if actioned:
self._log_action()
return actioned
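
A hedged usage sketch: `action` below stands for an instance of the model that defines this method; the surrounding class name and any bookkeeping are assumptions, not part of the source.

# Hypothetical driver; `action` is an instance of the model defining
# process_action(). The method applies a publish/unpublish change at
# most once and logs only when something actually changed.
if action.process_action():
    print('related object updated; action logged via _log_action()')
else:
    print('no version change was needed')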
|
[
"def _publish(self):\n \"\"\"\n Process a publish action on the related object, returns a boolean if a change is made.\n\n Only objects where a version change is needed will be updated.\n \"\"\"\n obj = self.content_object\n version = self.get_version()\n actioned = False\n\n # Only update if needed\n if obj.current_version != version:\n version = self.get_version()\n obj.current_version = version\n obj.save(update_fields=['current_version'])\n actioned = True\n\n return actioned",
"def handle_update(self, action, params):\n \"\"\"Handle the specified action on this component.\"\"\"\n _LOGGER.debug('Keypad: \"%s\" Handling \"%s\" Action: %s Params: %s\"' % (\n self._keypad.name, self.name, action, params))\n return False",
"def handle_update(self, action, params):\n \"\"\"Handle the specified action on this component.\"\"\"\n _LOGGER.debug('Keypad: \"%s\" %s Action: %s Params: %s\"' % (\n self._keypad.name, self, action, params))\n if action != Led._ACTION_LED_STATE:\n _LOGGER.debug(\"Unknown action %d for led %d in keypad %d\" % (\n action, self.number, self.keypad.name))\n return False\n elif len(params) < 1:\n _LOGGER.debug(\"Unknown params %s (action %d on led %d in keypad %d)\" % (\n params, action, self.number, self.keypad.name))\n return False\n self._state = bool(params[0])\n self._query_waiters.notify()\n self._dispatch_event(Led.Event.STATE_CHANGED, {'state': self._state})\n return True",
"public function process()\n {\n parent::process();\n\n $flydown = $this->getCompositeWidget('action_flydown');\n $selected_id = $flydown->value;\n\n if (isset($this->action_items_by_id[$selected_id])) {\n $this->selected = $this->action_items_by_id[$selected_id];\n\n if ($this->selected->widget !== null) {\n $this->selected->widget->process();\n }\n } else {\n $this->selected = null;\n }\n }",
"def handle_update(self, action, params):\n \"\"\"Handle the specified action on this component.\"\"\"\n _LOGGER.debug('Keypad: \"%s\" %s Action: %s Params: %s\"' % (\n self._keypad.name, self, action, params))\n ev_map = {\n Button._ACTION_PRESS: Button.Event.PRESSED,\n Button._ACTION_RELEASE: Button.Event.RELEASED\n }\n if action not in ev_map:\n _LOGGER.debug(\"Unknown action %d for button %d in keypad %d\" % (\n action, self.number, self.keypad.name))\n return False\n self._dispatch_event(ev_map[action], {})\n return True",
"public function process_actions($left, $right, $moveup, $movedown) {\n //should this action be processed by this list object?\n if (!(array_key_exists($left, $this->records) || array_key_exists($right, $this->records) || array_key_exists($moveup, $this->records) || array_key_exists($movedown, $this->records))) {\n return false;\n }\n if (!empty($left)) {\n $oldparentitem = $this->move_item_left($left);\n if ($this->item_is_last_on_page($oldparentitem->id)) {\n // Item has jumped onto the next page, change page when we redirect.\n $this->page ++;\n $this->pageurl->params(array($this->pageparamname => $this->page));\n }\n } else if (!empty($right)) {\n $this->move_item_right($right);\n if ($this->item_is_first_on_page($right)) {\n // Item has jumped onto the previous page, change page when we redirect.\n $this->page --;\n $this->pageurl->params(array($this->pageparamname => $this->page));\n }\n } else if (!empty($moveup)) {\n $this->move_item_up_down('up', $moveup);\n if ($this->item_is_first_on_page($moveup)) {\n // Item has jumped onto the previous page, change page when we redirect.\n $this->page --;\n $this->pageurl->params(array($this->pageparamname => $this->page));\n }\n } else if (!empty($movedown)) {\n $this->move_item_up_down('down', $movedown);\n if ($this->item_is_last_on_page($movedown)) {\n // Item has jumped onto the next page, change page when we redirect.\n $this->page ++;\n $this->pageurl->params(array($this->pageparamname => $this->page));\n }\n } else {\n return false;\n }\n\n redirect($this->pageurl);\n }",
"def handle_update(self, args):\n \"\"\"The callback invoked by the main event loop if there's an event from this keypad.\"\"\"\n component = int(args[0])\n action = int(args[1])\n params = [int(x) for x in args[2:]]\n _LOGGER.debug(\"Updating %d(%s): c=%d a=%d params=%s\" % (\n self._integration_id, self._name, component, action, params))\n if component in self._components:\n return self._components[component].handle_update(action, params)\n return False",
"def handle_update(self, args):\n \"\"\"Handles an event update for this object, e.g. dimmer level change.\"\"\"\n _LOGGER.debug(\"handle_update %d -- %s\" % (self._integration_id, args))\n state = int(args[0])\n if state != Output._ACTION_ZONE_LEVEL:\n return False\n level = float(args[1])\n _LOGGER.debug(\"Updating %d(%s): s=%d l=%f\" % (\n self._integration_id, self._name, state, level))\n self._level = level\n self._query_waiters.notify()\n self._dispatch_event(Output.Event.LEVEL_CHANGED, {'level': self._level})\n return True",
"def do_processing\n json = self.json_data\n if md5_hash_is_valid?(json)\n fields = fields_from_json(json).merge({:raw_transaction_id => self.id})\n transaction = Transaction.new(fields)\n if transaction.save()\n self.update_attributes(:is_processed => true, :is_authentic => true)\n return true\n else\n return false\n end\n else\n self.update_attributes(:is_processed => true, :is_authentic => false)\n return false\n end\n end",
"def post(self, request, *args, **kwargs):\n \"\"\"\n Method for handling POST requests.\n Checks for a modify confirmation and performs\n the action by calling `process_action`.\n\n \"\"\"\n queryset = self.get_selected(request)\n\n if request.POST.get('modify'):\n response = self.process_action(request, queryset)\n if not response:\n url = self.get_done_url()\n return self.render(request, redirect_url=url)\n else:\n return response\n else:\n return self.render(request, redirect_url=request.build_absolute_uri())",
"def response_action(self, request, queryset): # noqa\n \"\"\"\n Handle an admin action. This is called if a request is POSTed to the\n changelist; it returns an HttpResponse if the action was handled, and\n None otherwise.\n \"\"\"\n # There can be multiple action forms on the page (at the top\n # and bottom of the change list, for example). Get the action\n # whose button was pushed.\n try:\n action_index = int(request.POST.get('index', 0))\n except ValueError: # pragma: no cover\n action_index = 0\n\n # Construct the action form.\n data = request.POST.copy()\n data.pop(helpers.ACTION_CHECKBOX_NAME, None)\n data.pop(\"index\", None)\n\n # Use the action whose button was pushed\n try:\n data.update({'action': data.getlist('action')[action_index]})\n except IndexError: # pragma: no cover\n # If we didn't get an action from the chosen form that's invalid\n # POST data, so by deleting action it'll fail the validation check\n # below. So no need to do anything here\n pass\n\n action_form = self.action_form(data, auto_id=None)\n action_form.fields['action'].choices = self.get_action_choices(request)\n\n # If the form's valid we can handle the action.\n if action_form.is_valid():\n action = action_form.cleaned_data['action']\n func, name, description = self.get_actions(request)[action]\n\n # Get the list of selected PKs. If nothing's selected, we can't\n # perform an action on it, so bail.\n if action_form.cleaned_data['select_across']:\n selected = ALL\n else:\n selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)\n\n if not selected:\n return None\n\n revision_field = self.model._concurrencymeta.field\n\n if self.check_concurrent_action:\n self.delete_selected_confirmation_template = self.get_confirmation_template()\n\n # If select_across we have to avoid the use of concurrency\n if selected is not ALL:\n filters = []\n for x in selected:\n try:\n pk, version = x.split(\",\")\n except ValueError: # pragma: no cover\n raise ImproperlyConfigured('`ConcurrencyActionMixin` error.'\n 'A tuple with `primary_key, version_number` '\n 'expected: `%s` found' % x)\n filters.append(Q(**{'pk': pk,\n revision_field.attname: version}))\n\n queryset = queryset.filter(reduce(operator.or_, filters))\n if len(selected) != queryset.count():\n messages.error(request, 'One or more record were updated. '\n '(Probably by other user) '\n 'The execution was aborted.')\n return HttpResponseRedirect(\".\")\n else:\n messages.warning(request, 'Selecting all records, you will avoid the concurrency check')\n\n response = func(self, request, queryset)\n\n # Actions may return an HttpResponse, which will be used as the\n # response from the POST. If not, we'll be a good little HTTP\n # citizen and redirect back to the changelist page.\n if isinstance(response, HttpResponse):\n return response\n else:\n return HttpResponseRedirect(\".\")",
"def process_action_link_action(render_action = :action_update, crud_type_or_security_options = nil)\n if request.get?\n # someone has disabled javascript, we have to show confirmation form first\n @record = find_if_allowed(params[:id], :read) if params[:id]\n respond_to_action(:action_confirmation)\n else\n @action_link = active_scaffold_config.action_links[action_name]\n if params[:id]\n crud_type_or_security_options ||= {:crud_type => request.post? || request.put? ? :update : :delete, :action => action_name}\n get_row(crud_type_or_security_options)\n if @record.nil?\n self.successful = false\n flash[:error] = as_(:no_authorization_for_action, :action => action_name)\n else\n yield @record\n end\n else\n yield\n end\n respond_to_action(render_action)\n end\n end"
] |
[
0.7764884829521179,
0.739753246307373,
0.7354515790939331,
0.7172083258628845,
0.7169113755226135,
0.7167596220970154,
0.7141504287719727,
0.7085196375846863,
0.7075380682945251,
0.7058738470077515,
0.7056086659431458,
0.7041708827018738
] |
Function checkAndCreate
Check-and-create procedure for a hostgroup:
- check whether the hostgroup already exists
- create the hostgroup if it does not
- add puppet classes from puppetClassesId
- add params from hostgroupConf
@param key: The hostgroup name or ID
@param payload: The description of the hostgroup
@param hostgroupConf: The configuration of the host group from the
foreman.conf
@param hostgroupParent: The id of the parent hostgroup (accepted but
currently unused)
@param puppetClassesId: The dict of puppet class ids in foreman
@return RETURN: The id of the hostgroup, or False on failure
|
def checkAndCreate(self, key, payload,
hostgroupConf,
hostgroupParent,
puppetClassesId):
""" Function checkAndCreate
check And Create procedure for an hostgroup
- check the hostgroup is not existing
- create the hostgroup
- Add puppet classes from puppetClassesId
- Add params from hostgroupConf
@param key: The hostgroup name or ID
@param payload: The description of the hostgroup
@param hostgroupConf: The configuration of the host group from the
foreman.conf
@param hostgroupParent: The id of the parent hostgroup
@param puppetClassesId: The dict of puppet classes ids in foreman
@return RETURN: The ItemHostsGroup object of an host
"""
if key not in self:
self[key] = payload
oid = self[key]['id']
if not oid:
return False
# Create Hostgroup classes
    if 'classes' in hostgroupConf:
        classList = [puppetClassesId[c] for c in hostgroupConf['classes']]
if not self[key].checkAndCreateClasses(classList):
print("Failed in classes")
return False
# Set params
    if 'params' in hostgroupConf:
if not self[key].checkAndCreateParams(hostgroupConf['params']):
print("Failed in params")
return False
return oid
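
A usage sketch under stated assumptions: `hostgroups` stands for the collection object this method is defined on, and the payload and conf keys shown are illustrative, not confirmed Foreman fields.

# Illustrative values only; key names and ids are assumptions.
payload = {'name': 'webservers', 'parent_id': 12}
conf = {'classes': ['apache'], 'params': {'tier': 'front'}}
classes_ids = {'apache': 42}

oid = hostgroups.checkAndCreate('webservers', payload, conf, 12, classes_ids)
if not oid:
    print("hostgroup creation failed")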
|
[
"def create(self, create_missing=None):\n \"\"\"Do extra work to fetch a complete set of attributes for this entity.\n\n For more information, see `Bugzilla #1235377\n <https://bugzilla.redhat.com/show_bug.cgi?id=1235377>`_.\n\n \"\"\"\n return HostGroup(\n self._server_config,\n id=self.create_json(create_missing)['id'],\n ).read()",
"def present(name, **kwargs):\n '''\n Ensures that the host group exists, eventually creates new host group.\n\n .. versionadded:: 2016.3.0\n\n :param name: name of the host group\n :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)\n :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)\n :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)\n\n .. code-block:: yaml\n\n create_testing_host_group:\n zabbix_hostgroup.present:\n - name: 'My hostgroup name'\n\n\n '''\n connection_args = {}\n if '_connection_user' in kwargs:\n connection_args['_connection_user'] = kwargs['_connection_user']\n if '_connection_password' in kwargs:\n connection_args['_connection_password'] = kwargs['_connection_password']\n if '_connection_url' in kwargs:\n connection_args['_connection_url'] = kwargs['_connection_url']\n ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}\n\n # Comment and change messages\n comment_hostgroup_created = 'Host group {0} created.'.format(name)\n comment_hostgroup_notcreated = 'Unable to create host group: {0}. '.format(name)\n comment_hostgroup_exists = 'Host group {0} already exists.'.format(name)\n changes_hostgroup_created = {name: {'old': 'Host group {0} does not exist.'.format(name),\n 'new': 'Host group {0} created.'.format(name),\n }\n }\n\n hostgroup_exists = __salt__['zabbix.hostgroup_exists'](name, **connection_args)\n\n # Dry run, test=true mode\n if __opts__['test']:\n if hostgroup_exists:\n ret['result'] = True\n ret['comment'] = comment_hostgroup_exists\n else:\n ret['result'] = None\n ret['comment'] = comment_hostgroup_created\n ret['changes'] = changes_hostgroup_created\n return ret\n\n if hostgroup_exists:\n ret['result'] = True\n ret['comment'] = comment_hostgroup_exists\n else:\n hostgroup_create = __salt__['zabbix.hostgroup_create'](name, **connection_args)\n\n if 'error' not in hostgroup_create:\n ret['result'] = True\n ret['comment'] = comment_hostgroup_created\n ret['changes'] = changes_hostgroup_created\n else:\n ret['result'] = False\n ret['comment'] = comment_hostgroup_notcreated + six.text_type(hostgroup_create['error'])\n\n return ret",
"def checkAndCreateClasses(self, classes):\n \"\"\" Function checkAndCreateClasses\n Check and add puppet class\n\n @param classes: The classes ids list\n @return RETURN: boolean\n \"\"\"\n actual_classes = self['puppetclasses'].keys()\n for i in classes:\n if i not in actual_classes:\n self['puppetclasses'].append(i)\n self.reload()\n return set(classes).issubset(set((self['puppetclasses'].keys())))",
"def hostgroup_create(name, **kwargs):\n '''\n .. versionadded:: 2016.3.0\n\n Create a host group\n\n .. note::\n This function accepts all standard host group properties: keyword\n argument names differ depending on your zabbix version, see here__.\n\n .. __: https://www.zabbix.com/documentation/2.4/manual/api/reference/hostgroup/object#host_group\n\n :param name: name of the host group\n :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)\n :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)\n :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)\n\n :return: ID of the created host group.\n\n CLI Example:\n .. code-block:: bash\n\n salt '*' zabbix.hostgroup_create MyNewGroup\n '''\n conn_args = _login(**kwargs)\n ret = {}\n try:\n if conn_args:\n method = 'hostgroup.create'\n params = {\"name\": name}\n params = _params_extend(params, **kwargs)\n ret = _query(method, params, conn_args['url'], conn_args['auth'])\n return ret['result']['groupids']\n else:\n raise KeyError\n except KeyError:\n return ret",
"def create(self, dhcp4, dhcp6, callback=None, errback=None):\n \"\"\"\n :param DHCPOptions dhcp4: DHCPOptions object that contains the settings for dhcp4\n :param DHCPOptions dhcp6: DHCPOptions object that contains the settings for dhcp6\n\n Create a new Scope Group. Pass a list of keywords and their values to\n configure. For the list of keywords available for address configuration, see :attr:`ns1.rest.ipam.Scopegroups.INT_FIELDS` and :attr:`ns1.rest.ipam.Scopegroups.PASSTHRU_FIELDS`\n \"\"\"\n if self.data:\n raise ScopegroupException('Scope Group already loaded')\n\n def success(result, *args):\n self.data = result\n self.id = result['id']\n self.dhcp4 = result['dhcp4']\n self.dhcp6 = result['dhcp6']\n self.name = result['name']\n self.service_group_id = result['service_group_id']\n if callback:\n return callback(self)\n else:\n return self\n\n return self._rest.create(dhcp4=dhcp4.option_list, dhcp6=dhcp6.option_list, name=self.name, service_group_id=self.service_group_id,\n callback=success, errback=errback)",
"def checkAndCreate(self, key, payload, domainId):\n \"\"\" Function checkAndCreate\n Check if a subnet exists and create it if not\n\n @param key: The targeted subnet\n @param payload: The targeted subnet description\n @param domainId: The domainId to be attached wiuth the subnet\n @return RETURN: The id of the subnet\n \"\"\"\n if key not in self:\n self[key] = payload\n oid = self[key]['id']\n if not oid:\n return False\n #~ Ensure subnet contains the domain\n subnetDomainIds = []\n for domain in self[key]['domains']:\n subnetDomainIds.append(domain['id'])\n if domainId not in subnetDomainIds:\n subnetDomainIds.append(domainId)\n self[key][\"domain_ids\"] = subnetDomainIds\n if len(self[key][\"domains\"]) is not len(subnetDomainIds):\n return False\n return oid",
"def delete_puppetclass(self, synchronous=True, **kwargs):\n \"\"\"Remove a Puppet class from host group\n\n Here is an example of how to use this method::\n hostgroup.delete_puppetclass(data={'puppetclass_id': puppet.id})\n\n Constructs path:\n /api/hostgroups/:hostgroup_id/puppetclass_ids/:id\n\n :param synchronous: What should happen if the server returns an HTTP\n 202 (accepted) status code? Wait for the task to complete if\n ``True``. Immediately return the server's response otherwise.\n :param kwargs: Arguments to pass to requests.\n :returns: The server's response, with all JSON decoded.\n :raises: ``requests.exceptions.HTTPError`` If the server responds with\n an HTTP 4XX or 5XX message.\n\n \"\"\"\n kwargs = kwargs.copy()\n kwargs.update(self._server_config.get_client_kwargs())\n path = \"{0}/{1}\".format(\n self.path('puppetclass_ids'),\n kwargs['data'].pop('puppetclass_id')\n )\n return _handle_response(\n client.delete(path, **kwargs), self._server_config, synchronous)",
"def checkAndCreate(self, key, payload):\n \"\"\" Function checkAndCreate\n Check if an object exists and create it if not\n\n @param key: The targeted object\n @param payload: The targeted object description\n @return RETURN: The id of the object\n \"\"\"\n if key not in self:\n self[key] = payload\n return self[key]['id']",
"def Create(group,parent=None,description='',alias=None,location=None):\n\t\t\"\"\"Creates a new group\n\n\t\thttps://t3n.zendesk.com/entries/20979861-Create-Hardware-Group\n\n\t\t:param alias: short code for a particular account. If none will use account's default alias\n\t\t:param location: datacenter where group resides\n\t\t:param parent: groups can be nested - name of parent group. If None will be a toplevel group in the datacenter\n\t\t:param descrption: optional group description\n\t\t\"\"\"\n\t\tif alias is None: alias = clc.v1.Account.GetAlias()\n\t\tif location is None: location = clc.v1.Account.GetLocation()\n\t\tif description is None: description = ''\n\t\tif parent is None: parent = \"%s Hardware\" % (location)\n\n\t\tparents_uuid = Group.GetGroupUUID(parent,alias,location)\n\n\t\tr = clc.v1.API.Call('post','Group/CreateHardwareGroup',\n\t\t {'AccountAlias': alias, 'ParentUUID': parents_uuid, 'Name': group, 'Description': description })\n\t\tif int(r['StatusCode']) == 0: return(r['Group'])",
"def checkAndCreate(self, key, payload):\n \"\"\" Function checkAndCreate\n Check if an object exists and create it if not\n\n @param key: The targeted object\n @param payload: The targeted object description\n @return RETURN: The id of the object\n \"\"\"\n if key not in self:\n if 'templates' in payload:\n templates = payload.pop('templates')\n self[key] = payload\n self.reload()\n return self[key]['id']",
"def maybe_create_placement_group(name='', max_retries=10):\n \"\"\"Creates placement_group group or reuses existing one. Crash if unable to create\n placement_group group. If name is empty, ignores request.\"\"\"\n\n if not name:\n return\n\n client = get_ec2_client()\n while True:\n try:\n client.describe_placement_groups(GroupNames=[name])\n print(\"Reusing placement_group group: \" + name)\n break # no Exception means group name was found\n except Exception:\n print(\"Creating placement_group group: \" + name)\n try:\n _response = client.create_placement_group(GroupName=name,\n Strategy='cluster')\n except Exception:\n # because of race can get InvalidPlacementGroup.Duplicate\n pass\n\n counter = 0\n while True:\n try:\n res = client.describe_placement_groups(GroupNames=[name])\n res_entry = res['PlacementGroups'][0]\n if res_entry['State'] == 'available':\n assert res_entry['Strategy'] == 'cluster'\n break\n except Exception as e:\n print(\"Got exception: %s\" % (e,))\n counter += 1\n if counter >= max_retries:\n assert False, f'Failed to create placement_group group {name} in {max_retries} attempts'\n time.sleep(RETRY_INTERVAL_SEC)",
"def create(cls, api, default_name=None, description=None):\n \"\"\"\n http://docs.fiesta.cc/list-management-api.html#creating-a-group\n\n 200 character max on the description.\n \"\"\"\n path = 'group'\n\n data = {}\n if default_name:\n data['default_group_name'] = default_name\n if description:\n data['description'] = description\n if api.domain:\n data['domain'] = api.domain\n\n response_data = api.request(path, data=data)\n\n id = response_data['group_id']\n group = cls(api, id)\n group.default_name = response_data['default_group_name']\n\n return group"
] |
[
0.6874347925186157,
0.6812528967857361,
0.6659219264984131,
0.6627755165100098,
0.6543682217597961,
0.6508302092552185,
0.6484752893447876,
0.6453496217727661,
0.6450132727622986,
0.6433787941932678,
0.6415283679962158,
0.6366862654685974
] |
a decorator decorator, allowing the decorator to be used as:
@decorator(with, arguments, and=kwargs)
or
@decorator
Ref: http://stackoverflow.com/questions/653368/how-to-create-a-python-decorator-that-can-be-used-either-with-or-without-paramet
|
def doublewrap(f):
'''
a decorator decorator, allowing the decorator to be used as:
@decorator(with, arguments, and=kwargs)
or
@decorator
Ref: http://stackoverflow.com/questions/653368/how-to-create-a-python-decorator-that-can-be-used-either-with-or-without-paramet
'''
@functools.wraps(f)
def new_dec(*args, **kwargs):
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
# actual decorated function
return f(args[0])
else:
# decorator arguments
return lambda realf: f(realf, *args, **kwargs)
return new_dec
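
For illustration, a decorator written once with `doublewrap` can then be applied with or without arguments; the decorator and function names here are assumptions.

import functools

@doublewrap
def logged(func, prefix='CALL'):
    """Example decorator built with doublewrap (names are illustrative)."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        print(prefix, func.__name__)
        return func(*args, **kwargs)
    return wrapper

@logged               # bare form: expands to logged(ping)
def ping():
    pass

@logged(prefix='>>')  # parametrised form: expands to logged(pong, prefix='>>')
def pong():
    pass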
|
[
"def optional_arg_class_decorator(fn):\n \"\"\"\n Based on:\n https://stackoverflow.com/questions/3888158/python-making-decorators-with-optional-arguments\n \"\"\"\n @functools.wraps(fn)\n def wrapped_decorator(*args, **kwargs):\n if len(args) == 1 and isinstance(args[0], type) and not kwargs:\n return fn(args[0])\n else:\n def real_decorator(decoratee):\n return fn(decoratee, *args, **kwargs)\n return real_decorator\n return wrapped_decorator",
"def optional_argument_decorator(_decorator):\n \"\"\"Decorate your decorator with this to allow it to always receive *args and **kwargs, making @deco equivalent to\n @deco()\"\"\"\n\n @functools.wraps(_decorator)\n def inner_decorator_make(*args, **kwargs):\n if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):\n func = args[0]\n args = tuple()\n kwargs = dict()\n else:\n func = None\n\n decorator = _decorator(*args, **kwargs)\n\n if func:\n return decorator(func)\n else:\n return decorator\n\n return inner_decorator_make",
"def dual_use_decorator(fn):\n \"\"\"Turn a function into a decorator that can be called with or without\n arguments.\"\"\"\n @functools.wraps(fn)\n def decorator(*args, **kw):\n if len(args) == 1 and not kw and callable(args[0]):\n return fn()(args[0])\n else:\n return fn(*args, **kw)\n return decorator",
"def decorator(func):\n r\"\"\"Makes the passed decorators to support optional args.\n \"\"\"\n def wrapper(__decorated__=None, *Args, **KwArgs):\n if __decorated__ is None: # the decorator has some optional arguments.\n return lambda _func: func(_func, *Args, **KwArgs)\n\n else:\n return func(__decorated__, *Args, **KwArgs)\n\n return wrap(wrapper, func)",
"def decorator(d):\n \"\"\"Creates a proper decorator.\n\n If the default for the first (function) argument is None, creates a\n version which be invoked as either @decorator or @decorator(kwargs...).\n\n See examples below.\n \"\"\"\n defaults = d.__defaults__\n if defaults and defaults[0] is None:\n # Can be applied as @decorator or @decorator(kwargs) because\n # first argument is None\n def decorate(fn=None, **kwargs):\n if fn is None:\n return _functools.partial(decorate, **kwargs)\n else:\n decorated = d(fn, **kwargs)\n _functools.update_wrapper(decorated, fn)\n return decorated\n else:\n # Can only be applied as @decorator\n def decorate(fn):\n decorated = d(fn)\n _functools.update_wrapper(decorated, fn)\n return decorated\n _functools.update_wrapper(decorate, d)\n return decorate",
"def detect_and_decorate(decorator, args, kwargs):\n \"\"\"\n Helper for applying a decorator when it is applied directly, and also\n applying it when it is given arguments and then applied to a function.\n \"\"\"\n # special behavior when invoked with only one non-keyword argument: act as\n # a normal decorator, decorating and returning that argument with\n # click.option\n if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):\n return decorator(args[0])\n\n # if we're not doing that, we should see no positional args\n # the alternative behavior is to fall through and discard *args, but this\n # will probably confuse someone in the future when their arguments are\n # silently discarded\n elif len(args) != 0:\n raise ValueError(\"this decorator cannot take positional args\")\n\n # final case: got 0 or more kwargs, no positionals\n # do the function-which-returns-a-decorator dance to produce a\n # new decorator based on the arguments given\n else:\n\n def inner_decorator(f):\n return decorator(f, **kwargs)\n\n return inner_decorator",
"def good_decorator_accepting_args(decorator):\n \"\"\"This decorator makes decorators behave well wrt to decorated\n functions names, doc, etc. \n\n Differently from good_decorator, this accepts decorators possibly\n receiving arguments and keyword arguments.\n\n This decorato can be used indifferently with class methods and\n functions.\"\"\" \n def new_decorator(*f, **k):\n g = decorator(*f, **k)\n if 1 == len(f) and isinstance(f[0], types.FunctionType):\n g.__name__ = f[0].__name__\n g.__doc__ = f[0].__doc__\n g.__dict__.update(f[0].__dict__)\n pass\n return g\n \n new_decorator.__name__ = decorator.__name__\n new_decorator.__doc__ = decorator.__doc__\n new_decorator.__dict__.update(decorator.__dict__)\n # Required for Sphinx' automodule.\n new_decorator.__module__ = decorator.__module__\n return new_decorator",
"def parametrized_callable(decorator):\n '''Decorator used to create consistent decorators with arguments.\n Consistent in the meaning that the wrapper do not have to\n care if the wrapped callable is a function or a method,\n it will always receive a valid callable.\n If the decorator is used with a function, the wrapper will\n receive the function itself, but if the decorator is used\n with a method, the wrapper will receive a bound method\n callable directly and the first argument (self) will be removed.\n This allows writing decorators behaving consistently\n with function and method.\n\n Note that when using reflect or annotate module functions,\n depth should be incremented by one.\n\n Example::\n\n @decorator.parametrized_callable\n def mydecorator(original_function, decorator, arguments):\n\n def wrapper(callable, call, arguments):\n # processing\n return callable(call, arguments)\n\n return wrapper\n\n @mydecorator(decorator, arguments)\n def myfunction():\n pass\n\n '''\n\n def meta_decorator(*args, **kwargs):\n return _ConsistentMetaDecorator(decorator, args, kwargs)\n\n return meta_decorator",
"def decorator(caller, _func=None):\n \"\"\"decorator(caller) converts a caller function into a decorator\"\"\"\n if _func is not None: # return a decorated function\n # this is obsolete behavior; you should use decorate instead\n return decorate(_func, caller)\n # else return a decorator function\n defaultargs, defaults = '', ()\n if inspect.isclass(caller):\n name = caller.__name__.lower()\n doc = 'decorator(%s) converts functions/generators into ' \\\n 'factories of %s objects' % (caller.__name__, caller.__name__)\n elif inspect.isfunction(caller):\n if caller.__name__ == '<lambda>':\n name = '_lambda_'\n else:\n name = caller.__name__\n doc = caller.__doc__\n nargs = caller.__code__.co_argcount\n ndefs = len(caller.__defaults__ or ())\n defaultargs = ', '.join(caller.__code__.co_varnames[nargs-ndefs:nargs])\n if defaultargs:\n defaultargs += ','\n defaults = caller.__defaults__\n else: # assume caller is an object with a __call__ method\n name = caller.__class__.__name__.lower()\n doc = caller.__call__.__doc__\n evaldict = dict(_call=caller, _decorate_=decorate)\n dec = FunctionMaker.create(\n '%s(func, %s)' % (name, defaultargs),\n 'if func is None: return lambda func: _decorate_(func, _call, (%s))\\n'\n 'return _decorate_(func, _call, (%s))' % (defaultargs, defaultargs),\n evaldict, doc=doc, module=caller.__module__, __wrapped__=caller)\n if defaults:\n dec.__defaults__ = (None,) + defaults\n return dec",
"def parametrized_function(decorator):\n '''Decorator used to create decorators with arguments.\n Should be used with function returning another function\n that will be called with the original function has the first\n parameter.\n No difference are made between method and function,\n so the wrapper function will have to know if the first\n argument is an instance (self).\n\n Note that when using reflect or annotate module functions,\n depth should be incremented by one.\n\n Example::\n\n @decorator.parametrized_function\n def mydecorator(function_original, decorator, arguments):\n\n def wrapper(call, arguments):\n # processing\n return function_original(call, arguments)\n\n return wrapper\n\n @mydecorator(decorator, arguments)\n def myfunction():\n pass\n\n '''\n\n def meta_decorator(*args, **kwargs):\n return _NormalMetaDecorator(decorator, args, kwargs)\n\n return meta_decorator",
"def decorator(wrapped_decorator):\n \"\"\"Converts a function into a decorator that optionally accepts keyword\n arguments in its declaration.\n\n Example usage:\n @utils.decorator\n def decorator(func, args, kwds, op1=None):\n ... apply op1 ...\n return func(*args, **kwds)\n\n # Form (1), vanilla\n @decorator\n foo(...)\n ...\n\n # Form (2), with options\n @decorator(op1=5)\n foo(...)\n ...\n\n Args:\n wrapped_decorator: A function that accepts positional args (func, args,\n kwds) and any additional supported keyword arguments.\n\n Returns:\n A decorator with an additional 'wrapped_decorator' property that is set to\n the original function.\n \"\"\"\n def helper(_func=None, **options):\n def outer_wrapper(func):\n @wrapping(func)\n def inner_wrapper(*args, **kwds):\n return wrapped_decorator(func, args, kwds, **options)\n return inner_wrapper\n\n if _func is None:\n # Form (2), with options.\n return outer_wrapper\n\n # Form (1), vanilla.\n if options:\n # Don't allow @decorator(foo, op1=5).\n raise TypeError('positional arguments not supported')\n return outer_wrapper(_func)\n helper.wrapped_decorator = wrapped_decorator\n return helper",
"def decorator(decor):\n \"\"\"Decorator for decorators (sic), written either as classes or functions.\n\n In either case, the decorator ``decor`` must be \"doubly-callable\":\n\n * for classes, this means implementing ``__call__`` method\n in addition to possible ``__init__``\n * for functions, this means returning a function that acts\n as an actual decorator, i.e. taking a function and returning\n its decorated version\n\n Although it works for any decorator, it's useful mainly for those\n that should take optional arguments. If the decorator is adorned\n with ``@decorator``, it's possible to use it without the pair of\n empty parentheses::\n\n @enhanced # rather than @enhanced()\n def foo():\n pass\n\n when we don't want to pass any arguments to it.\n\n .. note::\n\n :func:`decorator` makes decorator appicable for both\n functions and classes. If you want to restrict the type of\n decorator targets, use :func:`function_decorator`,\n :func:`method_decorator` or :func:`class_decorator`.\n \"\"\"\n ensure_callable(decor)\n return _wrap_decorator(decor, \"functions or classes\",\n or_(inspect.isfunction, inspect.isclass))"
] |
[
0.7788115739822388,
0.7530264854431152,
0.7509573698043823,
0.7409578561782837,
0.7368158102035522,
0.7360296249389648,
0.7338312864303589,
0.7187433242797852,
0.7176416516304016,
0.7164402008056641,
0.7146251797676086,
0.7116553783416748
] |
A decorator which can be used to mark functions
as deprecated. It will result in a deprecation warning being shown
when the function is used.
|
def deprecated(func, msg=None):
"""
    A decorator which can be used to mark functions
    as deprecated. It will result in a deprecation warning being shown
    when the function is used.
    """
    message = msg or "Use of deprecated function '{}'.".format(func.__name__)
@functools.wraps(func)
def wrapper_func(*args, **kwargs):
warnings.warn(message, DeprecationWarning, stacklevel=2)
return func(*args, **kwargs)
return wrapper_func
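
A short usage sketch (the function name is illustrative); the filter line makes the warning visible regardless of the interpreter's default warning filters.

import warnings

warnings.simplefilter('always', DeprecationWarning)

@deprecated
def old_sum(a, b):
    return a + b

old_sum(1, 2)  # emits: DeprecationWarning: Use of deprecated function 'old_sum'.
# A custom message can be bound with functools.partial(deprecated, msg="...").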
|
[
"def deprecated(func):\n '''This is a decorator which can be used to mark functions\n as deprecated. It will result in a warning being emitted\n when the function is used.\n \n https://wiki.python.org/moin/PythonDecoratorLibrary#Generating_Deprecation_Warnings\n '''\n def new_func(*args, **kwargs):\n warn(\"Call to deprecated function {}.\".format(func.__name__),\n category=DeprecationWarning)\n return func(*args, **kwargs)\n new_func.__name__ = func.__name__\n new_func.__doc__ = func.__doc__\n new_func.__dict__.update(func.__dict__)\n return new_func",
"def deprecated(func):\n \"\"\"This is a decorator which can be used to mark functions\n as deprecated. It will result in a warning being emmitted\n when the function is used.\"\"\"\n def newFunc(*args, **kwargs):\n warnings.warn(\"Call to deprecated function {}.\".format(func.__name__),\n category=DeprecationWarning, stacklevel=2)\n return func(*args, **kwargs)\n newFunc.__name__ = func.__name__\n newFunc.__doc__ = func.__doc__\n newFunc.__dict__.update(func.__dict__)\n return newFunc",
"def deprecated(func):\n \"\"\"\n A decorator for marking functions as deprecated. Results in\n a printed warning message when the function is used.\n \"\"\"\n def decorated(*args, **kwargs):\n warnings.warn('Call to deprecated function %s.' % func.__name__,\n category=DeprecationWarning,\n stacklevel=2)\n return func(*args, **kwargs)\n decorated.__name__ = func.__name__\n decorated.__doc__ = func.__doc__\n decorated.__dict__.update(func.__dict__)\n return decorated",
"def deprecated(new_name: str):\n \"\"\"\n This is a decorator which can be used to mark functions\n as deprecated. It will result in a warning being emitted\n when the function is used.\n \"\"\"\n def decorator(func):\n @wraps(func)\n def new_func(*args, **kwargs):\n warnings.simplefilter('always', DeprecationWarning) # turn off filter\n warnings.warn(\n 'Use {0} instead of {1}, {1} will be removed in the future.'\n .format(new_name, func.__name__),\n category=DeprecationWarning,\n stacklevel=2,\n )\n warnings.simplefilter('default', DeprecationWarning) # reset filter\n return func(*args, **kwargs)\n setattr(new_func, '__deprecated', True)\n return new_func\n return decorator",
"def deprecated(func):\n \"\"\"This is a decorator which can be used to mark functions\n as deprecated. It will result in a warning being emitted\n when the function is used.\"\"\"\n\n @functools.wraps(func)\n def decorated(*args, **kwargs):\n warnings.warn_explicit(\n \"Call to deprecated function {}.\".format(func.__name__),\n category=DeprecationWarning,\n filename=func.__code__.co_filename,\n lineno=func.__code__.co_firstlineno + 1\n )\n\n return func(*args, **kwargs)\n return decorated",
"def deprecated(func: Callable) -> Callable:\n \"\"\"This is a decorator which can be used to mark functions\n as deprecated. It will result in a warning being emitted\n when the function is used.\"\"\"\n @functools.wraps(func)\n def _new_func(*args: Any, **kwargs: Any) -> Any:\n warnings.simplefilter('always', DeprecationWarning) # turn off filter\n warnings.warn(\"Call to deprecated function {}.\".format(func.__name__),\n category=DeprecationWarning,\n stacklevel=2)\n warnings.simplefilter('default', DeprecationWarning) # reset filter\n return func(*args, **kwargs)\n return _new_func",
"def deprecated(func):\n \"\"\"This is a decorator which can be used to mark functions\n as deprecated. It will result in a warning being emitted\n when the function is used.\"\"\"\n\n @functools.wraps(func)\n def new_func(*args, **kwargs):\n if PY3K:\n func_code = func.__code__\n else:\n func_code = func.func_code\n\n warnings.warn_explicit(\n \"Call to deprecated function {}.\".format(func.__name__),\n category=DeprecationWarning,\n filename=func_code.co_filename,\n lineno=func_code.co_firstlineno + 1,\n )\n\n return func(*args, **kwargs)\n\n return new_func",
"def deprecated(func):\n '''This is a decorator which can be used to mark functions\n as deprecated. It will result in a warning being emitted\n when the function is used.'''\n\n import warnings\n\n @functools.wraps(func)\n def new_func(*args, **kwargs):\n if is_python_3:\n code = func.__code__\n else:\n code = func.func_code\n warnings.warn_explicit(\n \"Call to deprecated function {}.\".format(func.__name__),\n category=Warning,\n filename=code.co_filename,\n lineno=code.co_firstlineno + 1\n )\n return func(*args, **kwargs)\n return new_func",
"def deprecated(func):\n \"\"\"This is a decorator which can be used to mark functions\n as deprecated. It will result in a warning being emitted\n when the function is used.\"\"\"\n def newFunc(*args, **kwargs):\n print(\"Call to deprecated function %s.\" % func.__name__)\n return func(*args, **kwargs)\n newFunc.__name__ = func.__name__\n newFunc.__doc__ = func.__doc__\n newFunc.__dict__.update(func.__dict__)\n return newFunc",
"def deprecated(fn):\n \"\"\"Mark a function as deprecated and warn the user on use.\"\"\"\n @functools.wraps(fn)\n def wrapper(*args, **kwargs):\n warnings.warn(fn.__doc__.split('\\n')[0],\n category=DeprecationWarning, stacklevel=2)\n return fn(*args, **kwargs)\n return wrapper",
"def deprecated(message=\"\"):\n \"\"\"\n This is a decorator which can be used to mark functions\n as deprecated. It will result in a warning being emitted\n when the function is used first time and filter is set for show DeprecationWarning.\n \"\"\"\n def decorator_wrapper(func):\n \"\"\"\n Generate decorator wrapper function\n :param func: function to be decorated\n :return: wrapper\n \"\"\"\n @functools.wraps(func)\n def function_wrapper(*args, **kwargs):\n \"\"\"\n Wrapper which recognize deprecated line from source code\n :param args: args for actual function\n :param kwargs: kwargs for actual functions\n :return: something that actual function might returns\n \"\"\"\n current_call_source = '|'.join(traceback.format_stack(inspect.currentframe()))\n if current_call_source not in function_wrapper.last_call_source:\n warnings.warn(\"Function {} is now deprecated! {}\".format(func.__name__, message),\n category=DeprecationWarning, stacklevel=2)\n function_wrapper.last_call_source.add(current_call_source)\n\n return func(*args, **kwargs)\n\n function_wrapper.last_call_source = set()\n\n return function_wrapper\n return decorator_wrapper",
"def deprecated(message=None):\n \"\"\"A decorator for deprecated functions\"\"\"\n def _decorator(func, message=message):\n if message is None:\n message = '%s is deprecated' % func.__name__\n\n def newfunc(*args, **kwds):\n warnings.warn(message, DeprecationWarning, stacklevel=2)\n return func(*args, **kwds)\n return newfunc\n return _decorator"
] |
[
0.9050257802009583,
0.9044513702392578,
0.9039638042449951,
0.9005829691886902,
0.8998395204544067,
0.8976660966873169,
0.8966753482818604,
0.8894274234771729,
0.8842666745185852,
0.8782861828804016,
0.8724506497383118,
0.8719274401664734
] |
Create a handler for logging to ``target``
|
def create_handler(target: str):
"""Create a handler for logging to ``target``"""
if target == 'stderr':
return logging.StreamHandler(sys.stderr)
elif target == 'stdout':
return logging.StreamHandler(sys.stdout)
else:
return logging.handlers.WatchedFileHandler(filename=target)
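
A minimal wiring sketch; the file path is illustrative, and note that the function itself relies on `logging`, `logging.handlers` and `sys` being imported.

import logging
import logging.handlers
import sys

root = logging.getLogger()
root.addHandler(create_handler('stderr'))        # console stream handler
root.addHandler(create_handler('/tmp/app.log'))  # hypothetical path

WatchedFileHandler reopens the file if it is moved or removed, which makes the file branch safe to combine with external rotation tools such as logrotate.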
|
[
"def log_into(self, target, before_priv_drop=True):\n \"\"\"Simple file or UDP logging.\n\n .. note:: This doesn't require any Logger plugin and can be used\n if no log routing is required.\n\n :param str|unicode target: Filepath or UDP address.\n\n :param bool before_priv_drop: Whether to log data before or after privileges drop.\n\n \"\"\"\n command = 'logto'\n\n if not before_priv_drop:\n command += '2'\n\n self._set(command, target)\n\n return self._section",
"def get_handler(name, logname, args):\n \"\"\"\n Retrieve a logger given its name and initialize it by passing it\n appropriate arguments from the configuration. (Unnecessary\n arguments will be logged, and missing required arguments will\n cause a TypeError to be thrown.) The result should be a callable.\n\n :param name: The name of the handler to look up.\n :param logname: The name of the log section.\n :param args: A dictionary of arguments for the handler.\n\n :returns: A callable taking a single argument: the message to be\n logged.\n \"\"\"\n\n factory = _lookup_handler(name)\n\n # What arguments are we passing to the handler?\n available = set(args.keys())\n\n # Now, let's introspect the factory to pass it the right arguments\n if inspect.isclass(factory):\n argspec = inspect.getargspec(factory.__init__)\n ismethod = True\n else:\n argspec = inspect.getargspec(factory)\n ismethod = inspect.ismethod(factory)\n\n # Now, let's select the arguments we'll be passing in from the\n # args dictionary\n argnames = argspec.args[2 + ismethod:]\n recognized = set(argnames)\n\n # Now, which ones are required?\n if argspec.defaults:\n required = set(argnames[:len(argnames) - len(argspec.defaults)])\n else:\n required = set(argnames)\n missing = required - available\n if missing:\n raise TypeError(\"Missing required parameters: %s\" %\n ', '.join(repr(arg) for arg in sorted(missing)))\n\n # OK, let's determine the argument types\n type_map = getattr(factory, '_bark_types', {})\n\n # Now go convert the arguments\n kwargs = {}\n additional = set()\n for arg, value in args.items():\n # Should we ignore it?\n if not argspec.keywords and arg not in recognized:\n additional.add(arg)\n continue\n\n # Translate the value, first\n target_type = type_map.get(arg, lambda x: x)\n if target_type is bool:\n target_type = boolean\n try:\n value = target_type(value)\n except ValueError as exc:\n raise ValueError(\"Argument %r: invalid %s value %r: %s\" %\n (arg, target_type.__name__, value, exc))\n\n # OK, save it\n kwargs[arg] = value\n\n # Log any unused arguments\n if additional:\n LOG.warn(\"Unused arguments for handler of type %r for log %r: %s\" %\n (name, logname, ', '.join(repr(arg)\n for arg in sorted(additional))))\n\n # OK, we have now constructed the arguments to feed to the handler\n # factory; call it and return the result\n return factory(name, logname, **kwargs)",
"def get_handler(progname, address=None, proto=None, facility=None,\n fmt=None, datefmt=None, **_):\n \"\"\"Helper function to create a Syslog handler.\n\n See `ulogger.syslog.SyslogHandlerBuilder` for arguments and\n supported keyword arguments.\n\n Returns:\n (obj): Instance of `logging.SysLogHandler`\n \"\"\"\n builder = SyslogHandlerBuilder(\n progname, address=address, proto=proto, facility=facility,\n fmt=fmt, datefmt=datefmt)\n return builder.get_handler()",
"def http_handler(name, logname, host, url, method=\"GET\"):\n \"\"\"\n A Bark logging handler logging output to an HTTP server, using\n either GET or POST semantics.\n\n Similar to logging.handlers.HTTPHandler.\n \"\"\"\n\n return wrap_log_handler(logging.handlers.HTTPHandler(\n host, url, method=method))",
"def set_irc_targets(self, bot, *targets):\n \"\"\"Add a irc Handler using bot and log to targets (can be nicks or\n channels:\n\n ..\n >>> bot = None\n\n .. code-block:: python\n\n >>> log = logging.getLogger('irc.mymodule')\n >>> log.set_irc_targets(bot, '#chan', 'admin')\n \"\"\"\n # get formatter initialized by config (usualy on a NullHandler)\n ll = logging.getLogger('irc')\n formatter = ll.handlers[0].formatter\n # add a handler for the sub logger\n handler = Handler(bot, *targets)\n handler.setFormatter(formatter)\n self.addHandler(handler)",
"def handle_scheduled(self, target):\n\t\t\"\"\"\n\t\ttarget is a Handler or simple callable\n\t\t\"\"\"\n\t\tif not isinstance(target, Handler):\n\t\t\treturn target()\n\n\t\treturn self._handle_scheduled(target)",
"def smtp_handler(name, logname, mailhost, fromaddr, toaddrs, subject,\n credentials=None):\n \"\"\"\n A Bark logging handler logging output via SMTP. To specify a\n non-standard SMTP port, use the \"host:port\" format. To specify\n multiple \"To\" addresses, separate them with commas. To specify\n authentication credentials, supply a \"username:password\".\n\n Similar to logging.handlers.SMTPHandler.\n \"\"\"\n\n return wrap_log_handler(logging.handlers.SMTPHandler(\n mailhost, fromaddr, toaddrs, subject, credentials=credentials))",
"def get_target(cls, scheme, path, fragment, username,\n password, hostname, port, query, **kwargs):\n \"\"\"Override this method to use values from the parsed uri to initialize\n the expected target.\n\n \"\"\"\n raise NotImplementedError(\"get_target must be overridden\")",
"def register_handler(self, target=None):\n \"\"\"Decorator for a function to be used as a signal handler.\n\n :param str|unicode target: Where this signal will be delivered to. Default: ``worker``.\n\n * ``workers`` - run the signal handler on all the workers\n * ``workerN`` - run the signal handler only on worker N\n * ``worker``/``worker0`` - run the signal handler on the first available worker\n * ``active-workers`` - run the signal handlers on all the active [non-cheaped] workers\n\n * ``mules`` - run the signal handler on all of the mules\n * ``muleN`` - run the signal handler on mule N\n * ``mule``/``mule0`` - run the signal handler on the first available mule\n\n * ``spooler`` - run the signal on the first available spooler\n * ``farmN/farm_XXX`` - run the signal handler in the mule farm N or named XXX\n\n * http://uwsgi.readthedocs.io/en/latest/Signals.html#signals-targets\n\n \"\"\"\n target = target or 'worker'\n sign_num = self.num\n\n def wrapper(func):\n\n _LOG.debug(\"Registering '%s' as signal '%s' handler ...\", func.__name__, sign_num)\n\n uwsgi.register_signal(sign_num, target, func)\n\n return func\n\n return wrapper",
"def _configure_logger_handler(cls, log_dest, log_filename):\n \"\"\"\n Return a logging handler for the specified `log_dest`, or `None` if\n `log_dest` is `None`.\n \"\"\"\n\n if log_dest is None:\n return None\n\n msg_format = '%(asctime)s-%(name)s-%(message)s'\n\n if log_dest == 'stderr':\n # Note: sys.stderr is the default stream for StreamHandler\n handler = logging.StreamHandler()\n handler.setFormatter(logging.Formatter(msg_format))\n elif log_dest == 'file':\n if not log_filename:\n raise ValueError(\"Log filename is required if log destination \"\n \"is 'file'\")\n handler = logging.FileHandler(log_filename, encoding=\"UTF-8\")\n handler.setFormatter(logging.Formatter(msg_format))\n else:\n raise ValueError(\n _format(\"Invalid log destination: {0!A}; Must be one of: \"\n \"{1!A}\", log_dest, LOG_DESTINATIONS))\n\n return handler",
"def set_target(self, target):\n '''\n :param target: target object\n '''\n self.target = target\n if target:\n self.target.set_fuzzer(self)\n return self",
"def on_go(self, target):\n \"\"\"\n RUN target WHEN SIGNALED\n \"\"\"\n if not target:\n Log.error(\"expecting target\")\n\n with self.lock:\n if not self._go:\n DEBUG and self._name and Log.note(\"Adding target to signal {{name|quote}}\", name=self.name)\n\n if not self.job_queue:\n self.job_queue = [target]\n else:\n self.job_queue.append(target)\n return\n\n (DEBUG_SIGNAL) and Log.note(\"Signal {{name|quote}} already triggered, running job immediately\", name=self.name)\n target()"
] |
[
0.7472923994064331,
0.7221838235855103,
0.7214196920394897,
0.7205566763877869,
0.7197719812393188,
0.7029629945755005,
0.7028162479400635,
0.702357828617096,
0.7023149132728577,
0.7014948725700378,
0.7007095217704773,
0.6984788775444031
] |
Initialise basic logging facilities
|
def initialise_logging(level: str, target: str, short_format: bool):
"""Initialise basic logging facilities"""
try:
log_level = getattr(logging, level)
except AttributeError:
raise SystemExit(
"invalid log level %r, expected any of 'DEBUG', 'INFO', 'WARNING', 'ERROR' or 'CRITICAL'" % level
)
handler = create_handler(target=target)
logging.basicConfig(
level=log_level,
format='%(asctime)-15s (%(process)d) %(message)s' if not short_format else '%(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
handlers=[handler]
)
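
For illustration, a typical start-up call might look like this (the message text is arbitrary):

initialise_logging(level='INFO', target='stderr', short_format=False)
logging.info('service started')  # e.g. "2024-01-01 12:00:00 (1234) service started"
# An unknown level name raises SystemExit with a helpful message:
# initialise_logging(level='LOUD', target='stdout', short_format=True)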
|
[
"def init_logging():\n \"\"\"Initialise Python logging.\"\"\"\n fmt = '%(asctime)s.%(msecs)03d | %(name)-60s | %(levelname)-7s ' \\\n '| %(message)s'\n logging.basicConfig(format=fmt, datefmt='%H:%M:%S', level=logging.DEBUG)",
"def init_logging(to_file=False, logfile=None, default_logfile='/tmp/resync.log',\n human=True, verbose=False, eval_mode=False,\n default_logger='client', extra_loggers=None):\n \"\"\"Initialize logging.\n\n Use of log levels:\n DEBUG - very verbose, for evaluation of output (-e)\n INFO - verbose, only seen by users if they ask for it (-v)\n WARNING - messages output messages to console\n\n Logging to a file: If to_file is True then output will be written to\n a file. This will be logfile if set, else default_logfile (which may\n also be overridden).\n \"\"\"\n fmt = '%(asctime)s | %(name)s | %(levelname)s | %(message)s'\n formatter = UTCFormatter(fmt)\n\n if human:\n # Create a special handler designed just for human readable output\n hh = logging.StreamHandler()\n hh.setLevel(logging.INFO if (verbose) else logging.WARNING)\n hh.setFormatter(logging.Formatter(fmt='%(message)s'))\n if to_file:\n if (logfile is None):\n logfile = default_logfile\n fh = logging.FileHandler(filename=logfile, mode='a')\n fh.setFormatter(formatter)\n fh.setLevel(logging.DEBUG if (eval_mode) else logging.INFO)\n\n loggers = [default_logger, 'resync']\n if (extra_loggers is not None):\n for logger in extra_loggers:\n loggers.append(logger)\n for logger in loggers:\n log = logging.getLogger(logger)\n log.setLevel(logging.DEBUG) # control at handler instead\n if human:\n log.addHandler(hh)\n if to_file:\n log.addHandler(fh)\n\n log = logging.getLogger(default_logger)\n if (to_file):\n log.info(\"Writing detailed log to %s\" % (logfile))",
"def init_logging(log_filename, verbose, quiet):\n \"\"\"Set up logging with default parameters:\n * default console logging level is INFO\n * ERROR, WARNING and CRITICAL are redirected to stderr\n\n Args:\n log_filename (str): if set, will write DEBUG log there\n verbose (bool): DEBUG level in console, overrides 'quiet'\n quiet (bool): WARNING level in console\n \"\"\"\n # TODO: consider making one verbosity parameter instead of two mutually exclusive\n # TODO: default values for parameters\n logger = logging.getLogger('')\n logger.setLevel(logging.DEBUG)\n\n # add file handler if needed\n if log_filename:\n file_handler = logging.FileHandler(log_filename)\n file_handler.setLevel(logging.DEBUG)\n # TODO: initialize all formatters in the beginning of this function\n file_handler.setFormatter(\n logging.Formatter(\n fmt=\"%(asctime)s [%(levelname)s] %(name)s %(filename)s:%(lineno)d\\t%(message)s\"\n ))\n logger.addHandler(file_handler)\n\n # console stdout and stderr handlers\n console_handler = logging.StreamHandler(sys.stdout)\n stderr_hdl = logging.StreamHandler(sys.stderr)\n\n # formatters\n fmt_verbose = logging.Formatter(\n fmt=\"%(asctime)s [%(levelname)s] %(name)s %(filename)s:%(lineno)d\\t%(message)s\",\n datefmt='%Y-%m-%d,%H:%M:%S.%f'\n )\n fmt_regular = logging.Formatter(\n \"%(asctime)s [%(levelname).4s] [%(filename).8s] %(message)s\", \"%H:%M:%S\")\n\n # set formatters and log levels\n if verbose:\n console_handler.setLevel(logging.DEBUG)\n console_handler.setFormatter(fmt_verbose)\n stderr_hdl.setFormatter(fmt_verbose)\n elif quiet:\n console_handler.setLevel(logging.WARNING)\n console_handler.setFormatter(fmt_regular)\n stderr_hdl.setFormatter(fmt_regular)\n else:\n console_handler.setLevel(logging.INFO)\n console_handler.setFormatter(fmt_regular)\n stderr_hdl.setFormatter(fmt_regular)\n\n # TODO: do we really need these to be redirected?\n # redirect ERROR, WARNING and CRITICAL to sterr\n f_err = SingleLevelFilter(logging.ERROR, True)\n f_warn = SingleLevelFilter(logging.WARNING, True)\n f_crit = SingleLevelFilter(logging.CRITICAL, True)\n console_handler.addFilter(f_err)\n console_handler.addFilter(f_warn)\n console_handler.addFilter(f_crit)\n logger.addHandler(console_handler)\n\n f_info = SingleLevelFilter(logging.INFO, True)\n f_debug = SingleLevelFilter(logging.DEBUG, True)\n stderr_hdl.addFilter(f_info)\n stderr_hdl.addFilter(f_debug)\n logger.addHandler(stderr_hdl)",
"def init_base_logging(directory=\"./log\", verbose=0, silent=False, color=False, no_file=False,\n truncate=True, config_location=None):\n \"\"\"\n Initialize the Icetea logging by creating a directory to store logs\n for this run and initialize the console logger for Icetea itself.\n\n :param directory: Directory where to store the resulting logs\n :param verbose: Log level as integer\n :param silent: Log level warning\n :param no_file: Log to file\n :param color: Log coloring\n :param truncate: Log truncating\n :param config_location: Location of config file.\n :raises IOError if unable to read configuration file.\n :raises OSError if log path already exists.\n :raises ImportError if colored logging was requested but coloredlogs module is not installed.\n \"\"\"\n global LOGPATHDIR\n global STANDALONE_LOGGING\n global TRUNCATE_LOG\n global COLOR_ON\n global SILENT_ON\n global VERBOSE_LEVEL\n\n if config_location:\n try:\n _read_config(config_location)\n except IOError as error:\n raise IOError(\"Unable to read from configuration file {}: {}\".format(config_location,\n error))\n except jsonschema.SchemaError as error:\n raise jsonschema.SchemaError(\"Logging configuration schema \"\n \"file malformed: {}\".format(error))\n\n LOGPATHDIR = os.path.join(directory, datetime.datetime.now().strftime(\n \"%Y-%m-%d_%H%M%S.%f\").rstrip(\"0\"))\n\n # Initialize the simple console logger for IceteaManager\n icetealogger = logging.getLogger(\"icetea\")\n icetealogger.propagate = False\n icetealogger.setLevel(logging.DEBUG)\n stream_handler = logging.StreamHandler()\n formatter = BenchFormatter(LOGGING_CONFIG.get(\"IceteaManager\").get(\"format\"),\n LOGGING_CONFIG.get(\"IceteaManager\").get(\"dateformat\"))\n if not color:\n stream_handler.setFormatter(formatter)\n elif color and not COLORS:\n raise ImportError(\"Missing coloredlogs module. Please install with \"\n \"pip to use colors in logging.\")\n else:\n\n class ColoredBenchFormatter(coloredlogs.ColoredFormatter):\n \"\"\"\n This is defined as an internal class here because coloredlogs is and optional\n dependency.\n \"\"\"\n converter = datetime.datetime.fromtimestamp\n\n def formatTime(self, record, datefmt=None):\n date_and_time = self.converter(record.created, tz=pytz.utc)\n if \"%F\" in datefmt:\n msec = \"%03d\" % record.msecs\n datefmt = datefmt.replace(\"%F\", msec)\n str_time = date_and_time.strftime(datefmt)\n return str_time\n\n COLOR_ON = color\n stream_handler.setFormatter(ColoredBenchFormatter(\n LOGGING_CONFIG.get(\"IceteaManager\").get(\"format\"),\n LOGGING_CONFIG.get(\"IceteaManager\").get(\"dateformat\"),\n LEVEL_FORMATS, FIELD_STYLES))\n\n SILENT_ON = silent\n VERBOSE_LEVEL = verbose\n if not no_file:\n try:\n os.makedirs(LOGPATHDIR)\n except OSError:\n raise OSError(\"Log path %s already exists.\" % LOGPATHDIR)\n filename = LOGGING_CONFIG.get(\"IceteaManager\").get(\"file\").get(\"name\", \"icetea.log\")\n icetealogger = _add_filehandler(icetealogger, get_base_logfilename(filename),\n formatter, \"IceteaManager\")\n if verbose and not silent:\n stream_handler.setLevel(logging.DEBUG)\n elif silent:\n stream_handler.setLevel(logging.WARN)\n else:\n stream_handler.setLevel(getattr(logging, LOGGING_CONFIG.get(\"IceteaManager\").get(\"level\")))\n icetealogger.addHandler(stream_handler)\n TRUNCATE_LOG = truncate\n if TRUNCATE_LOG:\n icetealogger.addFilter(ContextFilter())\n STANDALONE_LOGGING = False",
"def init_logging(stream=sys.stderr, filepath=None,\n format='%(asctime).19s [%(levelname)s] %(name)s: %(message)s'):\n \"\"\"\n Setup logging for the microcache module, but only do it once!\n\n :param stream: stream to log to (defaults to sys.stderr)\n :param filepath: path to a file to log to as well (defaults to None)\n :param format: override the default format with whatever you like\n \"\"\"\n if not (len(logger.handlers) == 1 and isinstance(logger.handlers[0], logging.NullHandler)):\n logger.warn('logging has already been initialized, refusing to do it again')\n return\n formatter = logging.Formatter(format)\n if stream is not None:\n handler = logging.StreamHandler(stream=stream)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n if filepath is not None:\n handler = logging.FileHandler(filename=filepath)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.info('successfully initialized logger')",
"def setup_logging(format=\"%(asctime)s - %(levelname)s - %(message)s\", level='INFO'):\n \"\"\"Setup the logging framework with a basic configuration\"\"\"\n try:\n import coloredlogs\n coloredlogs.install(fmt=format, level=level)\n except ImportError:\n logging.basicConfig(format=format, level=level)",
"function initLogging(conf) {\n logger.setLevels({\n error: 0,\n warn: 1,\n info: 2,\n verbose: 3,\n debug: 4,\n silly: 5\n });\n logger.addColors({\n debug: 'green',\n info: 'cyan',\n silly: 'magenta',\n warn: 'yellow',\n error: 'red'\n });\n\n logger.remove(logger.transports.Console);\n logger.add(logger.transports.Console, {\n level: conf.level || DEF_LOGGER_LEVEL,\n silent: conf.silent || DEF_LOGGER_SILENT,\n colorize: true,\n timestamp: true\n });\n}",
"def init_logging(debug=False, logfile=None):\n \"\"\"Initialize logging.\"\"\"\n loglevel = logging.DEBUG if debug else logging.INFO\n logformat = '%(asctime)s %(name)s: %(levelname)s: %(message)s'\n formatter = logging.Formatter(logformat)\n stderr = logging.StreamHandler()\n stderr.setFormatter(formatter)\n\n root = logging.getLogger()\n root.setLevel(loglevel)\n root.handlers = [stderr]\n\n if logfile:\n fhandler = logging.FileHandler(logfile)\n fhandler.setFormatter(formatter)\n root.addHandler(fhandler)",
"def setup_logging(namespace):\n \"\"\"\n setup global logging\n \"\"\"\n\n loglevel = {\n 0: logging.ERROR,\n 1: logging.WARNING,\n 2: logging.INFO,\n 3: logging.DEBUG,\n }.get(namespace.verbosity, logging.DEBUG)\n\n if namespace.verbosity > 1:\n logformat = '%(levelname)s csvpandas %(lineno)s %(message)s'\n else:\n logformat = 'csvpandas %(message)s'\n\n logging.basicConfig(stream=namespace.log, format=logformat, level=loglevel)",
"def ServerLoggingStartupInit():\n \"\"\"Initialize the server logging configuration.\"\"\"\n global LOGGER\n if local_log:\n logging.debug(\"Using local LogInit from %s\", local_log)\n local_log.LogInit()\n logging.debug(\"Using local AppLogInit from %s\", local_log)\n LOGGER = local_log.AppLogInit()\n else:\n LogInit()\n LOGGER = AppLogInit()",
"def init_logging(verbose=False, format='%(asctime)s %(message)s'):\n \"\"\" Common utility for setting up logging in PyCBC.\n\n Installs a signal handler such that verbosity can be activated at\n run-time by sending a SIGUSR1 to the process.\n \"\"\"\n def sig_handler(signum, frame):\n logger = logging.getLogger()\n log_level = logger.level\n if log_level == logging.DEBUG:\n log_level = logging.WARN\n else:\n log_level = logging.DEBUG\n logging.warn('Got signal %d, setting log level to %d',\n signum, log_level)\n logger.setLevel(log_level)\n\n signal.signal(signal.SIGUSR1, sig_handler)\n\n if verbose:\n initial_level = logging.DEBUG\n else:\n initial_level = logging.WARN\n logging.getLogger().setLevel(initial_level)\n logging.basicConfig(format=format, level=initial_level)",
"def init_logging(settings):\n '''Set up logger'''\n lg_format = '%(asctime)s : - %(message)s'\n lg_dateformat = '%Y.%m.%d %H:%M:%S'\n logging.basicConfig(format=lg_format, datefmt=lg_dateformat)\n\n log = get_logger()\n\n handler = logging.handlers.WatchedFileHandler(\n filename=settings['log_file'] \\\n if 'log_file' in settings.keys() else None,\n encoding='utf-8')\n formatter = logging.Formatter(fmt=lg_format, datefmt=lg_dateformat)\n handler.setFormatter(formatter)\n log.addHandler(handler)\n return log"
] |
[
0.7720361351966858,
0.7429147362709045,
0.7419867515563965,
0.7385619282722473,
0.7373452186584473,
0.7355812191963196,
0.7353217005729675,
0.7333750128746033,
0.7330207824707031,
0.7322512269020081,
0.7310941815376282,
0.7308551669120789
] |
Escape all DN special characters found in s
with a back-slash (see RFC 4514, section 2.4)
|
def escape_dn_chars(s):
"""
Escape all DN special characters found in s
with a back-slash (see RFC 4514, section 2.4)
"""
if s:
assert isinstance(s, six.string_types)
s = s.replace('\\', '\\\\')
s = s.replace(',', '\\,')
s = s.replace('+', '\\+')
s = s.replace('"', '\\"')
s = s.replace('<', '\\<')
s = s.replace('>', '\\>')
s = s.replace(';', '\\;')
s = s.replace('=', '\\=')
s = s.replace('\000', '\\\000')
if s[0] == '#' or s[0] == ' ':
s = ''.join(('\\', s))
if s[-1] == ' ':
s = ''.join((s[:-1], '\\ '))
return s
|
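A standalone usage sketch of the escaping rules above; the six isinstance check is dropped so the snippet runs without third-party imports, and the backslash is replaced first so the later escapes are not doubled:

def _escape(s: str) -> str:
    # Hypothetical trimmed copy of escape_dn_chars above (backslash first,
    # then the other RFC 4514 specials, then NUL and edge characters).
    if s:
        for ch in ('\\', ',', '+', '"', '<', '>', ';', '='):
            s = s.replace(ch, '\\' + ch)
        s = s.replace('\000', '\\\000')  # mirrors the example's NUL handling
        if s[0] in ('#', ' '):
            s = '\\' + s
        if s[-1] == ' ':
            s = s[:-1] + '\\ '
    return s

assert _escape('Smith, John') == 'Smith\\, John'
assert _escape('#tagged') == '\\#tagged'
assert _escape('trailing ') == 'trailing\\ '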
[
"public static final String escape(String s) {\n StringBuilder buf = new StringBuilder();\n for (int i=0; i<s.length(); ) {\n int c = Character.codePointAt(s, i);\n i += UTF16.getCharCount(c);\n if (c >= ' ' && c <= 0x007F) {\n if (c == '\\\\') {\n buf.append(\"\\\\\\\\\"); // That is, \"\\\\\"\n } else {\n buf.append((char)c);\n }\n } else {\n boolean four = c <= 0xFFFF;\n buf.append(four ? \"\\\\u\" : \"\\\\U\");\n buf.append(hex(c, four ? 4 : 8));\n }\n }\n return buf.toString();\n }",
"public static String escapeJavaString(String s)\n {\n StringBuilder sb = new StringBuilder();\n\n for (int i = 0; i < s.length(); i++) {\n char ch = s.charAt(i);\n\n switch (ch) {\n case '\\\\':\n sb.append(\"\\\\\\\\\");\n break;\n case '\\n':\n sb.append(\"\\\\n\");\n break;\n case '\\r':\n sb.append(\"\\\\r\");\n break;\n case '\"':\n sb.append(\"\\\\\\\"\");\n break;\n default:\n sb.append(ch);\n }\n }\n\n return sb.toString();\n }",
"public static String escapeUnicode(String s) {\n int len = s.length();\n int i = 0;\n while (i < len) {\n char ch = s.charAt(i);\n if (ch > 255) {\n StringBuilder buf = new StringBuilder();\n buf.append(s.substring(0, i));\n while (i < len) {\n ch = s.charAt(i);\n if (ch > 255) {\n buf.append(\"\\\\u\");\n buf.append(Character.forDigit((ch >> 12) % 16, 16));\n buf.append(Character.forDigit((ch >> 8) % 16, 16));\n buf.append(Character.forDigit((ch >> 4) % 16, 16));\n buf.append(Character.forDigit((ch ) % 16, 16));\n } else {\n buf.append(ch);\n }\n i++;\n }\n s = buf.toString();\n } else {\n i++;\n }\n }\n return s;\n }",
"@Nonnull\n public static String escapeString(@Nonnull String s) {\n // We replace double quotes with a back slash followed\n // by a double quote. We replace backslashes with a double\n // backslash\n if (s.indexOf('\\\"') == -1 && s.indexOf('\\\\') == -1) {\n return s;\n }\n StringBuilder sb = new StringBuilder(s.length() + 20);\n for (int i = 0; i < s.length(); i++) {\n char ch = s.charAt(i);\n if (ch == '\\\\') {\n sb.append(\"\\\\\\\\\");\n } else if (ch == '\\\"') {\n sb.append(\"\\\\\\\"\");\n } else {\n sb.append(ch);\n }\n }\n return verifyNotNull(sb.toString());\n }",
"public static String unescape(String s) {\n StringBuilder buf = new StringBuilder();\n int[] pos = new int[1];\n for (int i=0; i<s.length(); ) {\n char c = s.charAt(i++);\n if (c == '\\\\') {\n pos[0] = i;\n int e = unescapeAt(s, pos);\n if (e < 0) {\n throw new IllegalArgumentException(\"Invalid escape sequence \" +\n s.substring(i-1, Math.min(i+8, s.length())));\n }\n buf.appendCodePoint(e);\n i = pos[0];\n } else {\n buf.append(c);\n }\n }\n return buf.toString();\n }",
"public static String escapeJava(String s) {\n\t\tif (s == null) {\n\t\t\treturn null;\n\t\t}\n\t\tStringBuilder sb = new StringBuilder(Math.min(2, s.length() * 3 / 2));\n\t\tfor (int i = 0; i < s.length(); ++i) {\n\t\t\tchar c = s.charAt(i);\n\t\t\tif (c == '\\b') {\n\t\t\t\tsb.append(\"\\\\b\");\n\t\t\t} else if (c == '\\n') {\n\t\t\t\tsb.append(\"\\\\n\");\n\t\t\t} else if (c == '\\t') {\n\t\t\t\tsb.append(\"\\\\t\");\n\t\t\t} else if (c == '\\f') {\n\t\t\t\tsb.append(\"\\\\f\");\n\t\t\t} else if (c == '\\r') {\n\t\t\t\tsb.append(\"\\\\r\");\n\t\t\t} else if (c == '\\\\') {\n\t\t\t\tsb.append(\"\\\\\\\\\");\n\t\t\t} else if (c == '\"') {\n\t\t\t\tsb.append(\"\\\\\\\"\");\n\t\t\t} else if (c < 32 || c > 0x7f) {\n\t\t\t\tsb.append(\"\\\\u\");\n\t\t\t\tsb.append(hex4(c));\n\t\t\t} else {\n\t\t\t\tsb.append(c);\n\t\t\t}\n\t\t}\n\t\treturn sb.toString();\n\t}",
"static protected String escapeForRegExp(String s){\r\n\t\tString r = s; \r\n\t\tif (r != null && r.length() > 0){\r\n\t\t\tfor (String c: new String[]{\"\\\\\", \"|\", \"&\", \"?\", \"*\", \"+\", \"{\", \"}\",\r\n\t\t\t\t\t\"[\", \"]\", \"~\", \".\", \"#\", \"@\", \"\\\"\", \"(\", \")\", \"<\", \">\",\r\n\t\t\t\t\t\"^\"}){\r\n\t\t\t\tr = r.replace(c, \"\\\\\" + c);\r\n\t\t\t}\r\n\t\t}\r\n\t\treturn r;\r\n\t}",
"private static String escapeJavaScriptChars(String s) {\n StringBuilder sb = new StringBuilder();\n for (int i = 0; i < s.length(); i++) {\n char ch = s.charAt(i);\n switch (ch) {\n case '\\b':\n sb.append(\"\\\\b\");\n break;\n case '\\t':\n sb.append(\"\\\\t\");\n break;\n case '\\n':\n sb.append(\"\\\\n\");\n break;\n case '\\f':\n sb.append(\"\\\\f\");\n break;\n case '\\r':\n sb.append(\"\\\\r\");\n break;\n case '\"':\n sb.append(\"\\\\\\\"\");\n break;\n case '\\'':\n sb.append(\"\\\\\\'\");\n break;\n case '\\\\':\n sb.append(\"\\\\\\\\\");\n break;\n default:\n if (ch < 32 || ch >= 127) {\n sb.append(String.format(\"\\\\u%04X\", (int)ch));\n } else {\n sb.append(ch);\n }\n break;\n }\n }\n return sb.toString();\n }",
"protected String escape(String s) {\r\n\t\tif(s == null)\r\n\t\t\treturn null;\r\n StringBuilder sb = new StringBuilder();\r\n escape(s, sb);\r\n return sb.toString();\r\n }",
"private static String stringWithEscapeSubstitutions(final String s) {\n\t\tfinal StringBuffer ret = new StringBuffer();\n\t\tfinal int len = s.length();\n\t\tint indx = 0;\n\t\tint c;\n\t\twhile (indx < len) {\n\t\t\tc = s.charAt(indx);\n\t\t\tif (c == '\\n') {\n\t\t\t\tbreak;\n\t\t\t}\n\n\t\t\tif (c == '\\\\') {\n\t\t\t\tindx++;\n\t\t\t\tif (indx >= len) {\n\t\t\t\t\tret.append((char) c);\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\n\t\t\t\tint cn = s.charAt(indx);\n\n\t\t\t\tif (cn == '\\\\') {\n\t\t\t\t\tret.append('\\\\');\n\t\t\t\t} else if (cn == ' ') {\n\t\t\t\t\tret.append(' ');\n\t\t\t\t} else if (cn == 't') {\n\t\t\t\t\tret.append('\\t');\n\t\t\t\t} else if (cn == 'n') {\n\t\t\t\t\tret.append('\\n');\n\t\t\t\t} else if (cn == 'r') {\n\t\t\t\t\tret.append('\\r');\n\t\t\t\t} else if (cn >= '\\60' && cn <= '\\67') {\n\t\t\t\t\tint escape = cn - '0';\n\t\t\t\t\tindx++;\n\t\t\t\t\tif (indx >= len) {\n\t\t\t\t\t\tret.append((char) escape);\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t\tcn = s.charAt(indx);\n\t\t\t\t\tif (cn >= '\\60' && cn <= '\\67') {\n\t\t\t\t\t\tescape = escape << 3;\n\t\t\t\t\t\tescape = escape | cn - '0';\n\n\t\t\t\t\t\tindx++;\n\t\t\t\t\t\tif (indx >= len) {\n\t\t\t\t\t\t\tret.append((char) escape);\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcn = s.charAt(indx);\n\t\t\t\t\t\tif (cn >= '\\60' && cn <= '\\67') {\n\t\t\t\t\t\t\tescape = escape << 3;\n\t\t\t\t\t\t\tescape = escape | cn - '0';\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tindx--;\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tindx--;\n\t\t\t\t\t}\n\t\t\t\t\tret.append((char) escape);\n\t\t\t\t} else {\n\t\t\t\t\tret.append((char) cn);\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tret.append((char) c);\n\t\t\t}\n\t\t\tindx++;\n\t\t}\n\t\treturn new String(ret);\n\t}",
"public static String escape(CharSequence s) {\r\n\t\tStringBuilder str = new StringBuilder();\r\n\t\tint len = s.length();\r\n\t\tfor (int i = 0; i < len; ++i) {\r\n\t\t\tchar c = s.charAt(i);\r\n\t\t\tif (c == '&') str.append(\"&\");\r\n\t\t\telse if (c == '\"') str.append(\""\");\r\n\t\t\telse if (c == '\\'') str.append(\"'\");\r\n\t\t\telse if (c == '>') str.append(\">\");\r\n\t\t\telse if (c == '<') str.append(\"<\");\r\n\t\t\telse if (c < 32) str.append(\"&#\").append((int)c).append(';');\r\n\t\t\telse str.append(c);\r\n\t\t}\r\n\t\treturn str.toString();\r\n\t}",
"public static String encodeBackslash(CharSequence s, Legal legal) {\n\tStringBuilder b=new StringBuilder((int)(s.length()*1.5));\n\tfor(int i=0;i<s.length();i++) {\n\t\tif(legal.isLegal(s.charAt(i))) {\n\t\t\tb.append(s.charAt(i));\n\t\t} else {\n\t\t\tif(charToBackslash.containsKey(s.charAt(i))) {\n\t\t\t\tb.append(charToBackslash.get(s.charAt(i)));\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tb.append(\"\\\\u\");\n\t\t String hex = Integer.toHexString(s.charAt(i));\n\t\t for(int j=0;j<4-hex.length();j++)\n\t\t b.append('0');\n\t\t b.append(hex);\n\t\t}\n\t}\n\treturn(b.toString());\n }"
] |
[
0.8020768165588379,
0.7789801359176636,
0.7722911238670349,
0.7689322233200073,
0.7657805681228638,
0.7630879282951355,
0.7568450570106506,
0.7559816837310791,
0.7508007884025574,
0.745135486125946,
0.7448720335960388,
0.7414604425430298
] |
This function takes a DN as a string and returns
a decomposed DN. It is the inverse of dn2str().
flags describes the format of the DN.
See also the OpenLDAP man-page ldap_str2dn(3).
|
def str2dn(dn, flags=0):
"""
    This function takes a DN as a string and returns
    a decomposed DN. It is the inverse of dn2str().
    flags describes the format of the DN.
    See also the OpenLDAP man-page ldap_str2dn(3).
"""
# if python2, we need unicode string
if not isinstance(dn, six.text_type):
dn = dn.decode("utf_8")
assert flags == 0
result, i = _distinguishedName(dn, 0)
if result is None:
raise tldap.exceptions.InvalidDN("Cannot parse dn")
if i != len(dn):
raise tldap.exceptions.InvalidDN("Cannot parse dn past %s" % dn[i:])
return result
|
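The decomposed form is a list of RDNs, each a list of (attribute_type, attribute_value, flag) triples; the _distinguishedName parser itself is not shown in this record, so only the expected shape is sketched here:

# Expected result of str2dn('cn=John Doe,dc=example,dc=com'):
expected = [
    [('cn', 'John Doe', 1)],  # one RDN, a single type=value pair
    [('dc', 'example', 1)],
    [('dc', 'com', 1)],       # a multi-valued RDN would hold several triples
]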
[
"def dn2str(dn):\n \"\"\"\n This function takes a decomposed DN as parameter and returns\n a single string. It's the inverse to str2dn() but will always\n return a DN in LDAPv3 format compliant to RFC 4514.\n \"\"\"\n for rdn in dn:\n for atype, avalue, dummy in rdn:\n assert isinstance(atype, six.string_types)\n assert isinstance(avalue, six.string_types)\n assert dummy == 1\n\n return ','.join([\n '+'.join([\n '='.join((atype, escape_dn_chars(avalue or '')))\n for atype, avalue, dummy in rdn])\n for rdn in dn\n ])",
"def explode_dn(dn, notypes=0, flags=0):\n \"\"\"\n explode_dn(dn [, notypes=0]) -> list\n\n This function takes a DN and breaks it up into its component parts.\n The notypes parameter is used to specify that only the component's\n attribute values be returned and not the attribute types.\n \"\"\"\n if not dn:\n return []\n dn_decomp = str2dn(dn, flags)\n rdn_list = []\n for rdn in dn_decomp:\n if notypes:\n rdn_list.append('+'.join([\n escape_dn_chars(avalue or '')\n for atype, avalue, dummy in rdn\n ]))\n else:\n rdn_list.append('+'.join([\n '='.join((atype, escape_dn_chars(avalue or '')))\n for atype, avalue, dummy in rdn\n ]))\n return rdn_list",
"def _ldap_string_prep(self, string):\n \"\"\"\n Implements the internationalized string preparation algorithm from\n RFC 4518. https://tools.ietf.org/html/rfc4518#section-2\n\n :param string:\n A unicode string to prepare\n\n :return:\n A prepared unicode string, ready for comparison\n \"\"\"\n\n # Map step\n string = re.sub('[\\u00ad\\u1806\\u034f\\u180b-\\u180d\\ufe0f-\\uff00\\ufffc]+', '', string)\n string = re.sub('[\\u0009\\u000a\\u000b\\u000c\\u000d\\u0085]', ' ', string)\n if sys.maxunicode == 0xffff:\n # Some installs of Python 2.7 don't support 8-digit unicode escape\n # ranges, so we have to break them into pieces\n # Original was: \\U0001D173-\\U0001D17A and \\U000E0020-\\U000E007F\n string = re.sub('\\ud834[\\udd73-\\udd7a]|\\udb40[\\udc20-\\udc7f]|\\U000e0001', '', string)\n else:\n string = re.sub('[\\U0001D173-\\U0001D17A\\U000E0020-\\U000E007F\\U000e0001]', '', string)\n string = re.sub(\n '[\\u0000-\\u0008\\u000e-\\u001f\\u007f-\\u0084\\u0086-\\u009f\\u06dd\\u070f\\u180e\\u200c-\\u200f'\n '\\u202a-\\u202e\\u2060-\\u2063\\u206a-\\u206f\\ufeff\\ufff9-\\ufffb]+',\n '',\n string\n )\n string = string.replace('\\u200b', '')\n string = re.sub('[\\u00a0\\u1680\\u2000-\\u200a\\u2028-\\u2029\\u202f\\u205f\\u3000]', ' ', string)\n\n string = ''.join(map(stringprep.map_table_b2, string))\n\n # Normalize step\n string = unicodedata.normalize('NFKC', string)\n\n # Prohibit step\n for char in string:\n if stringprep.in_table_a1(char):\n raise ValueError(unwrap(\n '''\n X.509 Name objects may not contain unassigned code points\n '''\n ))\n\n if stringprep.in_table_c8(char):\n raise ValueError(unwrap(\n '''\n X.509 Name objects may not contain change display or\n zzzzdeprecated characters\n '''\n ))\n\n if stringprep.in_table_c3(char):\n raise ValueError(unwrap(\n '''\n X.509 Name objects may not contain private use characters\n '''\n ))\n\n if stringprep.in_table_c4(char):\n raise ValueError(unwrap(\n '''\n X.509 Name objects may not contain non-character code points\n '''\n ))\n\n if stringprep.in_table_c5(char):\n raise ValueError(unwrap(\n '''\n X.509 Name objects may not contain surrogate code points\n '''\n ))\n\n if char == '\\ufffd':\n raise ValueError(unwrap(\n '''\n X.509 Name objects may not contain the replacement character\n '''\n ))\n\n # Check bidirectional step - here we ensure that we are not mixing\n # left-to-right and right-to-left text in the string\n has_r_and_al_cat = False\n has_l_cat = False\n for char in string:\n if stringprep.in_table_d1(char):\n has_r_and_al_cat = True\n elif stringprep.in_table_d2(char):\n has_l_cat = True\n\n if has_r_and_al_cat:\n first_is_r_and_al = stringprep.in_table_d1(string[0])\n last_is_r_and_al = stringprep.in_table_d1(string[-1])\n\n if has_l_cat or not first_is_r_and_al or not last_is_r_and_al:\n raise ValueError(unwrap(\n '''\n X.509 Name object contains a malformed bidirectional\n sequence\n '''\n ))\n\n # Insignificant space handling step\n string = ' ' + re.sub(' +', ' ', string).strip() + ' '\n\n return string",
"function ldap_dn_decode($str) {\n $decoded=array(\",\",\"#\",\"+\",\"<\",\">\",\";\",\"\\\"\",\"=\",\" \",\"\\\\\");\n $encoded=array();\n foreach ($decoded as $id => $char) {\n $encoded[$id]=\"\\\\\".$char;\n }\n $str = str_replace($encoded,$decoded,$str);\n return ($str);\n }",
"function ldap_dn_encode($str) {\n $decoded=array(\"\\\\\",\",\",\"+\",\"<\",\">\",\";\",\"\\\"\",\"=\");\n \n $encoded=array();\n foreach ($decoded as $id => $char) {\n $encoded[$id]=\"\\\\\".$char;\n }\n \n $str = str_replace($decoded,$encoded,$str);\n \n // Leading and trailing space\n if (\" \" == (substr($str,0,1))) {\n $str = \"\\\\ \".$str;\n }\n if (\" \" == (substr($str,-1))) {\n $str = substr($str,0,-1).\"\\\\ \";\n }\n // Leading # (RFC says leading one only)\n if (\"#\" == (substr($str,0,1))) {\n $str = \"\\\\#\".$str;\n }\n return ($str);\n }",
"def rdn_to_dn(changes: Changeset, name: str, base_dn: str) -> Changeset:\n \"\"\" Convert the rdn to a fully qualified DN for the specified LDAP\n connection.\n\n :param changes: The changes object to lookup.\n :param name: rdn to convert.\n :param base_dn: The base_dn to lookup.\n :return: fully qualified DN.\n \"\"\"\n dn = changes.get_value_as_single('dn')\n if dn is not None:\n return changes\n\n value = changes.get_value_as_single(name)\n if value is None:\n raise tldap.exceptions.ValidationError(\n \"Cannot use %s in dn as it is None\" % name)\n if isinstance(value, list):\n raise tldap.exceptions.ValidationError(\n \"Cannot use %s in dn as it is a list\" % name)\n\n assert base_dn is not None\n\n split_base = str2dn(base_dn)\n split_new_dn = [[(name, value, 1)]] + split_base\n\n new_dn = dn2str(split_new_dn)\n\n return changes.set('dn', new_dn)",
"public static function ldap_explode_dn($dn, $options = array('casefold' => 'upper'))\n {\n if (!isset($options['onlyvalues'])) $options['onlyvalues'] = false;\n if (!isset($options['reverse'])) $options['reverse'] = false;\n if (!isset($options['casefold'])) $options['casefold'] = 'upper';\n\n // Escaping of DN and stripping of \"OID.\"\n $dn = self::canonical_dn($dn, array('casefold' => $options['casefold']));\n\n // splitting the DN\n $dn_array = preg_split('/(?<=[^\\\\\\\\]),/', $dn);\n\n // clear wrong splitting (possibly we have split too much)\n // /!\\ Not clear, if this is neccessary here\n //$dn_array = self::correct_dn_splitting($dn_array, ',');\n\n // construct subarrays for multivalued RDNs and unescape DN value\n // also convert to output format and apply casefolding\n foreach ($dn_array as $key => $value) {\n $value_u = self::unescape_dn_value($value);\n $rdns = self::split_rdn_multival($value_u[0]);\n if (count($rdns) > 1) {\n // MV RDN!\n foreach ($rdns as $subrdn_k => $subrdn_v) {\n // Casefolding\n if ($options['casefold'] == 'upper') {\n $subrdn_v = preg_replace_callback(\n \"/^\\w+=/\",\n function ($matches) {\n return strtoupper($matches[0]);\n },\n $subrdn_v\n );\n } else if ($options['casefold'] == 'lower') {\n $subrdn_v = preg_replace_callback(\n \"/^\\w+=/\",\n function ($matches) {\n return strtolower($matches[0]);\n },\n $subrdn_v\n );\n }\n\n if ($options['onlyvalues']) {\n preg_match('/(.+?)(?<!\\\\\\\\)=(.+)/', $subrdn_v, $matches);\n $rdn_ocl = $matches[1];\n $rdn_val = $matches[2];\n $unescaped = self::unescape_dn_value($rdn_val);\n $rdns[$subrdn_k] = $unescaped[0];\n } else {\n $unescaped = self::unescape_dn_value($subrdn_v);\n $rdns[$subrdn_k] = $unescaped[0];\n }\n }\n\n $dn_array[$key] = $rdns;\n } else {\n // normal RDN\n\n // Casefolding\n if ($options['casefold'] == 'upper') {\n $value = preg_replace_callback(\n \"/^\\w+=/\",\n function ($matches) {\n return strtoupper($matches[0]);\n },\n $value\n );\n } else if ($options['casefold'] == 'lower') {\n $value = preg_replace_callback(\n \"/^\\w+=/\",\n function ($matches) {\n return strtolower($matches[0]);\n },\n $value\n );\n }\n\n if ($options['onlyvalues']) {\n preg_match('/(.+?)(?<!\\\\\\\\)=(.+)/', $value, $matches);\n $dn_ocl = $matches[1];\n $dn_val = $matches[2];\n $unescaped = self::unescape_dn_value($dn_val);\n $dn_array[$key] = $unescaped[0];\n } else {\n $unescaped = self::unescape_dn_value($value);\n $dn_array[$key] = $unescaped[0];\n }\n }\n }\n\n if ($options['reverse']) {\n return array_reverse($dn_array);\n } else {\n return $dn_array;\n }\n }",
"public function dnStrToArr($dnStr, $excludeBaseDn = true, $includeAttributes = false)\n {\n if ($excludeBaseDn) {\n return ldap_explode_dn($dnStr, ($includeAttributes ? 0 : 1));\n } else {\n return ldap_explode_dn($this->adldap->getBaseDn().$dnStr, ($includeAttributes ? 0 : 1));\n }\n }",
"def nl_nlmsg_flags2str(flags, buf, _=None):\n \"\"\"Netlink Message Flags Translations.\n\n https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L664\n\n Positional arguments:\n flags -- integer.\n buf -- bytearray().\n\n Keyword arguments:\n _ -- unused.\n\n Returns:\n Reference to `buf`.\n \"\"\"\n del buf[:]\n all_flags = (\n ('REQUEST', libnl.linux_private.netlink.NLM_F_REQUEST),\n ('MULTI', libnl.linux_private.netlink.NLM_F_MULTI),\n ('ACK', libnl.linux_private.netlink.NLM_F_ACK),\n ('ECHO', libnl.linux_private.netlink.NLM_F_ECHO),\n ('ROOT', libnl.linux_private.netlink.NLM_F_ROOT),\n ('MATCH', libnl.linux_private.netlink.NLM_F_MATCH),\n ('ATOMIC', libnl.linux_private.netlink.NLM_F_ATOMIC),\n ('REPLACE', libnl.linux_private.netlink.NLM_F_REPLACE),\n ('EXCL', libnl.linux_private.netlink.NLM_F_EXCL),\n ('CREATE', libnl.linux_private.netlink.NLM_F_CREATE),\n ('APPEND', libnl.linux_private.netlink.NLM_F_APPEND),\n )\n print_flags = []\n for k, v in all_flags:\n if not flags & v:\n continue\n flags &= ~v\n print_flags.append(k)\n if flags:\n print_flags.append('0x{0:x}'.format(flags))\n buf.extend(','.join(print_flags).encode('ascii'))\n return buf",
"def ndarr2str(arr, encoding='ascii'):\n \"\"\" This is used to ensure that the return value of arr.tostring()\n is actually a string. This will prevent lots of if-checks in calling\n code. As of numpy v1.6.1 (in Python 3.2.3), the tostring() function\n still returns type 'bytes', not 'str' as it advertises. \"\"\"\n # be fast, don't check - just assume 'arr' is a numpy array - the tostring\n # call will fail anyway if not\n retval = arr.tostring()\n # would rather check \"if isinstance(retval, bytes)\", but support 2.5.\n # could rm the if PY3K check, but it makes this faster on 2.x.\n if PY3K and not isinstance(retval, str):\n return retval.decode(encoding)\n else: # is str\n return retval",
"static function ldap_escape( $str, $for_dn = false )\n {\n // see: RFC2254\n // http://msdn.microsoft.com/en-us/library/ms675768(VS.85).aspx\n // http://www-03.ibm.com/systems/i/software/ldap/underdn.html\n\n if ( $for_dn )\n {\n $metaChars = array( ',', '=', '+', '<', '>', ';', '\\\\', '\"', '#' );\n $quotedMetaChars = array( '\\2c', '\\3d', '\\2b', '\\3c', '\\3e', '\\3b', '\\5c', '\\22', '\\23' );\n }\n else\n {\n $metaChars = array( '*', '(', ')', '\\\\', chr(0) );\n $quotedMetaChars = array( '\\2a', '\\28', '\\29', '\\5c', '\\00' );\n }\n\n return str_replace( $metaChars, $quotedMetaChars, $str );\n }",
"public static function explodeDn($dn, $removeAttributePrefixes = true)\n {\n $dn = ldap_explode_dn($dn, ($removeAttributePrefixes ? 1 : 0));\n\n if (is_array($dn) && array_key_exists('count', $dn)) {\n foreach ($dn as $rdn => $value) {\n $dn[$rdn] = self::unescape($value);\n }\n }\n\n return $dn;\n }"
] |
[
0.83448725938797,
0.7021855115890503,
0.6918525099754333,
0.6917361617088318,
0.6726309657096863,
0.663023054599762,
0.6523066163063049,
0.6495392322540283,
0.6487255096435547,
0.646568775177002,
0.6458175778388977,
0.645686686038971
] |
This function takes a decomposed DN and returns a single
string. It is the inverse of str2dn() but will always
return a DN in LDAPv3 format, compliant with RFC 4514.
|
def dn2str(dn):
"""
    This function takes a decomposed DN and returns a single
    string. It is the inverse of str2dn() but will always
    return a DN in LDAPv3 format, compliant with RFC 4514.
"""
for rdn in dn:
for atype, avalue, dummy in rdn:
assert isinstance(atype, six.string_types)
assert isinstance(avalue, six.string_types)
assert dummy == 1
return ','.join([
'+'.join([
'='.join((atype, escape_dn_chars(avalue or '')))
for atype, avalue, dummy in rdn])
for rdn in dn
])
|
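The join logic reads inside out: '=' joins each type/value pair, '+' joins the pairs of a multi-valued RDN, and ',' joins the RDNs. A standalone restatement (escaping elided; the real function passes each value through escape_dn_chars):

dn = [[('cn', 'John Doe', 1)], [('dc', 'example', 1)], [('dc', 'com', 1)]]
joined = ','.join(
    '+'.join('='.join((atype, avalue)) for atype, avalue, _ in rdn)
    for rdn in dn)
assert joined == 'cn=John Doe,dc=example,dc=com'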
[
"def str2dn(dn, flags=0):\n \"\"\"\n This function takes a DN as string as parameter and returns\n a decomposed DN. It's the inverse to dn2str().\n\n flags describes the format of the dn\n\n See also the OpenLDAP man-page ldap_str2dn(3)\n \"\"\"\n\n # if python2, we need unicode string\n if not isinstance(dn, six.text_type):\n dn = dn.decode(\"utf_8\")\n\n assert flags == 0\n result, i = _distinguishedName(dn, 0)\n if result is None:\n raise tldap.exceptions.InvalidDN(\"Cannot parse dn\")\n if i != len(dn):\n raise tldap.exceptions.InvalidDN(\"Cannot parse dn past %s\" % dn[i:])\n return result",
"def explode_dn(dn, notypes=0, flags=0):\n \"\"\"\n explode_dn(dn [, notypes=0]) -> list\n\n This function takes a DN and breaks it up into its component parts.\n The notypes parameter is used to specify that only the component's\n attribute values be returned and not the attribute types.\n \"\"\"\n if not dn:\n return []\n dn_decomp = str2dn(dn, flags)\n rdn_list = []\n for rdn in dn_decomp:\n if notypes:\n rdn_list.append('+'.join([\n escape_dn_chars(avalue or '')\n for atype, avalue, dummy in rdn\n ]))\n else:\n rdn_list.append('+'.join([\n '='.join((atype, escape_dn_chars(avalue or '')))\n for atype, avalue, dummy in rdn\n ]))\n return rdn_list",
"public static String getRDN(String DN) {\n if (DN == null || DN.trim().length() == 0) {\n return DN;\n }\n String RDN = null;\n try {\n LdapName name = new LdapName(DN);\n if (name.size() == 0) {\n return DN;\n }\n RDN = name.get(name.size() - 1);\n } catch (InvalidNameException e) {\n e.getMessage();\n DN = DN.trim();\n int pos1 = DN.indexOf(',');\n if (DN.charAt(pos1 - 1) == '\\\\') {\n pos1 = DN.indexOf(pos1, ',');\n }\n if (pos1 > -1) {\n RDN = DN.substring(0, pos1).trim();\n } else {\n RDN = DN;\n }\n }\n return RDN;\n }",
"def try_get_dn_string(subject, shorten=False):\n \"\"\"\n Returns DN as a string\n :param subject:\n :param shorten:\n :return:\n \"\"\"\n try:\n from cryptography.x509.oid import NameOID\n from cryptography.x509 import ObjectIdentifier\n oid_names = {\n getattr(NameOID, 'COMMON_NAME', ObjectIdentifier(\"2.5.4.3\")): \"CN\",\n getattr(NameOID, 'COUNTRY_NAME', ObjectIdentifier(\"2.5.4.6\")): \"C\",\n getattr(NameOID, 'LOCALITY_NAME', ObjectIdentifier(\"2.5.4.7\")): \"L\",\n getattr(NameOID, 'STATE_OR_PROVINCE_NAME', ObjectIdentifier(\"2.5.4.8\")): \"ST\",\n getattr(NameOID, 'STREET_ADDRESS', ObjectIdentifier(\"2.5.4.9\")): \"St\",\n getattr(NameOID, 'ORGANIZATION_NAME', ObjectIdentifier(\"2.5.4.10\")): \"O\",\n getattr(NameOID, 'ORGANIZATIONAL_UNIT_NAME', ObjectIdentifier(\"2.5.4.11\")): \"OU\",\n getattr(NameOID, 'SERIAL_NUMBER', ObjectIdentifier(\"2.5.4.5\")): \"SN\",\n getattr(NameOID, 'USER_ID', ObjectIdentifier(\"0.9.2342.19200300.100.1.1\")): \"userID\",\n getattr(NameOID, 'DOMAIN_COMPONENT', ObjectIdentifier(\"0.9.2342.19200300.100.1.25\")): \"domainComponent\",\n getattr(NameOID, 'EMAIL_ADDRESS', ObjectIdentifier(\"1.2.840.113549.1.9.1\")): \"emailAddress\",\n getattr(NameOID, 'POSTAL_CODE', ObjectIdentifier(\"2.5.4.17\")): \"ZIP\",\n }\n\n ret = []\n try:\n for attribute in subject:\n oid = attribute.oid\n dot = oid.dotted_string\n oid_name = oid_names[oid] if shorten and oid in oid_names else oid._name\n val = attribute.value\n ret.append('%s: %s' % (oid_name, val))\n except:\n pass\n return ', '.join(ret)\n\n except Exception as e:\n logger.warning('Unexpected error: %s' % e)\n return 'N/A'",
"def rdn_to_dn(changes: Changeset, name: str, base_dn: str) -> Changeset:\n \"\"\" Convert the rdn to a fully qualified DN for the specified LDAP\n connection.\n\n :param changes: The changes object to lookup.\n :param name: rdn to convert.\n :param base_dn: The base_dn to lookup.\n :return: fully qualified DN.\n \"\"\"\n dn = changes.get_value_as_single('dn')\n if dn is not None:\n return changes\n\n value = changes.get_value_as_single(name)\n if value is None:\n raise tldap.exceptions.ValidationError(\n \"Cannot use %s in dn as it is None\" % name)\n if isinstance(value, list):\n raise tldap.exceptions.ValidationError(\n \"Cannot use %s in dn as it is a list\" % name)\n\n assert base_dn is not None\n\n split_base = str2dn(base_dn)\n split_new_dn = [[(name, value, 1)]] + split_base\n\n new_dn = dn2str(split_new_dn)\n\n return changes.set('dn', new_dn)",
"def _ldap_string_prep(self, string):\n \"\"\"\n Implements the internationalized string preparation algorithm from\n RFC 4518. https://tools.ietf.org/html/rfc4518#section-2\n\n :param string:\n A unicode string to prepare\n\n :return:\n A prepared unicode string, ready for comparison\n \"\"\"\n\n # Map step\n string = re.sub('[\\u00ad\\u1806\\u034f\\u180b-\\u180d\\ufe0f-\\uff00\\ufffc]+', '', string)\n string = re.sub('[\\u0009\\u000a\\u000b\\u000c\\u000d\\u0085]', ' ', string)\n if sys.maxunicode == 0xffff:\n # Some installs of Python 2.7 don't support 8-digit unicode escape\n # ranges, so we have to break them into pieces\n # Original was: \\U0001D173-\\U0001D17A and \\U000E0020-\\U000E007F\n string = re.sub('\\ud834[\\udd73-\\udd7a]|\\udb40[\\udc20-\\udc7f]|\\U000e0001', '', string)\n else:\n string = re.sub('[\\U0001D173-\\U0001D17A\\U000E0020-\\U000E007F\\U000e0001]', '', string)\n string = re.sub(\n '[\\u0000-\\u0008\\u000e-\\u001f\\u007f-\\u0084\\u0086-\\u009f\\u06dd\\u070f\\u180e\\u200c-\\u200f'\n '\\u202a-\\u202e\\u2060-\\u2063\\u206a-\\u206f\\ufeff\\ufff9-\\ufffb]+',\n '',\n string\n )\n string = string.replace('\\u200b', '')\n string = re.sub('[\\u00a0\\u1680\\u2000-\\u200a\\u2028-\\u2029\\u202f\\u205f\\u3000]', ' ', string)\n\n string = ''.join(map(stringprep.map_table_b2, string))\n\n # Normalize step\n string = unicodedata.normalize('NFKC', string)\n\n # Prohibit step\n for char in string:\n if stringprep.in_table_a1(char):\n raise ValueError(unwrap(\n '''\n X.509 Name objects may not contain unassigned code points\n '''\n ))\n\n if stringprep.in_table_c8(char):\n raise ValueError(unwrap(\n '''\n X.509 Name objects may not contain change display or\n zzzzdeprecated characters\n '''\n ))\n\n if stringprep.in_table_c3(char):\n raise ValueError(unwrap(\n '''\n X.509 Name objects may not contain private use characters\n '''\n ))\n\n if stringprep.in_table_c4(char):\n raise ValueError(unwrap(\n '''\n X.509 Name objects may not contain non-character code points\n '''\n ))\n\n if stringprep.in_table_c5(char):\n raise ValueError(unwrap(\n '''\n X.509 Name objects may not contain surrogate code points\n '''\n ))\n\n if char == '\\ufffd':\n raise ValueError(unwrap(\n '''\n X.509 Name objects may not contain the replacement character\n '''\n ))\n\n # Check bidirectional step - here we ensure that we are not mixing\n # left-to-right and right-to-left text in the string\n has_r_and_al_cat = False\n has_l_cat = False\n for char in string:\n if stringprep.in_table_d1(char):\n has_r_and_al_cat = True\n elif stringprep.in_table_d2(char):\n has_l_cat = True\n\n if has_r_and_al_cat:\n first_is_r_and_al = stringprep.in_table_d1(string[0])\n last_is_r_and_al = stringprep.in_table_d1(string[-1])\n\n if has_l_cat or not first_is_r_and_al or not last_is_r_and_al:\n raise ValueError(unwrap(\n '''\n X.509 Name object contains a malformed bidirectional\n sequence\n '''\n ))\n\n # Insignificant space handling step\n string = ' ' + re.sub(' +', ' ', string).strip() + ' '\n\n return string",
"def escape_dn_chars(s):\n \"\"\"\n Escape all DN special characters found in s\n with a back-slash (see RFC 4514, section 2.4)\n \"\"\"\n if s:\n assert isinstance(s, six.string_types)\n s = s.replace('\\\\', '\\\\\\\\')\n s = s.replace(',', '\\\\,')\n s = s.replace('+', '\\\\+')\n s = s.replace('\"', '\\\\\"')\n s = s.replace('<', '\\\\<')\n s = s.replace('>', '\\\\>')\n s = s.replace(';', '\\\\;')\n s = s.replace('=', '\\\\=')\n s = s.replace('\\000', '\\\\\\000')\n if s[0] == '#' or s[0] == ' ':\n s = ''.join(('\\\\', s))\n if s[-1] == ' ':\n s = ''.join((s[:-1], '\\\\ '))\n return s",
"static function dnLdapADominioDns($dn){\n $pattern = \"(dc=(?P<componentes>[A-Za-z]+))\";\n $matches = array();\n $dominio = \"\";\n preg_match_all($pattern, $dn, $matches );\n foreach ($matches['componentes'] as $componentes){\n $dominio .= $componentes . \".\";\n }\n return rtrim($dominio, \".\");\n }",
"@Nullable\n private String normalizedDn(String dn) {\n if (isNullOrEmpty(dn)) {\n return dn;\n } else {\n try {\n return new Dn(dn).getNormName();\n } catch (LdapInvalidDnException e) {\n LOG.debug(\"Invalid DN\", e);\n return dn;\n }\n }\n }",
"public static String prepareDN(String DN, String searchRoot) {\n if (DN == null || DN.trim().length() == 0) {\n return searchRoot;\n }\n // unescape double blackslashes\n DN = unescapeDoubleBackslash(DN);\n DN = unescapeSingleQuote(DN); // fix login failure when single quote (') is in userid\n DN = UniqueNameHelper.unescapeSpaces(DN);\n\n //process special character enclosing double quotes\n int length = DN.length();\n if ((DN.charAt(0) == '\\\"') && (DN.charAt(length - 1) == '\\\"')) {\n DN = DN.substring(1, length - 1);\n }\n\n // Remove server URL suffix if the DN is referral from another LDAP server.\n if (DN.startsWith(\"/\")) {\n int pos1 = DN.indexOf(':');\n if (pos1 > -1) {\n int pos2 = DN.indexOf('/', pos1);\n if (pos2 > 0)\n DN = DN.substring(pos2 + 1);\n }\n\n } else if (DN.toLowerCase().startsWith(\"ldap://\")) {\n boolean parsed = false;\n\n try {\n // Use the Ldap URL parser to ensure that %xx gets decoded\n LdapURL ldapURL = new LdapURL(DN);\n if (ldapURL.parsedOK()) {\n DN = ldapURL.get_dn();\n parsed = true;\n }\n } catch (Exception excp) {\n }\n\n if (!parsed) {\n int pos1 = DN.indexOf(':', \"ldap://\".length());\n if (pos1 > 0) {\n int pos2 = DN.indexOf(\"/\", pos1);\n if (pos2 > 0)\n DN = DN.substring(pos2 + 1);\n }\n }\n } else if (DN.toLowerCase().startsWith(\"ldaps://\")) {\n boolean parsed = false;\n\n try {\n // Use the Ldap URL parser to ensure that %xx gets decoded\n LdapURL ldapURL = new LdapURL(DN);\n if (ldapURL.parsedOK()) {\n DN = ldapURL.get_dn();\n parsed = true;\n }\n } catch (Exception excp) {\n }\n\n if (!parsed) {\n int pos1 = DN.indexOf(':', \"ldaps://\".length());\n if (pos1 > 0) {\n int pos2 = DN.indexOf(\"/\", pos1);\n if (pos2 > 0)\n DN = DN.substring(pos2 + 1);\n }\n }\n }\n if (searchRoot == null || searchRoot.trim().length() == 0) {\n return DN;\n }\n StringTokenizer stDN = new StringTokenizer(DN, LdapConstants.LDAP_DN_SEPARATOR);\n StringTokenizer stSearchRoot = new StringTokenizer(searchRoot, LdapConstants.LDAP_DN_SEPARATOR);\n String lastDNToken = null;\n String lastSearchRootToken = null;\n\n while (stDN.hasMoreTokens()) {\n lastDNToken = stDN.nextToken();\n }\n while (stSearchRoot.hasMoreTokens()) {\n lastSearchRootToken = stSearchRoot.nextToken();\n }\n if (lastDNToken != null)\n lastDNToken = lastDNToken.trim();\n else\n lastDNToken = \"\";\n if (lastSearchRootToken != null) {\n lastSearchRootToken = lastSearchRootToken.trim();\n } else {\n lastSearchRootToken = \"\";\n }\n if (!lastDNToken.equalsIgnoreCase(lastSearchRootToken))\n if (DN.length() > 0) {\n DN = DN + LdapConstants.LDAP_DN_SEPARATOR + searchRoot;\n } else\n DN = searchRoot;\n return DN;\n\n }",
"def get_hostname_from_dn(dn):\n \"\"\"\n This parses the hostname from a dn designator. They look like this:\n topology/pod-1/node-101/sys/phys-[eth1/6]/CDeqptMacsectxpkts5min\n \"\"\"\n pod = get_pod_from_dn(dn)\n node = get_node_from_dn(dn)\n if pod and node:\n return \"pod-{}-node-{}\".format(pod, node)\n else:\n return None",
"public static String decodeDN(Codec codec, byte[] encodedDN) throws SASException {\n String dn = null;\n\n try {\n Any any = codec.decode_value(encodedDN, X501DistinguishedNameHelper.type());\n byte[] asnX501DN = X501DistinguishedNameHelper.extract(any);\n\n X500Principal x500Principal = new X500Principal(asnX501DN);\n // To maintain compatibility with tWAS, the toString() method\n // \"is intentionally used because this is the only method which decodes extended attributes\"\n dn = x500Principal.toString();\n } catch (Exception e) {\n throw new SASException(1, e);\n }\n return dn;\n }"
] |
[
0.8442851305007935,
0.6996244788169861,
0.6977352499961853,
0.6818278431892395,
0.6808354258537292,
0.6749094724655151,
0.6710567474365234,
0.6669104695320129,
0.6640258431434631,
0.658560574054718,
0.6560439467430115,
0.6537912487983704
] |
explode_dn(dn [, notypes=0]) -> list
This function takes a DN and breaks it up into its component parts.
The notypes parameter is used to specify that only the component's
attribute values be returned and not the attribute types.
|
def explode_dn(dn, notypes=0, flags=0):
"""
explode_dn(dn [, notypes=0]) -> list
This function takes a DN and breaks it up into its component parts.
The notypes parameter is used to specify that only the component's
attribute values be returned and not the attribute types.
"""
if not dn:
return []
dn_decomp = str2dn(dn, flags)
rdn_list = []
for rdn in dn_decomp:
if notypes:
rdn_list.append('+'.join([
escape_dn_chars(avalue or '')
for atype, avalue, dummy in rdn
]))
else:
rdn_list.append('+'.join([
'='.join((atype, escape_dn_chars(avalue or '')))
for atype, avalue, dummy in rdn
]))
return rdn_list
|
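Assuming the str2dn and escape_dn_chars shown in the earlier records, the two modes are expected to behave as follows:

dn = 'cn=John Doe,dc=example,dc=com'
with_types = ['cn=John Doe', 'dc=example', 'dc=com']  # notypes=0 (default)
values_only = ['John Doe', 'example', 'com']          # notypes=1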
[
"def explode_rdn(rdn, notypes=0, flags=0):\n \"\"\"\n explode_rdn(rdn [, notypes=0]) -> list\n\n This function takes a RDN and breaks it up into its component parts\n if it is a multi-valued RDN.\n The notypes parameter is used to specify that only the component's\n attribute values be returned and not the attribute types.\n \"\"\"\n if not rdn:\n return []\n rdn_decomp = str2dn(rdn, flags)[0]\n if notypes:\n return [avalue or '' for atype, avalue, dummy in rdn_decomp]\n else:\n return ['='.join((atype, escape_dn_chars(avalue or '')))\n for atype, avalue, dummy in rdn_decomp]",
"public static function explodeDn($dn, $removeAttributePrefixes = true)\n {\n $dn = ldap_explode_dn($dn, ($removeAttributePrefixes ? 1 : 0));\n\n if (is_array($dn) && array_key_exists('count', $dn)) {\n foreach ($dn as $rdn => $value) {\n $dn[$rdn] = self::unescape($value);\n }\n }\n\n return $dn;\n }",
"def explode_qname(ns_stack, qname, attr=false)\n if qname.is_a?(Array)\n local_part, prefix, uri = qname\n if uri\n raise \"invalid name: #{qname.inspect}\" if !prefix\n bound_uri = find_namespace_uri(ns_stack, prefix)\n raise \"namespace conflict: prefix '#{prefix}' refers to '#{uri}' and '#{bound_uri}'\" if bound_uri && uri != bound_uri\n return [local_part, prefix, uri]\n elsif prefix\n uri = find_namespace_uri(ns_stack, prefix)\n raise \"namespace prefix not bound: '#{prefix}'\" if !uri\n return [local_part, prefix, uri]\n else\n return local_part\n end\n end\n\n local_part, prefix = split_qname(qname)\n if prefix\n if prefix==\"xmlns\" && attr\n [local_part, prefix]\n else\n uri = find_namespace_uri(ns_stack, prefix)\n raise \"namespace prefix not bound: '#{prefix}'\" if ! uri\n [local_part, prefix, uri]\n end\n else\n if attr\n local_part\n else\n default_uri = find_namespace_uri(ns_stack, \"\")\n if default_uri\n [local_part, \"\", default_uri]\n else\n local_part\n end\n end\n end\n end",
"public static function explodeDn($dn, $withAttributes = 1)\n {\n $pieces = ldap_explode_dn($dn, $withAttributes);\n\n if ($pieces === false || !isset($pieces['count']) || $pieces['count'] == 0) {\n throw new InvalidArgumentException(sprintf('Unable to parse DN \"%s\".', $dn));\n }\n for ($i = 0; $i < $pieces['count']; $i++) {\n $pieces[$i] = self::unescapeValue($pieces[$i]);\n }\n unset($pieces['count']);\n\n return $pieces;\n }",
"function explode(s, d) {\n if (!s || is(s, 'array')) {\n return s;\n }\n\n return Arr.map(s.split(d || ','), trim);\n }",
"function explode(s, d) {\n\t\tif (!s || Arr.isArray(s)) {\n\t\t\treturn s;\n\t\t}\n\n\t\treturn Arr.map(s.split(d || ','), trim);\n\t}",
"public static function ldap_explode_dn($dn, $options = array('casefold' => 'upper'))\n {\n if (!isset($options['onlyvalues'])) $options['onlyvalues'] = false;\n if (!isset($options['reverse'])) $options['reverse'] = false;\n if (!isset($options['casefold'])) $options['casefold'] = 'upper';\n\n // Escaping of DN and stripping of \"OID.\"\n $dn = self::canonical_dn($dn, array('casefold' => $options['casefold']));\n\n // splitting the DN\n $dn_array = preg_split('/(?<=[^\\\\\\\\]),/', $dn);\n\n // clear wrong splitting (possibly we have split too much)\n // /!\\ Not clear, if this is neccessary here\n //$dn_array = self::correct_dn_splitting($dn_array, ',');\n\n // construct subarrays for multivalued RDNs and unescape DN value\n // also convert to output format and apply casefolding\n foreach ($dn_array as $key => $value) {\n $value_u = self::unescape_dn_value($value);\n $rdns = self::split_rdn_multival($value_u[0]);\n if (count($rdns) > 1) {\n // MV RDN!\n foreach ($rdns as $subrdn_k => $subrdn_v) {\n // Casefolding\n if ($options['casefold'] == 'upper') {\n $subrdn_v = preg_replace_callback(\n \"/^\\w+=/\",\n function ($matches) {\n return strtoupper($matches[0]);\n },\n $subrdn_v\n );\n } else if ($options['casefold'] == 'lower') {\n $subrdn_v = preg_replace_callback(\n \"/^\\w+=/\",\n function ($matches) {\n return strtolower($matches[0]);\n },\n $subrdn_v\n );\n }\n\n if ($options['onlyvalues']) {\n preg_match('/(.+?)(?<!\\\\\\\\)=(.+)/', $subrdn_v, $matches);\n $rdn_ocl = $matches[1];\n $rdn_val = $matches[2];\n $unescaped = self::unescape_dn_value($rdn_val);\n $rdns[$subrdn_k] = $unescaped[0];\n } else {\n $unescaped = self::unescape_dn_value($subrdn_v);\n $rdns[$subrdn_k] = $unescaped[0];\n }\n }\n\n $dn_array[$key] = $rdns;\n } else {\n // normal RDN\n\n // Casefolding\n if ($options['casefold'] == 'upper') {\n $value = preg_replace_callback(\n \"/^\\w+=/\",\n function ($matches) {\n return strtoupper($matches[0]);\n },\n $value\n );\n } else if ($options['casefold'] == 'lower') {\n $value = preg_replace_callback(\n \"/^\\w+=/\",\n function ($matches) {\n return strtolower($matches[0]);\n },\n $value\n );\n }\n\n if ($options['onlyvalues']) {\n preg_match('/(.+?)(?<!\\\\\\\\)=(.+)/', $value, $matches);\n $dn_ocl = $matches[1];\n $dn_val = $matches[2];\n $unescaped = self::unescape_dn_value($dn_val);\n $dn_array[$key] = $unescaped[0];\n } else {\n $unescaped = self::unescape_dn_value($value);\n $dn_array[$key] = $unescaped[0];\n }\n }\n }\n\n if ($options['reverse']) {\n return array_reverse($dn_array);\n } else {\n return $dn_array;\n }\n }",
"def explode_element(element)\n eelement = explode_node(element)\n\n eattrs = Hash[element.attributes.map do |name,attr|\n [explode_node(attr), attr.value]\n end]\n\n [eelement, eattrs]\n end",
"public static function explodeExchangeLegacyDn($dn, $withAttributes = false)\n {\n preg_match(self::MATCH_LEGACY_DN, $dn, $matches);\n\n if (!isset($matches[2])) {\n throw new InvalidArgumentException(sprintf('Unable to parse legacy exchange dn \"%s\".', $dn));\n }\n $pieces = [];\n for ($i = 3; $i < count($matches); $i += 3) {\n $pieces[] = $withAttributes ? $matches[$i - 1].'='.$matches[$i] : $matches[$i];\n }\n\n return $pieces;\n }",
"def explode(self):\n \"\"\"Use to fill groups values on hosts and create new services\n (for host group ones)\n\n :return: None\n \"\"\"\n # first elements, after groups\n self.contacts.explode(self.contactgroups, self.notificationways)\n self.contactgroups.explode()\n\n self.hosts.explode(self.hostgroups, self.contactgroups)\n\n self.hostgroups.explode()\n\n self.services.explode(self.hosts, self.hostgroups, self.contactgroups,\n self.servicegroups, self.servicedependencies)\n self.servicegroups.explode()\n\n self.timeperiods.explode()\n\n self.hostdependencies.explode(self.hostgroups)\n\n self.servicedependencies.explode(self.hostgroups)\n\n # Serviceescalations hostescalations will create new escalations\n self.serviceescalations.explode(self.escalations)\n self.hostescalations.explode(self.escalations)\n self.escalations.explode(self.hosts, self.hostgroups, self.contactgroups)\n\n # Now the architecture part\n self.realms.explode()",
"def explode_attr_qnames(ns_stack, attrs)\n Hash[attrs.map do |name, value|\n uq_name = explode_qname(ns_stack, name, true)\n local_part, prefix, uri = uq_name\n if !prefix || prefix==\"\"\n [local_part, value]\n else\n [uq_name, value]\n end\n end]\n end",
"static function dnLdapADominioDns($dn){\n $pattern = \"(dc=(?P<componentes>[A-Za-z]+))\";\n $matches = array();\n $dominio = \"\";\n preg_match_all($pattern, $dn, $matches );\n foreach ($matches['componentes'] as $componentes){\n $dominio .= $componentes . \".\";\n }\n return rtrim($dominio, \".\");\n }"
] |
[
0.8420254588127136,
0.7045586109161377,
0.6811243295669556,
0.6756733655929565,
0.6731144189834595,
0.6730970740318298,
0.6724711060523987,
0.6586148142814636,
0.6542986035346985,
0.653778076171875,
0.6530981659889221,
0.6507856249809265
] |
explode_rdn(rdn [, notypes=0]) -> list
This function takes a RDN and breaks it up into its component parts
if it is a multi-valued RDN.
The notypes parameter is used to specify that only the component's
attribute values be returned and not the attribute types.
|
def explode_rdn(rdn, notypes=0, flags=0):
"""
explode_rdn(rdn [, notypes=0]) -> list
This function takes a RDN and breaks it up into its component parts
if it is a multi-valued RDN.
The notypes parameter is used to specify that only the component's
attribute values be returned and not the attribute types.
"""
if not rdn:
return []
rdn_decomp = str2dn(rdn, flags)[0]
if notypes:
return [avalue or '' for atype, avalue, dummy in rdn_decomp]
else:
return ['='.join((atype, escape_dn_chars(avalue or '')))
for atype, avalue, dummy in rdn_decomp]
|
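Unlike explode_dn, only the first RDN of the parsed string is consulted (str2dn(rdn, flags)[0]), so a multi-valued RDN is the interesting case. Assuming the helpers from the earlier records:

rdn = 'cn=John Doe+mail=john@example.com'
with_types = ['cn=John Doe', 'mail=john@example.com']  # notypes=0 (default)
values_only = ['John Doe', 'john@example.com']         # notypes=1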
[
"def explode_dn(dn, notypes=0, flags=0):\n \"\"\"\n explode_dn(dn [, notypes=0]) -> list\n\n This function takes a DN and breaks it up into its component parts.\n The notypes parameter is used to specify that only the component's\n attribute values be returned and not the attribute types.\n \"\"\"\n if not dn:\n return []\n dn_decomp = str2dn(dn, flags)\n rdn_list = []\n for rdn in dn_decomp:\n if notypes:\n rdn_list.append('+'.join([\n escape_dn_chars(avalue or '')\n for atype, avalue, dummy in rdn\n ]))\n else:\n rdn_list.append('+'.join([\n '='.join((atype, escape_dn_chars(avalue or '')))\n for atype, avalue, dummy in rdn\n ]))\n return rdn_list",
"def _get_values(self, rdn):\n \"\"\"\n Returns a dict of prepped values contained in an RDN\n\n :param rdn:\n A RelativeDistinguishedName object\n\n :return:\n A dict object with unicode strings of NameTypeAndValue value field\n values that have been prepped for comparison\n \"\"\"\n\n output = {}\n [output.update([(ntv['type'].native, ntv.prepped_value)]) for ntv in rdn]\n return output",
"public static function split_rdn_multival($rdn)\n {\n $rdns = preg_split('/(?<!\\\\\\\\)\\+/', $rdn);\n $rdns = self::correct_dn_splitting($rdns, '+');\n return array_values($rdns);\n }",
"def explode_qname(ns_stack, qname, attr=false)\n if qname.is_a?(Array)\n local_part, prefix, uri = qname\n if uri\n raise \"invalid name: #{qname.inspect}\" if !prefix\n bound_uri = find_namespace_uri(ns_stack, prefix)\n raise \"namespace conflict: prefix '#{prefix}' refers to '#{uri}' and '#{bound_uri}'\" if bound_uri && uri != bound_uri\n return [local_part, prefix, uri]\n elsif prefix\n uri = find_namespace_uri(ns_stack, prefix)\n raise \"namespace prefix not bound: '#{prefix}'\" if !uri\n return [local_part, prefix, uri]\n else\n return local_part\n end\n end\n\n local_part, prefix = split_qname(qname)\n if prefix\n if prefix==\"xmlns\" && attr\n [local_part, prefix]\n else\n uri = find_namespace_uri(ns_stack, prefix)\n raise \"namespace prefix not bound: '#{prefix}'\" if ! uri\n [local_part, prefix, uri]\n end\n else\n if attr\n local_part\n else\n default_uri = find_namespace_uri(ns_stack, \"\")\n if default_uri\n [local_part, \"\", default_uri]\n else\n local_part\n end\n end\n end\n end",
"public static function ldap_explode_dn($dn, $options = array('casefold' => 'upper'))\n {\n if (!isset($options['onlyvalues'])) $options['onlyvalues'] = false;\n if (!isset($options['reverse'])) $options['reverse'] = false;\n if (!isset($options['casefold'])) $options['casefold'] = 'upper';\n\n // Escaping of DN and stripping of \"OID.\"\n $dn = self::canonical_dn($dn, array('casefold' => $options['casefold']));\n\n // splitting the DN\n $dn_array = preg_split('/(?<=[^\\\\\\\\]),/', $dn);\n\n // clear wrong splitting (possibly we have split too much)\n // /!\\ Not clear, if this is neccessary here\n //$dn_array = self::correct_dn_splitting($dn_array, ',');\n\n // construct subarrays for multivalued RDNs and unescape DN value\n // also convert to output format and apply casefolding\n foreach ($dn_array as $key => $value) {\n $value_u = self::unescape_dn_value($value);\n $rdns = self::split_rdn_multival($value_u[0]);\n if (count($rdns) > 1) {\n // MV RDN!\n foreach ($rdns as $subrdn_k => $subrdn_v) {\n // Casefolding\n if ($options['casefold'] == 'upper') {\n $subrdn_v = preg_replace_callback(\n \"/^\\w+=/\",\n function ($matches) {\n return strtoupper($matches[0]);\n },\n $subrdn_v\n );\n } else if ($options['casefold'] == 'lower') {\n $subrdn_v = preg_replace_callback(\n \"/^\\w+=/\",\n function ($matches) {\n return strtolower($matches[0]);\n },\n $subrdn_v\n );\n }\n\n if ($options['onlyvalues']) {\n preg_match('/(.+?)(?<!\\\\\\\\)=(.+)/', $subrdn_v, $matches);\n $rdn_ocl = $matches[1];\n $rdn_val = $matches[2];\n $unescaped = self::unescape_dn_value($rdn_val);\n $rdns[$subrdn_k] = $unescaped[0];\n } else {\n $unescaped = self::unescape_dn_value($subrdn_v);\n $rdns[$subrdn_k] = $unescaped[0];\n }\n }\n\n $dn_array[$key] = $rdns;\n } else {\n // normal RDN\n\n // Casefolding\n if ($options['casefold'] == 'upper') {\n $value = preg_replace_callback(\n \"/^\\w+=/\",\n function ($matches) {\n return strtoupper($matches[0]);\n },\n $value\n );\n } else if ($options['casefold'] == 'lower') {\n $value = preg_replace_callback(\n \"/^\\w+=/\",\n function ($matches) {\n return strtolower($matches[0]);\n },\n $value\n );\n }\n\n if ($options['onlyvalues']) {\n preg_match('/(.+?)(?<!\\\\\\\\)=(.+)/', $value, $matches);\n $dn_ocl = $matches[1];\n $dn_val = $matches[2];\n $unescaped = self::unescape_dn_value($dn_val);\n $dn_array[$key] = $unescaped[0];\n } else {\n $unescaped = self::unescape_dn_value($value);\n $dn_array[$key] = $unescaped[0];\n }\n }\n }\n\n if ($options['reverse']) {\n return array_reverse($dn_array);\n } else {\n return $dn_array;\n }\n }",
"def explode(self):\n \"\"\"Use to fill groups values on hosts and create new services\n (for host group ones)\n\n :return: None\n \"\"\"\n # first elements, after groups\n self.contacts.explode(self.contactgroups, self.notificationways)\n self.contactgroups.explode()\n\n self.hosts.explode(self.hostgroups, self.contactgroups)\n\n self.hostgroups.explode()\n\n self.services.explode(self.hosts, self.hostgroups, self.contactgroups,\n self.servicegroups, self.servicedependencies)\n self.servicegroups.explode()\n\n self.timeperiods.explode()\n\n self.hostdependencies.explode(self.hostgroups)\n\n self.servicedependencies.explode(self.hostgroups)\n\n # Serviceescalations hostescalations will create new escalations\n self.serviceescalations.explode(self.escalations)\n self.hostescalations.explode(self.escalations)\n self.escalations.explode(self.hosts, self.hostgroups, self.contactgroups)\n\n # Now the architecture part\n self.realms.explode()",
"def explode(self):\n \"\"\"\n If the current Line entity consists of multiple line\n break it up into n Line entities.\n\n Returns\n ----------\n exploded: (n,) Line entities\n \"\"\"\n points = np.column_stack((\n self.points,\n self.points)).ravel()[1:-1].reshape((-1, 2))\n exploded = [Line(i) for i in points]\n return exploded",
"def explode(self):\n \"\"\"\n Fill members with hostgroup_members\n\n :return: None\n \"\"\"\n # We do not want a same hostgroup to be exploded again and again\n # so we tag it\n for tmp_hg in list(self.items.values()):\n tmp_hg.already_exploded = False\n\n for hostgroup in list(self.items.values()):\n if hostgroup.already_exploded:\n continue\n\n # get_hosts_by_explosion is a recursive\n # function, so we must tag hg so we do not loop\n for tmp_hg in list(self.items.values()):\n tmp_hg.rec_tag = False\n hostgroup.get_hosts_by_explosion(self)\n\n # We clean the tags\n for tmp_hg in list(self.items.values()):\n if hasattr(tmp_hg, 'rec_tag'):\n del tmp_hg.rec_tag\n del tmp_hg.already_exploded",
"def explode(self, contactgroups, notificationways):\n \"\"\"Explode all contact for each contactsgroup\n\n :param contactgroups: contactgroups to explode\n :type contactgroups: alignak.objects.contactgroup.Contactgroups\n :param notificationways: notificationways to explode\n :type notificationways: alignak.objects.notificationway.Notificationways\n :return: None\n \"\"\"\n # Contactgroups property need to be fulfill for got the information\n self.apply_partial_inheritance('contactgroups')\n # _special properties maybe came from a template, so\n # import them before grok ourselves\n for prop in Contact.special_properties:\n if prop == 'contact_name':\n continue\n self.apply_partial_inheritance(prop)\n\n # Register ourselves into the contactsgroups we are in\n for contact in self:\n if not (hasattr(contact, 'contact_name') and hasattr(contact, 'contactgroups')):\n continue\n for contactgroup in contact.contactgroups:\n contactgroups.add_member(contact.contact_name, contactgroup.strip())\n\n # Now create a notification way with the simple parameter of the\n # contacts\n for contact in self:\n need_notificationway = False\n params = {}\n for param in Contact.simple_way_parameters:\n if hasattr(contact, param):\n need_notificationway = True\n params[param] = getattr(contact, param)\n elif contact.properties[param].has_default: # put a default text value\n # Remove the value and put a default value\n setattr(contact, param, contact.properties[param].default)\n\n if need_notificationway:\n cname = getattr(contact, 'contact_name', getattr(contact, 'alias', ''))\n nw_name = cname + '_inner_nw'\n notificationways.new_inner_member(nw_name, params)\n\n if not hasattr(contact, 'notificationways'):\n contact.notificationways = [nw_name]\n else:\n contact.notificationways = list(contact.notificationways)\n contact.notificationways.append(nw_name)",
"def explode(self):\n \"\"\"\n Get services and put them in members container\n\n :return: None\n \"\"\"\n # We do not want a same service group to be exploded again and again\n # so we tag it\n for tmp_sg in list(self.items.values()):\n tmp_sg.already_exploded = False\n\n for servicegroup in list(self.items.values()):\n if servicegroup.already_exploded:\n continue\n\n # get_services_by_explosion is a recursive\n # function, so we must tag hg so we do not loop\n for tmp_sg in list(self.items.values()):\n tmp_sg.rec_tag = False\n servicegroup.get_services_by_explosion(self)\n\n # We clean the tags\n for tmp_sg in list(self.items.values()):\n if hasattr(tmp_sg, 'rec_tag'):\n del tmp_sg.rec_tag\n del tmp_sg.already_exploded",
"def explode_attr_qnames(ns_stack, attrs)\n Hash[attrs.map do |name, value|\n uq_name = explode_qname(ns_stack, name, true)\n local_part, prefix, uri = uq_name\n if !prefix || prefix==\"\"\n [local_part, value]\n else\n [uq_name, value]\n end\n end]\n end",
"def explode(self):\n \"\"\"\n Fill members with contactgroup_members\n\n :return:None\n \"\"\"\n # We do not want a same hg to be explode again and again\n # so we tag it\n for tmp_cg in list(self.items.values()):\n tmp_cg.already_exploded = False\n\n for contactgroup in list(self.items.values()):\n if contactgroup.already_exploded:\n continue\n\n # get_contacts_by_explosion is a recursive\n # function, so we must tag hg so we do not loop\n for tmp_cg in list(self.items.values()):\n tmp_cg.rec_tag = False\n contactgroup.get_contacts_by_explosion(self)\n\n # We clean the tags\n for tmp_cg in list(self.items.values()):\n if hasattr(tmp_cg, 'rec_tag'):\n del tmp_cg.rec_tag\n del tmp_cg.already_exploded"
] |
[
0.8594252467155457,
0.694362998008728,
0.6939125061035156,
0.6772039532661438,
0.6598398685455322,
0.658562958240509,
0.6491134762763977,
0.6490741968154907,
0.6488109827041626,
0.6463373899459839,
0.6416938900947571,
0.6357606053352356
] |
Create or update labels in github
|
def labels(ctx):
"""Crate or update labels in github
"""
config = ctx.obj['agile']
repos = config.get('repositories')
labels = config.get('labels')
if not isinstance(repos, list):
raise CommandError(
            'You need to specify the "repositories" list in the config'
)
if not isinstance(labels, dict):
raise CommandError(
'You need to specify the "labels" dictionary in the config'
)
git = GithubApi()
for repo in repos:
repo = git.repo(repo)
for label, color in labels.items():
if repo.label(label, color):
click.echo('Created label "%s" @ %s' % (label, repo))
else:
click.echo('Updated label "%s" @ %s' % (label, repo))
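
For orientation, a minimal sketch of the 'agile' config shape this command reads. The repository name and colors below are hypothetical; colors are hex strings without a leading '#', which is what the GitHub labels API expects.

# Hypothetical config consumed by the labels command above.
config = {
    'repositories': ['myorg/myrepo'],  # "owner/name" strings
    'labels': {
        'bug': 'ee0701',
        'enhancement': '84b6eb',
    },
}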
|
[
"def update_labels(repo):\n \"\"\"Update labels.\"\"\"\n updated = set()\n for label in repo.get_labels():\n edit = find_label(label.name, label.color, label.description)\n if edit is not None:\n print(' Updating {}: #{} \"{}\"'.format(edit.new, edit.color, edit.description))\n label.edit(edit.new, edit.color, edit.description)\n updated.add(edit.old)\n updated.add(edit.new)\n else:\n if DELETE_UNSPECIFIED:\n print(' Deleting {}: #{} \"{}\"'.format(label.name, label.color, label.description))\n label.delete()\n else:\n print(' Skipping {}: #{} \"{}\"'.format(label.name, label.color, label.description))\n updated.add(label.name)\n for name, values in label_list.items():\n color, description = values\n if isinstance(name, tuple):\n new_name = name[1]\n else:\n new_name = name\n if new_name not in updated:\n print(' Creating {}: #{} \"{}\"'.format(new_name, color, description))\n repo.create_label(new_name, color, description)",
"def set_labels(self, *labels):\n \"\"\"\n :calls: `PUT /repos/:owner/:repo/issues/:number/labels <http://developer.github.com/v3/issues/labels>`_\n :param labels: list of :class:`github.Label.Label` or strings\n :rtype: None\n \"\"\"\n assert all(isinstance(element, (github.Label.Label, str, unicode)) for element in labels), labels\n post_parameters = [label.name if isinstance(label, github.Label.Label) else label for label in labels]\n headers, data = self._requester.requestJsonAndCheck(\n \"PUT\",\n self.issue_url + \"/labels\",\n input=post_parameters\n )",
"def autolabel_pull_request(issue_prefixes, project: true, components: true, labels: false)\n raise NotConfiguredError unless @jira_client\n raise(ArgumentError, \"issue_prefixes cannot be empty\") if issue_prefixes.empty?\n\n issue_keys = extract_issue_keys_from_pull_request(issue_prefixes)\n return if issue_keys.empty?\n\n labels = fetch_labels_from_issues(\n issue_keys,\n project: project,\n components: components,\n labels: labels\n )\n return if labels.empty?\n\n create_missing_github_labels(labels)\n add_labels_to_issue(labels)\n\n labels\n end",
"def label(self, name, color, update=True):\n \"\"\"Create or update a label\n \"\"\"\n url = '%s/labels' % self\n data = dict(name=name, color=color)\n response = self.http.post(\n url, json=data, auth=self.auth, headers=self.headers\n )\n if response.status_code == 201:\n return True\n elif response.status_code == 422 and update:\n url = '%s/%s' % (url, name)\n response = self.http.patch(\n url, json=data, auth=self.auth, headers=self.headers\n )\n response.raise_for_status()\n return False",
"def _set_labels(node, apiserver_url, labels):\n '''Replace labels dict by a new one'''\n # Prepare URL\n url = \"{0}/api/v1/nodes/{1}\".format(apiserver_url, node)\n # Prepare data\n data = [{\"op\": \"replace\", \"path\": \"/metadata/labels\", \"value\": labels}]\n # Make request\n ret = _kpatch(url, data)\n if ret.get(\"status\") == 404:\n return \"Node {0} doesn't exist\".format(node)\n return ret",
"def add_labels_to_pr(repo: GithubRepository,\n pull_id: int,\n *labels: str,\n override_token: str = None) -> None:\n \"\"\"\n References:\n https://developer.github.com/v3/issues/labels/#add-labels-to-an-issue\n \"\"\"\n url = (\"https://api.github.com/repos/{}/{}/issues/{}/labels\"\n \"?access_token={}\".format(repo.organization,\n repo.name,\n pull_id,\n override_token or repo.access_token))\n response = requests.post(url, json=list(labels))\n\n if response.status_code != 200:\n raise RuntimeError(\n 'Add labels failed. Code: {}. Content: {}.'.format(\n response.status_code, response.content))",
"def add_label(name, color = \"fef2c0\")\n puts \"color: #{color}\"\n github.api.add_label(repo, name, color)\n end",
"def make_label(self, path):\n \"\"\"\n this borrows too much from the internals of ofs\n maybe expose different parts of the api?\n \"\"\"\n from datetime import datetime\n from StringIO import StringIO\n path = path.lstrip(\"/\")\n bucket, label = path.split(\"/\", 1)\n\n bucket = self.ofs._require_bucket(bucket)\n key = self.ofs._get_key(bucket, label)\n if key is None:\n key = bucket.new_key(label)\n self.ofs._update_key_metadata(key, { '_creation_time': str(datetime.utcnow()) })\n key.set_contents_from_file(StringIO(''))\n key.close()",
"def _sync_labels(self, labels_json):\n \"\"\"\"Populate the user's labels from a JSON encoded list.\"\"\"\n for label_json in labels_json:\n label_id = label_json['id']\n self.labels[label_id] = Label(label_json, self)",
"def create(*args)\n arguments(args, required: [:user, :repo]) do\n permit VALID_LABEL_INPUTS\n assert_required VALID_LABEL_INPUTS\n end\n\n post_request(\"/repos/#{arguments.user}/#{arguments.repo}/labels\", arguments.params)\n end",
"def set(pr, name, color)\n message = \"\"\n if label?(name)\n message = \"Set #{name} label. (Color: #{color})\"\n else\n message = \"Add #{name} new label. (Color: #{color})\"\n add_label(name, color)\n end\n github.api.add_labels_to_an_issue(repo, pr, [name])\n puts message\n end",
"def update(*args)\n arguments(args, required: [:user, :repo, :label_name]) do\n permit VALID_LABEL_INPUTS\n assert_required VALID_LABEL_INPUTS\n end\n\n patch_request(\"/repos/#{arguments.user}/#{arguments.repo}/labels/#{arguments.label_name}\", arguments.params)\n end"
] |
[
0.7579184174537659,
0.7435874342918396,
0.7239165306091309,
0.7234540581703186,
0.7192164659500122,
0.7168595194816589,
0.707468569278717,
0.7042055726051331,
0.6989613771438599,
0.6976137757301331,
0.6921808123588562,
0.6910780668258667
] |
Get new access token.
|
def get_access_token(self, code):
"""Get new access token."""
try:
self._token = super().fetch_token(
MINUT_TOKEN_URL,
client_id=self._client_id,
client_secret=self._client_secret,
code=code,
)
    except MissingTokenError as error:
_LOGGER.debug("Token issues: %s", error)
return self._token
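
As a rough standalone sketch of the exchange this method wraps, written against requests-oauthlib directly. The token URL and credentials below are placeholders, not Minut's documented values.

from requests_oauthlib import OAuth2Session

CLIENT_ID = 'client-id'          # placeholder
CLIENT_SECRET = 'client-secret'  # placeholder
TOKEN_URL = 'https://api.minut.com/v1/oauth/token'  # assumed endpoint

oauth = OAuth2Session(CLIENT_ID)
# 'code' is the authorization code handed back by the OAuth2 redirect.
token = oauth.fetch_token(
    TOKEN_URL,
    client_secret=CLIENT_SECRET,
    code='authorization-code-from-redirect',
)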
|
[
"def refresh(self):\n \"\"\"Obtain a new access token.\"\"\"\n grant_type = \"https://oauth.reddit.com/grants/installed_client\"\n self._request_token(grant_type=grant_type, device_id=self._device_id)",
"def access_token():\n \"\"\"Token view handles exchange/refresh access tokens.\"\"\"\n client = Client.query.filter_by(\n client_id=request.form.get('client_id')\n ).first()\n\n if not client:\n abort(404)\n\n if not client.is_confidential and \\\n 'client_credentials' == request.form.get('grant_type'):\n error = InvalidClientError()\n response = jsonify(dict(error.twotuples))\n response.status_code = error.status_code\n abort(response)\n\n # Return None or a dictionary. Dictionary will be merged with token\n # returned to the client requesting the access token.\n # Response is in application/json\n return None",
"def _get_access_from_refresh(self) -> Tuple[str, float]:\n \"\"\"Uses the stored refresh token to get a new access token.\n\n This method assumes that the refresh token exists.\n\n Args:\n None\n\n Returns:\n new access token and expiration time (from now)\n \"\"\"\n headers = self._get_authorization_headers()\n data = {\n 'grant_type': 'refresh_token',\n 'refresh_token': self.refresh_token\n }\n r = self.session.post(self.TOKEN_URL, headers=headers, data=data)\n response_data = r.json()\n return (response_data['access_token'], response_data['expires_in'])",
"def get_access_token(self):\n \"\"\"\n get a valid access token\n\n \"\"\"\n if self.is_access_token_expired():\n\n if is_debug_enabled():\n debug('requesting new access_token')\n\n token = get_access_token(username=self.username,\n password=self.password,\n client_id=self.client_id,\n client_secret=self.client_secret,\n app_url=self.app_url)\n\n # lets make sure to refresh before we're halfway to expiring\n self.expires_at = time.time() + token['expires_in']/2\n self.access_token = token['access_token']\n\n return self.access_token",
"def get_active_token(self):\n \"\"\"\n Getting the valid access token.\n\n Access token expires every 24 hours, It will expires then it will\n generate a new token.\n Return:\n active access token \n \"\"\"\n\n expire_time = self.store_handler.has_value(\"expires\")\n access_token = self.store_handler.has_value(\"access_token\")\n if expire_time and access_token:\n expire_time = self.store_handler.get_value(\"expires\")\n if not datetime.now() < datetime.fromtimestamp(float(expire_time)):\n self.store_handler.delete_value(\"access_token\")\n self.store_handler.delete_value(\"expires\")\n logger.info('Access token expired, going to get new token')\n self.auth()\n else:\n logger.info('Access token noy expired yet')\n else:\n self.auth()\n return self.store_handler.get_value(\"access_token\")",
"def access_token(self):\n \"\"\"\n Returns an access token created from this refresh token. Copies all\n claims present in this refresh token to the new access token except\n those claims listed in the `no_copy_claims` attribute.\n \"\"\"\n access = AccessToken()\n\n # Use instantiation time of refresh token as relative timestamp for\n # access token \"exp\" claim. This ensures that both a refresh and\n # access token expire relative to the same time if they are created as\n # a pair.\n access.set_exp(from_time=self.current_time)\n\n no_copy = self.no_copy_claims\n for claim, value in self.payload.items():\n if claim in no_copy:\n continue\n access[claim] = value\n\n return access",
"def refresh(self):\n \"\"\"Obtain a new personal-use script type access token.\"\"\"\n self._request_token(\n grant_type=\"password\",\n username=self._username,\n password=self._password,\n )",
"def access_token(self, request_token, request_secret):\n \"\"\"Returns access_token, access_secret\"\"\"\n logging.debug(\"Getting access token from %s:%d\",\n self.server, self.port)\n self.access_token, self.access_secret = \\\n self._token(\"/oauth/accessToken\", request_token, request_secret)\n return self.access_token, self.access_secret",
"def get_access_token(self, refresh_token):\n \"\"\"\n Use a refresh token to obtain a new access token\n \"\"\"\n\n token = requests.post(GOOGLE_OAUTH2_TOKEN_URL, data=dict(\n refresh_token=refresh_token,\n grant_type='refresh_token',\n client_id=self.client_id,\n client_secret=self.client_secret,\n )).json()\n\n if not token or token.get('error'):\n return\n\n return token",
"def refresh(self):\n \"\"\"Obtain a new access token from the refresh_token.\"\"\"\n if self.refresh_token is None:\n raise InvalidInvocation(\"refresh token not provided\")\n self._request_token(\n grant_type=\"refresh_token\", refresh_token=self.refresh_token\n )",
"def access_token(self):\n \"\"\"Get access_token.\"\"\"\n if self.cache_token:\n return self.access_token_ or \\\n self._resolve_credential('access_token')\n return self.access_token_",
"def get_token(self, code, headers=None, **kwargs):\n \"\"\"\n Requests an access token\n \"\"\"\n self._check_configuration(\"site\", \"token_url\", \"redirect_uri\",\n \"client_id\", \"client_secret\")\n url = \"%s%s\" % (self.site, quote(self.token_url))\n data = {\n 'redirect_uri': self.redirect_uri,\n 'client_id': self.client_id,\n 'client_secret': self.client_secret,\n 'code': code,\n }\n data.update(kwargs)\n\n return self._make_request(url, data=data, headers=headers)"
] |
[
0.7532103657722473,
0.7490764260292053,
0.7446838617324829,
0.7380387187004089,
0.7371529340744019,
0.7366005778312683,
0.7333049774169922,
0.7314535975456238,
0.7277992367744446,
0.726302981376648,
0.7256467342376709,
0.7247545123100281
] |
Send a request to the Minut Point API.
|
def _request(self, url, request_type='GET', **params):
"""Send a request to the Minut Point API."""
try:
_LOGGER.debug('Request %s %s', url, params)
response = self.request(
request_type, url, timeout=TIMEOUT.seconds, **params)
response.raise_for_status()
_LOGGER.debug('Response %s %s %.200s', response.status_code,
response.headers['content-type'], response.json())
response = response.json()
if 'error' in response:
raise OSError(response['error'])
return response
except OSError as error:
_LOGGER.warning('Failed request: %s', error)
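
A self-contained sketch of the same pattern with plain requests, showing why the single OSError handler is enough: requests' exception hierarchy is rooted in IOError/OSError, so connection failures and raise_for_status() errors land in the same clause. All names here are illustrative, not part of the Minut client.

import logging

import requests

_LOGGER = logging.getLogger(__name__)


def fetch_json(url, timeout=10, **params):
    """Fetch JSON, logging failures instead of raising (illustrative)."""
    try:
        response = requests.get(url, timeout=timeout, **params)
        response.raise_for_status()  # HTTPError subclasses OSError
        payload = response.json()
        if 'error' in payload:
            raise OSError(payload['error'])
        return payload
    except OSError as error:  # also catches requests.RequestException
        _LOGGER.warning('Failed request: %s', error)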
|
[
"async def request(self, command, payload, retry=3):\n \"\"\"Request data.\"\"\"\n # pylint: disable=too-many-return-statements\n\n if self._token is None:\n _LOGGER.error(\"No token\")\n return None\n\n _LOGGER.debug(command, payload)\n\n nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))\n url = API_ENDPOINT_2 + command\n timestamp = int(time.time())\n signature = hashlib.sha1(str(REQUEST_TIMEOUT\n + str(timestamp)\n + nonce\n + self._token).encode(\"utf-8\")).hexdigest()\n\n headers = {\n \"Content-Type\": \"application/x-zc-object\",\n \"Connection\": \"Keep-Alive\",\n \"X-Zc-Major-Domain\": \"seanywell\",\n \"X-Zc-Msg-Name\": \"millService\",\n \"X-Zc-Sub-Domain\": \"milltype\",\n \"X-Zc-Seq-Id\": \"1\",\n \"X-Zc-Version\": \"1\",\n \"X-Zc-Timestamp\": str(timestamp),\n \"X-Zc-Timeout\": REQUEST_TIMEOUT,\n \"X-Zc-Nonce\": nonce,\n \"X-Zc-User-Id\": str(self._user_id),\n \"X-Zc-User-Signature\": signature,\n \"X-Zc-Content-Length\": str(len(payload)),\n }\n try:\n with async_timeout.timeout(self._timeout):\n resp = await self.websession.post(url,\n data=json.dumps(payload),\n headers=headers)\n except asyncio.TimeoutError:\n if retry < 1:\n _LOGGER.error(\"Timed out sending command to Mill: %s\", command)\n return None\n return await self.request(command, payload, retry - 1)\n except aiohttp.ClientError:\n _LOGGER.error(\"Error sending command to Mill: %s\", command, exc_info=True)\n return None\n\n result = await resp.text()\n\n _LOGGER.debug(result)\n\n if not result or result == '{\"errorCode\":0}':\n return None\n\n if 'access token expire' in result or 'invalid signature' in result:\n if retry < 1:\n return None\n if not await self.connect():\n return None\n return await self.request(command, payload, retry - 1)\n\n if '\"error\":\"device offline\"' in result:\n if retry < 1:\n _LOGGER.error(\"Failed to send request, %s\", result)\n return None\n _LOGGER.debug(\"Failed to send request, %s. Retrying...\", result)\n await asyncio.sleep(3)\n return await self.request(command, payload, retry - 1)\n\n if 'errorCode' in result:\n _LOGGER.error(\"Failed to send request, %s\", result)\n return None\n data = json.loads(result)\n return data",
"def request(self, service, data):\n \"\"\"\n Makes a call to TinyLetter's __svcbus__ endpoint.\n \"\"\"\n _res = self._request(service, data)\n res = _res.json()[0][0]\n if res[\"success\"] == True:\n return res[\"result\"]\n else:\n err_msg = res[\"errmsg\"]\n raise Exception(\"Request not successful: '{0}'\".format(err_msg))",
"def _zm_request(self, method, api_url, data=None,\n timeout=DEFAULT_TIMEOUT) -> dict:\n \"\"\"Perform a request to the ZoneMinder API.\"\"\"\n try:\n # Since the API uses sessions that expire, sometimes we need to\n # re-auth if the call fails.\n for _ in range(ZoneMinder.LOGIN_RETRIES):\n req = requests.request(\n method, urljoin(self._server_url, api_url), data=data,\n cookies=self._cookies, timeout=timeout,\n verify=self._verify_ssl)\n\n if not req.ok:\n self.login()\n else:\n break\n\n else:\n _LOGGER.error('Unable to get API response from ZoneMinder')\n\n try:\n return req.json()\n except ValueError:\n _LOGGER.exception('JSON decode exception caught while'\n 'attempting to decode \"%s\"', req.text)\n return {}\n except requests.exceptions.ConnectionError:\n _LOGGER.exception('Unable to connect to ZoneMinder')\n return {}",
"def request(self, path, action, data=''):\n \"\"\"To make a request to the API.\"\"\"\n # Check if the path includes URL or not.\n head = self.base_url\n if path.startswith(head):\n path = path[len(head):]\n path = quote_plus(path, safe='/')\n if not path.startswith(self.api):\n path = self.api + path\n log.debug('Using path %s' % path)\n\n # If we have data, convert to JSON\n if data:\n data = json.dumps(data)\n log.debug('Data to sent: %s' % data)\n # In case of key authentication\n if self.private_key and self.public_key:\n timestamp = str(int(time.time()))\n log.debug('Using timestamp: {}'.format(timestamp))\n unhashed = path + timestamp + str(data)\n log.debug('Using message: {}'.format(unhashed))\n self.hash = hmac.new(str.encode(self.private_key),\n msg=unhashed.encode('utf-8'),\n digestmod=hashlib.sha256).hexdigest()\n log.debug('Authenticating with hash: %s' % self.hash)\n self.headers['X-Public-Key'] = self.public_key\n self.headers['X-Request-Hash'] = self.hash\n self.headers['X-Request-Timestamp'] = timestamp\n auth = False\n # In case of user credentials authentication\n elif self.username and self.password:\n auth = requests.auth.HTTPBasicAuth(self.username, self.password)\n # Set unlock reason\n if self.unlock_reason:\n self.headers['X-Unlock-Reason'] = self.unlock_reason\n log.info('Unlock Reason: %s' % self.unlock_reason)\n url = head + path\n # Try API request and handle Exceptions\n try:\n if action == 'get':\n log.debug('GET request %s' % url)\n self.req = requests.get(url, headers=self.headers, auth=auth,\n verify=False)\n elif action == 'post':\n log.debug('POST request %s' % url)\n self.req = requests.post(url, headers=self.headers, auth=auth,\n verify=False, data=data)\n elif action == 'put':\n log.debug('PUT request %s' % url)\n self.req = requests.put(url, headers=self.headers,\n auth=auth, verify=False,\n data=data)\n elif action == 'delete':\n log.debug('DELETE request %s' % url)\n self.req = requests.delete(url, headers=self.headers,\n verify=False, auth=auth)\n\n if self.req.content == b'':\n result = None\n log.debug('No result returned.')\n else:\n result = self.req.json()\n if 'error' in result and result['error']:\n raise TPMException(result['message'])\n\n except requests.exceptions.RequestException as e:\n log.critical(\"Connection error for \" + str(e))\n raise TPMException(\"Connection error for \" + str(e))\n\n except ValueError as e:\n if self.req.status_code == 403:\n log.warning(url + \" forbidden\")\n raise TPMException(url + \" forbidden\")\n elif self.req.status_code == 404:\n log.warning(url + \" forbidden\")\n raise TPMException(url + \" not found\")\n else:\n message = ('%s: %s %s' % (e, self.req.url, self.req.text))\n log.debug(message)\n raise ValueError(message)\n\n return result",
"def _api_request(self, uri, method, **kwargs):\n \"\"\"\n Manages the request by adding any auth information, and retries\n the request after authenticating if the initial request returned\n and Unauthorized exception.\n \"\"\"\n id_svc = self.identity\n if not all((self.management_url, id_svc.token, id_svc.tenant_id)):\n id_svc.authenticate()\n\n if not self.management_url:\n # We've authenticated but no management_url has been set. This\n # indicates that the service is not available.\n raise exc.ServiceNotAvailable(\"The '%s' service is not available.\"\n % self)\n if uri.startswith(\"http\"):\n parsed = list(urllib.parse.urlparse(uri))\n for pos, item in enumerate(parsed):\n if pos < 2:\n # Don't escape the scheme or netloc\n continue\n parsed[pos] = _safe_quote(parsed[pos])\n safe_uri = urllib.parse.urlunparse(parsed)\n else:\n safe_uri = \"%s%s\" % (self.management_url, _safe_quote(uri))\n # Perform the request once. If we get a 401 back then it\n # might be because the auth token expired, so try to\n # re-authenticate and try again. If it still fails, bail.\n try:\n kwargs.setdefault(\"headers\", {})[\"X-Auth-Token\"] = id_svc.token\n if id_svc.tenant_id:\n kwargs[\"headers\"][\"X-Auth-Project-Id\"] = id_svc.tenant_id\n resp, body = self._time_request(safe_uri, method, **kwargs)\n return resp, body\n except exc.Unauthorized as ex:\n try:\n id_svc.authenticate()\n kwargs[\"headers\"][\"X-Auth-Token\"] = id_svc.token\n resp, body = self._time_request(safe_uri, method, **kwargs)\n return resp, body\n except exc.Unauthorized:\n raise ex",
"def send_request(self, endpoint='ticker', coin_name=None, **kwargs):\n \"\"\": param string 'ticker', it's 'ticker' if we want info about coins,\n 'global' for global market's info.\n : param string 'coin_name', specify the name of the coin, if None,\n we'll retrieve info about all available coins.\n \"\"\"\n\n built_url = self._make_url(endpoint, coin_name)\n payload = dict(**kwargs)\n\n self._process_request(endpoint, built_url, payload)",
"def __api_request(self, method, endpoint, params={}, files={}, headers={}, access_token_override=None, do_ratelimiting=True, use_json = False):\n \"\"\"\n Internal API request helper.\n \"\"\"\n response = None\n remaining_wait = 0\n # \"pace\" mode ratelimiting: Assume constant rate of requests, sleep a little less long than it\n # would take to not hit the rate limit at that request rate.\n if do_ratelimiting and self.ratelimit_method == \"pace\":\n if self.ratelimit_remaining == 0:\n to_next = self.ratelimit_reset - time.time()\n if to_next > 0:\n # As a precaution, never sleep longer than 5 minutes\n to_next = min(to_next, 5 * 60)\n time.sleep(to_next)\n else:\n time_waited = time.time() - self.ratelimit_lastcall\n time_wait = float(self.ratelimit_reset - time.time()) / float(self.ratelimit_remaining)\n remaining_wait = time_wait - time_waited\n\n if remaining_wait > 0:\n to_next = remaining_wait / self.ratelimit_pacefactor\n to_next = min(to_next, 5 * 60)\n time.sleep(to_next)\n\n # Generate request headers\n headers = copy.deepcopy(headers)\n if not self.access_token is None:\n headers['Authorization'] = 'Bearer ' + self.access_token\n if not access_token_override is None:\n headers['Authorization'] = 'Bearer ' + access_token_override\n\n if self.debug_requests:\n print('Mastodon: Request to endpoint \"' + endpoint + '\" using method \"' + method + '\".')\n print('Parameters: ' + str(params))\n print('Headers: ' + str(headers))\n print('Files: ' + str(files))\n\n # Make request\n request_complete = False\n while not request_complete:\n request_complete = True\n\n response_object = None\n try:\n kwargs = dict(headers=headers, files=files,\n timeout=self.request_timeout)\n if use_json == False:\n if method == 'GET':\n kwargs['params'] = params\n else:\n kwargs['data'] = params\n else:\n kwargs['json'] = params\n \n response_object = self.session.request(\n method, self.api_base_url + endpoint, **kwargs)\n except Exception as e:\n raise MastodonNetworkError(\"Could not complete request: %s\" % e)\n\n if response_object is None:\n raise MastodonIllegalArgumentError(\"Illegal request.\")\n\n # Parse rate limiting headers\n if 'X-RateLimit-Remaining' in response_object.headers and do_ratelimiting:\n self.ratelimit_remaining = int(response_object.headers['X-RateLimit-Remaining'])\n self.ratelimit_limit = int(response_object.headers['X-RateLimit-Limit'])\n\n try:\n ratelimit_reset_datetime = dateutil.parser.parse(response_object.headers['X-RateLimit-Reset'])\n self.ratelimit_reset = self.__datetime_to_epoch(ratelimit_reset_datetime)\n\n # Adjust server time to local clock\n if 'Date' in response_object.headers:\n server_time_datetime = dateutil.parser.parse(response_object.headers['Date'])\n server_time = self.__datetime_to_epoch(server_time_datetime)\n server_time_diff = time.time() - server_time\n self.ratelimit_reset += server_time_diff\n self.ratelimit_lastcall = time.time()\n except Exception as e:\n raise MastodonRatelimitError(\"Rate limit time calculations failed: %s\" % e)\n\n # Handle response\n if self.debug_requests:\n print('Mastodon: Response received with code ' + str(response_object.status_code) + '.')\n print('response headers: ' + str(response_object.headers))\n print('Response text content: ' + str(response_object.text))\n\n if not response_object.ok:\n try:\n response = response_object.json(object_hook=self.__json_hooks)\n if isinstance(response, dict) and 'error' in response:\n error_msg = response['error']\n elif isinstance(response, str):\n error_msg = response\n 
else:\n error_msg = None\n except ValueError:\n error_msg = None\n\n # Handle rate limiting\n if response_object.status_code == 429:\n if self.ratelimit_method == 'throw' or not do_ratelimiting:\n raise MastodonRatelimitError('Hit rate limit.')\n elif self.ratelimit_method in ('wait', 'pace'):\n to_next = self.ratelimit_reset - time.time()\n if to_next > 0:\n # As a precaution, never sleep longer than 5 minutes\n to_next = min(to_next, 5 * 60)\n time.sleep(to_next)\n request_complete = False\n continue\n\n if response_object.status_code == 404:\n ex_type = MastodonNotFoundError\n if not error_msg:\n error_msg = 'Endpoint not found.'\n # this is for compatibility with older versions\n # which raised MastodonAPIError('Endpoint not found.')\n # on any 404\n elif response_object.status_code == 401:\n ex_type = MastodonUnauthorizedError\n elif response_object.status_code == 502:\n ex_type = MastodonServerError\n else:\n ex_type = MastodonAPIError\n\n raise ex_type(\n 'Mastodon API returned error',\n response_object.status_code,\n response_object.reason,\n error_msg)\n\n try:\n response = response_object.json(object_hook=self.__json_hooks)\n except:\n raise MastodonAPIError(\n \"Could not parse response as JSON, response code was %s, \"\n \"bad json content was '%s'\" % (response_object.status_code,\n response_object.content))\n\n # Parse link headers\n if isinstance(response, list) and \\\n 'Link' in response_object.headers and \\\n response_object.headers['Link'] != \"\":\n tmp_urls = requests.utils.parse_header_links(\n response_object.headers['Link'].rstrip('>').replace('>,<', ',<'))\n for url in tmp_urls:\n if 'rel' not in url:\n continue\n\n if url['rel'] == 'next':\n # Be paranoid and extract max_id specifically\n next_url = url['url']\n matchgroups = re.search(r\"[?&]max_id=([^&]+)\", next_url)\n\n if matchgroups:\n next_params = copy.deepcopy(params)\n next_params['_pagination_method'] = method\n next_params['_pagination_endpoint'] = endpoint\n max_id = matchgroups.group(1)\n if max_id.isdigit():\n next_params['max_id'] = int(max_id)\n else:\n next_params['max_id'] = max_id\n if \"since_id\" in next_params:\n del next_params['since_id']\n if \"min_id\" in next_params:\n del next_params['min_id']\n response[-1]._pagination_next = next_params\n\n if url['rel'] == 'prev':\n # Be paranoid and extract since_id or min_id specifically\n prev_url = url['url']\n \n # Old and busted (pre-2.6.0): since_id pagination\n matchgroups = re.search(r\"[?&]since_id=([^&]+)\", prev_url)\n if matchgroups:\n prev_params = copy.deepcopy(params)\n prev_params['_pagination_method'] = method\n prev_params['_pagination_endpoint'] = endpoint\n since_id = matchgroups.group(1)\n if since_id.isdigit():\n prev_params['since_id'] = int(since_id)\n else:\n prev_params['since_id'] = since_id\n if \"max_id\" in prev_params:\n del prev_params['max_id']\n response[0]._pagination_prev = prev_params\n \n # New and fantastico (post-2.6.0): min_id pagination\n matchgroups = re.search(r\"[?&]min_id=([^&]+)\", prev_url)\n if matchgroups:\n prev_params = copy.deepcopy(params)\n prev_params['_pagination_method'] = method\n prev_params['_pagination_endpoint'] = endpoint\n min_id = matchgroups.group(1)\n if min_id.isdigit():\n prev_params['min_id'] = int(min_id)\n else:\n prev_params['min_id'] = min_id\n if \"max_id\" in prev_params:\n del prev_params['max_id']\n response[0]._pagination_prev = prev_params\n\n return response",
"def request(self, method, endpoint, payload=None, timeout=5):\n \"\"\"Send request to API.\"\"\"\n url = self.api_url + endpoint\n data = None\n headers = {}\n\n if payload is not None:\n data = json.dumps(payload)\n headers['Content-Type'] = 'application/json'\n\n try:\n if self.auth_token is not None:\n headers[API_AUTH_HEADER] = self.auth_token\n response = self.session.request(method, url, data=data,\n headers=headers,\n timeout=timeout)\n if response.status_code != 401:\n return response\n\n _LOGGER.debug(\"Renewing auth token\")\n if not self.login(timeout=timeout):\n return None\n\n # Retry request\n headers[API_AUTH_HEADER] = self.auth_token\n return self.session.request(method, url, data=data,\n headers=headers,\n timeout=timeout)\n except requests.exceptions.ConnectionError:\n _LOGGER.warning(\"Unable to connect to %s\", url)\n except requests.exceptions.Timeout:\n _LOGGER.warning(\"No response from %s\", url)\n\n return None",
"def _request(self, service, **kw):\n \"\"\"Do the actual request to Fastbill's API server.\n\n If successful returns the RESPONSE section the of response, in\n case of an error raises a subclass of FastbillError.\n \"\"\"\n fb_request = {\n 'service': service,\n }\n for key in ['limit', 'offset', 'filter', 'data']:\n fb_request[key] = kw.pop(key, None)\n\n if kw:\n raise _exc.FastbillRequestError(\"Unknown arguments: %s\" %\n \", \".join(kw.keys()))\n\n data = _jsonencoder.dumps(fb_request)\n _logger.debug(\"Sending data: %r\", data)\n\n self._pre_request_callback(service, fb_request)\n # TODO: Retry when we hit a 404 (api not found). Probably a deploy.\n http_resp = self.session.post(self.SERVICE_URL,\n auth=self.auth,\n headers=self.headers,\n timeout=self.timeout,\n data=data)\n self._post_request_callback(service, fb_request, http_resp)\n\n try:\n json_resp = http_resp.json()\n except ValueError:\n _logger.debug(\"Got data: %r\", http_resp.content)\n _abort_http(service, http_resp)\n return # to make PyCharm happy\n else:\n _logger.debug(\"Got data: %r\", json_resp)\n\n errors = json_resp['RESPONSE'].get('ERRORS')\n if errors:\n _abort_api(service, json_resp, errors)\n\n # If Fastbill should ever remove the REQUEST or SERVICE section\n # from their responses, just remove the checks.\n if json_resp['REQUEST']['SERVICE'] != service:\n raise _exc.FastbillError(\n \"API Error: Got response from wrong service.\")\n\n return _response.FastbillResponse(json_resp['RESPONSE'], self)",
"def simple_request(self, request, *args, **kwargs):\n \"\"\"Create and send a request to the server.\n\n This method implements a very small subset of the options\n possible to send an request. It is provided as a shortcut to\n sending a simple request.\n\n Parameters\n ----------\n request : str\n The request to call.\n *args : list of objects\n Arguments to pass on to the request.\n\n Keyword Arguments\n -----------------\n timeout : float or None, optional\n Timeout after this amount of seconds (keyword argument).\n mid : None or int, optional\n Message identifier to use for the request message. If None, use either\n auto-incrementing value or no mid depending on the KATCP protocol version\n (mid's were only introduced with KATCP v5) and the value of the `use_mid`\n argument. Defaults to None\n use_mid : bool\n Use a mid for the request if True. Defaults to True if the server supports\n them.\n\n Returns\n -------\n future object.\n\n Example\n -------\n\n ::\n\n reply, informs = yield ic.simple_request('help', 'sensor-list')\n\n \"\"\"\n # TODO (NM 2016-11-03) This method should really live on the lower level\n # katcp_client in client.py, is generally useful IMHO\n use_mid = kwargs.get('use_mid')\n timeout = kwargs.get('timeout')\n mid = kwargs.get('mid')\n msg = katcp.Message.request(request, *args, mid=mid)\n return self.katcp_client.future_request(msg, timeout, use_mid)",
"async def request(\n self,\n method: str,\n endpoint: str,\n *,\n headers: dict = None,\n params: dict = None) -> dict:\n \"\"\"Make a request against air-matters.com.\"\"\"\n url = '{0}/{1}'.format(API_URL_SCAFFOLD, endpoint)\n\n if not headers:\n headers = {}\n headers.update({'x-access-token': self._api_key})\n\n if not params:\n params = {}\n params.update({\n 'lat': self.latitude,\n 'lng': self.longitude,\n 'alt': self.altitude\n })\n\n async with self._websession.request(method, url, headers=headers,\n params=params) as resp:\n try:\n resp.raise_for_status()\n return await resp.json(content_type=None)\n except client_exceptions.ClientError as err:\n if any(code in str(err) for code in ('401', '403')):\n raise InvalidApiKeyError('Invalid API key')\n raise RequestError(\n 'Error requesting data from {0}: {1}'.format(\n endpoint, err)) from None",
"def request(self, method, data=None, nid=None, nid_key='nid',\n api_type=\"logic\", return_response=False):\n \"\"\"Get data from arbitrary Piazza API endpoint `method` in network `nid`\n\n :type method: str\n :param method: An internal Piazza API method name like `content.get`\n or `network.get_users`\n :type data: dict\n :param data: Key-value data to pass to Piazza in the request\n :type nid: str\n :param nid: This is the ID of the network to which the request\n should be made. This is optional and only to override the\n existing `network_id` entered when creating the class\n :type nid_key: str\n :param nid_key: Name expected by Piazza for `nid` when making request.\n (Usually and by default \"nid\", but sometimes \"id\" is expected)\n :returns: Python object containing returned data\n :type return_response: bool\n :param return_response: If set, returns whole :class:`requests.Response`\n object rather than just the response body\n \"\"\"\n self._check_authenticated()\n\n nid = nid if nid else self._nid\n if data is None:\n data = {}\n\n headers = {}\n if \"session_id\" in self.session.cookies:\n headers[\"CSRF-Token\"] = self.session.cookies[\"session_id\"]\n\n # Adding a nonce to the request\n endpoint = self.base_api_urls[api_type]\n if api_type == \"logic\":\n endpoint += \"?method={}&aid={}\".format(\n method,\n _piazza_nonce()\n )\n\n response = self.session.post(\n endpoint,\n data=json.dumps({\n \"method\": method,\n \"params\": dict({nid_key: nid}, **data)\n }),\n headers=headers\n )\n return response if return_response else response.json()"
] |
[
0.7648521661758423,
0.7373426556587219,
0.7309609651565552,
0.7233394980430603,
0.7192167639732361,
0.7190421223640442,
0.7179194688796997,
0.7171496748924255,
0.7170108556747437,
0.7159733176231384,
0.7138870358467102,
0.7130632400512695
] |
Request list of devices.
|
def _request_devices(self, url, _type):
"""Request list of devices."""
res = self._request(url)
    return res.get(_type, {}) if res else {}
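
Illustratively, with a made-up payload in the shape the endpoint appears to return, the guard keeps the return type consistent whether the request succeeds, the key is missing, or the request failed and res is None.

# Hypothetical payload handled by _request_devices.
res = {'devices': [{'device_id': 'abc123', 'description': 'Point'}]}
devices = res.get('devices', {}) if res else {}
assert devices == [{'device_id': 'abc123', 'description': 'Point'}]

res = None  # what _request returns after a failed call
assert (res.get('devices', {}) if res else {}) == {}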
|
[
"def list_devices():\n \"\"\" List devices via HTTP GET. \"\"\"\n output = {}\n for device_id, device in devices.items():\n output[device_id] = {\n 'host': device.host,\n 'state': device.state\n }\n return jsonify(devices=output)",
"def get_devices(self):\n \"\"\"\n Return a list of devices.\n Deprecated, use get_actors instead.\n \"\"\"\n url = self.base_url + '/net/home_auto_query.lua'\n response = self.session.get(url, params={\n 'sid': self.sid,\n 'command': 'AllOutletStates',\n 'xhr': 0,\n }, timeout=15)\n response.raise_for_status()\n data = response.json()\n count = int(data[\"Outlet_count\"])\n devices = []\n for i in range(1, count + 1):\n device = Device(\n int(data[\"DeviceID_{0}\".format(i)]),\n int(data[\"DeviceConnectState_{0}\".format(i)]),\n int(data[\"DeviceSwitchState_{0}\".format(i)])\n )\n devices.append(device)\n return devices",
"async def fetch_device_list(self):\n \"\"\"Fetch list of devices.\"\"\"\n url = '{}/users/me'.format(API_URL)\n\n dlist = await self.api_get(url)\n if dlist is None:\n _LOGGER.error('Unable to fetch eight devices.')\n else:\n self._devices = dlist['user']['devices']\n _LOGGER.debug('Devices: %s', self._devices)",
"def get_devices(self):\n \"\"\"Execute the `get_devices` task and store the results in `self.devices`.\"\"\"\n utils.pending_message('Fetching device list...')\n\n get_devices_task = self.client.devices(\n account=self.account\n )\n\n # We wait for device list info as this sample relies on it next.\n get_devices_task.wait_for_result(timeout=self.timeout)\n\n get_devices_result = json.loads(get_devices_task.result)\n self.devices = get_devices_result['devices']\n\n utils.info_message('Get devices successful')",
"def devices(self, value):\n \"\"\"\n { \"PathOnHost\": \"/dev/deviceName\", \"PathInContainer\": \"/dev/deviceName\", \"CgroupPermissions\": \"mrw\"}\n \"\"\"\n\n if value is None:\n self._devices = None\n\n elif isinstance(value, list):\n results = []\n delimiter = ':'\n\n for device in value:\n if not isinstance(device, six.string_types):\n raise TypeError(\"each device must be a str. {0} was passed\".format(device))\n\n occurrences = device.count(delimiter)\n permissions = 'rwm'\n\n if occurrences is 0:\n path_on_host = device\n path_in_container = device\n\n elif occurrences is 1:\n path_on_host, path_in_container = device.split(delimiter)\n\n elif occurrences is 2:\n path_on_host, path_in_container, permissions = device.split(delimiter)\n\n if permissions not in 'rwm':\n raise ValueError(\"only permissions supported for devices are any combination of 'r' 'w' 'm'.\")\n else:\n raise ValueError(\n \"\"\"When passing devices they must be in one of the\n following formats: path_on_host, path_on_host:path_in_container,\n or path_on_host:path_in_container:permissions\"\"\"\n )\n\n results.append(\"{0}:{1}:{2}\".format(path_on_host, path_in_container, permissions))\n\n self._devices = results\n else:\n raise TypeError(\"devices must be a list or None.\")",
"def list_devices(self, **kwargs):\n \"\"\"List devices in the device catalog.\n\n Example usage, listing all registered devices in the catalog:\n\n .. code-block:: python\n\n filters = { 'state': {'$eq': 'registered' } }\n devices = api.list_devices(order='asc', filters=filters)\n for idx, d in enumerate(devices):\n print(idx, d.id)\n\n :param int limit: The number of devices to retrieve.\n :param str order: The ordering direction, ascending (asc) or\n descending (desc)\n :param str after: Get devices after/starting at given `device_id`\n :param filters: Dictionary of filters to apply.\n :returns: a list of :py:class:`Device` objects registered in the catalog.\n :rtype: PaginatedResponse\n \"\"\"\n kwargs = self._verify_sort_options(kwargs)\n kwargs = self._verify_filters(kwargs, Device, True)\n api = self._get_api(device_directory.DefaultApi)\n return PaginatedResponse(api.device_list, lwrap_type=Device, **kwargs)",
"def get_devices(self, refresh=False):\n \"\"\"Get all devices from Abode.\"\"\"\n if refresh or self._devices is None:\n if self._devices is None:\n self._devices = {}\n\n _LOGGER.info(\"Updating all devices...\")\n response = self.send_request(\"get\", CONST.DEVICES_URL)\n response_object = json.loads(response.text)\n\n _LOGGER.debug(\"Get Devices Response: %s\", response.text)\n\n for device_json in response_object:\n # Attempt to reuse an existing device\n device = self._devices.get(device_json['id'])\n\n # No existing device, create a new one\n if device:\n device.update(device_json)\n else:\n device = SkybellDevice(device_json, self)\n self._devices[device.device_id] = device\n\n return list(self._devices.values())",
"@Override\n public ListDevicesResult listDevices(ListDevicesRequest request) {\n request = beforeClientExecution(request);\n return executeListDevices(request);\n }",
"def poll_devices(self):\n \"\"\"Request status updates from each device.\"\"\"\n for addr in self.devices:\n device = self.devices[addr]\n if not device.address.is_x10:\n device.async_refresh_state()",
"def get_devices(self) -> list:\n \"\"\"Return list of VeSync devices\"\"\"\n\n if not self.enabled:\n return None\n\n self.in_process = True\n\n response, _ = helpers.call_api(\n '/cloud/v1/deviceManaged/devices',\n 'post',\n headers=helpers.req_headers(self),\n json=helpers.req_body(self, 'devicelist')\n )\n\n if response and helpers.check_response(response, 'get_devices'):\n if 'result' in response and 'list' in response['result']:\n device_list = response['result']['list']\n outlets, switches, fans = self.process_devices(device_list)\n else:\n logger.error('Device list in response not found')\n else:\n logger.error('Error retrieving device list')\n\n self.in_process = False\n\n return (outlets, switches, fans)",
"def list(self):\n \"\"\"\n List available block devices\n \"\"\"\n response = self._client.raw('disk.list', {})\n\n result = response.get()\n\n if result.state != 'SUCCESS':\n raise RuntimeError('failed to list disks: %s' % result.stderr)\n\n if result.level != 20: # 20 is JSON output.\n raise RuntimeError('invalid response type from disk.list command')\n\n data = result.data.strip()\n if data:\n return json.loads(data)\n else:\n return {}",
"def get_devicelist(home_hub_ip='192.168.1.254'):\n \"\"\"Retrieve data from BT Home Hub 5 and return parsed result.\n \"\"\"\n\n url = 'http://{}/'.format(home_hub_ip)\n\n try:\n response = requests.get(url, timeout=5)\n except requests.exceptions.Timeout:\n _LOGGER.exception(\"Connection to the router timed out\")\n return\n if response.status_code == 200:\n return parse_devicelist(response.text)\n else:\n _LOGGER.error(\"Invalid response from Home Hub: %s\", response)"
] |
[
0.825703501701355,
0.7932402491569519,
0.788608193397522,
0.7876166105270386,
0.7607917189598083,
0.7584074139595032,
0.7558024525642395,
0.7550823092460632,
0.7507629990577698,
0.7491199374198914,
0.7466188669204712,
0.7458732724189758
] |
Return sensor value based on sensor_uri.
|
def read_sensor(self, device_id, sensor_uri):
"""Return sensor value based on sensor_uri."""
url = MINUT_DEVICES_URL + "/{device_id}/{sensor_uri}".format(
device_id=device_id, sensor_uri=sensor_uri)
res = self._request(url, request_type='GET', data={'limit': 1})
    if not res or not res.get('values'):
        return None
    return res['values'][-1].get('value')
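
A small worked example of the extraction above, using a made-up payload in the shape the sensor endpoint appears to return (one sample per entry of 'values', newest last).

# Hypothetical payload for .../devices/<device_id>/temperature?limit=1.
res = {'values': [{'time': 1550000000, 'value': 21.5}]}
value = res['values'][-1].get('value') if res and res.get('values') else None
assert value == 21.5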
|
[
"def export_sensor(self, sensor):\n \"\"\"Return (value, unit) from a sensor node.\"\"\"\n value = None\n unit = None\n try:\n container = self.sensor_data.get(sensor)\n unit = container.get('unit')\n data_point = container.get('data', [[0, [0.0]]])\n if data_point and data_point[0]:\n value = data_point[0][-1][0]\n except (ValueError, KeyError, AttributeError):\n pass\n\n return (value, unit)",
"def get_value(self):\n \"\"\"Get a fresh sensor value from the KATCP resource\n\n Returns\n -------\n reply : tornado Future resolving with :class:`KATCPSensorReading` object\n\n Note\n ----\n\n As a side-effect this will update the reading stored in this object, and result in\n registered listeners being called.\n \"\"\"\n yield self._manager.poll_sensor(self._name)\n # By now the sensor manager should have set the reading\n raise Return(self._reading.value)",
"def sensor(self, sensor_type):\n \"\"\"Update and return sensor value.\"\"\"\n _LOGGER.debug(\"Reading %s sensor.\", sensor_type)\n return self._session.read_sensor(self.device_id, sensor_type)",
"def _get_sensor(self):\n \"\"\"Determine the sensor for this file.\"\"\"\n # sometimes Himawari-8 (or 9) data is stored in SCMI format\n is_h8 = 'H8' in self.platform_name\n is_h9 = 'H9' in self.platform_name\n is_ahi = is_h8 or is_h9\n return 'ahi' if is_ahi else 'abi'",
"def tdSensorValue(self, protocol, model, sid, datatype):\n \"\"\"Get the sensor value for a given sensor.\n\n :return: a dict with the keys: value, timestamp.\n \"\"\"\n value = create_string_buffer(20)\n timestamp = c_int()\n\n self._lib.tdSensorValue(protocol, model, sid, datatype,\n value, sizeof(value), byref(timestamp))\n return {'value': self._to_str(value), 'timestamp': timestamp.value}",
"def get(cls, sensor_type):\n \"\"\" Shortcut that acquires the default Sensor of a given type. \n \n Parameters\n ----------\n sensor_type: int\n Type of sensor to get.\n \n Returns\n -------\n result: Future \n A future that resolves to an instance of the Sensor or None\n if the sensor is not present or access is not allowed.\n \n \"\"\"\n app = AndroidApplication.instance()\n f = app.create_future()\n\n def on_sensor(sid, mgr):\n if sid is None:\n f.set_result(None)\n else:\n f.set_result(Sensor(__id__=sid, manager=mgr, type=sensor_type))\n\n SensorManager.get().then(\n lambda sm: sm.getDefaultSensor(sensor_type).then(\n lambda sid, sm=sm:on_sensor(sid, sm)))\n\n return f",
"def request_sensor_value(self, req, msg):\n \"\"\"Request the value of a sensor or sensors.\n\n A list of sensor values as a sequence of #sensor-value informs.\n\n Parameters\n ----------\n name : str, optional\n Name of the sensor to poll (the default is to send values for all\n sensors). If name starts and ends with '/' it is treated as a\n regular expression and all sensors whose names contain the regular\n expression are returned.\n\n Informs\n -------\n timestamp : float\n Timestamp of the sensor reading in seconds since the Unix\n epoch, or milliseconds for katcp versions <= 4.\n count : {1}\n Number of sensors described in this #sensor-value inform. Will\n always be one. It exists to keep this inform compatible with\n #sensor-status.\n name : str\n Name of the sensor whose value is being reported.\n value : object\n Value of the named sensor. Type depends on the type of the sensor.\n\n Returns\n -------\n success : {'ok', 'fail'}\n Whether sending the list of values succeeded.\n informs : int\n Number of #sensor-value inform messages sent.\n\n Examples\n --------\n ::\n\n ?sensor-value\n #sensor-value 1244631611.415231 1 psu.voltage 4.5\n #sensor-value 1244631611.415200 1 cpu.status off\n ...\n !sensor-value ok 5\n\n ?sensor-value cpu.power.on\n #sensor-value 1244631611.415231 1 cpu.power.on 0\n !sensor-value ok 1\n\n \"\"\"\n exact, name_filter = construct_name_filter(msg.arguments[0]\n if msg.arguments else None)\n sensors = [(name, sensor) for name, sensor in\n sorted(self._sensors.iteritems()) if name_filter(name)]\n\n if exact and not sensors:\n return req.make_reply(\"fail\", \"Unknown sensor name.\")\n\n katcp_version = self.PROTOCOL_INFO.major\n for name, sensor in sensors:\n timestamp, status, value = sensor.read_formatted(katcp_version)\n req.inform(timestamp, \"1\", name, status, value)\n return req.make_reply(\"ok\", str(len(sensors)))",
"def get(**kwargs):\n \"\"\"\n Safe sensor wrapper\n \"\"\"\n sensor = None\n tick = 0\n driver = DHTReader(**kwargs)\n while not sensor and tick < TIME_LIMIT:\n try:\n sensor = driver.receive_data()\n except DHTException:\n tick += 1\n return sensor",
"def future_get_sensor(self, name, update=None):\n \"\"\"Get the sensor object.\n\n Check if we have information for this sensor, if not connect to server\n and update (if allowed) to get information.\n\n Parameters\n ----------\n name : string\n Name of the sensor.\n update : bool or None, optional\n True allow inspect client to inspect katcp server if the sensor\n is not known.\n\n Returns\n -------\n Sensor created by :meth:`sensor_factory` or None if sensor not found.\n\n Notes\n -----\n Ensure that self.state.data_synced == True if yielding to future_get_sensor from\n a state-change callback, or a deadlock will occur.\n\n \"\"\"\n obj = None\n exist = yield self.future_check_sensor(name, update)\n if exist:\n sensor_info = self._sensors_index[name]\n obj = sensor_info.get('obj')\n if obj is None:\n sensor_type = katcp.Sensor.parse_type(\n sensor_info.get('sensor_type'))\n sensor_params = katcp.Sensor.parse_params(\n sensor_type,\n sensor_info.get('params'))\n obj = self.sensor_factory(\n name=name,\n sensor_type=sensor_type,\n description=sensor_info.get('description'),\n units=sensor_info.get('units'),\n params=sensor_params)\n self._sensors_index[name]['obj'] = obj\n self._sensor_object_cache[name] = obj\n\n raise tornado.gen.Return(obj)",
"def value(self, datatype):\n \"\"\"Return the :class:`SensorValue` for the given data type.\n\n sensor.value(TELLSTICK_TEMPERATURE) is identical to calling\n sensor.temperature().\n \"\"\"\n value = self.lib.tdSensorValue(\n self.protocol, self.model, self.id, datatype)\n return SensorValue(datatype, value['value'], value['timestamp'])",
"def get_sensor(self, sensor_name):\n \"\"\"Fetch the sensor with the given name.\n\n Parameters\n ----------\n sensor_name : str\n Name of the sensor to retrieve.\n\n Returns\n -------\n sensor : Sensor object\n The sensor with the given name.\n\n \"\"\"\n sensor = self._sensors.get(sensor_name, None)\n if not sensor:\n raise ValueError(\"Unknown sensor '%s'.\" % (sensor_name,))\n return sensor",
"def lru(cls, name, description=None, unit='',\n default=None, initial_status=None):\n \"\"\"Instantiate a new lru sensor object.\n\n Parameters\n ----------\n name : str\n The name of the sensor.\n description : str\n A short description of the sensor.\n units : str\n The units of the sensor value. May be the empty string\n if there are no applicable units.\n default : enum, Sensor.LRU_*\n An initial value for the sensor. Defaults to self.LRU_NOMINAL\n initial_status : int enum or None\n An initial status for the sensor. If None, defaults to\n Sensor.UNKNOWN. `initial_status` must be one of the keys in\n Sensor.STATUSES\n\n \"\"\"\n return cls(cls.LRU, name, description, unit, None,\n default, initial_status)"
] |
[
0.7502654790878296,
0.7147580981254578,
0.7144923210144043,
0.704089343547821,
0.7018816471099854,
0.7003004550933838,
0.6929284930229187,
0.689769983291626,
0.686083197593689,
0.68428635597229,
0.6825010776519775,
0.6811649203300476
] |
Register webhook.
|
def _register_webhook(self, webhook_url, events):
"""Register webhook."""
response = self._request(
MINUT_WEBHOOKS_URL,
request_type='POST',
json={
'url': webhook_url,
'events': events,
},
)
return response
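
For context, a standalone sketch of the same registration POST with plain requests. The endpoint constant and bearer-token handling are assumptions standing in for the client's MINUT_WEBHOOKS_URL and authenticated session.

import requests

WEBHOOKS_URL = 'https://api.minut.com/v1/webhooks'  # assumed endpoint


def register_webhook(auth_token, webhook_url, events):
    """Mirror the JSON body sent by _register_webhook (illustrative)."""
    response = requests.post(
        WEBHOOKS_URL,
        headers={'Authorization': 'Bearer {}'.format(auth_token)},
        json={'url': webhook_url, 'events': events},
        timeout=10,
    )
    response.raise_for_status()
    return response.json()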
|
[
"public function register()\n {\n Notification::resolved(function (ChannelManager $service) {\n $service->extend('slack', function ($app) {\n return new Channels\\SlackWebhookChannel(new HttpClient);\n });\n });\n }",
"def register_webhook(self, webhook, **kwargs): # noqa: E501\n \"\"\"Register a callback URL # noqa: E501\n\n Register a URL to which the server should deliver notifications of the subscribed resource changes. To get notifications pushed, you also need to place the subscriptions. The maximum length of the URL, header keys and values, all combined, is 400 characters. Notifications are delivered as PUT messages to the HTTP server defined by the client with a subscription server message. The given URL should be accessible and respond to the PUT request with response code of 200 or 204. Device Management Connect tests the callback URL with an empty payload when the URL is registered. For more information on callback notification, see [NotificationMessage](/docs/current/service-api-references/mbed-cloud-connect.html#models). **Optional headers in a callback message:** You can set optional headers to a callback in a **Webhook** object. Device Management Connect will include the header and key pairs to the notification messages send them to callback URL. As the callback URL's are API key specific also the headers are. One possible use for the additional headers is to check the origin of a PUT request and also distinguish the application (API key) to which the notification belongs to. **Note**: Only one callback URL per an API key can be active. If you register a new URL while another one is already active, it replaces the active one. There can be only one notification channel at a time. If the Long Poll notification is already present, you need to delete it before setting the callback URL. **Expiration of a callback URL:** A callback can expire when Device Management cannot deliver a notification due to a connection timeout or an error response (4xx or 5xx). After each delivery failure, Device Management sets an exponential back off time and makes a retry attempt after that. The first retry delay is 1 second, then 2s, 4s, 8s, ..., 2min, 2min. The maximum retry delay is 2 minutes. The callback URL will be removed if all retries fail withing 24 hours. More about [notification sending logic](/docs/current/integrate-web-app/event-notification.html#notification-sending-logic). **Supported callback URL protocols:** Currently, only HTTP and HTTPS protocols are supported. **HTTPS callback URLs:** When delivering a notification to an HTTPS based callback URL, Device Management Connect will present a valid client certificate to identify itself. The certificate is signed by a trusted certificate authorithy (GlobalSign) with a Common Name (CN) set to notifications.mbedcloud.com. **Example usage:** This example command shows how to set your callback URL and API key. It also sets an optional header authorization. When Device Management Connect calls your callback URL, the call contains the authorization header with the defined value. curl -X PUT \\\\ https://api.us-east-1.mbedcloud.com/v2/notification/callback \\\\ -H 'authorization: Bearer {api-key}' \\\\ -H 'content-type: application/json' \\\\ -d '{ \\\"url\\\": \\\"{callback-url}\\\", \\\"headers\\\": {\\\"authorization\\\" : \\\"f4b93d6e-4652-4874-82e4-41a3ced0cd56\\\"} }' # noqa: E501\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass asynchronous=True\n >>> thread = api.register_webhook(webhook, asynchronous=True)\n >>> result = thread.get()\n\n :param asynchronous bool\n :param Webhook webhook: A json object that contains the optional headers and the URL to which the notifications need to be sent. (required)\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('asynchronous'):\n return self.register_webhook_with_http_info(webhook, **kwargs) # noqa: E501\n else:\n (data) = self.register_webhook_with_http_info(webhook, **kwargs) # noqa: E501\n return data",
"public void registerBot(WebhookBot bot) throws TelegramApiRequestException {\n if (useWebhook) {\n webhook.registerWebhook(bot);\n bot.setWebhook(externalUrl + bot.getBotPath(), pathToCertificate);\n }\n }",
"function(params, callback) {\n if (!params.url) return (callback || function() {})(new Error(utils.i18n.webhooks.url));\n if (!params.events || !params.events.length) return (callback || function() {})(new Error(utils.i18n.webhooks.events));\n if (!params.secret) return (callback || function() {})(new Error(utils.i18n.webhooks.secret));\n\n utils.debug('Webhooks register: ' + params.url);\n request.post({\n path: '/webhooks',\n body: {\n target_url: params.url,\n events: params.events,\n secret: params.secret,\n config: params.config || {},\n\t version: version,\n }\n }, function(err, res) {\n if (!err) res.body.secret = params.secret;\n if (callback) callback(err, res);\n });\n }",
"public function create(Webhook &$webhook)\n {\n $data = $webhook->exportData();\n $response = $this->request(\n '/admin/webhooks.json', 'POST', array(\n 'webhook' => $data\n )\n );\n $webhook->setData($response['webhook']);\n }",
"def update_webhook(self, webhook_url, webhook_id, events=None):\n \"\"\"Register webhook (if it doesn't exit).\"\"\"\n hooks = self._request(MINUT_WEBHOOKS_URL, request_type='GET')['hooks']\n try:\n self._webhook = next(\n hook for hook in hooks if hook['url'] == webhook_url)\n _LOGGER.debug(\"Webhook: %s\", self._webhook)\n except StopIteration: # Not found\n if events is None:\n events = [e for v in EVENTS.values() for e in v if e]\n self._webhook = self._register_webhook(webhook_url, events)\n _LOGGER.debug(\"Registered hook: %s\", self._webhook)\n return self._webhook",
"public function registerMainWebhook($listId, $webhookurl)\n {\n // Configure webhook\n $subscribeWebhook = [\n 'url' => $webhookurl,\n 'events' => [\n 'subscribe' => true,\n 'unsubscribe' => true,\n 'profile' => true,\n 'cleaned' => true,\n 'upemail' => true,\n 'campaign' => true\n ],\n 'sources' => [\n 'user' => true,\n 'admin' => true,\n 'api' => false // to avoid double (infinite loop) update (update an subscriber with the API and the webhook reupdate the user, ...)\n ]\n ];\n\n return $this->addWebhook($listId, $subscribeWebhook);\n }",
"public function boot()\n {\n if ($this->app->runningInConsole()) {\n $this->publishes([\n __DIR__.'/../config/ohdear-webhooks.php' => config_path('ohdear-webhooks.php'),\n ], 'config');\n }\n\n Route::macro('ohDearWebhooks', function ($url) {\n return Route::post($url, '\\OhDear\\LaravelWebhooks\\OhDearWebhooksController');\n });\n }",
"function() {\n\t\tvar args = Array.prototype.slice.call(arguments);\n\t\tvar type = args.shift();\n\t\tvar message = args.shift();\n\n\t\tif (_config.allow.indexOf(type) === -1) {\n\t\t\treturn true;\n\t\t}\n\n\t\tif (type === 'error') {\n\t\t\t// only one notify error message.\n\t\t\tif (_check(message) !== false) {\n\t\t\t\treturn _this;\n\t\t\t}\n\t\t\tmessage = _config.mention.join(' ') + ' ' + message;\n\t\t}\n\n\t\targs.unshift(message);\n\t\tif (typeof _config.prefix !== 'undefined') {\n\t\t\targs.unshift(_config.prefix);\n\t\t}\n\n\t\tvar payload = {\n\t\t\tchannel: _config.channel,\n\t\t\ttext: args.join(\"\\n\"),\n\t\t};\n\n\t\tif (typeof _config.name !== 'undefined') {\n\t\t\tpayload.username = _config.name;\n\t\t}\n\n\t\tif (typeof _config.icon !== 'undefined') {\n\t\t\tpayload.icon_emoji = _config.icon;\n\t\t}\n\n\t\t_client.webhook(payload, function(err) {\n\t\t\tif (err !== null) {\n\t\t\t\tconsole.error(err);\n\t\t\t}\n\t\t});\n\n\t\treturn _this;\n\t}",
"def set_webhook(self, *args, **kwargs):\n \"\"\"See :func:`set_webhook`\"\"\"\n return set_webhook(*args, **self._merge_overrides(**kwargs)).run()",
"public function register()\n {\n $this->prepare_hook = $this->prepare_hook ?: Hook::subscribe('Wedeto.HTTP.Forms.Form.prepare', [$this, 'hookPrepareForm']);\n $this->validate_hook = $this->validate_hook ?: Hook::subscribe('Wedeto.HTTP.Forms.Form.isValid', [$this, 'hookIsValid']);\n }",
"def add_webhook(self, name, metadata=None):\n \"\"\"\n Adds a webhook to this policy.\n \"\"\"\n return self.manager.add_webhook(self.scaling_group, self, name,\n metadata=metadata)"
] |
[
0.7788100242614746,
0.7587203979492188,
0.7560406923294067,
0.7534716129302979,
0.7527072429656982,
0.7495079636573792,
0.7474098205566406,
0.7359973192214966,
0.7276617884635925,
0.7261226177215576,
0.7255139350891113,
0.7233206629753113
] |
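As a quick illustration of the request the positive example builds, here is a minimal, self-contained sketch of the JSON body it POSTs; the URL and event names are assumptions for illustration, not values from the dataset:

import json

# Hypothetical values; in the real library the event names come from its EVENTS map.
webhook_url = 'https://example.com/minut-webhook'
events = ['alarm_heard', 'temperature_high']

# The positive example posts exactly this JSON document to MINUT_WEBHOOKS_URL.
payload = json.dumps({'url': webhook_url, 'events': events})
print(payload)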
Remove webhook.
|
def remove_webhook(self):
"""Remove webhook."""
if self._webhook.get('hook_id'):
self._request(
"{}/{}".format(MINUT_WEBHOOKS_URL, self._webhook['hook_id']),
request_type='DELETE',
)
|
[
"def deleteWebhook(self, hook_id):\n \"\"\"Remove a webhook.\"\"\"\n path = '/'.join(['notification', 'webhook', hook_id])\n return self.rachio.delete(path)",
"def delete_webhook(webhook_id):\n \"\"\"Delete webhook.\"\"\"\n webhook = get_data_or_404('webhook', webhook_id)\n action = get_data_or_404('action', webhook['action_id'])\n project = get_data_or_404('project', action['project_id'])\n\n if project['owner_id'] != get_current_user_id():\n return jsonify(message='forbidden'), 403\n\n delete_instance('webhook', action['id'])\n\n return jsonify({})",
"def delete_webhook(self, policy, webhook):\n \"\"\"\n Deletes the specified webhook from the specified policy.\n \"\"\"\n return self.manager.delete_webhook(self, policy, webhook)",
"def delete_webhook(self):\n \"\"\"Delete/remove registered webhook.\n\n If no webhook is registered, an exception (404) will be raised.\n\n Note that every registered subscription will be deleted as part of\n deregistering a webhook.\n\n :return: void\n \"\"\"\n api = self._get_api(mds.NotificationsApi)\n api.deregister_webhook()\n\n # Every subscription will be deleted, so we can clear the queues too.\n self._queues.clear()\n return",
"def delete_webhook(self, webhook):\n \"\"\"\n Deletes the specified webhook from this policy.\n \"\"\"\n return self.manager.delete_webhook(self.scaling_group, self, webhook)",
"def delete_by_id(self, webhook, params={}, **options): \n \"\"\"This method permanently removes a webhook. Note that it may be possible\n to receive a request that was already in flight after deleting the\n webhook, but no further requests will be issued.\n\n Parameters\n ----------\n webhook : {Id} The webhook to delete.\n \"\"\"\n path = \"/webhooks/%s\" % (webhook)\n return self.client.delete(path, params, **options)",
"def remove(domain, action)\n fail Mailgun::ParameterError('Domain not provided to remove webhook from') unless domain\n fail Mailgun::ParameterError('Action not provided to identify webhook to remove') unless action\n @client.delete(\"domains/#{domain}/webhooks/#{action}\").to_h['message'] == 'Webhook has been deleted'\n rescue Mailgun::CommunicationError\n false\n end",
"async def delete_webhook(self) -> base.Boolean:\n \"\"\"\n Use this method to remove webhook integration if you decide to switch back to getUpdates.\n Returns True on success. Requires no parameters.\n\n Source: https://core.telegram.org/bots/api#deletewebhook\n\n :return: Returns True on success\n :rtype: :obj:`base.Boolean`\n \"\"\"\n payload = generate_payload(**locals())\n\n result = await self.request(api.Methods.DELETE_WEBHOOK, payload)\n return result",
"def delete_webhook(self, scaling_group, policy, webhook):\n \"\"\"\n Deletes the specified webhook from the policy.\n \"\"\"\n return self._manager.delete_webhook(scaling_group, policy, webhook)",
"def delete_webhook(self, id, **data):\n \"\"\"\n DELETE /webhooks/:id/\n Deletes the specified :format:`webhook` object.\n \"\"\"\n \n return self.delete(\"/webhooks/{0}/\".format(id), data=data)",
"public Integer delete(CMAWebhook webhook) {\n final String webhookId = getResourceIdOrThrow(webhook, \"webhook\");\n final String spaceId = getSpaceIdOrThrow(webhook, \"webhook\");\n\n return service.delete(spaceId, webhookId).blockingFirst().code();\n }",
"def delete_webhook(self, scaling_group, policy, webhook):\n \"\"\"\n Deletes the specified webhook from the specified policy.\n \"\"\"\n uri = \"/%s/%s/policies/%s/webhooks/%s\" % (self.uri_base,\n utils.get_id(scaling_group), utils.get_id(policy),\n utils.get_id(webhook))\n resp, resp_body = self.api.method_delete(uri)\n return None"
] |
[
0.8524535298347473,
0.8141569495201111,
0.8135190010070801,
0.8113977909088135,
0.8041204810142517,
0.8034901022911072,
0.8034071326255798,
0.7996309995651245,
0.7994512915611267,
0.7933698892593384,
0.7725521922111511,
0.7699292302131653
] |
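The positive example deletes by appending the hook id to the collection URL and guards against a missing 'hook_id'. A self-contained sketch of that guard and URL construction (the base URL and id below are illustrative assumptions, not real endpoints):

# Illustrative stand-ins for MINUT_WEBHOOKS_URL and a registered hook.
MINUT_WEBHOOKS_URL = 'https://api.example.com/v1/webhooks'
webhook = {'hook_id': 'abc123'}

if webhook.get('hook_id'):  # skip the DELETE entirely if nothing was registered
    url = "{}/{}".format(MINUT_WEBHOOKS_URL, webhook['hook_id'])
    print('DELETE', url)  # -> DELETE https://api.example.com/v1/webhooks/abc123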
Register webhook (if it doesn't exist).
|
def update_webhook(self, webhook_url, webhook_id, events=None):
"""Register webhook (if it doesn't exit)."""
hooks = self._request(MINUT_WEBHOOKS_URL, request_type='GET')['hooks']
try:
self._webhook = next(
hook for hook in hooks if hook['url'] == webhook_url)
_LOGGER.debug("Webhook: %s", self._webhook)
except StopIteration: # Not found
if events is None:
events = [e for v in EVENTS.values() for e in v if e]
self._webhook = self._register_webhook(webhook_url, events)
_LOGGER.debug("Registered hook: %s", self._webhook)
return self._webhook
|
[
"def _register_webhook(self, webhook_url, events):\n \"\"\"Register webhook.\"\"\"\n response = self._request(\n MINUT_WEBHOOKS_URL,\n request_type='POST',\n json={\n 'url': webhook_url,\n 'events': events,\n },\n )\n return response",
"public void registerBot(WebhookBot bot) throws TelegramApiRequestException {\n if (useWebhook) {\n webhook.registerWebhook(bot);\n bot.setWebhook(externalUrl + bot.getBotPath(), pathToCertificate);\n }\n }",
"public function register()\n {\n Notification::resolved(function (ChannelManager $service) {\n $service->extend('slack', function ($app) {\n return new Channels\\SlackWebhookChannel(new HttpClient);\n });\n });\n }",
"function(params, callback) {\n if (!params.url) return (callback || function() {})(new Error(utils.i18n.webhooks.url));\n if (!params.events || !params.events.length) return (callback || function() {})(new Error(utils.i18n.webhooks.events));\n if (!params.secret) return (callback || function() {})(new Error(utils.i18n.webhooks.secret));\n\n utils.debug('Webhooks register: ' + params.url);\n request.post({\n path: '/webhooks',\n body: {\n target_url: params.url,\n events: params.events,\n secret: params.secret,\n config: params.config || {},\n\t version: version,\n }\n }, function(err, res) {\n if (!err) res.body.secret = params.secret;\n if (callback) callback(err, res);\n });\n }",
"def set_webhook(self, *args, **kwargs):\n \"\"\"See :func:`set_webhook`\"\"\"\n return set_webhook(*args, **self._merge_overrides(**kwargs)).run()",
"public function boot()\n {\n if ($this->app->runningInConsole()) {\n $this->publishes([\n __DIR__.'/../config/ohdear-webhooks.php' => config_path('ohdear-webhooks.php'),\n ], 'config');\n }\n\n Route::macro('ohDearWebhooks', function ($url) {\n return Route::post($url, '\\OhDear\\LaravelWebhooks\\OhDearWebhooksController');\n });\n }",
"def register_webhook(self, webhook, **kwargs): # noqa: E501\n \"\"\"Register a callback URL # noqa: E501\n\n Register a URL to which the server should deliver notifications of the subscribed resource changes. To get notifications pushed, you also need to place the subscriptions. The maximum length of the URL, header keys and values, all combined, is 400 characters. Notifications are delivered as PUT messages to the HTTP server defined by the client with a subscription server message. The given URL should be accessible and respond to the PUT request with response code of 200 or 204. Device Management Connect tests the callback URL with an empty payload when the URL is registered. For more information on callback notification, see [NotificationMessage](/docs/current/service-api-references/mbed-cloud-connect.html#models). **Optional headers in a callback message:** You can set optional headers to a callback in a **Webhook** object. Device Management Connect will include the header and key pairs to the notification messages send them to callback URL. As the callback URL's are API key specific also the headers are. One possible use for the additional headers is to check the origin of a PUT request and also distinguish the application (API key) to which the notification belongs to. **Note**: Only one callback URL per an API key can be active. If you register a new URL while another one is already active, it replaces the active one. There can be only one notification channel at a time. If the Long Poll notification is already present, you need to delete it before setting the callback URL. **Expiration of a callback URL:** A callback can expire when Device Management cannot deliver a notification due to a connection timeout or an error response (4xx or 5xx). After each delivery failure, Device Management sets an exponential back off time and makes a retry attempt after that. The first retry delay is 1 second, then 2s, 4s, 8s, ..., 2min, 2min. The maximum retry delay is 2 minutes. The callback URL will be removed if all retries fail withing 24 hours. More about [notification sending logic](/docs/current/integrate-web-app/event-notification.html#notification-sending-logic). **Supported callback URL protocols:** Currently, only HTTP and HTTPS protocols are supported. **HTTPS callback URLs:** When delivering a notification to an HTTPS based callback URL, Device Management Connect will present a valid client certificate to identify itself. The certificate is signed by a trusted certificate authorithy (GlobalSign) with a Common Name (CN) set to notifications.mbedcloud.com. **Example usage:** This example command shows how to set your callback URL and API key. It also sets an optional header authorization. When Device Management Connect calls your callback URL, the call contains the authorization header with the defined value. curl -X PUT \\\\ https://api.us-east-1.mbedcloud.com/v2/notification/callback \\\\ -H 'authorization: Bearer {api-key}' \\\\ -H 'content-type: application/json' \\\\ -d '{ \\\"url\\\": \\\"{callback-url}\\\", \\\"headers\\\": {\\\"authorization\\\" : \\\"f4b93d6e-4652-4874-82e4-41a3ced0cd56\\\"} }' # noqa: E501\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass asynchronous=True\n >>> thread = api.register_webhook(webhook, asynchronous=True)\n >>> result = thread.get()\n\n :param asynchronous bool\n :param Webhook webhook: A json object that contains the optional headers and the URL to which the notifications need to be sent. (required)\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('asynchronous'):\n return self.register_webhook_with_http_info(webhook, **kwargs) # noqa: E501\n else:\n (data) = self.register_webhook_with_http_info(webhook, **kwargs) # noqa: E501\n return data",
"def _run__hook(self, action, replace):\n \"\"\"Simple webhook\"\"\"\n url = action.get(\"url\")\n expected = action.get(\"expect\", {}).get(\"response-codes\", (200, 201, 202, 204))\n if replace and action.get(\"template\", True):\n url = self.rfxcfg.macro_expand(url, replace)\n self.logf(\"Action {} hook\\n\", action['name'])\n self.logf(\"{}\\n\", url, level=common.log_msg)\n result = requests.get(url)\n self.debug(\"Result={}\\n\", result.status_code)\n if result.status_code not in expected:\n self.die(\"Hook failed name={} result={}\", action['name'], result.status_code)\n self.logf(\"Success\\n\", level=common.log_good)",
"public function registerMainWebhook($listId, $webhookurl)\n {\n // Configure webhook\n $subscribeWebhook = [\n 'url' => $webhookurl,\n 'events' => [\n 'subscribe' => true,\n 'unsubscribe' => true,\n 'profile' => true,\n 'cleaned' => true,\n 'upemail' => true,\n 'campaign' => true\n ],\n 'sources' => [\n 'user' => true,\n 'admin' => true,\n 'api' => false // to avoid double (infinite loop) update (update an subscriber with the API and the webhook reupdate the user, ...)\n ]\n ];\n\n return $this->addWebhook($listId, $subscribeWebhook);\n }",
"function() {\n\t\tvar args = Array.prototype.slice.call(arguments);\n\t\tvar type = args.shift();\n\t\tvar message = args.shift();\n\n\t\tif (_config.allow.indexOf(type) === -1) {\n\t\t\treturn true;\n\t\t}\n\n\t\tif (type === 'error') {\n\t\t\t// only one notify error message.\n\t\t\tif (_check(message) !== false) {\n\t\t\t\treturn _this;\n\t\t\t}\n\t\t\tmessage = _config.mention.join(' ') + ' ' + message;\n\t\t}\n\n\t\targs.unshift(message);\n\t\tif (typeof _config.prefix !== 'undefined') {\n\t\t\targs.unshift(_config.prefix);\n\t\t}\n\n\t\tvar payload = {\n\t\t\tchannel: _config.channel,\n\t\t\ttext: args.join(\"\\n\"),\n\t\t};\n\n\t\tif (typeof _config.name !== 'undefined') {\n\t\t\tpayload.username = _config.name;\n\t\t}\n\n\t\tif (typeof _config.icon !== 'undefined') {\n\t\t\tpayload.icon_emoji = _config.icon;\n\t\t}\n\n\t\t_client.webhook(payload, function(err) {\n\t\t\tif (err !== null) {\n\t\t\t\tconsole.error(err);\n\t\t\t}\n\t\t});\n\n\t\treturn _this;\n\t}",
"def postWebhook(self, dev_id, external_id, url, event_types):\n \"\"\"Add a webhook to a device.\n\n externalId can be used as opaque data that\n is tied to your company, and passed back in each webhook event\n response.\n \"\"\"\n path = 'notification/webhook'\n payload = {'device': {'id': dev_id}, 'externalId': external_id,\n 'url': url, 'eventTypes': event_types}\n return self.rachio.post(path, payload)",
"def delete_webhook(self):\n \"\"\"Delete/remove registered webhook.\n\n If no webhook is registered, an exception (404) will be raised.\n\n Note that every registered subscription will be deleted as part of\n deregistering a webhook.\n\n :return: void\n \"\"\"\n api = self._get_api(mds.NotificationsApi)\n api.deregister_webhook()\n\n # Every subscription will be deleted, so we can clear the queues too.\n self._queues.clear()\n return"
] |
[
0.7781411409378052,
0.7490712404251099,
0.7211876511573792,
0.7160359621047974,
0.7136541604995728,
0.711796760559082,
0.7088648676872253,
0.7038928270339966,
0.703255295753479,
0.7003063559532166,
0.7002740502357483,
0.6991754770278931
] |
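The get-or-create logic in the positive example hinges on next() raising StopIteration when no hook matches the target URL. A minimal sketch of that pattern with canned data (all names and URLs here are illustrative):

hooks = [{'url': 'https://a.example/hook', 'hook_id': '1'}]
target = 'https://b.example/hook'

try:
    hook = next(h for h in hooks if h['url'] == target)
except StopIteration:  # not found: this is where the real code registers a new hook
    hook = {'url': target, 'hook_id': 'new'}
print(hook)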
Update all devices from server.
|
def update(self):
"""Update all devices from server."""
with self._lock:
devices = self._request_devices(MINUT_DEVICES_URL, 'devices')
if devices:
self._state = {
device['device_id']: device
for device in devices
}
_LOGGER.debug("Found devices: %s", list(self._state.keys()))
# _LOGGER.debug("Device status: %s", devices)
homes = self._request_devices(MINUT_HOMES_URL, 'homes')
if homes:
self._homes = homes
return self.devices
|
[
"def update_from_devices(self):\n \"\"\"Retrieve a list of &devices and values.\"\"\"\n # _LOGGER.warning(\"update from devices\")\n try:\n rest = requests.get(URL_DEVICES.format(self._url))\n if rest.status_code != 200:\n _LOGGER.error(\"Devices returned %s\", rest.status_code)\n return False\n self.devices.update_devices(rest.json())\n return True\n except requests.exceptions.ConnectionError as conn_err:\n _LOGGER.error(\"Could not connect: %s\", conn_err)\n except Exception as err: # pylint: disable=broad-except\n _LOGGER.error(err)",
"async def update_from_devices(self):\n \"\"\"Retrieve a list of &devices and values.\"\"\"\n res = await self.get_json(URL_DEVICES.format(self._url))\n if res:\n self.devices.update_devices(res)\n return True\n return False",
"def update_device_list(self, sessions):\n \"\"\" Update device list. \"\"\"\n if sessions is None:\n _LOGGER.error('Error updating Emby devices.')\n return\n\n new_devices = []\n active_devices = []\n dev_update = False\n for device in sessions:\n dev_name = '{}.{}'.format(device['DeviceId'], device['Client'])\n\n try:\n _LOGGER.debug('Session msg on %s of type: %s, themeflag: %s',\n dev_name, device['NowPlayingItem']['Type'],\n device['NowPlayingItem']['IsThemeMedia'])\n except KeyError:\n pass\n\n active_devices.append(dev_name)\n if dev_name not in self._devices and \\\n device['DeviceId'] != str(self._api_id):\n _LOGGER.debug('New Emby DeviceID: %s. Adding to device list.',\n dev_name)\n new = EmbyDevice(device, self)\n self._devices[dev_name] = new\n new_devices.append(new)\n elif device['DeviceId'] != str(self._api_id):\n # Before we send in new data check for changes to state\n # to decide if we need to fire the update callback\n if not self._devices[dev_name].is_active:\n # Device wasn't active on the last update\n # We need to fire a device callback to let subs now\n dev_update = True\n\n do_update = self.update_check(\n self._devices[dev_name], device)\n self._devices[dev_name].update_data(device)\n self._devices[dev_name].set_active(True)\n if dev_update:\n self._do_new_devices_callback(0)\n dev_update = False\n if do_update:\n self._do_update_callback(dev_name)\n\n # Need to check for new inactive devices and flag\n for dev_id in self._devices:\n if dev_id not in active_devices:\n # Device no longer active\n if self._devices[dev_id].is_active:\n self._devices[dev_id].set_active(False)\n self._do_update_callback(dev_id)\n self._do_stale_devices_callback(dev_id)\n\n # Call device callback if new devices were found.\n if new_devices:\n self._do_new_devices_callback(0)",
"def poll_devices(self):\n \"\"\"Request status updates from each device.\"\"\"\n for addr in self.devices:\n device = self.devices[addr]\n if not device.address.is_x10:\n device.async_refresh_state()",
"def _update_all_devices(self):\n \"\"\"Update the all_devices list.\"\"\"\n self.all_devices = []\n self.all_devices.extend(self.keyboards)\n self.all_devices.extend(self.mice)\n self.all_devices.extend(self.gamepads)\n self.all_devices.extend(self.other_devices)",
"def _on_device_update(self, devid):\n \"\"\"Device callback from Abode SocketIO server.\"\"\"\n if isinstance(devid, (tuple, list)):\n devid = devid[0]\n\n if devid is None:\n _LOGGER.warning(\"Device update with no device id.\")\n return\n\n _LOGGER.debug(\"Device update event for device ID: %s\", devid)\n\n device = self._abode.get_device(devid, True)\n\n if not device:\n _LOGGER.debug(\"Got device update for unknown device: %s\", devid)\n return\n\n for callback in self._device_callbacks.get(device.device_id, ()):\n _execute_callback(callback, device)",
"def get_devices(self, refresh=False):\n \"\"\"Get all devices from Abode.\"\"\"\n if refresh or self._devices is None:\n if self._devices is None:\n self._devices = {}\n\n _LOGGER.info(\"Updating all devices...\")\n response = self.send_request(\"get\", CONST.DEVICES_URL)\n response_object = json.loads(response.text)\n\n _LOGGER.debug(\"Get Devices Response: %s\", response.text)\n\n for device_json in response_object:\n # Attempt to reuse an existing device\n device = self._devices.get(device_json['id'])\n\n # No existing device, create a new one\n if device:\n device.update(device_json)\n else:\n device = SkybellDevice(device_json, self)\n self._devices[device.device_id] = device\n\n return list(self._devices.values())",
"async def update_houses(self):\n \"\"\"Lookup details for devices on the plum servers\"\"\"\n houses = await self.fetch_houses()\n for house_id in houses:\n asyncio.Task(self.update_house(house_id))",
"def update(self):\n \"\"\"Fetch updated information about devices\"\"\"\n\n if self.device_time_check():\n\n if not self.in_process:\n outlets, switches, fans = self.get_devices()\n\n self.outlets = helpers.resolve_updates(self.outlets, outlets)\n self.switches = helpers.resolve_updates(\n self.switches, switches)\n self.fans = helpers.resolve_updates(self.fans, fans)\n\n self.last_update_ts = time.time()",
"def get_devices(self, refresh=False, generic_type=None):\n \"\"\"Get all devices from Abode.\"\"\"\n if refresh or self._devices is None:\n if self._devices is None:\n self._devices = {}\n\n _LOGGER.info(\"Updating all devices...\")\n response = self.send_request(\"get\", CONST.DEVICES_URL)\n response_object = json.loads(response.text)\n\n if (response_object and\n not isinstance(response_object, (tuple, list))):\n response_object = [response_object]\n\n _LOGGER.debug(\"Get Devices Response: %s\", response.text)\n\n for device_json in response_object:\n # Attempt to reuse an existing device\n device = self._devices.get(device_json['id'])\n\n # No existing device, create a new one\n if device:\n device.update(device_json)\n else:\n device = new_device(device_json, self)\n\n if not device:\n _LOGGER.debug(\n \"Skipping unknown device: %s\",\n device_json)\n\n continue\n\n self._devices[device.device_id] = device\n\n # We will be treating the Abode panel itself as an armable device.\n panel_response = self.send_request(\"get\", CONST.PANEL_URL)\n panel_json = json.loads(panel_response.text)\n\n self._panel.update(panel_json)\n\n _LOGGER.debug(\"Get Mode Panel Response: %s\", response.text)\n\n alarm_device = self._devices.get(CONST.ALARM_DEVICE_ID + '1')\n\n if alarm_device:\n alarm_device.update(panel_json)\n else:\n alarm_device = ALARM.create_alarm(panel_json, self)\n self._devices[alarm_device.device_id] = alarm_device\n\n if generic_type:\n devices = []\n for device in self._devices.values():\n if (device.generic_type is not None and\n device.generic_type in generic_type):\n devices.append(device)\n return devices\n\n return list(self._devices.values())",
"def update_devices(self, devices):\n \"\"\"Update values from response of URL_DEVICES, callback if changed.\"\"\"\n for qspacket in devices:\n try:\n qsid = qspacket[QS_ID]\n except KeyError:\n _LOGGER.debug(\"Device without ID: %s\", qspacket)\n continue\n\n if qsid not in self:\n self[qsid] = QSDev(data=qspacket)\n\n dev = self[qsid]\n dev.data = qspacket\n # Decode value from QSUSB\n newqs = _legacy_status(qspacket[QS_VALUE])\n if dev.is_dimmer:\n # Adjust dimmer exponentially to get a smoother effect\n newqs = min(round(math.pow(newqs, self.dim_adj)), 100)\n newin = round(newqs * _MAX / 100)\n if abs(dev.value - newin) > 1: # Significant change\n _LOGGER.debug(\"%s qs=%s --> %s\", qsid, newqs, newin)\n dev.value = newin\n self._cb_value_changed(self, qsid, newin)",
"def update_devices(devices)\n devices= [devices] unless devices.kind_of?(Array)\n\n # the list of ANDROID push_id to send the notification to\n @andr_ids= []\n @ios_devices= []\n devices.each do |device|\n if Platforms.android? device.platform\n @andr_ids << device.push_id\n elsif Platforms.ios? device.platform\n @ios_devices << device\n end\n end\n self\n end"
] |
[
0.8242406845092773,
0.8015351891517639,
0.7738180160522461,
0.7665979862213135,
0.7612438797950745,
0.7574371695518494,
0.7473741173744202,
0.7464259266853333,
0.7458534836769104,
0.7457598447799683,
0.7437586188316345,
0.7422157526016235
] |
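The positive example re-indexes the device list into a dict keyed by device_id; a self-contained sketch of that comprehension with canned data (field values are made up for illustration):

devices = [
    {'device_id': 'd1', 'description': 'Hallway'},
    {'device_id': 'd2', 'description': 'Bedroom'},
]
# Same shape as the _state dict built under the lock in the positive example.
state = {device['device_id']: device for device in devices}
print(list(state))  # -> ['d1', 'd2']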
Set alarm status.
|
def _set_alarm(self, status, home_id):
"""Set alarm satus."""
response = self._request(
MINUT_HOMES_URL + "/{}".format(home_id),
request_type='PUT',
json={'alarm_status': status})
return response.get('alarm_status', '') == status
|
[
"def _set_alarm_sample(self, v, load=False):\n \"\"\"\n Setter method for alarm_sample, mapped from YANG variable /rmon/alarm_entry/alarm_sample (alarm-sample-type)\n If this variable is read-only (config: false) in the\n source YANG file, then _set_alarm_sample is considered as a private\n method. Backends looking to populate this variable should\n do so via calling thisObj._set_alarm_sample() directly.\n \"\"\"\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'delta': {'value': 2}, u'absolute': {'value': 1}},), is_leaf=True, yang_name=\"alarm-sample\", rest_name=\"type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'alt-name': u'type', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-rmon', defining_module='brocade-rmon', yang_type='alarm-sample-type', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"alarm_sample must be of a type compatible with alarm-sample-type\"\"\",\n 'defined-type': \"brocade-rmon:alarm-sample-type\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'delta': {'value': 2}, u'absolute': {'value': 1}},), is_leaf=True, yang_name=\"alarm-sample\", rest_name=\"type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'alt-name': u'type', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-rmon', defining_module='brocade-rmon', yang_type='alarm-sample-type', is_config=True)\"\"\",\n })\n\n self.__alarm_sample = t\n if hasattr(self, '_set'):\n self._set()",
"def set_basic_params(self, msg_size=None, cheap=None, anti_loop_timeout=None):\n \"\"\"\n :param int msg_size: Set the max size of an alarm message in bytes. Default: 8192.\n\n :param bool cheap: Use main alarm thread rather than create dedicated\n threads for curl-based alarms\n\n :param int anti_loop_timeout: Tune the anti-loop alarm system. Default: 3 seconds.\n\n \"\"\"\n self._set('alarm-msg-size', msg_size)\n self._set('alarm-cheap', cheap, cast=bool)\n self._set('alarm-freq', anti_loop_timeout)\n\n return self._section",
"def alarm_set(self, time, wake_with_radio=False):\n \"\"\"\n set the alarm clock\n\n :param str time: time of the alarm (format: %H:%M:%S)\n :param bool wake_with_radio: if True, radio will be used for the alarm\n instead of beep sound\n \"\"\"\n # TODO: check for correct time format\n log.debug(\"alarm => set...\")\n params = {\n \"enabled\": True,\n \"time\": time,\n \"wake_with_radio\": wake_with_radio\n }\n self._app_exec(\"com.lametric.clock\", \"clock.alarm\", params=params)",
"def _set_alarm_interval(self, v, load=False):\n \"\"\"\n Setter method for alarm_interval, mapped from YANG variable /rmon/alarm_entry/alarm_interval (uint32)\n If this variable is read-only (config: false) in the\n source YANG file, then _set_alarm_interval is considered as a private\n method. Backends looking to populate this variable should\n do so via calling thisObj._set_alarm_interval() directly.\n \"\"\"\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 2147483648']}), is_leaf=True, yang_name=\"alarm-interval\", rest_name=\"interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Alarm sample interval', u'alt-name': u'interval', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-rmon', defining_module='brocade-rmon', yang_type='uint32', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"alarm_interval must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 2147483648']}), is_leaf=True, yang_name=\"alarm-interval\", rest_name=\"interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Alarm sample interval', u'alt-name': u'interval', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-rmon', defining_module='brocade-rmon', yang_type='uint32', is_config=True)\"\"\",\n })\n\n self.__alarm_interval = t\n if hasattr(self, '_set'):\n self._set()",
"def set_saturation(self, saturation, duration=0, rapid=False):\n \"\"\" saturation to set\n duration in ms\"\"\"\n color = self.get_color()\n color2 = (color[0], saturation, color[2], color[3])\n try:\n if rapid:\n self.fire_and_forget(LightSetColor, {\"color\": color2, \"duration\": duration}, num_repeats=1)\n else:\n self.req_with_ack(LightSetColor, {\"color\": color2, \"duration\": duration})\n except WorkflowException as e:\n raise",
"def set_value_alarm_ts(self, value, alarm, ts):\n \"\"\"Set value with pre-validated alarm and timeStamp\"\"\"\n # type: (Any, Alarm, TimeStamp) -> None\n with self.notifier.changes_squashed:\n # Assume they are of the right format\n self.value = value\n self.notifier.add_squashed_change(self.path + [\"value\"], value)\n if alarm is not self.alarm:\n self.alarm = alarm\n self.notifier.add_squashed_change(self.path + [\"alarm\"], alarm)\n self.timeStamp = ts\n self.notifier.add_squashed_change(self.path + [\"timeStamp\"], ts)",
"def _set_alarm_entry(self, v, load=False):\n \"\"\"\n Setter method for alarm_entry, mapped from YANG variable /rmon/alarm_entry (list)\n If this variable is read-only (config: false) in the\n source YANG file, then _set_alarm_entry is considered as a private\n method. Backends looking to populate this variable should\n do so via calling thisObj._set_alarm_entry() directly.\n \"\"\"\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"alarm_index\",alarm_entry.alarm_entry, yang_name=\"alarm-entry\", rest_name=\"alarm\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='alarm-index', extensions={u'tailf-common': {u'info': u'RMON alarm', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'alt-name': u'alarm', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'rmon_alarm'}}), is_container='list', yang_name=\"alarm-entry\", rest_name=\"alarm\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'RMON alarm', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'alt-name': u'alarm', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'rmon_alarm'}}, namespace='urn:brocade.com:mgmt:brocade-rmon', defining_module='brocade-rmon', yang_type='list', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"alarm_entry must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"alarm_index\",alarm_entry.alarm_entry, yang_name=\"alarm-entry\", rest_name=\"alarm\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='alarm-index', extensions={u'tailf-common': {u'info': u'RMON alarm', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'alt-name': u'alarm', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'rmon_alarm'}}), is_container='list', yang_name=\"alarm-entry\", rest_name=\"alarm\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'RMON alarm', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-full-no': None, u'alt-name': u'alarm', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'rmon_alarm'}}, namespace='urn:brocade.com:mgmt:brocade-rmon', defining_module='brocade-rmon', yang_type='list', is_config=True)\"\"\",\n })\n\n self.__alarm_entry = t\n if hasattr(self, '_set'):\n self._set()",
"def set_state(options = {})\n options = options.merge(alarm_name: @name)\n resp = @client.set_alarm_state(options)\n resp.data\n end",
"def set_alarm_sensitivity(self, mode):\r\n \"\"\"\r\n :param mode: 1.0 for Very sensitive, 0.2 for not sensitive.\r\n Steps in values of 0.2.\r\n :return: nothing\r\n \"\"\"\r\n values = {\"desired_state\": {\"alarm_sensitivity\": mode}}\r\n response = self.api_interface.set_device_state(self, values)\r\n self._update_state_from_response(response)",
"def _set_alert(self, v, load=False):\n \"\"\"\n Setter method for alert, mapped from YANG variable /rbridge_id/threshold_monitor/interface/policy/area/alert (container)\n If this variable is read-only (config: false) in the\n source YANG file, then _set_alert is considered as a private\n method. Backends looking to populate this variable should\n do so via calling thisObj._set_alert() directly.\n \"\"\"\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=alert.alert, is_container='container', presence=False, yang_name=\"alert\", rest_name=\"alert\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Alert configuration', u'cli-suppress-show-conf-path': None, u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"alert must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=alert.alert, is_container='container', presence=False, yang_name=\"alert\", rest_name=\"alert\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Alert configuration', u'cli-suppress-show-conf-path': None, u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__alert = t\n if hasattr(self, '_set'):\n self._set()",
"def _alarms_present(name, alarms, alarms_from_pillar,\n write_capacity_units, read_capacity_units,\n region, key, keyid, profile):\n '''helper method for present. ensure that cloudwatch_alarms are set'''\n # load data from alarms_from_pillar\n tmp = copy.deepcopy(\n __salt__['config.option'](alarms_from_pillar, {})\n )\n # merge with data from alarms\n if alarms:\n tmp = dictupdate.update(tmp, alarms)\n # set alarms, using boto_cloudwatch_alarm.present\n merged_return_value = {'name': name, 'result': True, 'comment': '', 'changes': {}}\n for _, info in six.iteritems(tmp):\n # add dynamodb table to name and description\n info[\"name\"] = name + \" \" + info[\"name\"]\n info[\"attributes\"][\"description\"] = name + \" \" + info[\"attributes\"][\"description\"]\n # add dimension attribute\n info[\"attributes\"][\"dimensions\"] = {\"TableName\": [name]}\n if info[\"attributes\"][\"metric\"] == \"ConsumedWriteCapacityUnits\" \\\n and \"threshold\" not in info[\"attributes\"]:\n info[\"attributes\"][\"threshold\"] = math.ceil(write_capacity_units * info[\"attributes\"][\"threshold_percent\"])\n del info[\"attributes\"][\"threshold_percent\"]\n # the write_capacity_units is given in unit / second. So we need\n # to multiply by the period to get the proper threshold.\n # http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/MonitoringDynamoDB.html\n info[\"attributes\"][\"threshold\"] *= info[\"attributes\"][\"period\"]\n if info[\"attributes\"][\"metric\"] == \"ConsumedReadCapacityUnits\" \\\n and \"threshold\" not in info[\"attributes\"]:\n info[\"attributes\"][\"threshold\"] = math.ceil(read_capacity_units * info[\"attributes\"][\"threshold_percent\"])\n del info[\"attributes\"][\"threshold_percent\"]\n # the read_capacity_units is given in unit / second. So we need\n # to multiply by the period to get the proper threshold.\n # http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/MonitoringDynamoDB.html\n info[\"attributes\"][\"threshold\"] *= info[\"attributes\"][\"period\"]\n # set alarm\n kwargs = {\n \"name\": info[\"name\"],\n \"attributes\": info[\"attributes\"],\n \"region\": region,\n \"key\": key,\n \"keyid\": keyid,\n \"profile\": profile,\n }\n results = __states__['boto_cloudwatch_alarm.present'](**kwargs)\n if not results[\"result\"]:\n merged_return_value[\"result\"] = results[\"result\"]\n if results.get(\"changes\", {}) != {}:\n merged_return_value[\"changes\"][info[\"name\"]] = results[\"changes\"]\n if \"comment\" in results:\n merged_return_value[\"comment\"] += results[\"comment\"]\n return merged_return_value",
"def alarm():\n \"\"\".\"\"\"\n if request.method == 'POST':\n response = {'message': 'POST Accepted'}\n logging.info('alarm POSTED!')\n data = request.data\n logging.info(data)\n string = json.dumps(data)\n producer.send('SIP-alarms', string.encode())\n return response\n return \"\""
] |
[
0.7172811031341553,
0.7102866768836975,
0.7014048099517822,
0.696650505065918,
0.6947844624519348,
0.6852255463600159,
0.6841500401496887,
0.6795913577079773,
0.6775729656219482,
0.6772504448890686,
0.6769431233406067,
0.6752042770385742
] |
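The positive example reports success only when the server echoes the requested status back in the PUT response; a sketch of that comparison with a canned response body (values are assumptions):

status = 'on'
response = {'alarm_status': 'on'}  # stand-in for the PUT response body
# True only if the server accepted and echoed the new status.
armed = response.get('alarm_status', '') == status
print(armed)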
Update and return sensor value.
|
def sensor(self, sensor_type):
"""Update and return sensor value."""
_LOGGER.debug("Reading %s sensor.", sensor_type)
return self._session.read_sensor(self.device_id, sensor_type)
|
[
"def set_value(self, value, status=NOMINAL, timestamp=None,\n major=DEFAULT_KATCP_MAJOR):\n \"\"\"Check and then set the value of the sensor.\n\n Parameters\n ----------\n value : object\n Value of the appropriate type for the sensor.\n status : Sensor status constant\n Whether the value represents an error condition or not.\n timestamp : float in seconds or None\n The time at which the sensor value was determined.\n Uses current time if None.\n major : int\n Major version of KATCP to use when interpreting types.\n Defaults to latest implemented KATCP version.\n\n \"\"\"\n self._kattype.check(value, major)\n if timestamp is None:\n timestamp = time.time()\n self.set(timestamp, status, value)",
"def get_value(self):\n \"\"\"Get a fresh sensor value from the KATCP resource\n\n Returns\n -------\n reply : tornado Future resolving with :class:`KATCPSensorReading` object\n\n Note\n ----\n\n As a side-effect this will update the reading stored in this object, and result in\n registered listeners being called.\n \"\"\"\n yield self._manager.poll_sensor(self._name)\n # By now the sensor manager should have set the reading\n raise Return(self._reading.value)",
"def export_sensor(self, sensor):\n \"\"\"Return (value, unit) from a sensor node.\"\"\"\n value = None\n unit = None\n try:\n container = self.sensor_data.get(sensor)\n unit = container.get('unit')\n data_point = container.get('data', [[0, [0.0]]])\n if data_point and data_point[0]:\n value = data_point[0][-1][0]\n except (ValueError, KeyError, AttributeError):\n pass\n\n return (value, unit)",
"def value(self, datatype):\n \"\"\"Return the :class:`SensorValue` for the given data type.\n\n sensor.value(TELLSTICK_TEMPERATURE) is identical to calling\n sensor.temperature().\n \"\"\"\n value = self.lib.tdSensorValue(\n self.protocol, self.model, self.id, datatype)\n return SensorValue(datatype, value['value'], value['timestamp'])",
"def set_child_value(\n self, sensor_id, child_id, value_type, value, **kwargs):\n \"\"\"Add a command to set a sensor value, to the queue.\n\n A queued command will be sent to the sensor when the gateway\n thread has sent all previously queued commands.\n\n If the sensor attribute new_state returns True, the command will be\n buffered in a queue on the sensor, and only the internal sensor state\n will be updated. When a smartsleep message is received, the internal\n state will be pushed to the sensor, via _handle_smartsleep method.\n \"\"\"\n if not self.is_sensor(sensor_id, child_id):\n return\n if self.sensors[sensor_id].new_state:\n self.sensors[sensor_id].set_child_value(\n child_id, value_type, value,\n children=self.sensors[sensor_id].new_state)\n else:\n self.add_job(partial(\n self.sensors[sensor_id].set_child_value, child_id, value_type,\n value, **kwargs))",
"def value(self, n=0):\n \"\"\"\n Returns the value or values measured by the sensor. Check num_values to\n see how many values there are. Values with N >= num_values will return\n an error. The values are fixed point numbers, so check decimals to see\n if you need to divide to get the actual value.\n \"\"\"\n n = int(n)\n\n self._value[n], value = self.get_attr_int(self._value[n], 'value'+str(n))\n return value",
"def request_sensor_value(self, req, msg):\n \"\"\"Request the value of a sensor or sensors.\n\n A list of sensor values as a sequence of #sensor-value informs.\n\n Parameters\n ----------\n name : str, optional\n Name of the sensor to poll (the default is to send values for all\n sensors). If name starts and ends with '/' it is treated as a\n regular expression and all sensors whose names contain the regular\n expression are returned.\n\n Informs\n -------\n timestamp : float\n Timestamp of the sensor reading in seconds since the Unix\n epoch, or milliseconds for katcp versions <= 4.\n count : {1}\n Number of sensors described in this #sensor-value inform. Will\n always be one. It exists to keep this inform compatible with\n #sensor-status.\n name : str\n Name of the sensor whose value is being reported.\n value : object\n Value of the named sensor. Type depends on the type of the sensor.\n\n Returns\n -------\n success : {'ok', 'fail'}\n Whether sending the list of values succeeded.\n informs : int\n Number of #sensor-value inform messages sent.\n\n Examples\n --------\n ::\n\n ?sensor-value\n #sensor-value 1244631611.415231 1 psu.voltage 4.5\n #sensor-value 1244631611.415200 1 cpu.status off\n ...\n !sensor-value ok 5\n\n ?sensor-value cpu.power.on\n #sensor-value 1244631611.415231 1 cpu.power.on 0\n !sensor-value ok 1\n\n \"\"\"\n exact, name_filter = construct_name_filter(msg.arguments[0]\n if msg.arguments else None)\n sensors = [(name, sensor) for name, sensor in\n sorted(self._sensors.iteritems()) if name_filter(name)]\n\n if exact and not sensors:\n return req.make_reply(\"fail\", \"Unknown sensor name.\")\n\n katcp_version = self.PROTOCOL_INFO.major\n for name, sensor in sensors:\n timestamp, status, value = sensor.read_formatted(katcp_version)\n req.inform(timestamp, \"1\", name, status, value)\n return req.make_reply(\"ok\", str(len(sensors)))",
"def update(self, sensor, reading):\n \"\"\"Update callback used by sensors to notify obervers of changes.\n\n Parameters\n ----------\n sensor : :class:`katcp.Sensor` object\n The sensor whose value has changed.\n reading : (timestamp, status, value) tuple\n Sensor reading as would be returned by sensor.read()\n\n \"\"\"\n parents = list(self._child_to_parents[sensor])\n for parent in parents:\n self.recalculate(parent, (sensor,))",
"def sensorupdate(self, data):\n \"\"\"\n Given a dict of sensors and values, updates those sensors with the \n values in Scratch.\n \"\"\"\n if not isinstance(data, dict):\n raise TypeError('Expected a dict')\n msg = 'sensor-update '\n for key in data.keys():\n msg += '\"%s\" \"%s\" ' % (self._escape(str(key)), \n self._escape(str(data[key])))\n self._send(msg)",
"def set_value(self, value, status=Sensor.NOMINAL, timestamp=None):\n \"\"\"Set sensor value with optinal specification of status and timestamp\"\"\"\n if timestamp is None:\n timestamp = self._manager.time()\n self.set(timestamp, status, value)",
"def tdSensorValue(self, protocol, model, sid, datatype):\n \"\"\"Get the sensor value for a given sensor.\n\n :return: a dict with the keys: value, timestamp.\n \"\"\"\n value = create_string_buffer(20)\n timestamp = c_int()\n\n self._lib.tdSensorValue(protocol, model, sid, datatype,\n value, sizeof(value), byref(timestamp))\n return {'value': self._to_str(value), 'timestamp': timestamp.value}",
"def format_sensor(self, sensor):\n \"\"\" Format a sensor value. If pango is enabled color is per sensor. \"\"\"\n current_val = sensor.current\n if self.pango_enabled:\n percentage = self.percentage(sensor.current, sensor.critical)\n if self.dynamic_color:\n color = self.colors[int(percentage)]\n return self.format_pango(color, current_val)\n return current_val"
] |
[
0.7528722286224365,
0.7448306083679199,
0.7433300614356995,
0.7369399666786194,
0.7364034652709961,
0.7327523231506348,
0.7262842655181885,
0.7242055535316467,
0.7219521999359131,
0.7208823561668396,
0.7207825779914856,
0.7140793800354004
] |
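The positive example is a thin delegation to the session's read_sensor; a self-contained sketch with a stub session (the class and the reading are illustrative, not part of the real library):

class StubSession:
    # Stands in for the real session object; returns a canned reading.
    def read_sensor(self, device_id, sensor_type):
        return 21.5

session = StubSession()
print(session.read_sensor('d1', 'temperature'))  # -> 21.5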
Info about device.
|
def device_info(self):
"""Info about device."""
return {
'connections': {('mac', self.device['device_mac'])},
            'identifiers': self.device['device_id'],
'manufacturer': 'Minut',
'model': 'Point v{}'.format(self.device['hardware_version']),
'name': self.device['description'],
'sw_version': self.device['firmware']['installed'],
}
|
[
"async def info(self):\n \"\"\"Return device info.\"\"\"\n \"\"\"\n {'MasterCapability': 9, 'TransportPort': 3975}\n \"\"\"\n act = self.service.action(\"X_GetDeviceInfo\")\n res = await act.async_call()\n return res",
"def device_info(self):\n \"\"\"Information to be pulled into controller info.\n\n The latest serial, model, and build_info are included. Additional info\n can be added via `add_device_info`.\n \"\"\"\n info = {\n 'serial': self.serial,\n 'model': self.model,\n 'build_info': self.build_info,\n 'user_added_info': self._user_added_device_info\n }\n return info",
"def device_info(self):\n \"\"\"Return device info dict.\"\"\"\n return {\n 'family': self.family,\n 'platform': self.platform,\n 'os_type': self.os_type,\n 'os_version': self.os_version,\n 'udi': self.udi,\n # TODO(klstanie): add property to make driver automatically\n 'driver_name': self.driver.platform,\n 'mode': self.mode,\n 'is_console': self.is_console,\n 'is_target': self.is_target,\n # 'prompt': self.driver.base_prompt(self.prompt),\n 'hostname': self.hostname,\n }",
"async def device_info():\n \"\"\"Get device info from GH.\"\"\"\n async with aiohttp.ClientSession() as session:\n ghlocalapi = DeviceInfo(LOOP, session, IPADDRESS)\n await ghlocalapi.get_device_info()\n\n print(\"Device info:\", ghlocalapi.device_info)",
"def device_info(self, device_id=None):\n \"\"\"\n Return device information, if device_id is not specified, return for this device\n\n :param device_id: id of device\n :type device_id: int\n :returns: dict containing information about device\n :rtype: dict\n \"\"\"\n if device_id is None:\n device_id = self.device_id\n return get_device_info(self.corsair_sdk, device_id)",
"def device_info(self):\n \"\"\" Pull basic device information.\n\n Purpose: This function grabs the hostname, model, running version, and\n | serial number of the device.\n\n @returns: The output that should be shown to the user.\n @rtype: str\n \"\"\"\n # get hostname, model, and version from 'show version'\n resp = self._session.get_software_information(format='xml')\n\n hostname = resp.xpath('//software-information/host-name')[0].text\n model = resp.xpath('//software-information/product-model')[0].text\n\n version = 'Unknown'\n if resp.xpath('//junos-version'):\n \"\"\" case:\n <junos-version>15.1</junos-version>\n \"\"\"\n try:\n version = resp.xpath('//junos-version')[0].text\n except IndexError:\n pass\n elif resp.xpath(\"//package-information[name = 'junos-version']\"):\n \"\"\" case:\n <package-information>\n <name>junos-version</name>\n <comment>Junos: 14.2R4</comment>\n </package-information>\n \"\"\"\n try:\n version = (resp.xpath(\n \"//package-information[name = 'junos-version']/comment\"\n )[0].text).split()[1]\n except IndexError:\n pass\n else:\n \"\"\" case:\n <package-information>\n <name>junos</name>\n <comment>JUNOS Base OS boot [12.3R5]</comment>\n </package-information>\n \"\"\"\n try:\n version = ((resp.xpath(\n '//software-information/package-information/comment'\n )[0].text.split('[')[1].split(']')[0]))\n except IndexError:\n pass\n\n # try looking for 'junos-version' for >= 14.2\n# for element in resp.xpath('//software-information'):\n# version = element.findtext('junos-version')\n\n# if not version:\n# try:\n# version = ((resp.xpath(\n# '//software-information/package-information/comment')\n# [0].text.split('[')[1].split(']')[0]))\n# except IndexError:\n# version = 'Unknown'\n\n # get uptime from 'show system uptime'\n resp = self._session.get_system_uptime_information(format='xml')\n try:\n current_time = resp.xpath('//current-time/date-time')[0].text\n except IndexError:\n current_time = 'Unknown'\n try:\n uptime = resp.xpath('//uptime-information/up-time')[0].text\n except IndexError:\n uptime = 'Unknown'\n # get serial number from 'show chassis hardware'\n show_hardware = self._session.get_chassis_inventory(format='xml')\n # If we're hitting an EX, grab each Routing Engine Serial number\n # to get all RE SNs in a VC\n try:\n chassis_module = show_hardware.xpath(\n '//chassis-inventory/chassis/chassis-module/description'\n )[0].text\n except IndexError:\n chassis_module = 'Unknown'\n\n if ('EX' or 'ex' or 'Ex') in chassis_module:\n serial_num = ''\n for eng in show_hardware.xpath(\n '//chassis-inventory/chassis/chassis-module'):\n if 'Routing Engine' in eng.xpath('name')[0].text:\n serial_num += (eng.xpath('name')[0].text + ' Serial #: ' +\n eng.xpath('serial-number')[0].text)\n else: # Any other device type, just grab chassis SN\n try:\n serial_num = ('Chassis Serial Number: ' + show_hardware.xpath(\n '//chassis-inventory/chassis/serial-number')[0].text)\n except IndexError:\n serial_num = 'Chassis Serial Number: ' \\\n + 'Unknown (virtual machine?)'\n return ('Hostname: %s\\nModel: %s\\nJunos Version: %s\\n%s\\nCurrent Time:'\n ' %s\\nUptime: %s\\n' %\n (hostname, model, version, serial_num, current_time, uptime))",
"def desc(self):\n \"\"\"Get a short description of the device.\"\"\"\n return '{0} (ID: {1}) - {2} - {3}'.format(\n self.name, self.device_id, self.type, self.status)",
"def device_information(name, identifier):\n \"\"\"Create a new DEVICE_INFO_MESSAGE.\"\"\"\n # pylint: disable=no-member\n message = create(protobuf.DEVICE_INFO_MESSAGE)\n info = message.inner()\n info.uniqueIdentifier = identifier\n info.name = name\n info.localizedModelName = 'iPhone'\n info.systemBuildVersion = '14G60'\n info.applicationBundleIdentifier = 'com.apple.TVRemote'\n info.applicationBundleVersion = '273.12'\n info.protocolVersion = 1\n info.lastSupportedMessageType = 58\n info.supportsExtendedMotion = True\n return message",
"def info(ctx, check_fips):\n \"\"\"\n Show general information.\n\n Displays information about the attached YubiKey such as serial number,\n firmware version, applications, etc.\n \"\"\"\n dev = ctx.obj['dev']\n\n if dev.is_fips and check_fips:\n fips_status = get_overall_fips_status(dev.serial, dev.config)\n\n click.echo('Device type: {}'.format(dev.device_name))\n click.echo('Serial number: {}'.format(\n dev.serial or 'Not set or unreadable'))\n if dev.version:\n f_version = '.'.join(str(x) for x in dev.version)\n click.echo('Firmware version: {}'.format(f_version))\n else:\n click.echo('Firmware version: Uncertain, re-run with only one '\n 'YubiKey connected')\n\n config = dev.config\n if config.form_factor:\n click.echo('Form factor: {!s}'.format(config.form_factor))\n click.echo('Enabled USB interfaces: {}'.format(dev.mode))\n if config.nfc_supported:\n f_nfc = 'enabled' if config.nfc_enabled else 'disabled'\n click.echo('NFC interface is {}.'.format(f_nfc))\n if config.configuration_locked:\n click.echo('Configured applications are protected by a lock code.')\n click.echo()\n\n print_app_status_table(config)\n\n if dev.is_fips and check_fips:\n click.echo()\n\n click.echo('FIPS Approved Mode: {}'.format(\n 'Yes' if all(fips_status.values()) else 'No'))\n\n status_keys = list(fips_status.keys())\n status_keys.sort()\n for status_key in status_keys:\n click.echo(' {}: {}'.format(\n status_key, 'Yes' if fips_status[status_key] else 'No'))",
"async def sysinfo(dev: Device):\n \"\"\"Print out system information (version, MAC addrs).\"\"\"\n click.echo(await dev.get_system_info())\n click.echo(await dev.get_interface_information())",
"async def device_info():\n \"\"\"Get device info from GH.\"\"\"\n async with aiohttp.ClientSession() as session:\n ghlocalapi = NetworkScan(LOOP, session)\n result = await ghlocalapi.scan_for_units(IPRANGE)\n print(result)",
"function DeviceInfo (deviceIndex, deviceName, deviceType) {\n this.index = deviceIndex;\n this.name = deviceName;\n this.type = deviceType;\n}"
] |
[
0.819084644317627,
0.7801724076271057,
0.7736119627952576,
0.7680312395095825,
0.7550100088119507,
0.7537354826927185,
0.7487383484840393,
0.748466432094574,
0.745265781879425,
0.7349479794502258,
0.734328031539917,
0.7315958738327026
] |
Status of device.
|
def device_status(self):
"""Status of device."""
return {
'active': self.device['active'],
'offline': self.device['offline'],
'last_update': self.last_update,
'battery_level': self.battery_level,
}
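
A minimal usage sketch (hypothetical names: `tracker` stands in for an instance of whatever class defines this method, and the `device` mapping is assumed to be populated elsewhere by that class):

    status = tracker.device_status()
    if status['offline']:
        print('offline since', status['last_update'])
    else:
        print('battery at', status['battery_level'])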
|
[
"def status(self):\n \"\"\"\n In most cases, reading status will return the same value as `mode`. In\n cases where there is an `auto` mode additional values may be returned,\n such as `no-device` or `error`. See individual port driver documentation\n for the full list of possible values.\n \"\"\"\n self._status, value = self.get_attr_string(self._status, 'status')\n return value",
"def device_status(self):\r\n \"\"\"Return the status of the device as string.\"\"\"\r\n try:\r\n return self.device_status_simple(\r\n self.data.get('status').get('status1'))\r\n except (KeyError, AttributeError):\r\n return self.device_status_simple('')",
"async def status(dev: Device):\n \"\"\"Display status information.\"\"\"\n power = await dev.get_power()\n click.echo(click.style(\"%s\" % power, bold=power))\n\n vol = await dev.get_volume_information()\n click.echo(vol.pop())\n\n play_info = await dev.get_play_info()\n if not play_info.is_idle:\n click.echo(\"Playing %s\" % play_info)\n else:\n click.echo(\"Not playing any media\")\n\n outs = await dev.get_inputs()\n for out in outs:\n if out.active:\n click.echo(\"Active output: %s\" % out)\n\n sysinfo = await dev.get_system_info()\n click.echo(\"System information: %s\" % sysinfo)",
"def status(self):\n \"\"\"\n Poll YubiKey for status.\n \"\"\"\n data = self._read()\n self._status = YubiKeyUSBHIDStatus(data)\n return self._status",
"def report_device_status(self, mode):\n \"\"\"Report terminal status or cursor position.\n\n :param int mode: if 5 -- terminal status, 6 -- cursor position,\n otherwise a noop.\n\n .. versionadded:: 0.5.0\n \"\"\"\n if mode == 5: # Request for terminal status.\n self.write_process_input(ctrl.CSI + \"0n\")\n elif mode == 6: # Request for cursor position.\n x = self.cursor.x + 1\n y = self.cursor.y + 1\n\n # \"Origin mode (DECOM) selects line numbering.\"\n if mo.DECOM in self.mode:\n y -= self.margins.top\n self.write_process_input(ctrl.CSI + \"{0};{1}R\".format(y, x))",
"def status(self, return_led=0):\n \"\"\"Get status from device\"\"\"\n status = self.hub.get_device_status(self.device_id, return_led)\n self.logger.info(\"Dimmer %s status: %s\", self.device_id,\n pprint.pformat(status))\n return status",
"public String status() {\n\tUtil.out4.println(\"DeviceImpl.status() (attibute) arrived\");\n\n\t//\n\t// Record attribute request in black box\n\t//\n\n\tblackbox.insert_attr(Attr_Status);\n\n\t//\n\t// Return data to caller. If the dev_status method throw exception,\n\t// catch it\n\t// and forget it because we are in a CORBA attribute implementation\n\t//\n\n\tString tmp = null;\n\ttry {\n\t tmp = dev_status();\n\t} catch (final DevFailed ex) {\n\t}\n\n\tUtil.out4.println(\"Leaving DeviceImpl.status() (attribute)\");\n\treturn tmp;\n }",
"def state(self):\n \"\"\"Compute and return the device state.\n\n :returns: Device state.\n \"\"\"\n # Check if device is disconnected.\n if not self.available:\n return STATE_UNKNOWN\n # Check if device is off.\n if not self.screen_on:\n return STATE_OFF\n # Check if screen saver is on.\n if not self.awake:\n return STATE_IDLE\n # Check if the launcher is active.\n if self.launcher or self.settings:\n return STATE_STANDBY\n # Check for a wake lock (device is playing).\n if self.wake_lock:\n return STATE_PLAYING\n # Otherwise, device is paused.\n return STATE_PAUSED",
"def get_status(self):\n \"\"\"\n Models \"T Command\" functionality of device.\n\n Returns all available status information about the device as single byte array.\n\n :return: Byte array consisting of 10 status bytes.\n \"\"\"\n\n # \"The first command sent must be a 'T' command\" from T95 manual\n self.device.serial_command_mode = True\n\n Tarray = [0x80] * 10\n\n # Status byte (SB1)\n Tarray[0] = {\n 'stopped': 0x01,\n 'heat': 0x10,\n 'cool': 0x20,\n 'hold': 0x30,\n }.get(self.device._csm.state, 0x01)\n\n if Tarray[0] == 0x30 and self.device.hold_commanded:\n Tarray[0] = 0x50\n\n # Error status byte (EB1)\n if self.device.pump_overspeed:\n Tarray[1] |= 0x01\n # TODO: Add support for other error conditions?\n\n # Pump status byte (PB1)\n Tarray[2] = 0x80 + self.device.pump_speed\n\n # Temperature\n Tarray[6:10] = [ord(x) for x in \"%04x\" % (int(self.device.temperature * 10) & 0xFFFF)]\n\n return ''.join(chr(c) for c in Tarray)",
"@Override\n public String status() {\n MDC.setContextMap(contextMap);\n xlogger.entry();\n try {\n status = getStatus();\n } catch (final DevFailed e) {\n try {\n stateImpl.stateMachine(DeviceState.UNKNOWN);\n statusImpl.statusMachine(DevFailedUtils.toString(e), DeviceState.UNKNOWN);\n status = DevFailedUtils.toString(e);\n } catch (final DevFailed e1) {\n logger.debug(NOT_IMPORTANT_ERROR, e1);\n }\n logger.debug(NOT_IMPORTANT_ERROR, e);\n }\n return status;\n }",
"def status(self):\n \"\"\" Status information (read-only).\n\n :rtype: :py:class:`SimpleNamespace`\n \"\"\"\n config = self._get_config()\n is_hex = lambda name: (len(name) == 4 and\n all(c in string.hexdigits for c in name))\n out = SimpleNamespace()\n for sect in config:\n for itm in config[sect].values():\n if (itm.readonly or sect == 'status') and not is_hex(itm.name):\n setattr(out, itm.name, itm.value)\n return out",
"def get_device_status(host, services=None, zconf=None):\n \"\"\"\n :param host: Hostname or ip to fetch status from\n :type host: str\n :return: The device status as a named tuple.\n :rtype: pychromecast.dial.DeviceStatus or None\n \"\"\"\n\n try:\n status = _get_status(\n host, services, zconf, \"/setup/eureka_info?options=detail\")\n\n friendly_name = status.get('name', \"Unknown Chromecast\")\n model_name = \"Unknown model name\"\n manufacturer = \"Unknown manufacturer\"\n if 'detail' in status:\n model_name = status['detail'].get('model_name', model_name)\n manufacturer = status['detail'].get('manufacturer', manufacturer)\n\n udn = status.get('ssdp_udn', None)\n\n cast_type = CAST_TYPES.get(model_name.lower(),\n CAST_TYPE_CHROMECAST)\n\n uuid = None\n if udn:\n uuid = UUID(udn.replace('-', ''))\n\n return DeviceStatus(friendly_name, model_name, manufacturer,\n uuid, cast_type)\n\n except (requests.exceptions.RequestException, OSError, ValueError):\n return None"
] |
[
0.8157129287719727,
0.7970032095909119,
0.7968286871910095,
0.7773072123527527,
0.7605599761009216,
0.7588527202606201,
0.7551490664482117,
0.7473092079162598,
0.7472646832466125,
0.7450510859489441,
0.7415158152580261,
0.740644633769989
] |
Template tag which renders the glitter CSS and JavaScript. Any resources
which need to be loaded should be added here. This is only shown to users
with permission to edit the page.
|
def glitter_head(context):
"""
Template tag which renders the glitter CSS and JavaScript. Any resources
which need to be loaded should be added here. This is only shown to users
with permission to edit the page.
"""
user = context.get('user')
rendered = ''
template_path = 'glitter/include/head.html'
if user is not None and user.is_staff:
template = context.template.engine.get_template(template_path)
rendered = template.render(context)
return rendered
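
Tags like this that receive the template context are normally registered as context-aware simple tags; a sketch under that assumption (the module layout and the `glitter_tags` library name are illustrative, not taken from the source):

    from django import template

    register = template.Library()

    @register.simple_tag(takes_context=True)
    def glitter_head(context):
        ...  # body as above

A template would then do `{% load glitter_tags %}` and place `{% glitter_head %}` inside `<head>`.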
|
[
"def glitter_startbody(context):\n \"\"\"\n Template tag which renders the glitter overlay and sidebar. This is only\n shown to users with permission to edit the page.\n \"\"\"\n user = context.get('user')\n path_body = 'glitter/include/startbody.html'\n path_plus = 'glitter/include/startbody_%s_%s.html'\n rendered = ''\n\n if user is not None and user.is_staff:\n templates = [path_body]\n # We've got a page with a glitter object:\n # - May need a different startbody template\n # - Check if user has permission to add\n glitter = context.get('glitter')\n if glitter is not None:\n opts = glitter.obj._meta.app_label, glitter.obj._meta.model_name\n template_path = path_plus % opts\n templates.insert(0, template_path)\n\n template = context.template.engine.select_template(templates)\n rendered = template.render(context)\n\n return rendered",
"def treebeard_js():\n \"\"\"\n Template tag to print out the proper <script/> tag to include a custom .js\n \"\"\"\n path = get_static_url()\n js_file = urljoin(path, 'treebeard/treebeard-admin.js')\n jquery_ui = urljoin(path, 'treebeard/jquery-ui-1.8.5.custom.min.js')\n\n # Jquery UI is needed to call disableSelection() on drag and drop so\n # text selections arent marked while dragging a table row\n # http://www.lokkju.com/blog/archives/143\n TEMPLATE = (\n '<script type=\"text/javascript\" src=\"{}\"></script>'\n '<script type=\"text/javascript\" src=\"{}\"></script>'\n '<script>'\n '(function($){{jQuery = $.noConflict(true);}})(django.jQuery);'\n '</script>'\n '<script type=\"text/javascript\" src=\"{}\"></script>')\n return format_html(\n TEMPLATE, \"jsi18n\", mark_safe(js_file), mark_safe(jquery_ui))",
"function shimTemplateStyles(template, tag) {\n window.WebComponents.ShadowCSS.shimStyling(template.content, tag);\n}",
"def javascript_tags(volt_app)\n @opal_tag_generator ||= Opal::Server::Index.new(nil, volt_app.opal_files.server)\n\n javascript_files = []\n @assets.each do |type, path|\n case type\n when :folder\n # for a folder, we search for all .js files and return a tag for them\n base_path = base(path)\n javascript_files += Dir[\"#{path}/**/*.js\"].sort.map do |folder|\n # Grab the component folder/assets/js/file.js\n local_path = folder[path.size..-1]\n @app_url + '/' + base_path + local_path\n end\n when :javascript_file\n # javascript_file is a cdn path to a JS file\n javascript_files << path\n end\n end\n\n javascript_files = javascript_files.uniq\n\n scripts = javascript_files.map {|url| \"<script src=\\\"#{url}\\\"></script>\" }\n\n # Include volt itself. Unless we are running with MAPS=all, just include\n # the main file without sourcemaps.\n volt_path = 'volt/volt/app'\n if ENV['MAPS'] == 'all'\n scripts << @opal_tag_generator.javascript_include_tag(volt_path)\n else\n scripts << \"<script src=\\\"#{volt_app.app_url}/#{volt_path}.js\\\"></script>\"\n scripts << \"<script>#{Opal::Processor.load_asset_code(volt_app.sprockets, volt_path)}</script>\"\n end\n\n scripts << @opal_tag_generator.javascript_include_tag('components/main')\n\n scripts.join(\"\\n\")\n end",
"public function renderJavascript()\n\t{\n\t\tif (!$this->isEnabled()) {\n\t\t\treturn;\n\t\t}\n\n\t\treturn $this->view->make('googlmapper::javascript')\n\t\t\t->withView($this->view)\n\t\t\t->withOptions($this->generateRenderOptions())\n\t\t\t->render();\n\t}",
"function cssTemplate (params) {\n // Localize parameters\n var items = params.items;\n var options = params.options;\n var tmplParams = {\n sprite: null,\n retina: null,\n items: [],\n options: options\n };\n\n var classFn = function (name, sep) {\n if (options.cssClass) {\n return '.' + cssesc(options.cssClass + sep + name, {isIdentifier: true});\n }\n else {\n return '.icon' + cssesc(sep + name, {isIdentifier: true});\n }\n };\n\n // Add class to each of the options\n items.forEach(function saveClass (item) {\n if (item.type === 'sprite') {\n item['class'] = classFn('', '');\n tmplParams.sprite = item;\n }\n else if (item.type === 'retina') {\n item['class'] = classFn('', '');\n tmplParams.retina = item;\n }\n else {\n item['class'] = classFn(item.name, '-');\n tmplParams.items.push(item);\n }\n });\n // Render and return CSS\n var tmplFile = options.template ?\n fs.readFileSync(path.resolve(process.cwd(), options.template), 'utf8') :\n tmpl[options.processor];\n var css = mustache.render(tmplFile, tmplParams);\n return css;\n}",
"function() {\n gulp.task('tpl-precompile', function() {\n return gulp.src([paths.src.partials])\n .pipe($.htmlmin({\n removeComments: true,\n collapseWhitespace: plugins.tpls.ext === '.jade' ? false : true\n }))\n .pipe(plugins.tpls.cmd(plugins.tpls.config))\n .on('error', handleError)\n .pipe($.defineModule('plain'))\n .pipe($.declare({\n namespace: 'R.templates',\n processName: function(file) {\n var dir = config.paths.src.partials || config.paths.src.templates;\n return file.slice(file.indexOf(dir) + dir.length).replace('.js', '');\n }\n }))\n .pipe($.concat('templates.js'))\n .pipe(gulp.dest(paths.out.js));\n });\n\n /* Template livereloading */\n gulp.task('tpl-reload', ['tpl-precompile'], function() {\n return gulp.src([paths.src.views])\n .pipe($.livereload(lrport));\n });\n}",
"def google_tag_manager_script_tag(label)\n container_id = GtmRails::Config.container_ids[label]\n\n return '' if container_id.blank?\n\n <<-HTML.strip_heredoc.html_safe\n <!-- Google Tag Manager -->\n <script>(function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start':\n new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0],\n j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src=\n 'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f);\n })(window,document,'script','dataLayer','#{container_id}');</script>\n <!-- End Google Tag Manager -->\n HTML\n end",
"def jquery_js(version=None, migrate=False):\n '''A shortcut to render a ``script`` tag for the packaged jQuery'''\n version = version or settings.JQUERY_VERSION\n suffix = '.min' if not settings.DEBUG else ''\n libs = [js_lib('jquery-%s%s.js' % (version, suffix))]\n if _boolean(migrate):\n libs.append(js_lib('jquery-migrate-%s%s.js' % (JQUERY_MIGRATE_VERSION, suffix)))\n return '\\n'.join(libs)",
"public function renderTag()\n {\n echo CHtml::tag($this->tagName, $this->htmlOptions, '<noscript>' . $this->noScriptText . '</noscript>', true);\n }",
"protected function renderJavascript()\n {\n $html = \"\";\n\n foreach( $this->javascript as $item )\n {\n $html .= sprintf( MPageController::JAVASCRIPT_TEMPLATE, $item[\"src\"] ) . \"\\n\";\n }\n\n $domQuery = $this->qp->find( \"head\" );\n if( $domQuery->count() <= 0 )\n {\n trigger_error( 'head tag not found in page', E_USER_WARNING );\n }\n else\n {\n $domQuery->append( $html );\n }\n }",
"protected function content_template() {\n\t\t?>\n\t\t<#\n\t\tdata.choices = data.choices || {};\n\t\tdata.choices.alpha = data.choices.alpha || false;\n\t\tdata.value = data.value.toString().toLowerCase();\n\t\tif ( 0 === data.value.indexOf( '#' ) && 4 === data.value.split( '' ).length ) {\n\t\t\tdata.value = '#' + data.value.split( '' )[1] + data.value.split( '' )[1] + data.value.split( '' )[2] + data.value.split( '' )[2] + data.value.split( '' )[3] + data.value.split( '' )[3]\n\t\t}\n\t\tvar hasPaletteColorSelected = false;\n\t\t#>\n\t\t<label>\n\t\t\t<# if ( data.label ) { #>\n\t\t\t\t<span class=\"customize-control-title\">{{{ data.label }}}</span>\n\t\t\t<# } #>\n\t\t\t<# if ( data.description ) { #>\n\t\t\t\t<span class=\"description customize-control-description\">{{{ data.description }}}</span>\n\t\t\t<# } #>\n\t\t</label>\n\n\t\t<!-- The palette. -->\n\t\t<div class=\"kirki-colorpicker-wrapper-palette\">\n\t\t\t<# if ( 'hue' !== data.mode && true === data.palette ) { #>\n\t\t\t\t<?php $editor_palette = current( (array) get_theme_support( 'editor-color-palette' ) ); ?>\n\t\t\t\t<?php if ( ! empty( $editor_palette ) ) : ?>\n\t\t\t\t\t<# var kirkiColorEditorPalette = <?php echo wp_strip_all_tags( wp_json_encode( $editor_palette ) ) // phpcs:ignore WordPress.Security.EscapeOutput ?>; #>\n\t\t\t\t<?php else : ?>\n\t\t\t\t\t<# var kirkiColorEditorPalette = data.defaultPalette; #>\n\t\t\t\t<?php endif; ?>\n\n\t\t\t\t<# _.each( kirkiColorEditorPalette, function( paletteColor ) { #>\n\t\t\t\t\t<#\n\t\t\t\t\tpaletteColor.color = paletteColor.color.toLowerCase();\n\t\t\t\t\tif ( 0 === paletteColor.color.indexOf( '#' ) && 4 === paletteColor.color.split( '' ).length ) {\n\t\t\t\t\t\tpaletteColor.color = '#' + paletteColor.color.split( '' )[1] + paletteColor.color.split( '' )[1] + paletteColor.color.split( '' )[2] + paletteColor.color.split( '' )[2] + paletteColor.color.split( '' )[3] + paletteColor.color.split( '' )[3]\n\t\t\t\t\t}\n\n\t\t\t\t\tvar selected = ( data.value === paletteColor.color );\n\t\t\t\t\tif ( selected ) {\n\t\t\t\t\t\thasPaletteColorSelected = true;\n\t\t\t\t\t}\n\t\t\t\t\t#>\n\t\t\t\t\t<button\n\t\t\t\t\t\tclass=\"palette-color palette-color-{{ paletteColor.slug }}\"\n\t\t\t\t\t\tdata-color=\"{{ paletteColor.color }}\"\n\t\t\t\t\t\ttitle=\"{{ paletteColor.name }}\"\n\t\t\t\t\t\taria-label=\"<?php printf(\n\t\t\t\t\t\t\t/* translators: the color name. 
*/\n\t\t\t\t\t\t\tesc_attr_e( 'Color: %s', 'kirki' ),\n\t\t\t\t\t\t\t'{{ paletteColor.name }}'\n\t\t\t\t\t\t); ?>\"\n\t\t\t\t\t\taria-pressed=\"{{ selected }}\"\n\t\t\t\t\t\t>\n\t\t\t\t\t\t<span class=\"button-inner\" style=\"color:{{ paletteColor.color }};\">\n\t\t\t\t\t\t\t<svg aria-hidden=\"true\" role=\"img\" focusable=\"false\" class=\"dashicon dashicons-saved\" xmlns=\"http://www.w3.org/2000/svg\" width=\"20\" height=\"20\" viewBox=\"0 0 20 20\"><path d=\"M15.3 5.3l-6.8 6.8-2.8-2.8-1.4 1.4 4.2 4.2 8.2-8.2\"></path></svg>\n\t\t\t\t\t\t</span>\n\t\t\t\t\t</button>\n\t\t\t\t<# }); #>\n\t\t\t<# } else if ( 'object' === typeof data.palette ) { #>\n\t\t\t\t<# _.each( data.palette, function( paletteColor ) { #>\n\t\t\t\t\t<#\n\t\t\t\t\tpaletteColor = paletteColor.toLowerCase();\n\t\t\t\t\tif ( 0 === paletteColor.indexOf( '#' ) && 4 === paletteColor.split( '' ).length ) {\n\t\t\t\t\t\tpaletteColor = '#' + paletteColor.split( '' )[1] + paletteColor.split( '' )[1] + paletteColor.split( '' )[2] + paletteColor.split( '' )[2] + paletteColor.split( '' )[3] + paletteColor.split( '' )[3]\n\t\t\t\t\t}\n\t\t\t\t\tvar selected = ( data.value === paletteColor );\n\t\t\t\t\tif ( selected ) {\n\t\t\t\t\t\thasPaletteColorSelected = true;\n\t\t\t\t\t}\n\t\t\t\t\t#>\n\t\t\t\t\t<button\n\t\t\t\t\t\tclass=\"palette-color palette-color-{{ paletteColor }}\"\n\t\t\t\t\t\tdata-color=\"{{ paletteColor }}\"\n\t\t\t\t\t\ttitle=\"{{ paletteColor }}\"\n\t\t\t\t\t\taria-label=\"<?php printf(\n\t\t\t\t\t\t\t/* translators: the color name. */\n\t\t\t\t\t\t\tesc_attr_e( 'Color: %s', 'kirki' ),\n\t\t\t\t\t\t\t'{{ paletteColor }}'\n\t\t\t\t\t\t); ?>\"\n\t\t\t\t\t\taria-pressed=\"{{ selected }}\"\n\t\t\t\t\t\t>\n\t\t\t\t\t\t<span class=\"button-inner\" style=\"color:{{ paletteColor }};\">\n\t\t\t\t\t\t\t<svg aria-hidden=\"true\" role=\"img\" focusable=\"false\" class=\"dashicon dashicons-saved\" xmlns=\"http://www.w3.org/2000/svg\" width=\"20\" height=\"20\" viewBox=\"0 0 20 20\"><path d=\"M15.3 5.3l-6.8 6.8-2.8-2.8-1.4 1.4 4.2 4.2 8.2-8.2\"></path></svg>\n\t\t\t\t\t\t</span>\n\t\t\t\t\t</button>\n\t\t\t\t<# }); #>\n\t\t\t<# } #>\n\t\t</div>\n\n\t\t<details class=\"kirki-color-input-wrapper mode-{{ data.mode }}\" <# if ( 'hue' === data.mode ) { #>open<# } #>>\n\t\t\t<summary>\n\t\t\t\t<span>\n\t\t\t\t\t<button\n\t\t\t\t\t\tclass=\"palette-color placeholder color-preview\"\n\t\t\t\t\t\tdata-color=\"{{ data.value }}\"\n\t\t\t\t\t\taria-label=\"<?php printf(\n\t\t\t\t\t\t\tesc_attr_e( 'Color: %s', 'kirki' ),\n\t\t\t\t\t\t\t'{{ data.value }}'\n\t\t\t\t\t\t); ?>\"\n\t\t\t\t\t\taria-pressed=\"{{ ! 
hasPaletteColorSelected }}\"\n\t\t\t\t\t\t>\n\t\t\t\t\t\t<span class=\"button-inner\" style=\"color:{{ data.value }};\">\n\t\t\t\t\t\t\t<svg aria-hidden=\"true\" role=\"img\" focusable=\"false\" class=\"dashicon dashicons-saved\" xmlns=\"http://www.w3.org/2000/svg\" width=\"20\" height=\"20\" viewBox=\"0 0 20 20\"><path d=\"M15.3 5.3l-6.8 6.8-2.8-2.8-1.4 1.4 4.2 4.2 8.2-8.2\"></path></svg>\n\t\t\t\t\t\t</span>\n\t\t\t\t\t</button>\n\t\t\t\t</span>\n\t\t\t\t<span class=\"summary-description\">\n\t\t\t\t\t<?php esc_html_e( 'Select Color', 'kirki' ); ?>\n\t\t\t\t</span>\n\t\t\t\t<input\n\t\t\t\t\ttype = \"text\"\n\t\t\t\t\tdata-type=\"{{ data.mode }}\"\n\t\t\t\t\t{{{ data.inputAttrs }}}\n\t\t\t\t\tdata-default-color=\"{{ data.default }}\"\n\t\t\t\t\tvalue=\"{{ data.value }}\"\n\t\t\t\t\tclass=\"kirki-color-control<# if ( 'hue' === data.mode ) {#> screen-reader-text<# } #>\"\n\t\t\t\t\tdata-id=\"{{ data.id }}\"\n\t\t\t\t\t{{ data.link }}\n\t\t\t\t/>\n\t\t\t</summary>\n\t\t\t<div class=\"kirki-colorpicker-wrapper colorpicker-{{ data.id.replace( '[', '--' ).replace( ']', '' ) }}\">\n\t\t</details>\n\t\t<?php\n\t}"
] |
[
0.7912496328353882,
0.691871702671051,
0.6841536164283752,
0.6833006739616394,
0.6795294284820557,
0.6763699054718018,
0.674513578414917,
0.6702942252159119,
0.6668079495429993,
0.6644245982170105,
0.6606930494308472,
0.6605449318885803
] |
Template tag which renders the glitter overlay and sidebar. This is only
shown to users with permission to edit the page.
|
def glitter_startbody(context):
"""
Template tag which renders the glitter overlay and sidebar. This is only
shown to users with permission to edit the page.
"""
user = context.get('user')
path_body = 'glitter/include/startbody.html'
path_plus = 'glitter/include/startbody_%s_%s.html'
rendered = ''
if user is not None and user.is_staff:
templates = [path_body]
# We've got a page with a glitter object:
# - May need a different startbody template
# - Check if user has permission to add
glitter = context.get('glitter')
if glitter is not None:
opts = glitter.obj._meta.app_label, glitter.obj._meta.model_name
template_path = path_plus % opts
templates.insert(0, template_path)
template = context.template.engine.select_template(templates)
rendered = template.render(context)
return rendered
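
To make the fallback order concrete: for a glitter object whose model has app_label 'pages' and model_name 'page' (illustrative values), the candidate list handed to select_template is

    templates = [
        'glitter/include/startbody_pages_page.html',  # model-specific, tried first
        'glitter/include/startbody.html',             # generic fallback
    ]

and Django's select_template renders the first template that actually exists.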
|
[
"def glitter_head(context):\n \"\"\"\n Template tag which renders the glitter CSS and JavaScript. Any resources\n which need to be loaded should be added here. This is only shown to users\n with permission to edit the page.\n \"\"\"\n user = context.get('user')\n rendered = ''\n template_path = 'glitter/include/head.html'\n\n if user is not None and user.is_staff:\n template = context.template.engine.get_template(template_path)\n rendered = template.render(context)\n\n return rendered",
"function dotProp(name, forSidebar) {\n var re = /(.*)([.#~][\\w:]+)/g,\n match = re.exec(name);\n if (!match) return '<span class=\"fw-bold\">' + name + '</span>';\n if (forSidebar) {\n var cls = templateOpts.sidebar.animations ? ' trans-all-ease-fast' : '';\n return '<span class=\"color-gray symbol-memberof' + cls + '\">' + app.helper.colorOperators(match[1]) + '</span><span>' + app.helper.colorOperators(match[2]) + '</span>';\n }\n return '<span class=\"color-gray\">' + app.helper.colorOperators(match[1]) + '</span><span class=\"fw-bold\">' + app.helper.colorOperators(match[2]) + '</span>';\n }",
"def view_getSidebarLogo\n return VIEW_SIDEBARLOGO if defined? VIEW_SIDEBARLOGO\n dir = \"#{Rails.root}/app/assets/images/lato/\"\n if File.exist?(\"#{dir}/sidebar_logo.svg\")\n return \"lato/sidebar_logo.svg\"\n end\n if File.exist?(\"#{dir}/sidebar_logo.png\")\n return \"lato/sidebar_logo.png\"\n end\n if File.exist?(\"#{dir}/sidebar_logo.jpg\")\n return \"lato/sidebar_logo.jpg\"\n end\n if File.exist?(\"#{dir}/sidebar_logo.gif\")\n return \"lato/sidebar_logo.gif\"\n end\n return view_getApplicationLogo\n end",
"public function renderTemplate()\n {\n $options = array(\n 'id' => $this->htmlOptions['id'] . '-gallery',\n 'class' => 'blueimp-gallery'\n );\n if ($this->displayControls) {\n TbHtml::addCssClass('blueimp-gallery-controls', $options);\n }\n echo CHtml::openTag('div', $options);\n echo '<div class=\"slides\"></div>\n\t\t<h3 class=\"title\"></h3>\n\t\t<a class=\"prev\">‹</a>\n\t\t<a class=\"next\">›</a>\n\t\t<a class=\"close\">×</a>\n\t\t<a class=\"play-pause\"></a>\n\t\t<ol class=\"indicator\"></ol>';\n echo CHtml::closeTag('div');\n }",
"function shimTemplateStyles(template, tag) {\n window.WebComponents.ShadowCSS.shimStyling(template.content, tag);\n}",
"def render_head(self, ctx, data):\n \"\"\"\n Put liveglue content into the header of this page to activate it, but\n otherwise delegate to my parent's renderer for <head>.\n \"\"\"\n ctx.tag[tags.invisible(render=tags.directive('liveglue'))]\n return _PublicPageMixin.render_head(self, ctx, data)",
"def has_glitter_edit_permission(self, request, obj):\n \"\"\"\n Return a boolean if a user has edit access to the glitter object/page this object is on.\n \"\"\"\n\n # We're testing for the edit permission here with the glitter object - not the current\n # object, not the change permission. Once a user has edit access to an object they can edit\n # all content on it.\n permission_name = '{}.edit_{}'.format(\n obj._meta.app_label, obj._meta.model_name,\n )\n has_permission = (\n request.user.has_perm(permission_name) or\n request.user.has_perm(permission_name, obj=obj)\n )\n return has_permission",
"protected function content_template() {\n\t\t?>\n\t\t<#\n\t\tdata.choices = data.choices || {};\n\t\tdata.choices.alpha = data.choices.alpha || false;\n\t\tdata.value = data.value.toString().toLowerCase();\n\t\tif ( 0 === data.value.indexOf( '#' ) && 4 === data.value.split( '' ).length ) {\n\t\t\tdata.value = '#' + data.value.split( '' )[1] + data.value.split( '' )[1] + data.value.split( '' )[2] + data.value.split( '' )[2] + data.value.split( '' )[3] + data.value.split( '' )[3]\n\t\t}\n\t\tvar hasPaletteColorSelected = false;\n\t\t#>\n\t\t<label>\n\t\t\t<# if ( data.label ) { #>\n\t\t\t\t<span class=\"customize-control-title\">{{{ data.label }}}</span>\n\t\t\t<# } #>\n\t\t\t<# if ( data.description ) { #>\n\t\t\t\t<span class=\"description customize-control-description\">{{{ data.description }}}</span>\n\t\t\t<# } #>\n\t\t</label>\n\n\t\t<!-- The palette. -->\n\t\t<div class=\"kirki-colorpicker-wrapper-palette\">\n\t\t\t<# if ( 'hue' !== data.mode && true === data.palette ) { #>\n\t\t\t\t<?php $editor_palette = current( (array) get_theme_support( 'editor-color-palette' ) ); ?>\n\t\t\t\t<?php if ( ! empty( $editor_palette ) ) : ?>\n\t\t\t\t\t<# var kirkiColorEditorPalette = <?php echo wp_strip_all_tags( wp_json_encode( $editor_palette ) ) // phpcs:ignore WordPress.Security.EscapeOutput ?>; #>\n\t\t\t\t<?php else : ?>\n\t\t\t\t\t<# var kirkiColorEditorPalette = data.defaultPalette; #>\n\t\t\t\t<?php endif; ?>\n\n\t\t\t\t<# _.each( kirkiColorEditorPalette, function( paletteColor ) { #>\n\t\t\t\t\t<#\n\t\t\t\t\tpaletteColor.color = paletteColor.color.toLowerCase();\n\t\t\t\t\tif ( 0 === paletteColor.color.indexOf( '#' ) && 4 === paletteColor.color.split( '' ).length ) {\n\t\t\t\t\t\tpaletteColor.color = '#' + paletteColor.color.split( '' )[1] + paletteColor.color.split( '' )[1] + paletteColor.color.split( '' )[2] + paletteColor.color.split( '' )[2] + paletteColor.color.split( '' )[3] + paletteColor.color.split( '' )[3]\n\t\t\t\t\t}\n\n\t\t\t\t\tvar selected = ( data.value === paletteColor.color );\n\t\t\t\t\tif ( selected ) {\n\t\t\t\t\t\thasPaletteColorSelected = true;\n\t\t\t\t\t}\n\t\t\t\t\t#>\n\t\t\t\t\t<button\n\t\t\t\t\t\tclass=\"palette-color palette-color-{{ paletteColor.slug }}\"\n\t\t\t\t\t\tdata-color=\"{{ paletteColor.color }}\"\n\t\t\t\t\t\ttitle=\"{{ paletteColor.name }}\"\n\t\t\t\t\t\taria-label=\"<?php printf(\n\t\t\t\t\t\t\t/* translators: the color name. 
*/\n\t\t\t\t\t\t\tesc_attr_e( 'Color: %s', 'kirki' ),\n\t\t\t\t\t\t\t'{{ paletteColor.name }}'\n\t\t\t\t\t\t); ?>\"\n\t\t\t\t\t\taria-pressed=\"{{ selected }}\"\n\t\t\t\t\t\t>\n\t\t\t\t\t\t<span class=\"button-inner\" style=\"color:{{ paletteColor.color }};\">\n\t\t\t\t\t\t\t<svg aria-hidden=\"true\" role=\"img\" focusable=\"false\" class=\"dashicon dashicons-saved\" xmlns=\"http://www.w3.org/2000/svg\" width=\"20\" height=\"20\" viewBox=\"0 0 20 20\"><path d=\"M15.3 5.3l-6.8 6.8-2.8-2.8-1.4 1.4 4.2 4.2 8.2-8.2\"></path></svg>\n\t\t\t\t\t\t</span>\n\t\t\t\t\t</button>\n\t\t\t\t<# }); #>\n\t\t\t<# } else if ( 'object' === typeof data.palette ) { #>\n\t\t\t\t<# _.each( data.palette, function( paletteColor ) { #>\n\t\t\t\t\t<#\n\t\t\t\t\tpaletteColor = paletteColor.toLowerCase();\n\t\t\t\t\tif ( 0 === paletteColor.indexOf( '#' ) && 4 === paletteColor.split( '' ).length ) {\n\t\t\t\t\t\tpaletteColor = '#' + paletteColor.split( '' )[1] + paletteColor.split( '' )[1] + paletteColor.split( '' )[2] + paletteColor.split( '' )[2] + paletteColor.split( '' )[3] + paletteColor.split( '' )[3]\n\t\t\t\t\t}\n\t\t\t\t\tvar selected = ( data.value === paletteColor );\n\t\t\t\t\tif ( selected ) {\n\t\t\t\t\t\thasPaletteColorSelected = true;\n\t\t\t\t\t}\n\t\t\t\t\t#>\n\t\t\t\t\t<button\n\t\t\t\t\t\tclass=\"palette-color palette-color-{{ paletteColor }}\"\n\t\t\t\t\t\tdata-color=\"{{ paletteColor }}\"\n\t\t\t\t\t\ttitle=\"{{ paletteColor }}\"\n\t\t\t\t\t\taria-label=\"<?php printf(\n\t\t\t\t\t\t\t/* translators: the color name. */\n\t\t\t\t\t\t\tesc_attr_e( 'Color: %s', 'kirki' ),\n\t\t\t\t\t\t\t'{{ paletteColor }}'\n\t\t\t\t\t\t); ?>\"\n\t\t\t\t\t\taria-pressed=\"{{ selected }}\"\n\t\t\t\t\t\t>\n\t\t\t\t\t\t<span class=\"button-inner\" style=\"color:{{ paletteColor }};\">\n\t\t\t\t\t\t\t<svg aria-hidden=\"true\" role=\"img\" focusable=\"false\" class=\"dashicon dashicons-saved\" xmlns=\"http://www.w3.org/2000/svg\" width=\"20\" height=\"20\" viewBox=\"0 0 20 20\"><path d=\"M15.3 5.3l-6.8 6.8-2.8-2.8-1.4 1.4 4.2 4.2 8.2-8.2\"></path></svg>\n\t\t\t\t\t\t</span>\n\t\t\t\t\t</button>\n\t\t\t\t<# }); #>\n\t\t\t<# } #>\n\t\t</div>\n\n\t\t<details class=\"kirki-color-input-wrapper mode-{{ data.mode }}\" <# if ( 'hue' === data.mode ) { #>open<# } #>>\n\t\t\t<summary>\n\t\t\t\t<span>\n\t\t\t\t\t<button\n\t\t\t\t\t\tclass=\"palette-color placeholder color-preview\"\n\t\t\t\t\t\tdata-color=\"{{ data.value }}\"\n\t\t\t\t\t\taria-label=\"<?php printf(\n\t\t\t\t\t\t\tesc_attr_e( 'Color: %s', 'kirki' ),\n\t\t\t\t\t\t\t'{{ data.value }}'\n\t\t\t\t\t\t); ?>\"\n\t\t\t\t\t\taria-pressed=\"{{ ! 
hasPaletteColorSelected }}\"\n\t\t\t\t\t\t>\n\t\t\t\t\t\t<span class=\"button-inner\" style=\"color:{{ data.value }};\">\n\t\t\t\t\t\t\t<svg aria-hidden=\"true\" role=\"img\" focusable=\"false\" class=\"dashicon dashicons-saved\" xmlns=\"http://www.w3.org/2000/svg\" width=\"20\" height=\"20\" viewBox=\"0 0 20 20\"><path d=\"M15.3 5.3l-6.8 6.8-2.8-2.8-1.4 1.4 4.2 4.2 8.2-8.2\"></path></svg>\n\t\t\t\t\t\t</span>\n\t\t\t\t\t</button>\n\t\t\t\t</span>\n\t\t\t\t<span class=\"summary-description\">\n\t\t\t\t\t<?php esc_html_e( 'Select Color', 'kirki' ); ?>\n\t\t\t\t</span>\n\t\t\t\t<input\n\t\t\t\t\ttype = \"text\"\n\t\t\t\t\tdata-type=\"{{ data.mode }}\"\n\t\t\t\t\t{{{ data.inputAttrs }}}\n\t\t\t\t\tdata-default-color=\"{{ data.default }}\"\n\t\t\t\t\tvalue=\"{{ data.value }}\"\n\t\t\t\t\tclass=\"kirki-color-control<# if ( 'hue' === data.mode ) {#> screen-reader-text<# } #>\"\n\t\t\t\t\tdata-id=\"{{ data.id }}\"\n\t\t\t\t\t{{ data.link }}\n\t\t\t\t/>\n\t\t\t</summary>\n\t\t\t<div class=\"kirki-colorpicker-wrapper colorpicker-{{ data.id.replace( '[', '--' ).replace( ']', '' ) }}\">\n\t\t</details>\n\t\t<?php\n\t}",
"def core__get_application_logo_sidebar_path\n dir = \"#{core__get_application_root_path}/app/assets/images/lato/\"\n if File.exist?(\"#{dir}/logo_sidebar.svg\")\n return 'lato/logo_sidebar.svg'\n elsif File.exist?(\"#{dir}/logo_sidebar.png\")\n return 'lato/logo_sidebar.png'\n elsif File.exist?(\"#{dir}/logo_sidebar.jpg\")\n return 'lato/logo_sidebar.jpg'\n elsif File.exist?(\"#{dir}/logo_sidebar.gif\")\n return 'lato/logo_sidebar.gif'\n end\n core__get_application_logo_path\n end",
"public function sidebars()\n {\n $sidebars = $this->sidebars;\n uasort($sidebars, [$this, 'sortSidebarsByPriority']);\n foreach ($sidebars as $sidebarIdent => $sidebar) {\n if (!$sidebar->active()) {\n continue;\n }\n\n if ($sidebar->template()) {\n $template = $sidebar->template();\n } else {\n $template = 'charcoal/admin/widget/form.sidebar';\n }\n\n $GLOBALS['widget_template'] = $template;\n yield $sidebarIdent => $sidebar;\n $GLOBALS['widget_template'] = '';\n }\n }",
"public function renderSidebar()\n {\n $period = Common::getRequestVar('period');\n $date = Common::getRequestVar('date');\n $currentUrl = Common::getRequestVar('currentUrl');\n $segment = Request::getRawSegmentFromRequest();\n $currentUrl = Common::unsanitizeInputValue($currentUrl);\n $segmentSidebar = '';\n\n $normalizedCurrentUrl = PageUrl::excludeQueryParametersFromUrl($currentUrl, $this->idSite);\n $normalizedCurrentUrl = Common::unsanitizeInputValue($normalizedCurrentUrl);\n\n // load the appropriate row of the page urls report using the label filter\n ArchivingHelper::reloadConfig();\n $path = ArchivingHelper::getActionExplodedNames($normalizedCurrentUrl, Action::TYPE_PAGE_URL);\n $path = array_map('urlencode', $path);\n $label = implode('>', $path);\n\n $params = array(\n 'idSite' => $this->idSite,\n 'date' => $date,\n 'period' => $period,\n 'label' => $label,\n 'format' => 'original',\n 'format_metrics' => 0,\n );\n\n if (!empty($segment)) {\n $params['segment'] = $segment;\n }\n\n $dataTable = Request::processRequest('Actions.getPageUrls', $params);\n\n $formatter = new Metrics\\Formatter\\Html();\n\n $data = array();\n if ($dataTable->getRowsCount() > 0) {\n $row = $dataTable->getFirstRow();\n\n $translations = Metrics::getDefaultMetricTranslations();\n $showMetrics = array('nb_hits', 'nb_visits', 'nb_users', 'nb_uniq_visitors',\n 'bounce_rate', 'exit_rate', 'avg_time_on_page');\n\n $segmentSidebar = $row->getMetadata('segment');\n if (!empty($segmentSidebar) && !empty($segment)) {\n $segmentSidebar = $segment . ';' . $segmentSidebar;\n }\n\n foreach ($showMetrics as $metric) {\n $value = $row->getColumn($metric);\n if ($value === false) {\n // skip unique visitors for period != day\n continue;\n }\n\n if ($metric == 'bounce_rate'\n || $metric == 'exit_rate'\n ) {\n $value = $formatter->getPrettyPercentFromQuotient($value);\n } else if ($metric == 'avg_time_on_page') {\n $value = $formatter->getPrettyTimeFromSeconds($value, $displayAsSentence = true);\n }\n\n $data[] = array(\n 'name' => $translations[$metric],\n 'value' => $value\n );\n }\n }\n\n // generate page url string\n foreach ($path as &$part) {\n $part = preg_replace(';^/;', '', urldecode($part));\n }\n $page = '/' . implode('/', $path);\n $page = preg_replace(';/index$;', '/', $page);\n if ($page == '/') {\n $page = '/index';\n }\n\n // render template\n $view = new View('@Overlay/renderSidebar');\n $view->data = $data;\n $view->location = $page;\n $view->normalizedUrl = $normalizedCurrentUrl;\n $view->label = $label;\n $view->idSite = $this->idSite;\n $view->period = $period;\n $view->date = $date;\n $view->segment = $segmentSidebar;\n $view->segmentDescription = $this->segmentFormatter->getHumanReadable($segment, $this->idSite);\n\n $this->outputCORSHeaders();\n return $view->render();\n }",
"function prettyName(name, sidebar) {\n var adjustedName = name;\n if (sidebar) {\n var adjustedName = name;\n var parts = name.split('.');\n if (parts.length > 0) {\n adjustedName = parts[parts.length - 1];\n }\n }\n return adjustedName.replace(/\\./, '.<wbr>');\n}"
] |
[
0.7922534346580505,
0.6768049001693726,
0.6754957437515259,
0.6693488955497742,
0.6644914150238037,
0.659820556640625,
0.6557756662368774,
0.655105710029602,
0.6513023376464844,
0.6508985161781311,
0.6441714763641357,
0.6431770324707031
] |
Replace all special characters found in assertion_value
by quoted notation.
escape_mode
If 0 only special chars mentioned in RFC 4515 are escaped.
If 1 all NON-ASCII chars are escaped.
If 2 all chars are escaped.
|
def escape_filter_chars(assertion_value, escape_mode=0):
"""
Replace all special characters found in assertion_value
by quoted notation.
escape_mode
If 0 only special chars mentioned in RFC 4515 are escaped.
If 1 all NON-ASCII chars are escaped.
If 2 all chars are escaped.
"""
if isinstance(assertion_value, six.text_type):
assertion_value = assertion_value.encode("utf_8")
s = []
for c in assertion_value:
do_escape = False
        if str == bytes:  # Python 2: iterating over bytes yields 1-char strings
            c = ord(c)
if escape_mode == 0:
if c == ord('\\') or c == ord('*') \
or c == ord('(') or c == ord(')') \
or c == ord('\x00'):
do_escape = True
        elif escape_mode == 1:
            # c is an integer code point at this stage, so compare
            # ordinals; comparing an int against a one-character string
            # raises TypeError on Python 3.
            if c < ord('0') or c > ord('z') or chr(c) in "\\*()":
                do_escape = True
elif escape_mode == 2:
do_escape = True
else:
raise ValueError('escape_mode must be 0, 1 or 2.')
if do_escape:
s.append(b"\\%02x" % c)
else:
if str != bytes: # Python 3
b = bytes([c])
else: # Python 2
b = chr(c)
s.append(b)
return b''.join(s)
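
A few illustrative calls, with the byte strings each mode produces (the escape_mode=1 line assumes the ordinal-comparison fix noted in the body above):

    escape_filter_chars(u'a*(b)')                  # -> b'a\\2a\\28b\\29'
    escape_filter_chars(u'\u00e9', escape_mode=1)  # -> b'\\c3\\a9' (UTF-8 bytes escaped)
    escape_filter_chars(u'a', escape_mode=2)       # -> b'\\61'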
|
[
"def optimize_quoted_uri_escapes!(escaped)\n escaped.gsub!('%3D', '=')\n escaped.gsub!('%3A', ':')\n escaped.gsub!('%2F', '/')\n escaped.gsub!('%27', \"'\")\n escaped.tr!('+', ' ')\n end",
"def escape(value, quote=True):\n '''\n If the value is a string, escapes any special characters and optionally\n surrounds it with single quotes. If the value is not a string (e.g. a number),\n converts it to one.\n '''\n def escape_one(match):\n return SPECIAL_CHARS[match.group(0)]\n\n if isinstance(value, string_types):\n value = SPECIAL_CHARS_REGEX.sub(escape_one, value)\n if quote:\n value = \"'\" + value + \"'\"\n return text_type(value)",
"def quote(value)\n string = value.to_s\n unquoted = unquote(string)\n escaped = unquoted.gsub(QuoteNotEscapedMatcher, \"\\\\\\\"\")\n %Q{\"#{escaped}\"}\n end",
"public static String escapeToSoyString(\n String value, boolean shouldEscapeToAscii, QuoteStyle quoteStyle) {\n\n // StringUtil.javaScriptEscape() is meant to be compatible with JS string syntax, which is a\n // superset of the Soy expression string syntax, so we can't depend on it to properly escape a\n // Soy expression string literal. For example, they switched the default character escaping\n // to octal to save a few bytes, but octal escapes are not allowed in Soy syntax. I'm rewriting\n // the code here in a correct way for Soy.\n\n int len = value.length();\n StringBuilder out = new StringBuilder(len * 9 / 8);\n out.append(quoteStyle.getQuoteChar());\n\n int codePoint;\n for (int i = 0; i < len; i += Character.charCount(codePoint)) {\n codePoint = value.codePointAt(i);\n\n switch (codePoint) {\n case '\\n':\n out.append(\"\\\\n\");\n break;\n case '\\r':\n out.append(\"\\\\r\");\n break;\n case '\\t':\n out.append(\"\\\\t\");\n break;\n case '\\b':\n out.append(\"\\\\b\");\n break;\n case '\\f':\n out.append(\"\\\\f\");\n break;\n case '\\\\':\n out.append(\"\\\\\\\\\");\n break;\n case '\\'':\n out.append(quoteStyle == QuoteStyle.DOUBLE ? \"'\" : \"\\\\'\");\n break;\n case '\"':\n out.append(quoteStyle == QuoteStyle.DOUBLE ? \"\\\\\\\"\" : '\"');\n break;\n default:\n // If shouldEscapeToAscii, then hex escape characters outside the range 0x20 to 0x7F.\n if (shouldEscapeToAscii && (codePoint < 0x20 || codePoint >= 0x7F)) {\n appendHexEscape(out, codePoint);\n } else {\n out.appendCodePoint(codePoint);\n }\n break;\n }\n }\n\n out.append(quoteStyle.getQuoteChar());\n return out.toString();\n }",
"def escape_quotes(self, val):\n \"\"\"\n Escape any quotes in a value\n \"\"\"\n if self.is_string(val) and self._in_quotes(val, self.quote):\n # make sure any previously escaped quotes are not re-escaped\n middle = self.remove_quotes(val).replace(\"\\\\\" + self.quote, self.quote)\n middle = middle.replace(self.quote, \"\\\\\" + self.quote)\n val = self.add_quotes(middle)\n\n return val",
"def escape_extension_value(val)\n escapes = {\n %r{=} => '\\=',\n %r{\\n} => ' ',\n %r{\\\\} => '\\\\'\n }\n escapes.reduce(val) do |memo,replace|\n memo=memo.gsub(*replace)\n end\n end",
"def _processEscapeSequences(replaceText):\n \"\"\"Replace symbols like \\n \\\\, etc\n \"\"\"\n def _replaceFunc(escapeMatchObject):\n char = escapeMatchObject.group(0)[1]\n if char in _escapeSequences:\n return _escapeSequences[char]\n\n return escapeMatchObject.group(0) # no any replacements, return original value\n\n return _seqReplacer.sub(_replaceFunc, replaceText)",
"public static String escapeAssertion(String s) {\n //Replace the first dot, because the string doesn't start with \"m=\"\n // and is not covered by the regex.\n if (s.startsWith(\"r\") || s.startsWith(\"p\")) {\n s = s.replaceFirst(\"\\\\.\", \"_\");\n }\n String regex = \"(\\\\|| |=|\\\\)|\\\\(|&|<|>|,|\\\\+|-|!|\\\\*|\\\\/)(r|p)\\\\.\";\n Pattern p = Pattern.compile(regex);\n Matcher m = p.matcher(s);\n StringBuffer sb = new StringBuffer();\n\n while (m.find()) {\n m.appendReplacement(sb, m.group().replace(\".\", \"_\") );\n }\n \n m.appendTail(sb);\n return sb.toString();\n }",
"protected function quote_escaped()\n\t{\n\t\t$this->value .= $this->data[$this->position];\n\t\t$this->position++;\n\t\t$this->state = 'quote';\n\t}",
"@Override\r\n public String getEscapedValue(String value, boolean toQuote) {\r\n // Escape special characters\r\n StringBuilder buf = new StringBuilder(value.length());\r\n int idx = 0;\r\n int ch;\r\n\r\n while (idx < value.length()) {\r\n ch = value.charAt(idx++);\r\n if (ch == 0) {\r\n buf.append(\"\\\\0\");\r\n\r\n } else if (ch == 92) { // backslash\r\n buf.append(\"\\\\\\\\\");\r\n\r\n } else if (ch == 124) { // vertical bar\r\n // 124 = \"|\" = AbstractSerializationStream.RPC_SEPARATOR_CHAR\r\n buf.append(\"\\\\!\");\r\n\r\n } else if ((ch >= 0xD800) && (ch < 0xFFFF)) {\r\n buf.append(String.format(\"\\\\u%04x\", ch));\r\n\r\n } else {\r\n buf.append((char) ch);\r\n }\r\n }\r\n\r\n return buf.toString();\r\n }",
"def quote_token( str )\n if str.respond_to?(:force_encoding)\n original_encoding = str.encoding\n ascii_str = str.to_s.dup.force_encoding('ASCII-8BIT')\n if token_safe?( ascii_str )\n str\n else\n dquote(ascii_str).force_encoding(original_encoding)\n end\n else\n token_safe?( str ) ? str : dquote(str)\n end\n end",
"def escape_prefix_value(val)\n escapes={\n %r{(\\||\\\\)} => '\\\\\\\\\\&'\n }\n escapes.reduce(val) do|memo,replace|\n memo=memo.gsub(*replace)\n end\n end"
] |
[
0.674048125743866,
0.6649617552757263,
0.6573070883750916,
0.6528140306472778,
0.6516905426979065,
0.6515794992446899,
0.6510338187217712,
0.6507191061973572,
0.6485170722007751,
0.6480382680892944,
0.6476830840110779,
0.6469260454177856
] |
filter_template
String containing %s as placeholder for assertion values.
assertion_values
List or tuple of assertion values. Length must match
count of %s in filter_template.
|
def filter_format(filter_template, assertion_values):
"""
filter_template
String containing %s as placeholder for assertion values.
assertion_values
List or tuple of assertion values. Length must match
count of %s in filter_template.
"""
assert isinstance(filter_template, bytes)
return filter_template % (
tuple(map(escape_filter_chars, assertion_values)))
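
For example, combined with escape_filter_chars above (bytes literals because the template is asserted to be bytes; the attribute names are illustrative):

    filter_format(b'(&(cn=%s)(mail=%s))', [u'Smith*', u'a@example.org'])
    # -> b'(&(cn=Smith\\2a)(mail=a@example.org))'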
|
[
"def expected_param_keys(self):\r\n \"\"\"returns a list of params that this ConfigTemplate expects to receive\"\"\"\r\n expected_keys = []\r\n r = re.compile('%\\(([^\\)]+)\\)s')\r\n for block in self.keys():\r\n for key in self[block].keys():\r\n s = self[block][key]\r\n if type(s)!=str: continue\r\n md = re.search(r, s)\r\n while md is not None:\r\n k = md.group(1)\r\n if k not in expected_keys:\r\n expected_keys.append(k)\r\n s = s[md.span()[1]:]\r\n md = re.search(r, s)\r\n return expected_keys",
"def _comparator_presence(_, tested_value):\n \"\"\"\n Tests a filter which simply a joker, i.e. a value presence test\n \"\"\"\n # The filter value is a joker : simple presence test\n if tested_value is None:\n return False\n elif hasattr(tested_value, \"__len__\"):\n # Refuse empty values\n # pylint: disable=C1801\n return len(tested_value) != 0\n\n # Presence validated\n return True",
"def validate(self):\n \"\"\" Validate filter condition (template method).\n \"\"\"\n from pyrocore.torrent import formatting\n\n super(PatternFilter, self).validate()\n self._value = self._value.lower()\n self._template = None\n self._is_regex = self._value.startswith('/') and self._value.endswith('/')\n if self._is_regex:\n self._matcher = re.compile(self._value[1:-1]).search\n elif self._value.startswith('{{') or self._value.endswith('}}'):\n def _template_globber(val, item):\n \"\"\"Helper.\"\"\"\n pattern = formatting.format_item(self._template, item).replace('[', '[[]')\n ##print('!!!', val, '~~~', pattern, '???')\n return fnmatch.fnmatchcase(val, pattern.lower())\n\n self._template = formatting.preparse(self._value)\n self._matcher = _template_globber\n else:\n self._matcher = lambda val, _: fnmatch.fnmatchcase(val, self._value)",
"def build_assert(cls: Type[_Block], nodes: List[ast.stmt], min_line_number: int) -> _Block:\n \"\"\"\n Assert block is all nodes that are after the Act node.\n\n Note:\n The filtering is *still* running off the line number of the Act\n node, when instead it should be using the last line of the Act\n block.\n \"\"\"\n return cls(filter_assert_nodes(nodes, min_line_number), LineType._assert)",
"def escape_filter_chars(assertion_value, escape_mode=0):\n \"\"\"\n Replace all special characters found in assertion_value\n by quoted notation.\n\n escape_mode\n If 0 only special chars mentioned in RFC 4515 are escaped.\n If 1 all NON-ASCII chars are escaped.\n If 2 all chars are escaped.\n \"\"\"\n\n if isinstance(assertion_value, six.text_type):\n assertion_value = assertion_value.encode(\"utf_8\")\n\n s = []\n for c in assertion_value:\n do_escape = False\n\n if str != bytes: # Python 3\n pass\n else: # Python 2\n c = ord(c)\n\n if escape_mode == 0:\n if c == ord('\\\\') or c == ord('*') \\\n or c == ord('(') or c == ord(')') \\\n or c == ord('\\x00'):\n do_escape = True\n elif escape_mode == 1:\n if c < '0' or c > 'z' or c in \"\\\\*()\":\n do_escape = True\n elif escape_mode == 2:\n do_escape = True\n else:\n raise ValueError('escape_mode must be 0, 1 or 2.')\n\n if do_escape:\n s.append(b\"\\\\%02x\" % c)\n else:\n b = None\n if str != bytes: # Python 3\n b = bytes([c])\n else: # Python 2\n b = chr(c)\n s.append(b)\n\n return b''.join(s)",
"def visit_Assert(self, assert_):\n \"\"\"Return the AST statements to replace the ast.Assert instance.\n\n This re-writes the test of an assertion to provide\n intermediate values and replace it with an if statement which\n raises an assertion error with a detailed explanation in case\n the expression is false.\n\n \"\"\"\n if isinstance(assert_.test, ast.Tuple) and self.config is not None:\n fslocation = (self.module_path, assert_.lineno)\n self.config.warn('R1', 'assertion is always true, perhaps '\n 'remove parentheses?', fslocation=fslocation)\n self.statements = []\n self.variables = []\n self.variable_counter = itertools.count()\n self.stack = []\n self.on_failure = []\n self.push_format_context()\n # Rewrite assert into a bunch of statements.\n top_condition, explanation = self.visit(assert_.test)\n # Create failure message.\n body = self.on_failure\n negation = ast.UnaryOp(ast.Not(), top_condition)\n self.statements.append(ast.If(negation, body, []))\n if assert_.msg:\n assertmsg = self.helper('format_assertmsg', assert_.msg)\n explanation = \"\\n>assert \" + explanation\n else:\n assertmsg = ast.Str(\"\")\n explanation = \"assert \" + explanation\n\n if _MARK_ASSERTION_INTROSPECTION:\n explanation = 'dessert* ' + explanation\n\n template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation))\n msg = self.pop_format_context(template)\n fmt = self.helper(\"format_explanation\", msg, assertmsg)\n err_name = ast.Name(\"AssertionError\", ast.Load())\n exc = ast_Call(err_name, [fmt], [])\n if sys.version_info[0] >= 3:\n raise_ = ast.Raise(exc, None)\n else:\n raise_ = ast.Raise(exc, None, None)\n body.append(raise_)\n # Clear temporary variables by setting them to None.\n if self.variables:\n variables = [ast.Name(name, ast.Store())\n for name in self.variables]\n clear = ast.Assign(variables, _NameConstant(None))\n self.statements.append(clear)\n # Fix line numbers.\n for stmt in self.statements:\n set_location(stmt, assert_.lineno, assert_.col_offset)\n return self.statements",
"function _abstract_solr_filter_template(filters){\n\t\n\tvar allbuf = new Array();\n\tfor( var filter_key in filters ){\n\n\t var filter_val = filters[filter_key];\n\n\t // If the value looks like an array, iterate over it and\n\t // collect.\n\t if( filter_val &&\n\t\tfilter_val != null &&\n\t\ttypeof filter_val == 'object' &&\n\t\tfilter_val.length ){\n\n\t\t for( var i = 0; i < filter_val.length; i++ ){\n\t\t\tvar minibuffer = new Array();\n\t\t\tvar try_val = filter_val[i];\n\t\t\tif( typeof(try_val) != 'undefined' &&\n\t\t\ttry_val != '' ){\n\t\t\t minibuffer.push('fq=');\n\t\t\t minibuffer.push(filter_key);\n\t\t\t minibuffer.push(':');\n\t\t\t minibuffer.push('\"');\n\t\t\t minibuffer.push(filter_val[i]);\n\t\t\t minibuffer.push('\"');\n\t\t\t allbuf.push(minibuffer.join(''));\n\t\t\t}\n\t\t }\t\t \n\t\t}else{\n\t\t var minibuf = new Array();\n\t\t if( typeof(filter_val) != 'undefined' &&\n\t\t\tfilter_val != '' ){\n\t\t\t minibuf.push('fq=');\n\t\t\t minibuf.push(filter_key);\n\t\t\t minibuf.push(':');\n\t\t\t minibuf.push('\"');\n\t\t\t minibuf.push(filter_val);\n\t\t\t minibuf.push('\"');\n\t\t\t allbuf.push(minibuf.join(''));\n\t\t\t}\n\t\t}\n\t}\n\treturn allbuf.join('&');\n }",
"def get_waveform_filter_length_in_time(approximant, template=None, **kwargs):\n \"\"\"For filter templates, return the length in time of the template.\n \"\"\"\n kwargs = props(template, **kwargs)\n\n if approximant in _filter_time_lengths:\n return _filter_time_lengths[approximant](**kwargs)\n else:\n return None",
"def pre_filter(self):\n \"\"\" Return rTorrent condition to speed up data transfer.\n \"\"\"\n if self._name not in self.PRE_FILTER_FIELDS or self._template:\n return ''\n if not self._value:\n return '\"equal={},cat=\"'.format(self.PRE_FILTER_FIELDS[self._name])\n\n if self._is_regex:\n needle = self._value[1:-1]\n needle = self.CLEAN_PRE_VAL_RE.sub(' ', needle)\n needle = self.SPLIT_PRE_VAL_RE.split(needle)\n else:\n needle = self.CLEAN_PRE_VAL_RE.sub(' ', self._value)\n needle = self.SPLIT_PRE_GLOB_RE.split(needle)\n needle = list(sorted(needle, key=len))[-1]\n\n if needle:\n try:\n needle.encode('ascii')\n except UnicodeEncodeError:\n return ''\n else:\n return r'\"string.contains_i=${},\\\"{}\\\"\"'.format(\n self.PRE_FILTER_FIELDS[self._name], needle.replace('\"', r'\\\\\\\"'))\n\n return ''",
"def _escape_filterargs(self, filterargs):\n \"\"\"\n Escapes values in filterargs.\n\n filterargs is a value suitable for Django's string formatting operator\n (%), which means it's either a tuple or a dict. This return a new tuple\n or dict with all values escaped for use in filter strings.\n\n \"\"\"\n if isinstance(filterargs, tuple):\n filterargs = tuple(self.ldap.filter.escape_filter_chars(value)\n for value in filterargs)\n elif isinstance(filterargs, dict):\n filterargs = dict((key, self.ldap.filter.escape_filter_chars(value))\n for key, value in filterargs.items())\n else:\n raise TypeError(\"filterargs must be a tuple or dict.\")\n\n return filterargs",
"def asserts(self, fn, msg_or_fn):\n ''' Assert that prepared values satisfy given conditions.\n\n Assertions are intended in enforce conditions beyond simple value\n type validation. For instance, this method can be use to assert that\n the columns of a ``ColumnDataSource`` all collectively have the same\n length at all times.\n\n Args:\n fn (callable) :\n A function accepting ``(obj, value)`` that returns True if the value\n passes the assertion, or False otherwise.\n\n msg_or_fn (str or callable) :\n A message to print in case the assertion fails, or a function\n accepting ``(obj, name, value)`` to call in in case the assertion\n fails.\n\n Returns:\n self\n\n '''\n self.assertions.append((fn, msg_or_fn))\n return self",
"def filter(name, # pylint: disable=redefined-builtin\n filter_name,\n filter_options=None,\n terms=None,\n prepend=True,\n pillar_key='acl',\n pillarenv=None,\n saltenv=None,\n merge_pillar=False,\n only_lower_merge=False,\n revision_id=None,\n revision_no=None,\n revision_date=True,\n revision_date_format='%Y/%m/%d',\n test=False,\n commit=True,\n debug=False):\n '''\n Generate and load the configuration of a policy filter.\n\n filter_name\n The name of the policy filter.\n\n filter_options\n Additional filter options. These options are platform-specific.\n See the complete list of options_.\n\n .. _options: https://github.com/google/capirca/wiki/Policy-format#header-section\n\n terms\n Dictionary of terms for this policy filter.\n If not specified or empty, will try to load the configuration from the pillar,\n unless ``merge_pillar`` is set as ``False``.\n\n prepend: ``True``\n When ``merge_pillar`` is set as ``True``, the final list of terms generated by merging\n the terms from ``terms`` with those defined in the pillar (if any): new terms are prepended\n at the beginning, while existing ones will preserve the position. To add the new terms\n at the end of the list, set this argument to ``False``.\n\n pillar_key: ``acl``\n The key in the pillar containing the default attributes values. Default: ``acl``.\n\n pillarenv\n Query the master to generate fresh pillar data on the fly,\n specifically from the requested pillar environment.\n\n saltenv\n Included only for compatibility with\n :conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.\n\n merge_pillar: ``False``\n Merge ``terms`` with the corresponding value from the pillar. Default: ``False``.\n\n .. note::\n By default this state does not merge, to avoid any unexpected behaviours.\n\n The merge logic depends on the ``prepend`` argument.\n\n The terms specified through the ``terms`` argument have higher priority\n than the pillar.\n\n only_lower_merge: ``False``\n Specify if it should merge only the terms fields. Otherwise it will try\n to merge also filters fields. Default: ``False``.\n This option requires ``merge_pillar``, otherwise it is ignored.\n\n revision_id\n Add a comment in the filter config having the description for the changes applied.\n\n revision_no\n The revision count.\n\n revision_date: ``True``\n Boolean flag: display the date when the filter configuration was generated. Default: ``True``.\n\n revision_date_format: ``%Y/%m/%d``\n The date format to be used when generating the perforce data. Default: ``%Y/%m/%d`` (<year>/<month>/<day>).\n\n test: ``False``\n Dry run? If set as ``True``, will apply the config, discard and return the changes.\n Default: ``False`` and will commit the changes on the device.\n\n commit: ``True``\n Commit? Default: ``True``.\n\n debug: ``False``\n Debug mode. Will insert a new key under the output dictionary,\n as ``loaded_config`` containing the raw configuration loaded on the device.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt 'edge01.flw01' state.sls router.acl test=True\n\n Output Example:\n\n .. code-block:: text\n\n edge01.flw01:\n ----------\n ID: my-filter\n Function: netacl.filter\n Result: None\n Comment: Testing mode: Configuration discarded.\n Started: 12:24:40.598232\n Duration: 2437.139 ms\n Changes:\n ----------\n diff:\n ---\n +++\n @@ -1228,9 +1228,24 @@\n !\n +ipv4 access-list my-filter\n + 10 remark $Id: my-filter_state $\n + 20 remark $Revision: 5 $\n + 30 remark my-other-term\n + 40 permit tcp any range 5678 5680 any\n +!\n +!\n loaded:\n ! 
$Id: my-filter_state $\n ! $Revision: 5 $\n no ipv6 access-list my-filter\n ipv6 access-list my-filter\n remark $Id: my-filter_state $\n remark $Revision: 5 $\n remark my-other-term\n permit tcp any range 5678 5680 any\n exit\n\n Summary for edge01.flw01\n ------------\n Succeeded: 1 (unchanged=1, changed=1)\n Failed: 0\n ------------\n Total states run: 1\n Total run time: 2.437 s\n\n Pillar example:\n\n .. code-block:: yaml\n\n acl:\n - my-filter:\n options:\n - inet6\n terms:\n - my-term:\n source_port: [1234, 1235]\n protocol:\n - tcp\n - udp\n source_address: 1.2.3.4\n action: reject\n - my-other-term:\n source_port:\n - [5678, 5680]\n protocol: tcp\n action: accept\n\n State SLS Example:\n\n .. code-block:: jinja\n\n {%- set filter_name = 'my-filter' -%}\n {%- set my_filter_cfg = salt.netacl.get_filter_pillar(filter_name, pillar_key='firewall') -%}\n my_first_filter_state:\n netacl.filter:\n - filter_name: {{ filter_name }}\n - options: {{ my_filter_cfg['options'] | json }}\n - terms: {{ my_filter_cfg['terms'] | json }}\n - revision_date: false\n - revision_no: 5\n - debug: true\n\n Or:\n\n .. code-block:: yaml\n\n my_first_filter_state:\n netacl.filter:\n - filter_name: my-filter\n - merge_pillar: true\n - pillar_key: firewall\n - revision_date: false\n - revision_no: 5\n - debug: true\n\n In the example above, as ``inet6`` has been specified in the ``filter_options``,\n the configuration chunk referring to ``my-term`` has been ignored as it referred to\n IPv4 only (from ``source_address`` field).\n\n .. note::\n The first method allows the user to eventually apply complex manipulation\n and / or retrieve the data from external services before passing the\n data to the state. The second one is more straightforward, for less\n complex cases when loading the data directly from the pillar is sufficient.\n\n .. note::\n When passing retrieved pillar data into the state file, it is strongly\n recommended to use the json serializer explicitly (`` | json``),\n instead of relying on the default Python serializer.\n '''\n ret = salt.utils.napalm.default_ret(name)\n test = __opts__['test'] or test\n if not filter_options:\n filter_options = []\n if not terms:\n terms = []\n loaded = __salt__['netacl.load_filter_config'](filter_name,\n filter_options=filter_options,\n terms=terms,\n prepend=prepend,\n pillar_key=pillar_key,\n pillarenv=pillarenv,\n saltenv=saltenv,\n merge_pillar=merge_pillar,\n only_lower_merge=only_lower_merge,\n revision_id=revision_id if revision_id else name,\n revision_no=revision_no,\n revision_date=revision_date,\n revision_date_format=revision_date_format,\n test=test,\n commit=commit,\n debug=debug)\n return salt.utils.napalm.loaded_ret(ret, loaded, test, debug)"
] |
[
0.6595491170883179,
0.6499298214912415,
0.6482378840446472,
0.6475902199745178,
0.6449537873268127,
0.6423252820968628,
0.6325455904006958,
0.632215678691864,
0.6271587014198303,
0.6241557598114014,
0.6220623850822449,
0.6182300448417664
] |
Args:
checker_cls: Class performing the check to be passed back to
flake8.
|
def to_flake8(self, checker_cls: type) -> Flake8Error:
"""
Args:
checker_cls: Class performing the check to be passed back to
flake8.
"""
return Flake8Error(
line_number=self.line_number,
offset=self.offset,
text=self.text,
checker_cls=checker_cls,
)
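
# A minimal, self-contained sketch of how such a conversion is typically
# consumed: a flake8-style plugin's run() yields the converted errors.
# Flake8Error, LintError, and ExampleChecker below are hypothetical
# stand-ins for the types the method above assumes, not the source's own.
from typing import Iterator, NamedTuple


class Flake8Error(NamedTuple):
    # Field order mirrors the (line, col, message, type) tuple flake8 expects.
    line_number: int
    offset: int
    text: str
    checker_cls: type


class LintError(NamedTuple):
    line_number: int
    offset: int
    text: str

    def to_flake8(self, checker_cls: type) -> Flake8Error:
        return Flake8Error(self.line_number, self.offset, self.text, checker_cls)


class ExampleChecker:
    name = 'example-checker'
    version = '0.1'

    def run(self) -> Iterator[Flake8Error]:
        # A real plugin would inspect parsed source; one fixed error
        # suffices to show the conversion path.
        yield LintError(3, 0, 'X100 example message').to_flake8(type(self))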
|
[
"def _register_pyflakes_check():\n \"\"\"Register the pyFlakes checker into PEP8 set of checks.\"\"\"\n from flake8_isort import Flake8Isort\n from flake8_blind_except import check_blind_except\n\n # Resolving conflicts between pep8 and pyflakes.\n codes = {\n \"UnusedImport\": \"F401\",\n \"ImportShadowedByLoopVar\": \"F402\",\n \"ImportStarUsed\": \"F403\",\n \"LateFutureImport\": \"F404\",\n \"Redefined\": \"F801\",\n \"RedefinedInListComp\": \"F812\",\n \"UndefinedName\": \"F821\",\n \"UndefinedExport\": \"F822\",\n \"UndefinedLocal\": \"F823\",\n \"DuplicateArgument\": \"F831\",\n \"UnusedVariable\": \"F841\",\n }\n\n for name, obj in vars(pyflakes.messages).items():\n if name[0].isupper() and obj.message:\n obj.tpl = \"{0} {1}\".format(codes.get(name, \"F999\"), obj.message)\n\n pep8.register_check(_PyFlakesChecker, codes=['F'])\n # FIXME parser hack\n parser = pep8.get_parser('', '')\n Flake8Isort.add_options(parser)\n options, args = parser.parse_args([])\n # end of hack\n pep8.register_check(Flake8Isort, codes=['I'])\n pep8.register_check(check_blind_except, codes=['B90'])",
"def load_all_available_checkers(cls):\n \"\"\"\n Helper method to retrieve all sub checker classes derived from various\n base classes.\n \"\"\"\n for x in working_set.iter_entry_points('compliance_checker.suites'):\n try:\n xl = x.resolve()\n cls.checkers[':'.join((xl._cc_spec, xl._cc_spec_version))] = xl\n # TODO: remove this once all checkers move over to the new\n # _cc_spec, _cc_spec_version\n except AttributeError:\n # if there are versioned classes, it will get overwritten by the\n # latest version later. If there are not, it will be assigned\n # the checker as the main class\n # TODO: nix name attribute in plugins. Keeping in for now\n # to provide backwards compatibility\n cls.checkers[getattr(xl, 'name', None) or xl._cc_spec] = xl\n\n except Exception as e:\n print(\"Could not load\", x, \":\", e, file=sys.stderr)\n # find the latest version of versioned checkers and set that as the\n # default checker for compliance checker if no version is specified\n ver_checkers = sorted([c.split(':', 1) for c\n in cls.checkers if ':' in c])\n for spec, versions in itertools.groupby(ver_checkers, itemgetter(0)):\n version_nums = [v[-1] for v in versions]\n try:\n latest_version = str(max(StrictVersion(v) for v\n in version_nums))\n # if the version can't be parsed as a StrictVersion, parse\n # according to character collation\n except ValueError:\n latest_version = max(version_nums)\n cls.checkers[spec] = cls.checkers[spec + ':latest'] = \\\n cls.checkers[':'.join((spec, latest_version))]",
"def checker_from_dict(self, dct):\n \"\"\"Return a checker instance from a dict object.\"\"\"\n checker_identifier = list(dct.keys())[0]\n checker_class = self.get_checker(checker_identifier)\n if checker_class:\n return checker_class(**dct[checker_identifier])\n return None",
"async def self_check(cls):\n \"\"\"\n Check that the configuration is correct\n\n - Presence of \"token\" in the settings\n - Presence of \"BERNARD_BASE_URL\" in the global configuration\n \"\"\"\n\n # noinspection PyTypeChecker\n async for check in super(Telegram, cls).self_check():\n yield check\n\n s = cls.settings()\n\n try:\n assert isinstance(s['token'], str)\n except (KeyError, TypeError, AssertionError):\n yield HealthCheckFail(\n '00005',\n 'Missing \"token\" for Telegram platform. You can obtain one by'\n 'registering your bot in Telegram.',\n )\n\n if not hasattr(settings, 'BERNARD_BASE_URL'):\n yield HealthCheckFail(\n '00005',\n '\"BERNARD_BASE_URL\" cannot be found in the configuration. The'\n 'Telegram platform needs it because it uses it to '\n 'automatically register its hook.'\n )\n\n if not hasattr(settings, 'WEBVIEW_SECRET_KEY'):\n yield HealthCheckFail(\n '00005',\n '\"WEBVIEW_SECRET_KEY\" cannot be found in the configuration. '\n 'It is required in order to be able to create secure postback '\n 'URLs.'\n )",
"def is_correct(self):\n \"\"\"Check if this object configuration is correct ::\n\n * Check our own specific properties\n * Call our parent class is_correct checker\n\n :return: True if the configuration is correct, otherwise False\n :rtype: bool\n \"\"\"\n state = True\n\n # Internal checks before executing inherited function...\n if not hasattr(self, 'check_command'):\n msg = \"[checkmodulation::%s] do not have any check_command defined\" % (\n self.get_name()\n )\n self.add_error(msg)\n state = False\n else:\n if self.check_command is None:\n msg = \"[checkmodulation::%s] a check_command is missing\" % (self.get_name())\n self.add_error(msg)\n state = False\n if self.check_command and not self.check_command.is_valid():\n msg = \"[checkmodulation::%s] a check_command is invalid\" % (self.get_name())\n self.add_error(msg)\n state = False\n\n # Ok just put None as check_period, means 24x7\n if not hasattr(self, 'check_period'):\n self.check_period = None\n\n return super(CheckModulation, self).is_correct() and state",
"def launch_check(self, timestamp, hosts, services, timeperiods,\n macromodulations, checkmodulations, checks, ref_check=None, force=False,\n dependent=False):\n # pylint: disable=too-many-locals, too-many-arguments\n # pylint: disable=too-many-branches, too-many-return-statements\n \"\"\"Launch a check (command)\n\n :param timestamp:\n :type timestamp: int\n :param checkmodulations: Checkmodulations objects, used to change check command if necessary\n :type checkmodulations: alignak.objects.checkmodulation.Checkmodulations\n :param ref_check:\n :type ref_check:\n :param force:\n :type force: bool\n :param dependent:\n :type dependent: bool\n :return: None or alignak.check.Check\n :rtype: None | alignak.check.Check\n \"\"\"\n chk = None\n cls = self.__class__\n\n # Look if we are in check or not\n self.update_in_checking()\n\n # the check is being forced, so we just replace next_chk time by now\n if force and self.in_checking:\n try:\n c_in_progress = checks[self.checks_in_progress[0]]\n c_in_progress.t_to_go = time.time()\n return c_in_progress\n except KeyError:\n pass\n\n # If I'm already in checking, Why launch a new check?\n # If ref_check_id is not None , this is a dependency_ check\n # If none, it might be a forced check, so OK, I do a new\n\n # Dependency check, we have to create a new check that will be launched only once (now)\n # Otherwise it will delay the next real check. this can lead to an infinite SOFT state.\n if not force and (self.in_checking and ref_check is not None):\n\n c_in_progress = checks[self.checks_in_progress[0]]\n\n # c_in_progress has almost everything we need but we cant copy.deepcopy() it\n # we need another c.uuid\n data = {\n 'command': c_in_progress.command,\n 'timeout': c_in_progress.timeout,\n 'poller_tag': c_in_progress.poller_tag,\n 'env': c_in_progress.env,\n 'module_type': c_in_progress.module_type,\n 't_to_go': timestamp,\n 'depend_on_me': [ref_check],\n 'ref': self.uuid,\n 'ref_type': self.my_type,\n 'dependency_check': True,\n 'internal': self.got_business_rule or c_in_progress.command.startswith('_')\n }\n chk = Check(data)\n\n self.actions.append(chk)\n\n if os.getenv('ALIGNAK_LOG_CHECKS', None):\n logger.info(\"--ALC-- -> added a check action for %s (%s)\",\n self.get_full_name(), chk.uuid)\n return chk\n\n if force or (not self.is_no_check_dependent(hosts, services, timeperiods)):\n if self.my_type == 'service' and not self.check_command:\n # This should never happen because of configuration check!\n logger.debug(\"Service check is for a service that has no check command (%s/%s), \"\n \"do not launch the check !\", self.host_name, self.service_description)\n return None\n\n if self.my_type == 'host' and not self.check_command:\n if self.active_checks_enabled:\n logger.debug(\"Host check is for an host that has no check command (%s), \"\n \"do not launch the check !\", self.host_name)\n return None\n\n # Fred : passive only checked host dependency\n if dependent and self.my_type == 'host' and \\\n self.passive_checks_enabled and not self.active_checks_enabled:\n logger.debug(\"Host check (dependent) is for an host that is only passively \"\n \"checked (%s), do not launch the check !\", self.host_name)\n return None\n\n # By default env is void\n env = {}\n poller_tag = u'None'\n module_type = None\n\n # By default we will use our default check_command\n self.last_check_command = None\n check_command = self.check_command\n command_line = ''\n if check_command:\n poller_tag = check_command.poller_tag\n module_type = 
check_command.module_type\n\n # But if a checkway is available, use this one instead.\n # Take the first available\n for chkmod_id in self.checkmodulations:\n chkmod = checkmodulations[chkmod_id]\n c_cw = chkmod.get_check_command(timeperiods, timestamp)\n if c_cw:\n check_command = c_cw\n break\n\n # Get the command to launch\n macroresolver = MacroResolver()\n data = self.get_data_for_checks(hosts)\n command_line = macroresolver.resolve_command(check_command, data,\n macromodulations, timeperiods)\n\n # remember it, for pure debugging purpose\n self.last_check_command = command_line\n\n # And get all environment variables only if needed\n if cls.enable_environment_macros or (check_command and\n check_command.enable_environment_macros):\n env = macroresolver.get_env_macros(data)\n\n # By default we take the global timeout, but we use the command one if it\n # is defined (default is -1 for no timeout)\n timeout = cls.check_timeout\n if check_command and check_command.timeout != -1:\n timeout = check_command.timeout\n\n # Make the Check object and put the service in checking\n # Make the check inherit poller_tag from the command\n # And reactionner_tag too\n data = {\n 'command': command_line,\n 'timeout': timeout,\n 'poller_tag': poller_tag,\n 'env': env,\n 'module_type': module_type,\n 't_to_go': timestamp,\n 'depend_on_me': [ref_check] if ref_check else [],\n 'ref': self.uuid,\n 'ref_type': self.my_type,\n 'internal': self.got_business_rule or command_line.startswith('_')\n }\n chk = Check(data)\n\n self.checks_in_progress.append(chk.uuid)\n\n self.update_in_checking()\n\n # We need to put this new check in our actions queue\n # so scheduler can take it\n if chk is not None:\n self.actions.append(chk)\n\n if os.getenv('ALIGNAK_LOG_CHECKS', None):\n logger.info(\"--ALC-- -> added a check action for %s (%s)\",\n self.get_full_name(), chk.uuid)\n return chk\n # None mean I already take it into account\n return None",
"def _get_valid_checkers(self, ds, checker_names):\n \"\"\"\n Returns a filtered list of 2-tuples: (name, valid checker) based on the ds object's type and\n the user selected names.\n \"\"\"\n\n assert len(self.checkers) > 0, \"No checkers could be found.\"\n\n if len(checker_names) == 0:\n checker_names = list(self.checkers.keys())\n\n args = [(name, self.checkers[name]) for name in checker_names if name in self.checkers]\n valid = []\n\n all_checked = set([a[1] for a in args]) # only class types\n checker_queue = set(args)\n while len(checker_queue):\n name, a = checker_queue.pop()\n # is the current dataset type in the supported filetypes\n # for the checker class?\n if type(ds) in a().supported_ds:\n valid.append((name, a))\n\n # add any subclasses of the checker class\n for subc in a.__subclasses__():\n if subc not in all_checked:\n all_checked.add(subc)\n checker_queue.add((name, subc))\n\n return valid",
"def check(self, check_req):\n \"\"\"Process a check_request.\n\n The req is first passed to the check_aggregator. If there is a valid\n cached response, that is returned, otherwise a response is obtained from\n the transport.\n\n Args:\n check_req (``ServicecontrolServicesCheckRequest``): to be sent to\n the service control service\n\n Returns:\n ``CheckResponse``: either the cached response if one is applicable\n or a response from making a transport request, or None if\n if the request to the transport fails\n\n \"\"\"\n\n self.start()\n res = self._check_aggregator.check(check_req)\n if res:\n _logger.debug(u'using cached check response for %s: %s',\n check_request, res)\n return res\n\n # Application code should not fail because check request's don't\n # complete, They should fail open, so here simply log the error and\n # return None to indicate that no response was obtained\n try:\n transport = self._create_transport()\n resp = transport.services.Check(check_req)\n self._check_aggregator.add_response(check_req, resp)\n return resp\n except exceptions.Error: # only sink apitools errors\n _logger.error(u'direct send of check request failed %s',\n check_request, exc_info=True)\n return None",
"def check(name, type=None, context=None, position=None):\n \"\"\"https://github.com/frictionlessdata/goodtables-py#custom-checks\n \"\"\"\n def decorator(func):\n registry.register_check(func, name, type, context, position)\n return func\n return decorator",
"def _do_check(self, obj, check_module, check_str):\n '''Run a check function on obj'''\n opts = self._config['options']\n if check_str in opts:\n fargs = opts[check_str]\n if isinstance(fargs, list):\n out = check_wrapper(getattr(check_module, check_str))(obj, *fargs)\n else:\n out = check_wrapper(getattr(check_module, check_str))(obj, fargs)\n else:\n out = check_wrapper(getattr(check_module, check_str))(obj)\n\n try:\n if out.info:\n L.debug('%s: %d failing ids detected: %s',\n out.title, len(out.info), out.info)\n except TypeError: # pragma: no cover\n pass\n\n return out",
"def get_to_run_checks(self, do_checks=False, do_actions=False,\n poller_tags=None, reactionner_tags=None,\n worker_name='none', module_types=None):\n # pylint: disable=too-many-branches\n \"\"\"Get actions/checks for reactionner/poller\n\n Called by the poller to get checks (do_checks=True) and\n by the reactionner (do_actions=True) to get actions\n\n :param do_checks: do we get checks ?\n :type do_checks: bool\n :param do_actions: do we get actions ?\n :type do_actions: bool\n :param poller_tags: poller tags to filter\n :type poller_tags: list\n :param reactionner_tags: reactionner tags to filter\n :type reactionner_tags: list\n :param worker_name: worker name to fill check/action (to remember it)\n :type worker_name: str\n :param module_types: module type to filter\n :type module_types: list\n :return: Check/Action list with poller/reactionner tags matching and module type matching\n :rtype: list\n \"\"\"\n res = []\n now = time.time()\n\n if poller_tags is None:\n poller_tags = ['None']\n if reactionner_tags is None:\n reactionner_tags = ['None']\n if module_types is None:\n module_types = ['fork']\n if not isinstance(module_types, list):\n module_types = [module_types]\n\n # If a poller wants its checks\n if do_checks:\n if self.checks:\n logger.debug(\"I have %d prepared checks\", len(self.checks))\n\n for check in list(self.checks.values()):\n logger.debug(\"Check: %s (%s / %s)\", check.uuid, check.poller_tag, check.module_type)\n\n if check.internal:\n # Do not care about Alignak internally executed checks\n continue\n\n # If the command is untagged, and the poller too, or if both are tagged\n # with same name, go for it\n # if do_check, call for poller, and so poller_tags by default is ['None']\n # by default poller_tag is 'None' and poller_tags is ['None']\n # and same for module_type, the default is the 'fork' type\n if check.poller_tag not in poller_tags:\n logger.debug(\" -> poller tag do not match\")\n continue\n if check.module_type not in module_types:\n logger.debug(\" -> module type do not match\")\n continue\n\n logger.debug(\" -> : %s %s (%s)\",\n 'worker' if not check.internal else 'internal',\n check.status,\n 'now' if check.is_launchable(now) else 'not yet')\n if check._is_orphan and check.status == ACT_STATUS_SCHEDULED \\\n and os.getenv('ALIGNAK_LOG_CHECKS', None):\n logger.info(\"--ALC-- orphan check: %s -> : %s %s (%s)\",\n check, 'worker' if not check.internal else 'internal',\n check.status, 'now' if check.is_launchable(now) else 'not yet')\n\n # must be ok to launch, and not an internal one (business rules based)\n if check.status == ACT_STATUS_SCHEDULED and check.is_launchable(now):\n logger.debug(\"Check to run: %s\", check)\n check.status = ACT_STATUS_POLLED\n check.my_worker = worker_name\n res.append(check)\n\n # Stats\n self.nb_checks_launched += 1\n\n if 'ALIGNAK_LOG_ACTIONS' in os.environ:\n if os.environ['ALIGNAK_LOG_ACTIONS'] == 'WARNING':\n logger.warning(\"Check to run: %s\", check)\n else:\n logger.info(\"Check to run: %s\", check)\n\n if res:\n logger.debug(\"-> %d checks to start now\", len(res))\n else:\n logger.debug(\"-> no checks to start now\")\n\n # If a reactionner wants its actions\n if do_actions:\n if self.actions:\n logger.debug(\"I have %d prepared actions\", len(self.actions))\n\n for action in list(self.actions.values()):\n logger.debug(\"Action: %s (%s / %s)\",\n action.uuid, action.reactionner_tag, action.module_type)\n\n if action.internal:\n # Do not care about Alignak internally executed checks\n continue\n\n is_master = 
(action.is_a == 'notification' and not action.contact)\n if is_master:\n continue\n\n # if do_action, call the reactionner,\n # and so reactionner_tags by default is ['None']\n # by default reactionner_tag is 'None' and reactionner_tags is ['None'] too\n # So if not the good one, loop for next :)\n if action.reactionner_tag not in reactionner_tags:\n logger.debug(\" -> reactionner tag do not match\")\n continue\n\n # same for module_type\n if action.module_type not in module_types:\n logger.debug(\" -> module type do not match\")\n continue\n\n # And now look if we can launch or not :)\n logger.debug(\" -> : worker %s (%s)\",\n action.status, 'now' if action.is_launchable(now) else 'not yet')\n if action._is_orphan and action.status == ACT_STATUS_SCHEDULED and \\\n os.getenv('ALIGNAK_LOG_CHECKS', None):\n logger.info(\"--ALC-- orphan action: %s\", action)\n\n if action.status == ACT_STATUS_SCHEDULED and action.is_launchable(now):\n # This is for child notifications and eventhandlers\n action.status = ACT_STATUS_POLLED\n action.my_worker = worker_name\n res.append(action)\n\n # Stats\n self.nb_actions_launched += 1\n\n if 'ALIGNAK_LOG_ACTIONS' in os.environ:\n if os.environ['ALIGNAK_LOG_ACTIONS'] == 'WARNING':\n logger.warning(\"Action to run: %s\", action)\n else:\n logger.info(\"Action to run: %s\", action)\n\n if res:\n logger.debug(\"-> %d actions to start now\", len(res))\n else:\n logger.debug(\"-> no actions to start now\")\n\n return res",
"async def health_check(cls) -> Iterator[HealthCheckFail]:\n \"\"\"\n Perform checks of the state itself. So far:\n\n - For each method of the class, check for the presence of a\n health_check() method. If the method is present then call it. This is\n used to allow the context decorator to make some checks on the\n structure of the class.\n \"\"\"\n\n for k, v in cls.__dict__.items():\n if hasattr(v, 'health_check') and callable(v.health_check):\n async for check in v.health_check(cls):\n yield check"
] |
[
0.7212687730789185,
0.7063631415367126,
0.6970942616462708,
0.6942752599716187,
0.6874454617500305,
0.6854748725891113,
0.6773062348365784,
0.6765158772468567,
0.672455906867981,
0.6719608306884766,
0.6686707735061646,
0.6638014316558838
] |
:param chassis: chassis object
|
def add_chassis(self, chassis):
"""
:param chassis: chassis object
"""
res = self._request(RestMethod.post, self.user_url, params={'ip': chassis.ip, 'port': chassis.port})
    assert res.status_code == 201
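
# For context, a self-contained sketch of the client this method could live
# on, assuming a plain requests transport. RestSession, the URL layout, and
# the Chassis value object are assumptions, not taken from the source; the
# 22611 default port is borrowed from the neighboring Xena snippet.
import requests


class Chassis:
    def __init__(self, ip: str, port: int = 22611):
        self.ip = ip
        self.port = port


class RestSession:
    def __init__(self, base_url: str, user: str):
        self.user_url = '{}/session/{}'.format(base_url, user)

    def add_chassis(self, chassis: Chassis) -> None:
        # POST the chassis address; 201 Created signals success.
        res = requests.post(self.user_url,
                            params={'ip': chassis.ip, 'port': chassis.port})
        assert res.status_code == 201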
|
[
"def add_chassis(self, chassis, port=22611, password='xena'):\n \"\"\" Add chassis.\n\n XenaManager-2G -> Add Chassis.\n\n :param chassis: chassis IP address\n :param port: chassis port number\n :param password: chassis password\n :return: newly created chassis\n :rtype: xenamanager.xena_app.XenaChassis\n \"\"\"\n\n if chassis not in self.chassis_list:\n try:\n XenaChassis(self, chassis, port, password)\n except Exception as error:\n self.objects.pop('{}/{}'.format(self.owner, chassis))\n raise error\n return self.chassis_list[chassis]",
"def chassis_info(self, chassis):\n \"\"\"Get information about the specified chassis.\"\"\"\n if not chassis or not isinstance(chassis, str):\n raise RuntimeError('missing chassis address')\n self._check_session()\n status, data = self._rest.get_request('chassis', chassis)\n return data",
"def get_chassis_location(host=None,\n admin_username=None,\n admin_password=None):\n '''\n Get the location of the chassis.\n\n host\n The chassis host.\n\n admin_username\n The username used to access the chassis.\n\n admin_password\n The password used to access the chassis.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' dracr.set_chassis_location host=111.222.333.444\n admin_username=root admin_password=secret\n\n '''\n return system_info(host=host,\n admin_username=admin_username,\n admin_password=admin_password)['Chassis Information']['Chassis Location']",
"def chassis(name, chassis_name=None, password=None, datacenter=None,\n location=None, mode=None, idrac_launch=None, slot_names=None,\n blade_power_states=None):\n '''\n Manage a Dell Chassis.\n\n chassis_name\n The name of the chassis.\n\n datacenter\n The datacenter in which the chassis is located\n\n location\n The location of the chassis.\n\n password\n Password for the chassis. Note: If this password is set for the chassis,\n the current implementation of this state will set this password both on\n the chassis and the iDrac passwords on any configured blades. If the\n password for the blades should be distinct, they should be set separately\n with the blade_idrac function.\n\n mode\n The management mode of the chassis. Viable options are:\n\n - 0: None\n - 1: Monitor\n - 2: Manage and Monitor\n\n idrac_launch\n The iDRAC launch method of the chassis. Viable options are:\n\n - 0: Disabled (launch iDRAC using IP address)\n - 1: Enabled (launch iDRAC using DNS name)\n\n slot_names\n The names of the slots, provided as a list identified by\n their slot numbers.\n\n blade_power_states\n The power states of a blade server, provided as a list and\n identified by their server numbers. Viable options are:\n\n - on: Ensure the blade server is powered on.\n - off: Ensure the blade server is powered off.\n - powercycle: Power cycle the blade server.\n\n Example:\n\n .. code-block:: yaml\n\n my-dell-chassis:\n dellchassis.chassis:\n - chassis_name: my-dell-chassis\n - location: my-location\n - datacenter: london\n - mode: 2\n - idrac_launch: 1\n - slot_names:\n - 1: my-slot-name\n - 2: my-other-slot-name\n - blade_power_states:\n - server-1: on\n - server-2: off\n - server-3: powercycle\n '''\n ret = {'name': chassis_name,\n 'chassis_name': chassis_name,\n 'result': True,\n 'changes': {},\n 'comment': ''}\n\n chassis_cmd = 'chassis.cmd'\n cfg_tuning = 'cfgRacTuning'\n mode_cmd = 'cfgRacTuneChassisMgmtAtServer'\n launch_cmd = 'cfgRacTuneIdracDNSLaunchEnable'\n\n inventory = __salt__[chassis_cmd]('inventory')\n\n if idrac_launch:\n idrac_launch = six.text_type(idrac_launch)\n\n current_name = __salt__[chassis_cmd]('get_chassis_name')\n if chassis_name != current_name:\n ret['changes'].update({'Name':\n {'Old': current_name,\n 'New': chassis_name}})\n\n current_dc = __salt__[chassis_cmd]('get_chassis_datacenter')\n if datacenter and datacenter != current_dc:\n ret['changes'].update({'Datacenter':\n {'Old': current_dc,\n 'New': datacenter}})\n\n if password:\n ret['changes'].update({'Password':\n {'Old': '******',\n 'New': '******'}})\n if location:\n current_location = __salt__[chassis_cmd]('get_chassis_location')\n if location != current_location:\n ret['changes'].update({'Location':\n {'Old': current_location,\n 'New': location}})\n if mode:\n current_mode = __salt__[chassis_cmd]('get_general', cfg_tuning, mode_cmd)\n if mode != current_mode:\n ret['changes'].update({'Management Mode':\n {'Old': current_mode,\n 'New': mode}})\n\n if idrac_launch:\n current_launch_method = __salt__[chassis_cmd]('get_general', cfg_tuning, launch_cmd)\n if idrac_launch != current_launch_method:\n ret['changes'].update({'iDrac Launch Method':\n {'Old': current_launch_method,\n 'New': idrac_launch}})\n\n if slot_names:\n current_slot_names = __salt__[chassis_cmd]('list_slotnames')\n for s in slot_names:\n key = s.keys()[0]\n new_name = s[key]\n if key.startswith('slot-'):\n key = key[5:]\n\n current_slot_name = current_slot_names.get(key).get('slotname')\n if current_slot_name != new_name:\n old = {key: 
current_slot_name}\n new = {key: new_name}\n if ret['changes'].get('Slot Names') is None:\n ret['changes'].update({'Slot Names':\n {'Old': {},\n 'New': {}}})\n ret['changes']['Slot Names']['Old'].update(old)\n ret['changes']['Slot Names']['New'].update(new)\n\n current_power_states = {}\n target_power_states = {}\n if blade_power_states:\n for b in blade_power_states:\n key = b.keys()[0]\n status = __salt__[chassis_cmd]('server_powerstatus', module=key)\n current_power_states[key] = status.get('status', -1)\n if b[key] == 'powerdown':\n if current_power_states[key] != -1 and current_power_states[key]:\n target_power_states[key] = 'powerdown'\n if b[key] == 'powerup':\n if current_power_states[key] != -1 and not current_power_states[key]:\n target_power_states[key] = 'powerup'\n if b[key] == 'powercycle':\n if current_power_states[key] != -1 and not current_power_states[key]:\n target_power_states[key] = 'powerup'\n if current_power_states[key] != -1 and current_power_states[key]:\n target_power_states[key] = 'powercycle'\n for k, v in six.iteritems(target_power_states):\n old = {k: current_power_states[k]}\n new = {k: v}\n if ret['changes'].get('Blade Power States') is None:\n ret['changes'].update({'Blade Power States':\n {'Old': {},\n 'New': {}}})\n ret['changes']['Blade Power States']['Old'].update(old)\n ret['changes']['Blade Power States']['New'].update(new)\n\n if ret['changes'] == {}:\n ret['comment'] = 'Dell chassis is already in the desired state.'\n return ret\n\n if __opts__['test']:\n ret['result'] = None\n ret['comment'] = 'Dell chassis configuration will change.'\n return ret\n\n # Finally, set the necessary configurations on the chassis.\n name = __salt__[chassis_cmd]('set_chassis_name', chassis_name)\n if location:\n location = __salt__[chassis_cmd]('set_chassis_location', location)\n pw_result = True\n if password:\n pw_single = True\n if __salt__[chassis_cmd]('change_password', username='root', uid=1,\n password=password):\n for blade in inventory['server']:\n pw_single = __salt__[chassis_cmd]('deploy_password',\n username='root',\n password=password,\n module=blade)\n if not pw_single:\n pw_result = False\n else:\n pw_result = False\n\n if datacenter:\n datacenter_result = __salt__[chassis_cmd]('set_chassis_datacenter',\n datacenter)\n if mode:\n mode = __salt__[chassis_cmd]('set_general', cfg_tuning, mode_cmd, mode)\n if idrac_launch:\n idrac_launch = __salt__[chassis_cmd]('set_general', cfg_tuning, launch_cmd, idrac_launch)\n if ret['changes'].get('Slot Names') is not None:\n slot_rets = []\n for s in slot_names:\n key = s.keys()[0]\n new_name = s[key]\n if key.startswith('slot-'):\n key = key[5:]\n slot_rets.append(__salt__[chassis_cmd]('set_slotname', key, new_name))\n\n if any(slot_rets) is False:\n slot_names = False\n else:\n slot_names = True\n\n powerchange_all_ok = True\n for k, v in six.iteritems(target_power_states):\n powerchange_ok = __salt__[chassis_cmd]('server_power', v, module=k)\n if not powerchange_ok:\n powerchange_all_ok = False\n\n if any([name, location, mode, idrac_launch,\n slot_names, powerchange_all_ok]) is False:\n ret['result'] = False\n ret['comment'] = 'There was an error setting the Dell chassis.'\n\n ret['comment'] = 'Dell chassis was updated.'\n return ret",
"def set_chassis(self, chassis):\n \"\"\"\n Sets the chassis.\n\n :param: chassis string:\n 1720, 1721, 1750, 1751 or 1760\n \"\"\"\n\n yield from self._hypervisor.send('c1700 set_chassis \"{name}\" {chassis}'.format(name=self._name, chassis=chassis))\n\n log.info('Router \"{name}\" [{id}]: chassis set to {chassis}'.format(name=self._name,\n id=self._id,\n chassis=chassis))\n\n self._chassis = chassis\n self._setup_chassis()",
"def add_chassis(self, chassis):\n \"\"\"\n :param ip: chassis object\n \"\"\"\n\n self.chassis_list[chassis] = XenaSocket(self.logger, chassis.ip, chassis.port)\n self.chassis_list[chassis].connect()\n KeepAliveThread(self.chassis_list[chassis]).start()\n self.send_command(chassis, 'c_logon', '\"{}\"'.format(chassis.password))\n self.send_command(chassis, 'c_owner', '\"{}\"'.format(chassis.owner))",
"def set_chassis_location(location,\n host=None,\n admin_username=None,\n admin_password=None):\n '''\n Set the location of the chassis.\n\n location\n The name of the location to be set on the chassis.\n\n host\n The chassis host.\n\n admin_username\n The username used to access the chassis.\n\n admin_password\n The password used to access the chassis.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' dracr.set_chassis_location location-name host=111.222.333.444\n admin_username=root admin_password=secret\n\n '''\n return __execute_cmd('setsysinfo -c chassislocation {0}'.format(location),\n host=host, admin_username=admin_username,\n admin_password=admin_password)",
"def chassis(self):\n \"\"\"Get list of chassis known to test session.\"\"\"\n self._check_session()\n status, data = self._rest.get_request('chassis')\n return data",
"def set_chassis_name(name,\n host=None,\n admin_username=None,\n admin_password=None):\n '''\n Set the name of the chassis.\n\n name\n The name to be set on the chassis.\n\n host\n The chassis host.\n\n admin_username\n The username used to access the chassis.\n\n admin_password\n The password used to access the chassis.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' dracr.set_chassis_name my-chassis host=111.222.333.444\n admin_username=root admin_password=secret\n\n '''\n return __execute_cmd('setsysinfo -c chassisname {0}'.format(name),\n host=host, admin_username=admin_username,\n admin_password=admin_password)",
"def add(self, chassis):\n \"\"\" add chassis.\n\n :param chassis: chassis IP address.\n \"\"\"\n\n self.chassis_chain[chassis] = IxeChassis(self.session, chassis, len(self.chassis_chain) + 1)\n self.chassis_chain[chassis].connect()",
"def get_chassis_datacenter(host=None,\n admin_username=None,\n admin_password=None):\n '''\n Get the datacenter of the chassis.\n\n host\n The chassis host.\n\n admin_username\n The username used to access the chassis.\n\n admin_password\n The password used to access the chassis.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' dracr.set_chassis_location host=111.222.333.444\n admin_username=root admin_password=secret\n\n '''\n return get_general('cfgLocation', 'cfgLocationDatacenter', host=host,\n admin_username=admin_username, admin_password=admin_password)",
"def get_chassis_name(host=None, admin_username=None, admin_password=None):\n '''\n Get the name of a chassis.\n\n host\n The chassis host.\n\n admin_username\n The username used to access the chassis.\n\n admin_password\n The password used to access the chassis.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' dracr.get_chassis_name host=111.222.333.444\n admin_username=root admin_password=secret\n\n '''\n return bare_rac_cmd('getchassisname', host=host,\n admin_username=admin_username,\n admin_password=admin_password)"
] |
[
0.790794312953949,
0.7876158356666565,
0.7736592292785645,
0.7695014476776123,
0.7676397562026978,
0.7613735198974609,
0.7605212330818176,
0.7515163421630859,
0.7454978823661804,
0.7447369694709778,
0.7427849769592285,
0.7379995584487915
] |
Send command with no output.
:param obj: requested object.
:param command: command to send.
:param arguments: list of command arguments.
|
def send_command(self, obj, command, *arguments):
""" Send command with no output.
:param obj: requested object.
:param command: command to send.
:param arguments: list of command arguments.
"""
self._perform_command('{}/{}'.format(self.session_url, obj.ref), command, OperReturnType.no_output, *arguments)
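
# The no-output variant above and the single-line / multi-line variants in
# the records below all funnel into one dispatcher. A plausible shape for it,
# assuming a requests transport: OperReturnType's members are inferred from
# the three call sites; the endpoint and payload layout are hypothetical.
from enum import Enum

import requests


class OperReturnType(Enum):
    no_output = 'no_output'
    line_output = 'line_output'
    multiline_output = 'multiline_output'


def _perform_command(object_url, command, return_type, *arguments):
    # One POST per command; the server shapes the payload according to the
    # requested return type, so callers expecting output call .json().
    res = requests.post('{}/commands'.format(object_url),
                        json={'command': command,
                              'return_type': return_type.value,
                              'arguments': list(arguments)})
    res.raise_for_status()
    return res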
|
[
"def send_command(self, command, *arguments):\n \"\"\" Send command with no output.\n\n :param command: command to send.\n :param arguments: list of command arguments.\n \"\"\"\n self.api.send_command(self, command, *arguments)",
"def send_command_return(self, obj, command, *arguments):\n \"\"\" Send command with single line output.\n\n :param obj: requested object.\n :param command: command to send.\n :param arguments: list of command arguments.\n :return: command output.\n \"\"\"\n return self._perform_command('{}/{}'.format(self.session_url, obj.ref), command, OperReturnType.line_output,\n *arguments).json()",
"def send_command_return_multilines(self, obj, command, *arguments):\n \"\"\" Send command with no output.\n\n :param obj: requested object.\n :param command: command to send.\n :param arguments: list of command arguments.\n :return: list of command output lines.\n :rtype: list(str)\n \"\"\"\n return self._perform_command('{}/{}'.format(self.session_url, obj.ref), command,\n OperReturnType.multiline_output, *arguments).json()",
"def send_command(self, obj, command, *arguments):\n \"\"\" Send command and do not parse output (except for communication errors).\n\n :param obj: requested object.\n :param command: command to send.\n :param arguments: list of command arguments.\n \"\"\"\n index_command = obj._build_index_command(command, *arguments)\n self.chassis_list[obj.chassis].sendQueryVerify(index_command)",
"def __send_command(self, command, args=[]):\n '''Send a raw command.'''\n self.ws.send(json.dumps({\"op\": command, \"args\": args}))",
"def send_command(self, command, arg=None):\n \"\"\"Sends a command to the device.\n\n Args:\n command: The command to send.\n arg: Optional argument to the command.\n \"\"\"\n if arg is not None:\n command = '%s:%s' % (command, arg)\n self._write(six.StringIO(command), len(command))",
"def SendCommand(self, command, arg=None):\n \"\"\"Sends a command to the device.\n\n Args:\n command: The command to send.\n arg: Optional argument to the command.\n \"\"\"\n if arg is not None:\n if not isinstance(arg, bytes):\n arg = arg.encode('utf8')\n command = b'%s:%s' % (command, arg)\n\n self._Write(io.BytesIO(command), len(command))",
"def set_attributes(self, obj, **attributes):\n \"\"\" Set attributes.\n\n :param obj: requested object.\n :param attributes: dictionary of {attribute: value} to set\n \"\"\"\n for attribute, value in attributes.items():\n self.send_command(obj, attribute, value)",
"def send_command(self, command, args=None):\n \"\"\"\n Send a command to VNDB and then get the result.\n\n :param command: What command are we sending\n :param args: What are the json args for this command\n :return: Servers Response\n :rtype: Dictionary (See D11 docs on VNDB)\n \"\"\"\n if args:\n if isinstance(args, str):\n final_command = command + ' ' + args + '\\x04'\n else:\n # We just let ujson propogate the error here if it can't parse the arguments\n final_command = command + ' ' + ujson.dumps(args) + '\\x04'\n else:\n final_command = command + '\\x04'\n self.sslwrap.sendall(final_command.encode('utf-8'))\n return self._recv_data()",
"async def send_command(self, command, args, validator, timeout=10.0):\n \"\"\"Send a command and synchronously wait for a single response.\n\n Args:\n command (string): The command name\n args (dict): Optional arguments.\n validator (Verifier): A SchemaVerifier to verify the response\n payload.\n timeout (float): The maximum time to wait for a response.\n Defaults to 10 seconds.\n\n Returns:\n dict: The response payload\n\n Raises:\n ExternalError: If the server is not connected or the command\n fails.\n asyncio.TimeoutError: If the command times out.\n ValidationError: If the response payload does not match the\n given validator.\n \"\"\"\n\n if self._con is None:\n raise ExternalError(\"No websock connection established\")\n\n cmd_uuid = str(uuid.uuid4())\n msg = dict(type='command', operation=command, uuid=cmd_uuid,\n payload=args)\n\n packed = pack(msg)\n\n # Note: register future before sending to avoid race conditions\n response_future = self._manager.wait_for(type=\"response\", uuid=cmd_uuid,\n timeout=timeout)\n\n await self._con.send(packed)\n\n response = await response_future\n\n if response.get('success') is False:\n self._raise_error(command, response)\n\n if validator is None:\n return response.get('payload')\n\n return validator.verify(response.get('payload'))",
"def send_command(self, cmd, sudo=False, stderr=None, stdout=None):\n '''send command is a non interactive version of run_command, meaning\n that we execute the command and return the return value, but don't\n attempt to stream any content (text from the screen) back to the\n user. This is useful for commands interacting with OCI bundles.\n\n Parameters\n ==========\n cmd: the list of commands to send to the terminal\n sudo: use sudo (or not)\n '''\n \n if sudo is True:\n cmd = ['sudo'] + cmd\n\n process = subprocess.Popen(cmd, stderr=stderr, stdout=stdout)\n result = process.communicate()\n return result",
"def send_command_return(self, obj, command, *arguments):\n \"\"\" Send command and wait for single line output. \"\"\"\n index_command = obj._build_index_command(command, *arguments)\n return obj._extract_return(command, self.chassis_list[obj.chassis].sendQuery(index_command))"
] |
[
0.8184460997581482,
0.807597815990448,
0.783747136592865,
0.7724220156669617,
0.7577327489852905,
0.7346464395523071,
0.7282400727272034,
0.7163318395614624,
0.7157878875732422,
0.7135836482048035,
0.7042279839515686,
0.7005967497825623
] |
Send command with single line output.
:param obj: requested object.
:param command: command to send.
:param arguments: list of command arguments.
:return: command output.
|
def send_command_return(self, obj, command, *arguments):
""" Send command with single line output.
:param obj: requested object.
:param command: command to send.
:param arguments: list of command arguments.
:return: command output.
"""
return self._perform_command('{}/{}'.format(self.session_url, obj.ref), command, OperReturnType.line_output,
*arguments).json()
|
[
"def send_command_return_multilines(self, obj, command, *arguments):\n \"\"\" Send command with no output.\n\n :param obj: requested object.\n :param command: command to send.\n :param arguments: list of command arguments.\n :return: list of command output lines.\n :rtype: list(str)\n \"\"\"\n return self._perform_command('{}/{}'.format(self.session_url, obj.ref), command,\n OperReturnType.multiline_output, *arguments).json()",
"def send_command(self, obj, command, *arguments):\n \"\"\" Send command with no output.\n\n :param obj: requested object.\n :param command: command to send.\n :param arguments: list of command arguments.\n \"\"\"\n self._perform_command('{}/{}'.format(self.session_url, obj.ref), command, OperReturnType.no_output, *arguments)",
"def send_command(self, command, *arguments):\n \"\"\" Send command with no output.\n\n :param command: command to send.\n :param arguments: list of command arguments.\n \"\"\"\n self.api.send_command(self, command, *arguments)",
"def send_command_return(self, command, *arguments):\n \"\"\" Send command and wait for single line output. \"\"\"\n return self.api.send_command_return(self, command, *arguments)",
"def send_command(self, obj, command, *arguments):\n \"\"\" Send command and do not parse output (except for communication errors).\n\n :param obj: requested object.\n :param command: command to send.\n :param arguments: list of command arguments.\n \"\"\"\n index_command = obj._build_index_command(command, *arguments)\n self.chassis_list[obj.chassis].sendQueryVerify(index_command)",
"def __send_command(self, command, args=[]):\n '''Send a raw command.'''\n self.ws.send(json.dumps({\"op\": command, \"args\": args}))",
"def send_command_return(self, obj, command, *arguments):\n \"\"\" Send command and wait for single line output. \"\"\"\n index_command = obj._build_index_command(command, *arguments)\n return obj._extract_return(command, self.chassis_list[obj.chassis].sendQuery(index_command))",
"def send_command(self, command, arg=None):\n \"\"\"Sends a command to the device.\n\n Args:\n command: The command to send.\n arg: Optional argument to the command.\n \"\"\"\n if arg is not None:\n command = '%s:%s' % (command, arg)\n self._write(six.StringIO(command), len(command))",
"def sendline(self, cmd):\n '''\n Send this command to the server and\n return a tuple of the output and the stderr.\n\n The format for parameters is:\n\n cmd (string): The command to send to the sever.\n '''\n self.conn.sendline(cmd, self.linesep)\n\n # saw_prompt = False\n ret_stdout = []\n ret_stderr = []\n while self.conn.has_unread_data:\n stdout, stderr = self.conn.recv()\n\n if stdout:\n ret_stdout.append(stdout)\n if stderr:\n log.debug('Error while executing command.')\n ret_stderr.append(stderr)\n\n if stdout and self.prompt_re.search(stdout):\n break\n\n return ''.join(ret_stdout), ''.join(ret_stderr)",
"def _send(self, command):\n \"\"\"\n Sends a raw line to the server.\n\n :param command: line to send.\n :type command: unicode\n \"\"\"\n command = command.encode('utf-8')\n log.debug('>> ' + command)\n self.conn.oqueue.put(command)",
"def SendCommand(self, command, arg=None):\n \"\"\"Sends a command to the device.\n\n Args:\n command: The command to send.\n arg: Optional argument to the command.\n \"\"\"\n if arg is not None:\n if not isinstance(arg, bytes):\n arg = arg.encode('utf8')\n command = b'%s:%s' % (command, arg)\n\n self._Write(io.BytesIO(command), len(command))",
"def send_command(self, command, args=None):\n \"\"\"\n Send a command to VNDB and then get the result.\n\n :param command: What command are we sending\n :param args: What are the json args for this command\n :return: Servers Response\n :rtype: Dictionary (See D11 docs on VNDB)\n \"\"\"\n if args:\n if isinstance(args, str):\n final_command = command + ' ' + args + '\\x04'\n else:\n # We just let ujson propogate the error here if it can't parse the arguments\n final_command = command + ' ' + ujson.dumps(args) + '\\x04'\n else:\n final_command = command + '\\x04'\n self.sslwrap.sendall(final_command.encode('utf-8'))\n return self._recv_data()"
] |
[
0.8330472707748413,
0.8227455615997314,
0.7587301135063171,
0.7569341659545898,
0.745957612991333,
0.7302963137626648,
0.7235915064811707,
0.7208687663078308,
0.7167407274246216,
0.7095765471458435,
0.7067358493804932,
0.7044870257377625
] |
Send command with multi-line output.
:param obj: requested object.
:param command: command to send.
:param arguments: list of command arguments.
:return: list of command output lines.
:rtype: list(str)
|
def send_command_return_multilines(self, obj, command, *arguments):
""" Send command with no output.
:param obj: requested object.
:param command: command to send.
:param arguments: list of command arguments.
:return: list of command output lines.
:rtype: list(str)
"""
return self._perform_command('{}/{}'.format(self.session_url, obj.ref), command,
OperReturnType.multiline_output, *arguments).json()
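
# Taken together, the three wrappers give callers a simple contract:
# fire-and-forget, one value, or a list of lines. A toy in-memory fake
# (hypothetical, no transport) makes that contract explicit:
class FakeCommandApi:
    def send_command(self, obj, command, *arguments):
        return None  # no output by design

    def send_command_return(self, obj, command, *arguments):
        return 'loopback'  # single line

    def send_command_return_multilines(self, obj, command, *arguments):
        return ['line 1', 'line 2']  # list of lines


api = FakeCommandApi()
assert api.send_command(None, 'p_reset') is None
assert api.send_command_return(None, 'p_comment', '?') == 'loopback'
assert api.send_command_return_multilines(None, 'p_config', '?') == ['line 1', 'line 2']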
|
[
"def send_command(self, obj, command, *arguments):\n \"\"\" Send command with no output.\n\n :param obj: requested object.\n :param command: command to send.\n :param arguments: list of command arguments.\n \"\"\"\n self._perform_command('{}/{}'.format(self.session_url, obj.ref), command, OperReturnType.no_output, *arguments)",
"def send_command_return(self, obj, command, *arguments):\n \"\"\" Send command with single line output.\n\n :param obj: requested object.\n :param command: command to send.\n :param arguments: list of command arguments.\n :return: command output.\n \"\"\"\n return self._perform_command('{}/{}'.format(self.session_url, obj.ref), command, OperReturnType.line_output,\n *arguments).json()",
"def send_command(self, command, *arguments):\n \"\"\" Send command with no output.\n\n :param command: command to send.\n :param arguments: list of command arguments.\n \"\"\"\n self.api.send_command(self, command, *arguments)",
"def send_command(self, obj, command, *arguments):\n \"\"\" Send command and do not parse output (except for communication errors).\n\n :param obj: requested object.\n :param command: command to send.\n :param arguments: list of command arguments.\n \"\"\"\n index_command = obj._build_index_command(command, *arguments)\n self.chassis_list[obj.chassis].sendQueryVerify(index_command)",
"def sendline(self, cmd):\n '''\n Send this command to the server and\n return a tuple of the output and the stderr.\n\n The format for parameters is:\n\n cmd (string): The command to send to the sever.\n '''\n self.conn.sendline(cmd, self.linesep)\n\n # saw_prompt = False\n ret_stdout = []\n ret_stderr = []\n while self.conn.has_unread_data:\n stdout, stderr = self.conn.recv()\n\n if stdout:\n ret_stdout.append(stdout)\n if stderr:\n log.debug('Error while executing command.')\n ret_stderr.append(stderr)\n\n if stdout and self.prompt_re.search(stdout):\n break\n\n return ''.join(ret_stdout), ''.join(ret_stderr)",
"def send_command_return(self, obj, command, *arguments):\n \"\"\" Send command and wait for single line output. \"\"\"\n index_command = obj._build_index_command(command, *arguments)\n return obj._extract_return(command, self.chassis_list[obj.chassis].sendQuery(index_command))",
"def __send_command(self, command, args=[]):\n '''Send a raw command.'''\n self.ws.send(json.dumps({\"op\": command, \"args\": args}))",
"def send_command_return_multilines(self, obj, command, *arguments):\n \"\"\" Send command and wait for multiple lines output. \"\"\"\n index_command = obj._build_index_command(command, *arguments)\n return self.chassis_list[obj.chassis].sendQuery(index_command, True)",
"def send_command(self, command, args=None):\n \"\"\"\n Send a command to VNDB and then get the result.\n\n :param command: What command are we sending\n :param args: What are the json args for this command\n :return: Servers Response\n :rtype: Dictionary (See D11 docs on VNDB)\n \"\"\"\n if args:\n if isinstance(args, str):\n final_command = command + ' ' + args + '\\x04'\n else:\n # We just let ujson propogate the error here if it can't parse the arguments\n final_command = command + ' ' + ujson.dumps(args) + '\\x04'\n else:\n final_command = command + '\\x04'\n self.sslwrap.sendall(final_command.encode('utf-8'))\n return self._recv_data()",
"def send_command_return(self, command, *arguments):\n \"\"\" Send command and wait for single line output. \"\"\"\n return self.api.send_command_return(self, command, *arguments)",
"def send_command(self, cmd, sudo=False, stderr=None, stdout=None):\n '''send command is a non interactive version of run_command, meaning\n that we execute the command and return the return value, but don't\n attempt to stream any content (text from the screen) back to the\n user. This is useful for commands interacting with OCI bundles.\n\n Parameters\n ==========\n cmd: the list of commands to send to the terminal\n sudo: use sudo (or not)\n '''\n \n if sudo is True:\n cmd = ['sudo'] + cmd\n\n process = subprocess.Popen(cmd, stderr=stderr, stdout=stdout)\n result = process.communicate()\n return result",
"def send_and_get_output(self,\n\t send,\n\t timeout=None,\n\t retry=3,\n\t strip=True,\n\t preserve_newline=False,\n\t note=None,\n\t record_command=True,\n\t echo=None,\n\t fail_on_empty_before=True,\n\t check_sudo=True,\n\t nonewline=False,\n\t ignore_background=False,\n\t filter_backspaces=True,\n\t loglevel=logging.INFO):\n\t\t\"\"\"Returns the output of a command run. send() is called, and exit is not checked.\n\n\t\t@param send: See send()\n\t\t@param retry: Number of times to retry command (default 3)\n\t\t@param strip: Whether to strip output (defaults to True). Strips whitespace\n\t\t and ansi terminal codes\n\t\t@param note: See send()\n\t\t@param echo: See send()\n\n\t\t@type retry: integer\n\t\t@type strip: boolean\n\t\t\"\"\"\n\t\tshutit = self.shutit\n\t\tshutit.handle_note(note, command=str(send))\n\t\tshutit.log('Retrieving output from command: ' + send, level=loglevel)\n\t\t# Don't check exit, as that will pollute the output. Also, it's quite likely the submitted command is intended to fail.\n\t\techo = shutit.get_echo_override(echo)\n\t\tsend = shutit.get_send_command(send)\n\t\tself.send(ShutItSendSpec(self,\n\t\t send=send,\n\t\t check_exit=False,\n\t\t retry=retry,\n\t\t echo=echo,\n\t\t timeout=timeout,\n\t\t record_command=record_command,\n\t\t fail_on_empty_before=fail_on_empty_before,\n\t\t check_sudo=check_sudo,\n\t\t nonewline=nonewline,\n\t\t loglevel=loglevel,\n\t\t ignore_background=ignore_background))\n\t\tbefore = self.pexpect_child.before\n\n\t\tif before:\n\t\t\tpreserve_newline = bool(preserve_newline and before[-1] == '\\n')\n\t\t# CORNER CASE: if the terminal is eg a kubernetes one, it seems to use\n\t\t# backspaces (\\x08)s to manage newlines. So remove any line from before\n\t\t# if it has backspaces in the output\n\t\tif filter_backspaces:\n\t\t\tlines = before.split('\\n')\n\t\t\tlines = filter(lambda x: x.find('\\x08') == -1, lines)\n\t\t\tbefore = '\\n'.join(lines)\n\t\t# Remove the command we ran in from the output.\n\t\t# First, strip whitespace from the start of 'before', and the send:\n\t\tbefore = before.strip()\n\t\tsend = send.strip()\n\t\tshutit.log('send_and_get_output \"before\": ' + before + ', send_and_get_output send was: ' + send, level=logging.DEBUG)\n\t\tif strip:\n\t\t\t# cf: http://stackoverflow.com/questions/14693701/how-can-i-remove-the-ansi-escape-sequences-from-a-string-in-python\n\t\t\tansi_escape = re.compile(r'(\\x9B|\\x1B\\[)[0-?]*[ -\\/]*[@-~]')\n\t\t\tstring_with_termcodes = before.strip()\n\t\t\tstring_without_termcodes = ansi_escape.sub('', string_with_termcodes)\n\t\t\t#string_without_termcodes_stripped = string_without_termcodes.strip()\n\n\t\t\t# Occasionally we see ' \\r' without a following \\n. Remove these. This could be optional.\n\t\t\tstring_without_termcodes_stripped_no_cr = string_without_termcodes.replace(' \\r','')\n\t\t\t# Strip out \\rs to make it output the same as a typical CL. 
This could be optional.\n\t\t\tstring_without_termcodes_stripped_no_cr = string_without_termcodes.replace('\\r','')\n\t\t\tif preserve_newline:\n\t\t\t\tbefore = string_without_termcodes_stripped_no_cr + '\\n'\n\t\t\telse:\n\t\t\t\tbefore = string_without_termcodes_stripped_no_cr\n\t\telse:\n\t\t\tbefore = before\n\t\tif before.startswith(send):\n\t\t\tbefore = before[len(send):]\n\t\t\t# Strip whitespace again\n\t\t\tbefore = before.strip()\n\t\tshutit.log('send_and_get_output \"before\" after startswith check: ' + before, level=logging.DEBUG)\n\t\t# Too chatty, but kept here in case useful for debugging\n\t\tshutit.log('send_and_get_output got: ' + before, level=logging.DEBUG)\n\t\t# Leave this debug in in case there are any strange characters to consider.\n\t\tif shutit_global.shutit_global_object.ispy3:\n\t\t\tshutit.log('send_and_get_output returning in base64:\\n' + str(base64.b64encode(bytes(before,shutit_global.shutit_global_object.default_encoding))), level=logging.DEBUGV)\n\t\telse:\n\t\t\tshutit.log('send_and_get_output returning in base64:\\n' + base64.b64encode(before), level=logging.DEBUGV)\n\t\t## In rare cases a bell has been seen - can't see why we'd want a bell so simply remove them all.\n\t\tbefore = before.replace('\\x07','')\n\t\t# If there happens to be an escape character in there, it's likely a\n\t\t# problem - see IWT-4812.\n\t\tbefore = before.split('\\x1b')[0].strip()\n\t\tif shutit_global.shutit_global_object.ispy3:\n\t\t\tshutit.log('send_and_get_output returning in base64: ' + str(base64.b64encode(bytes(before,shutit_global.shutit_global_object.default_encoding))), level=logging.DEBUGV)\n\t\telse:\n\t\t\tshutit.log('send_and_get_output returning in base64: ' + base64.b64encode(bytes(before)), level=logging.DEBUGV)\n\t\tshutit.handle_note_after(note=note)\n\t\treturn before"
] |
[
0.813307523727417,
0.7961030602455139,
0.7554588317871094,
0.7277511358261108,
0.7130541205406189,
0.7076394557952881,
0.6972492337226868,
0.695501446723938,
0.690433144569397,
0.6891587972640991,
0.6845901608467102,
0.6802817583084106
] |
Get all object's attributes.
Sends multi-parameter info/config queries and returns the result as a dictionary.
:param obj: requested object.
:returns: dictionary of <name, value> of all attributes returned by the query.
:rtype: dict of (str, str)
|
def get_attributes(self, obj):
""" Get all object's attributes.
    Sends multi-parameter info/config queries and returns the result as a dictionary.
:param obj: requested object.
:returns: dictionary of <name, value> of all attributes returned by the query.
:rtype: dict of (str, str)
"""
return self._get_attributes('{}/{}'.format(self.session_url, obj.ref))
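
# A sketch of the helper this delegates to, assuming the server exposes an
# /attributes resource returning a JSON list of name/value pairs. The URL
# suffix and payload shape are assumptions, not confirmed by the source.
import requests


def _get_attributes(object_url):
    res = requests.get('{}/attributes'.format(object_url))
    res.raise_for_status()
    # Assumed payload: [{'name': 'p_speed', 'value': '1000'}, ...]
    return {attr['name']: attr['value'] for attr in res.json()}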
|
[
"def get_attributes(self, obj):\n \"\"\" Get all object's attributes.\n\n Sends multi-parameter info/config queries and returns the result as dictionary.\n\n :param obj: requested object.\n :returns: dictionary of <name, value> of all attributes returned by the query.\n :rtype: dict of (str, str)\n \"\"\"\n\n attributes = {}\n for info_config_command in obj.info_config_commands:\n index_commands_values = self.send_command_return_multilines(obj, info_config_command, '?')\n # poor implementation...\n li = obj._get_index_len()\n ci = obj._get_command_len()\n for index_command_value in index_commands_values:\n command = index_command_value.split()[ci].lower()\n if len(index_command_value.split()) > li + 1:\n value = ' '.join(index_command_value.split()[li+1:]).replace('\"', '')\n else:\n value = ''\n attributes[command] = value\n return attributes",
"def get_obj_attrs(obj):\n \"\"\" Return a dictionary built from the attributes of the given object.\n \"\"\"\n pr = {}\n if obj is not None:\n if isinstance(obj, numpy.core.records.record):\n for name in obj.dtype.names:\n pr[name] = getattr(obj, name)\n elif hasattr(obj, '__dict__') and obj.__dict__:\n pr = obj.__dict__\n elif hasattr(obj, '__slots__'):\n for slot in obj.__slots__:\n if hasattr(obj, slot):\n pr[slot] = getattr(obj, slot)\n elif isinstance(obj, dict):\n pr = obj.copy()\n else:\n for name in dir(obj):\n try:\n value = getattr(obj, name)\n if not name.startswith('__') and not inspect.ismethod(value):\n pr[name] = value\n except:\n continue\n\n return pr",
"def get_attributes(obj):\n \"\"\"\n the json objects look like this:\n {\n \"objType\": {\n \"attributes\": {\n ...\n }\n }\n It always has the attributes nested below the object type\n This helper provides a way of getting at the attributes\n \"\"\"\n if not obj or type(obj) is not dict:\n return {}\n\n keys = list(obj.keys())\n if len(keys) > 0:\n key = keys[0]\n else:\n return {}\n key_obj = obj.get(key, {})\n if type(key_obj) is not dict:\n # if the object is not a dict\n # it is probably already scoped to attributes\n return obj\n if key != \"attributes\":\n attrs = key_obj.get('attributes')\n if type(attrs) is not dict:\n # if the attributes doesn't exist,\n # it is probably already scoped to attributes\n return obj\n else:\n # if the attributes exist, we return the value, except if it's not a dict type\n attrs = key_obj\n if type(attrs) is not dict:\n return obj\n\n return attrs",
"def get_pk_attrnames(obj) -> List[str]:\n \"\"\"\n Asks an SQLAlchemy ORM object: \"what are your primary key(s)?\"\n\n Args:\n obj: SQLAlchemy ORM object\n\n Returns:\n list of attribute names of primary-key columns\n\n \"\"\"\n return [attrname\n for attrname, column in gen_columns(obj)\n if column.primary_key]",
"def _getattrs(self, obj, *attrs):\r\n \"\"\" Return dictionary of given attrs on given object, if they exist,\r\n processing through _filter_value().\r\n \"\"\"\r\n filtered_attrs = {}\r\n for attr in attrs:\r\n if hasattr(obj, attr):\r\n filtered_attrs[attr] = obj_to_string(\r\n self._filter_value(getattr(obj, attr))\r\n )\r\n return filtered_attrs",
"def get_object_attrs(obj):\n \"\"\"\n Get the attributes of an object using dir.\n\n This filters protected attributes\n \"\"\"\n attrs = [k for k in dir(obj) if not k.startswith('__')]\n if not attrs:\n attrs = dir(obj)\n return attrs",
"def obj_python_attrs(msg_):\n \"\"\"iterate object attributes for stringify purposes\n \"\"\"\n\n # a special case for namedtuple which seems widely used in\n # ofp parser implementations.\n if hasattr(msg_, '_fields'):\n for k in msg_._fields:\n yield(k, getattr(msg_, k))\n return\n base = getattr(msg_, '_base_attributes', [])\n opt = getattr(msg_, '_opt_attributes', [])\n for k, v in inspect.getmembers(msg_):\n if k in opt:\n pass\n elif k.startswith('_'):\n continue\n elif callable(v):\n continue\n elif k in base:\n continue\n elif hasattr(msg_.__class__, k):\n continue\n yield (k, v)",
"def get_dictionary_representation_of_object_attributes(obj, omit_null_fields=False):\n \"\"\"Returns a dictionary of object's attributes, ignoring methods\n\n @param obj: The object to represent as dict\n @param omit_null_fields: If true, will not include fields in the dictionary that are null\n @return: Dictionary of the object's attributes\n \"\"\"\n obj_dictionary = obj.__dict__\n\n obj_dictionary_temp = obj_dictionary.copy()\n for k, v in obj_dictionary.iteritems():\n if omit_null_fields:\n if v is None:\n obj_dictionary_temp.pop(k, None)\n if k.startswith('_'):\n obj_dictionary_temp.pop(k, None)\n\n return obj_dictionary_temp",
"def get_attrs(obj):\n \"\"\"Helper for dir2 implementation.\"\"\"\n if not hasattr(obj, '__dict__'):\n return [] # slots only\n proxy_type = types.MappingProxyType if six.PY3 else types.DictProxyType\n if not isinstance(obj.__dict__, (dict, proxy_type)):\n print(type(obj.__dict__), obj)\n raise TypeError(\"%s.__dict__ is not a dictionary\" % obj.__name__)\n return obj.__dict__.keys()",
"def format_object_attrs(obj):\n \"\"\"\n Return a list of tuples of the (attr, formatted_value)\n for common attrs, including dtype, name, length\n\n Parameters\n ----------\n obj : object\n must be iterable\n\n Returns\n -------\n list\n\n \"\"\"\n attrs = []\n if hasattr(obj, 'dtype'):\n attrs.append(('dtype', \"'{}'\".format(obj.dtype)))\n if getattr(obj, 'name', None) is not None:\n attrs.append(('name', default_pprint(obj.name)))\n max_seq_items = get_option('display.max_seq_items') or len(obj)\n if len(obj) > max_seq_items:\n attrs.append(('length', len(obj)))\n return attrs",
"def get_object_data(obj):\n \"\"\"Get object schema data\n\n NOTE: We RAM cache this data because it should only change when the object\n was modified!\n\n XXX: We need to set at least the modification date when we set fields in\n Ajax Listing when we take a snapshot there!\n\n :param obj: Content object\n :returns: Dictionary of extracted schema data\n \"\"\"\n\n model = SuperModel(obj)\n try:\n data = model.to_dict()\n except Exception as exc:\n logger.error(\"Failed to get schema data for {}: {}\"\n .format(repr(obj), str(exc)))\n data = {}\n\n return data",
"def getEncodableAttributes(self, obj, codec=None):\n \"\"\"\n Must return a C{dict} of attributes to be encoded, even if its empty.\n\n @param codec: An optional argument that will contain the encoder\n instance calling this function.\n @since: 0.5\n \"\"\"\n if not self._compiled:\n self.compile()\n\n if self.is_dict:\n return dict(obj)\n\n if self.shortcut_encode and self.dynamic:\n return obj.__dict__.copy()\n\n attrs = {}\n\n if self.static_attrs:\n for attr in self.static_attrs:\n attrs[attr] = getattr(obj, attr, pyamf.Undefined)\n\n if not self.dynamic:\n if self.non_static_encodable_properties:\n for attr in self.non_static_encodable_properties:\n attrs[attr] = getattr(obj, attr)\n\n return attrs\n\n dynamic_props = util.get_properties(obj)\n\n if not self.shortcut_encode:\n dynamic_props = set(dynamic_props)\n\n if self.encodable_properties:\n dynamic_props.update(self.encodable_properties)\n\n if self.static_attrs:\n dynamic_props.difference_update(self.static_attrs)\n\n if self.exclude_attrs:\n dynamic_props.difference_update(self.exclude_attrs)\n\n for attr in dynamic_props:\n attrs[attr] = getattr(obj, attr)\n\n if self.proxy_attrs is not None and attrs and codec:\n context = codec.context\n\n for k, v in attrs.copy().iteritems():\n if k in self.proxy_attrs:\n attrs[k] = context.getProxyForObject(v)\n\n if self.synonym_attrs:\n missing = object()\n\n for k, v in self.synonym_attrs.iteritems():\n value = attrs.pop(k, missing)\n\n if value is missing:\n continue\n\n attrs[v] = value\n\n return attrs"
] |
[
0.8867597579956055,
0.743111789226532,
0.739020824432373,
0.7151464223861694,
0.7121495008468628,
0.7109082341194153,
0.7049886584281921,
0.701708972454071,
0.7006149888038635,
0.6914050579071045,
0.6905038952827454,
0.6904515624046326
] |
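For illustration, a self-contained sketch of the '<index> <command> <value...>' line parsing used by the top-ranked candidate above; the reply line and the li/ci values (standing in for _get_index_len/_get_command_len) are hypothetical:

line = '1 name "port 1 of chassis"'   # hypothetical reply line
li, ci = 1, 1                         # assumed index length and command position
command = line.split()[ci].lower()
value = ' '.join(line.split()[li + 1:]).replace('"', '') if len(line.split()) > li + 1 else ''
assert (command, value) == ('name', 'port 1 of chassis')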
Set attributes.
:param obj: requested object.
:param attributes: dictionary of {attribute: value} to set
|
def set_attributes(self, obj, **attributes):
""" Set attributes.
:param obj: requested object.
:param attributes: dictionary of {attribute: value} to set
"""
attributes_url = '{}/{}/attributes'.format(self.session_url, obj.ref)
attributes_list = [{u'name': str(name), u'value': str(value)} for name, value in attributes.items()]
self._request(RestMethod.patch, attributes_url, headers={'Content-Type': 'application/json'},
data=json.dumps(attributes_list))
|
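A minimal, self-contained sketch of the payload shape set_attributes builds before PATCHing; the helper name and the attribute values are illustrative, not part of the source above:

import json

def build_attributes_payload(**attributes):
    # Same list-of-dicts shape as attributes_list in set_attributes above.
    return [{u'name': str(name), u'value': str(value)}
            for name, value in attributes.items()]

print(json.dumps(build_attributes_payload(mode='capture', speed='1G')))
# e.g. [{"name": "mode", "value": "capture"}, {"name": "speed", "value": "1G"}]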
[
"def set_attributes(self, obj, **attributes):\n \"\"\" Set attributes.\n\n :param obj: requested object.\n :param attributes: dictionary of {attribute: value} to set\n \"\"\"\n for attribute, value in attributes.items():\n self.send_command(obj, attribute, value)",
"def set_attribute(self, obj, attr, value):\n \"\"\"Set value of attribute in given object instance.\n\n Reason for existence of this method is the fact that 'attribute' can\n be also a object's key if it is a dict or any other kind of mapping.\n\n Args:\n obj (object): object instance to modify\n attr (str): attribute (or key) to change\n value: value to set\n\n \"\"\"\n # if this is any mutable mapping then instead of attributes use keys\n if isinstance(obj, MutableMapping):\n obj[attr] = value\n else:\n setattr(obj, attr, value)",
"def update_dict(obj, dict, attributes):\n \"\"\"Update dict with fields from obj.attributes.\n\n :param obj: the object updated into dict\n :param dict: the result dictionary\n :param attributes: a list of attributes belonging to obj\n \"\"\"\n for attribute in attributes:\n if hasattr(obj, attribute) and getattr(obj, attribute) is not None:\n dict[attribute] = getattr(obj, attribute)",
"def populate_obj(obj, attrs):\n \"\"\"Populates an object's attributes using the provided dict\n \"\"\"\n for k, v in attrs.iteritems():\n setattr(obj, k, v)",
"def set_value(obj, attrs, value, _copy=False):\n \"\"\"\n Allows set the same value to a list of attributes \n \"\"\"\n for attr in attrs:\n if _copy:\n value = copy(value)\n setattr(obj, attr, value)",
"def alter_object(self, obj):\n \"\"\"\n Alters all the attributes in an individual object.\n\n If it returns False, the object will not be saved\n \"\"\"\n for attname, field, replacer in self.replacers:\n currentval = getattr(obj, attname)\n replacement = replacer(self, obj, field, currentval)\n setattr(obj, attname, replacement)",
"def _init_obj_attrs(self, obj, user=False):\n \"\"\"\n Initialize obj attributes.\n Args:\n obj(object): A python object to set attributes to.\n user(bool): If this object is a user object mangle attribute names.\n \"\"\"\n for attr in obj.__class__._tx_attrs.values():\n\n if user:\n # Mangle name to prvent name clashing\n attr_name = \"_txa_%s\" % attr.name\n else:\n attr_name = attr.name\n\n if attr.mult in [MULT_ZEROORMORE, MULT_ONEORMORE]:\n # list\n setattr(obj, attr_name, [])\n elif attr.cls.__name__ in BASE_TYPE_NAMES:\n # Instantiate base python type\n if self.auto_init_attributes:\n setattr(obj, attr_name,\n python_type(attr.cls.__name__)())\n else:\n # See https://github.com/textX/textX/issues/11\n if attr.bool_assignment:\n # Only ?= assignments shall have default\n # value of False.\n setattr(obj, attr_name, False)\n else:\n # Set base type attribute to None initially\n # in order to be able to detect if an optional\n # values are given in the model. Default values\n # can be specified using object processors.\n setattr(obj, attr_name, None)\n else:\n # Reference to other obj\n setattr(obj, attr_name, None)",
"def set(self, obj, value):\n \"\"\"Set value for obj's attribute.\n\n :param obj: Result object or dict to assign the attribute to.\n :param value: Value to be assigned.\n \"\"\"\n assert self.setter is not None, \"Setter accessor is not specified.\"\n if callable(self.setter):\n return self.setter(obj, value)\n\n assert isinstance(self.setter, string_types), \"Accessor must be a function or a dot-separated string.\"\n\n def _set(obj, attr, value):\n if isinstance(obj, dict):\n obj[attr] = value\n else:\n setattr(obj, attr, value)\n return value\n\n path = self.setter.split(\".\")\n for attr in path[:-1]:\n obj = _set(obj, attr, {})\n\n _set(obj, path[-1], value)",
"def set_attributes(obj, additional_data):\n \"\"\"\n Given an object and a dictionary, give the object new attributes from that dictionary.\n\n Uses _strip_column_name to git rid of whitespace/uppercase/special characters.\n \"\"\"\n for key, value in additional_data.items():\n if hasattr(obj, key):\n raise ValueError(\"Key %s in additional_data already exists in this object\" % key)\n setattr(obj, _strip_column_name(key), value)",
"def set_attrs(obj, attrs):\n \"\"\"\n Applies a collection of attributes C{attrs} to object C{obj} in the most\n generic way possible.\n\n @param obj: An instance implementing C{__setattr__}, or C{__setitem__}\n @param attrs: A collection implementing the C{iteritems} function\n @type attrs: Usually a dict\n \"\"\"\n o = setattr\n\n if hasattr(obj, '__setitem__'):\n o = type(obj).__setitem__\n\n [o(obj, k, v) for k, v in attrs.iteritems()]",
"def set_attributes(self, attr_obj=None, ns_uri=None, **attr_dict):\n \"\"\"\n Add or update this element's attributes, where attributes can be\n specified in a number of ways.\n\n :param attr_obj: a dictionary or list of attribute name/value pairs.\n :type attr_obj: dict, list, tuple, or None\n :param ns_uri: a URI defining a namespace for the new attributes.\n :type ns_uri: string or None\n :param dict attr_dict: attribute name and values specified as keyword\n arguments.\n \"\"\"\n self._set_element_attributes(self.impl_node,\n attr_obj=attr_obj, ns_uri=ns_uri, **attr_dict)",
"def set(self, **kwargs):\n \"\"\"\n Set properties\n \"\"\"\n for name, value in kwargs.items():\n if hasattr(self, name):\n setattr(self, name, value)\n else:\n raise AttributeError(\n \"{!r} object has no attribute {}\".format(\n self.__class__.__name__,\n name))"
] |
[
0.8498634696006775,
0.7663450241088867,
0.7494417428970337,
0.7439797520637512,
0.7383324503898621,
0.7321034073829651,
0.7318034768104553,
0.7291602492332458,
0.7285177111625671,
0.7255296111106873,
0.7216994762420654,
0.7197322845458984
] |
Send CLI command that returns list of integer counters.
:param obj: requested object.
:param stat_name: statistics command name.
:return: list of counters.
:rtype: list(int)
|
def get_stats(self, obj, stat_name):
""" Send CLI command that returns list of integer counters.
:param obj: requested object.
:param stat_name: statistics command name.
:return: list of counters.
:rtype: list(int)
"""
return [int(v) for v in self.send_command_return(obj, stat_name, '?').split()]
|
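A minimal sketch of the parsing step get_stats relies on, assuming the CLI answers a '<stat_name> ?' query with a space-separated line of counters (the reply string is made up):

reply = '0 17 4096 0'  # hypothetical reply to a '<stat_name> ?' query
counters = [int(v) for v in reply.split()]
assert counters == [0, 17, 4096, 0]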
[
"def get_stats(self, obj, stat_name):\n \"\"\" Send CLI command that returns list of integer counters.\n\n :param obj: requested object.\n :param stat_name: statistics command name.\n :return: list of counters.\n :rtype: list(int)\n \"\"\"\n return [int(v) for v in self.get_attribute(obj, stat_name).split()]",
"def get_all_counters(obj, instance_list=None):\n '''\n Get the values for all counters available to a Counter object\n\n Args:\n\n obj (str):\n The name of the counter object. You can get a list of valid names\n using the ``list_objects`` function\n\n instance_list (list):\n A list of instances to return. Use this to narrow down the counters\n that are returned.\n\n .. note::\n ``_Total`` is returned as ``*``\n '''\n counters, instances_avail = win32pdh.EnumObjectItems(None, None, obj, -1, 0)\n\n if instance_list is None:\n instance_list = instances_avail\n\n if not isinstance(instance_list, list):\n instance_list = [instance_list]\n\n counter_list = []\n for counter in counters:\n for instance in instance_list:\n instance = '*' if instance.lower() == '_total' else instance\n counter_list.append((obj, instance, counter))\n else: # pylint: disable=useless-else-on-loop\n counter_list.append((obj, None, counter))\n\n return get_counters(counter_list) if counter_list else {}",
"def build_counter_list(counter_list):\n r'''\n Create a list of Counter objects to be used in the pdh query\n\n Args:\n counter_list (list):\n A list of tuples containing counter information. Each tuple should\n contain the object, instance, and counter name. For example, to\n get the ``% Processor Time`` counter for all Processors on the\n system (``\\Processor(*)\\% Processor Time``) you would pass a tuple\n like this:\n\n ```\n counter_list = [('Processor', '*', '% Processor Time')]\n ```\n\n If there is no ``instance`` for the counter, pass ``None``\n\n Multiple counters can be passed like so:\n\n ```\n counter_list = [('Processor', '*', '% Processor Time'),\n ('System', None, 'Context Switches/sec')]\n ```\n\n .. note::\n Invalid counters are ignored\n\n Returns:\n list: A list of Counter objects\n '''\n counters = []\n index = 0\n for obj, instance, counter_name in counter_list:\n try:\n counter = Counter.build_counter(obj, instance, index, counter_name)\n index += 1\n counters.append(counter)\n except CommandExecutionError as exc:\n # Not a valid counter\n log.debug(exc.strerror)\n continue\n return counters",
"def get_stat(self, obj_name, stat_name):\n \"\"\"\n :param obj_name: requested object name.\n :param stat_name: requested statistics name.\n :return: str, the value of the requested statics for the requested object.\n \"\"\"\n\n return self.statistics[obj_name][self.captions.index(stat_name)]",
"def build_counter(obj, instance, instance_index, counter):\n r'''\n Makes a fully resolved counter path. Counter names are formatted like\n this:\n\n ``\\Processor(*)\\% Processor Time``\n\n The above breaks down like this:\n\n obj = 'Processor'\n instance = '*'\n counter = '% Processor Time'\n\n Args:\n\n obj (str):\n The top level object\n\n instance (str):\n The instance of the object\n\n instance_index (int):\n The index of the instance. Can usually be 0\n\n counter (str):\n The name of the counter\n\n Returns:\n Counter: A Counter object with the path if valid\n\n Raises:\n CommandExecutionError: If the path is invalid\n '''\n path = win32pdh.MakeCounterPath(\n (None, obj, instance, None, instance_index, counter), 0)\n if win32pdh.ValidatePath(path) is 0:\n return Counter(path, obj, instance, instance_index, counter)\n raise CommandExecutionError('Invalid counter specified: {0}'.format(path))",
"def get_counter(self, name=None):\n '''Shortcut for getting a :class:`~statsd.counter.Counter` instance\n\n :keyword name: See :func:`~statsd.client.Client.get_client`\n :type name: str\n '''\n return self.get_client(name=name, class_=statsd.Counter)",
"def get_stats(self, stat_name):\n \"\"\"\n :param stat_name: requested statistics name.\n :returns: all values of the requested statistic for all objects.\n \"\"\"\n\n return [self.get_stat(r, stat_name) for r in self.statistics.keys()]",
"def get_counters(counter_list):\n '''\n Get the values for the passes list of counters\n\n Args:\n counter_list (list):\n A list of counters to lookup\n\n Returns:\n dict: A dictionary of counters and their values\n '''\n if not isinstance(counter_list, list):\n raise CommandExecutionError('counter_list must be a list of tuples')\n\n try:\n # Start a Query instances\n query = win32pdh.OpenQuery()\n\n # Build the counters\n counters = build_counter_list(counter_list)\n\n # Add counters to the Query\n for counter in counters:\n counter.add_to_query(query)\n\n # https://docs.microsoft.com/en-us/windows/desktop/perfctrs/collecting-performance-data\n win32pdh.CollectQueryData(query)\n # The sleep here is required for counters that require more than 1\n # reading\n time.sleep(1)\n win32pdh.CollectQueryData(query)\n ret = {}\n\n for counter in counters:\n try:\n ret.update({counter.path: counter.value()})\n except pywintypes.error as exc:\n if exc.strerror == 'No data to return.':\n # Some counters are not active and will throw an error if\n # there is no data to return\n continue\n else:\n raise\n\n finally:\n win32pdh.CloseQuery(query)\n\n return ret",
"def get_object_stats(self, obj_name):\n \"\"\"\n :param obj_name: requested object name\n :returns: all statistics values for the requested object.\n \"\"\"\n\n return dict(zip(self.captions, self.statistics[obj_name]))",
"def info_count(i: int, n: int, *rest: Token, **kwargs: Any) -> None:\n \"\"\" Display a counter before the rest of the message.\n\n ``rest`` and ``kwargs`` are passed to :func:`info`\n\n Current index should start at 0 and end at ``n-1``, like in ``enumerate()``\n\n :param i: current index\n :param n: total number of items\n \"\"\"\n num_digits = len(str(n))\n counter_format = \"(%{}d/%d)\".format(num_digits)\n counter_str = counter_format % (i + 1, n)\n info(green, \"*\", reset, counter_str, reset, *rest, **kwargs)",
"def network_io_counters(interface=None):\n '''\n Return network I/O statistics.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' ps.network_io_counters\n\n salt '*' ps.network_io_counters interface=eth0\n '''\n if not interface:\n return dict(psutil.net_io_counters()._asdict())\n else:\n stats = psutil.net_io_counters(pernic=True)\n if interface in stats:\n return dict(stats[interface]._asdict())\n else:\n return False",
"def _count_objs(self, obj, path=None, **kwargs):\n \"\"\"\n cycles through the object and adds in count values\n\n Args:\n -----\n obj: the object to parse\n path: the current path\n\n kwargs:\n -------\n current: a dictionary of counts for current call\n sub_val: the value to use for subtotal aggregation\n\n \"\"\"\n sub_val = None\n # pdb.set_trace()\n if isinstance(obj, dict):\n for key, value in obj.items():\n if isinstance(value, (list, dict)):\n kwargs = self._count_objs(value,\n self.make_path(key, path),\n **kwargs)\n else:\n if self.make_path(key, path) == self.sub_total:\n # pdb.set_trace()\n sub_val = value\n kwargs['current'] = self._increment_prop(key,\n path,\n **kwargs)\n elif isinstance(obj, list):\n for item in obj:\n if isinstance(item, (list, dict)):\n kwargs = self._count_objs(item, path, **kwargs)\n else:\n if path == self.sub_total:\n pdb.set_trace()\n sub_val = item\n kwargs['current'] = self._increment_prop(path, **kwargs)\n else:\n kwargs['current'] = self._increment_prop(path, **kwargs)\n if path == self.sub_total:\n pdb.set_trace()\n sub_val = item\n if kwargs.get('sub_val') is None:\n kwargs['sub_val'] = sub_val\n return kwargs"
] |
[
0.8586172461509705,
0.7317438721656799,
0.666500985622406,
0.6614857316017151,
0.6577393412590027,
0.6479098796844482,
0.643774688243866,
0.6431843042373657,
0.6400884985923767,
0.6392818093299866,
0.6388030648231506,
0.6357133388519287
] |
Initialize common services.
When 'zone_name' is defined, " at $zone_name" is appended to service names.
:param bool with_cloud_account:
:param str zone_name:
:return: tuple(Workflow, Vault) or tuple(Workflow, Vault, CloudAccount) with services
|
def init_common_services(self, with_cloud_account=True, zone_name=None):
"""
    Initialize common services.
    When 'zone_name' is defined, " at $zone_name" is appended to service names.
    :param bool with_cloud_account:
    :param str zone_name:
    :return: tuple(Workflow, Vault) or tuple(Workflow, Vault, CloudAccount) with services
"""
zone_names = ZoneConstants(zone_name)
type_to_app = lambda t: self.organization.applications[system_application_types.get(t, t)]
wf_service = self.organization.service(name=zone_names.DEFAULT_WORKFLOW_SERVICE,
application=type_to_app(WORKFLOW_SERVICE_TYPE),
environment=self)
key_service = self.organization.service(name=zone_names.DEFAULT_CREDENTIAL_SERVICE,
application=type_to_app(COBALT_SECURE_STORE_TYPE),
environment=self)
assert wf_service.running()
assert key_service.running()
if not with_cloud_account:
with self as env:
env.add_service(wf_service, force=True)
env.add_service(key_service, force=True)
return wf_service, key_service
cloud_account_service = self.organization.instance(name=zone_names.DEFAULT_CLOUD_ACCOUNT_SERVICE,
application=type_to_app(CLOUD_ACCOUNT_TYPE),
environment=self,
parameters=PROVIDER_CONFIG,
destroyInterval=0)
    # Immediately adding to the env causes CA not to drop the destroy interval (known issue 6132),
    # so add the service as an instance with destroyInterval set to 'never'.
assert cloud_account_service.running()
with self as env:
env.add_service(wf_service, force=True)
env.add_service(key_service, force=True)
env.add_service(cloud_account_service, force=True)
return wf_service, key_service, cloud_account_service
|
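A small sketch of the zone-aware naming convention the docstring describes (" at $zone_name" appended when a zone is given); the service_name helper and the example names are illustrative, not part of ZoneConstants:

def service_name(base, zone_name=None):
    # Mirrors the documented convention: append ' at <zone_name>' when set.
    return base if not zone_name else '{0} at {1}'.format(base, zone_name)

assert service_name('Default workflow service') == 'Default workflow service'
assert (service_name('Default workflow service', 'us-east')
        == 'Default workflow service at us-east')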
[
"def present(name, service_name, auth=None, **kwargs):\n '''\n Ensure an endpoint exists and is up-to-date\n\n name\n Interface name\n\n url\n URL of the endpoint\n\n service_name\n Service name or ID\n\n region\n The region name to assign the endpoint\n\n enabled\n Boolean to control if endpoint is enabled\n '''\n ret = {'name': name,\n 'changes': {},\n 'result': True,\n 'comment': ''}\n\n kwargs = __utils__['args.clean_kwargs'](**kwargs)\n\n __salt__['keystoneng.setup_clouds'](auth)\n\n success, val = _, endpoint = _common(ret, name, service_name, kwargs)\n if not success:\n return val\n\n if not endpoint:\n if __opts__['test'] is True:\n ret['result'] = None\n ret['changes'] = kwargs\n ret['comment'] = 'Endpoint will be created.'\n return ret\n\n # NOTE(SamYaple): Endpoints are returned as a list which can contain\n # several items depending on the options passed\n endpoints = __salt__['keystoneng.endpoint_create'](**kwargs)\n if len(endpoints) == 1:\n ret['changes'] = endpoints[0]\n else:\n for i, endpoint in enumerate(endpoints):\n ret['changes'][i] = endpoint\n ret['comment'] = 'Created endpoint'\n return ret\n\n changes = __salt__['keystoneng.compare_changes'](endpoint, **kwargs)\n if changes:\n if __opts__['test'] is True:\n ret['result'] = None\n ret['changes'] = changes\n ret['comment'] = 'Endpoint will be updated.'\n return ret\n\n kwargs['endpoint_id'] = endpoint.id\n __salt__['keystoneng.endpoint_update'](**kwargs)\n ret['changes'].update(changes)\n ret['comment'] = 'Updated endpoint'\n\n return ret",
"def init(self, access_key=None, secret_key=None):\n \"\"\"\n Mimics wizard's environment preparation\n \"\"\"\n if not access_key and not secret_key:\n self._router.post_init(org_id=self.organizationId, data='{\"initCloudAccount\": true}')\n else:\n self._router.post_init(org_id=self.organizationId, data='{}')\n ca_data = dict(accessKey=access_key, secretKey=secret_key)\n self._router.post_init_custom_cloud_account(org_id=self.organizationId, data=json.dumps(ca_data))",
"def create_primary_zone(self, account_name, zone_name):\n \"\"\"Creates a new primary zone.\n\n Arguments:\n account_name -- The name of the account that will contain this zone.\n zone_name -- The name of the zone. It must be unique.\n\n \"\"\"\n zone_properties = {\"name\": zone_name, \"accountName\": account_name, \"type\": \"PRIMARY\"}\n primary_zone_info = {\"forceImport\": True, \"createType\": \"NEW\"}\n zone_data = {\"properties\": zone_properties, \"primaryCreateInfo\": primary_zone_info}\n return self.rest_api_connection.post(\"/v1/zones\", json.dumps(zone_data))",
"def full_zone_set(self, user_key, zone_name):\n \"\"\"\n Create new zone and all subdomains for user associated with this\n user_key.\n\n :param user_key: The unique 3auth string,identifying the user's\n CloudFlare Account. Generated from a user_create or user_auth\n :type user_key: str\n :param zone_name: The zone you'd like to run CNAMES through CloudFlare for, e.g. \"example.com\".\n :type zone_name: str\n\n :returns:\n :rtype: dict\n \"\"\"\n params = {\n 'act': 'full_zone_set',\n 'user_key': user_key,\n 'zone_name': zone_name,\n }\n return self._request(params)",
"def create_primary_zone_by_upload(self, account_name, zone_name, bind_file):\n \"\"\"Creates a new primary zone by uploading a bind file\n\n Arguments:\n account_name -- The name of the account that will contain this zone.\n zone_name -- The name of the zone. It must be unique.\n bind_file -- The file to upload.\n\n \"\"\"\n zone_properties = {\"name\": zone_name, \"accountName\": account_name, \"type\": \"PRIMARY\"}\n primary_zone_info = {\"forceImport\": True, \"createType\": \"UPLOAD\"}\n zone_data = {\"properties\": zone_properties, \"primaryCreateInfo\": primary_zone_info}\n files = {'zone': ('', json.dumps(zone_data), 'application/json'),\n 'file': ('file', open(bind_file, 'rb'), 'application/octet-stream')}\n return self.rest_api_connection.post_multi_part(\"/v1/zones\", files)",
"def bluemix(cls, vcap_services, instance_name=None, service_name=None, **kwargs):\n \"\"\"\n Create a Cloudant session using a VCAP_SERVICES environment variable.\n\n :param vcap_services: VCAP_SERVICES environment variable\n :type vcap_services: dict or str\n :param str instance_name: Optional Bluemix instance name. Only required\n if multiple Cloudant instances are available.\n :param str service_name: Optional Bluemix service name.\n\n Example usage:\n\n .. code-block:: python\n\n import os\n from cloudant.client import Cloudant\n\n client = Cloudant.bluemix(os.getenv('VCAP_SERVICES'),\n 'Cloudant NoSQL DB')\n\n print client.all_dbs()\n \"\"\"\n service_name = service_name or 'cloudantNoSQLDB' # default service\n try:\n service = CloudFoundryService(vcap_services,\n instance_name=instance_name,\n service_name=service_name)\n except CloudantException:\n raise CloudantClientException(103)\n\n if hasattr(service, 'iam_api_key'):\n return Cloudant.iam(service.username,\n service.iam_api_key,\n url=service.url,\n **kwargs)\n return Cloudant(service.username,\n service.password,\n url=service.url,\n **kwargs)",
"def zone_set(self, user_key, zone_name, resolve_to, subdomains):\n \"\"\"\n Create new zone for user associated with this user_key.\n\n :param user_key: The unique 3auth string,identifying the user's\n CloudFlare Account. Generated from a user_create or user_auth\n :type user_key: str\n :param zone_name: The zone you'd like to run CNAMES through CloudFlare for, e.g. \"example.com\".\n :type zone_name: str\n :param resolve_to: The CNAME that CloudFlare should ultimately\n resolve web connections to after they have been filtered\n :type resolve_to: str\n :param subdomains: A comma-separated string of subdomain(s) that\n CloudFlare should host, e.g. \"www,blog,forums\"\n :type subdomains: str\n\n :returns:\n :rtype: dict\n \"\"\"\n params = {\n 'act': 'zone_set',\n 'user_key': user_key,\n 'zone_name': zone_name,\n 'resolve_to': resolve_to,\n 'subdomains': subdomains,\n }\n return self._request(params)",
"def associate_vpc_with_hosted_zone(HostedZoneId=None, Name=None, VPCId=None,\n VPCName=None, VPCRegion=None, Comment=None,\n region=None, key=None, keyid=None, profile=None):\n '''\n Associates an Amazon VPC with a private hosted zone.\n\n To perform the association, the VPC and the private hosted zone must already exist. You can't\n convert a public hosted zone into a private hosted zone. If you want to associate a VPC from\n one AWS account with a zone from a another, the AWS account owning the hosted zone must first\n submit a CreateVPCAssociationAuthorization (using create_vpc_association_authorization() or by\n other means, such as the AWS console). With that done, the account owning the VPC can then call\n associate_vpc_with_hosted_zone() to create the association.\n\n Note that if both sides happen to be within the same account, associate_vpc_with_hosted_zone()\n is enough on its own, and there is no need for the CreateVPCAssociationAuthorization step.\n\n Also note that looking up hosted zones by name (e.g. using the Name parameter) only works\n within a single account - if you're associating a VPC to a zone in a different account, as\n outlined above, you unfortunately MUST use the HostedZoneId parameter exclusively.\n\n HostedZoneId\n The unique Zone Identifier for the Hosted Zone.\n\n Name\n The domain name associated with the Hosted Zone(s).\n\n VPCId\n When working with a private hosted zone, either the VPC ID or VPC Name to associate with is\n required. Exclusive with VPCName.\n\n VPCName\n When working with a private hosted zone, either the VPC ID or VPC Name to associate with is\n required. Exclusive with VPCId.\n\n VPCRegion\n When working with a private hosted zone, the region of the associated VPC is required. If\n not provided, an effort will be made to determine it from VPCId or VPCName, if possible. If\n this fails, you'll need to provide an explicit value for VPCRegion.\n\n Comment\n Any comments you want to include about the change being made.\n\n CLI Example::\n\n salt myminion boto3_route53.associate_vpc_with_hosted_zone \\\n Name=example.org. VPCName=myVPC \\\n VPCRegion=us-east-1 Comment=\"Whoo-hoo! 
I added another VPC.\"\n\n '''\n if not _exactly_one((HostedZoneId, Name)):\n raise SaltInvocationError('Exactly one of either HostedZoneId or Name is required.')\n if not _exactly_one((VPCId, VPCName)):\n raise SaltInvocationError('Exactly one of either VPCId or VPCName is required.')\n if Name:\n # {'PrivateZone': True} because you can only associate VPCs with private hosted zones.\n args = {'Name': Name, 'PrivateZone': True, 'region': region,\n 'key': key, 'keyid': keyid, 'profile': profile}\n zone = find_hosted_zone(**args)\n if not zone:\n log.error(\n \"Couldn't resolve domain name %s to a private hosted zone ID.\",\n Name\n )\n return False\n HostedZoneId = zone[0]['HostedZone']['Id']\n vpcs = __salt__['boto_vpc.describe_vpcs'](vpc_id=VPCId, name=VPCName, region=region, key=key,\n keyid=keyid, profile=profile).get('vpcs', [])\n if VPCRegion and vpcs:\n vpcs = [v for v in vpcs if v['region'] == VPCRegion]\n if not vpcs:\n log.error('No VPC matching the given criteria found.')\n return False\n if len(vpcs) > 1:\n log.error('Multiple VPCs matching the given criteria found: %s.',\n ', '.join([v['id'] for v in vpcs]))\n return False\n vpc = vpcs[0]\n if VPCName:\n VPCId = vpc['id']\n if not VPCRegion:\n VPCRegion = vpc['region']\n args = {'HostedZoneId': HostedZoneId, 'VPC': {'VPCId': VPCId, 'VPCRegion': VPCRegion}}\n args.update({'Comment': Comment}) if Comment is not None else None\n\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n tries = 10\n while tries:\n try:\n r = conn.associate_vpc_with_hosted_zone(**args)\n return _wait_for_sync(r['ChangeInfo']['Id'], conn)\n except ClientError as e:\n if e.response.get('Error', {}).get('Code') == 'ConflictingDomainExists':\n log.debug('VPC Association already exists.')\n # return True since the current state is the desired one\n return True\n if tries and e.response.get('Error', {}).get('Code') == 'Throttling':\n log.debug('Throttled by AWS API.')\n time.sleep(3)\n tries -= 1\n continue\n log.error('Failed to associate VPC %s with hosted zone %s: %s',\n VPCName or VPCId, Name or HostedZoneId, e)\n return False",
"def get_zones_of_account(self, account_name, q=None, **kwargs):\n \"\"\"Returns a list of zones for the specified account.\n\n Arguments:\n account_name -- The name of the account.\n\n Keyword Arguments:\n q -- The search parameters, in a dict. Valid keys are:\n name - substring match of the zone name\n zone_type - one of:\n PRIMARY\n SECONDARY\n ALIAS\n sort -- The sort column used to order the list. Valid values for the sort field are:\n NAME\n ACCOUNT_NAME\n RECORD_COUNT\n ZONE_TYPE\n reverse -- Whether the list is ascending(False) or descending(True)\n offset -- The position in the list for the first returned element(0 based)\n limit -- The maximum number of rows to be returned.\n\n \"\"\"\n uri = \"/v1/accounts/\" + account_name + \"/zones\"\n params = build_params(q, kwargs)\n return self.rest_api_connection.get(uri, params)",
"def _create_gcloud_zone(self, dns_name):\n \"\"\"Creates a google cloud ManagedZone with dns_name, and zone named\n derived from it. calls .create() method and returns it.\n\n :param dns_name: fqdn of zone to create\n :type dns_name: str\n\n :type return: new google.cloud.dns.ManagedZone\n \"\"\"\n # Zone name must begin with a letter, end with a letter or digit,\n # and only contain lowercase letters, digits or dashes,\n # and be 63 characters or less\n zone_name = 'zone-{}-{}'.format(\n dns_name.replace('.', '-'), uuid4().hex)[:63]\n\n gcloud_zone = self.gcloud_client.zone(\n name=zone_name,\n dns_name=dns_name\n )\n gcloud_zone.create(client=self.gcloud_client)\n\n # add this new zone to the list of zones.\n self._gcloud_zones[gcloud_zone.dns_name] = gcloud_zone\n\n self.log.info(\"Created zone {}. Fqdn {}.\".format(zone_name, dns_name))\n\n return gcloud_zone",
"def add_service(service, zone=None, permanent=True):\n '''\n Add a service for zone. If zone is omitted, default zone will be used.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' firewalld.add_service ssh\n\n To assign a service to a specific zone:\n\n .. code-block:: bash\n\n salt '*' firewalld.add_service ssh my_zone\n '''\n if zone:\n cmd = '--zone={0} --add-service={1}'.format(zone, service)\n else:\n cmd = '--add-service={0}'.format(service)\n\n if permanent:\n cmd += ' --permanent'\n\n return __firewall_cmd(cmd)",
"def _get_container_service(container):\n '''\n Get the azure block blob service for the container in question\n\n Try account_key, sas_token, and no auth in that order\n '''\n if 'account_key' in container:\n account = azure.storage.CloudStorageAccount(container['account_name'], account_key=container['account_key'])\n elif 'sas_token' in container:\n account = azure.storage.CloudStorageAccount(container['account_name'], sas_token=container['sas_token'])\n else:\n account = azure.storage.CloudStorageAccount(container['account_name'])\n blob_service = account.create_block_blob_service()\n return blob_service"
] |
[
0.6945491433143616,
0.6877668499946594,
0.6863690614700317,
0.6855800747871399,
0.6790636777877808,
0.6702797412872314,
0.6656450033187866,
0.6601918935775757,
0.6536268591880798,
0.6525503396987915,
0.650916337966919,
0.6493642330169678
] |
:param name: new env name
:rtype: Environment
|
def clone(self, name=None):
"""
:param name: new env name
:rtype: Environment
"""
resp = self._router.post_env_clone(env_id=self.environmentId, json=dict(name=name) if name else {}).json()
return Environment(self.organization, id=resp['id']).init_router(self._router)
|
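A self-contained sketch of the request body clone posts: an empty JSON object when no name is supplied, otherwise {"name": ...} (the example name is hypothetical):

def clone_payload(name=None):
    # Same conditional dict as in clone() above.
    return dict(name=name) if name else {}

assert clone_payload() == {}
assert clone_payload('staging-copy') == {'name': 'staging-copy'}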
[
"def create(self, num):\n \"\"\"\n Creates the environment\n in your subclassed create function include the line below\n super().build(arg1, arg2, arg2, ...)\n \"\"\"\n self.log.record_process('enviroment.py', 'Creating ' + str(num) + ' environments - ' + self.name)",
"def create_atari_environment(name):\n \"\"\"\n Create OpenAI Gym atari environment\n :param name: name of the atari game\n :return: Preconfigured environment suitable for atari games\n \"\"\"\n env = gym.make(name)\n env = NoopResetEnv(env, noop_max=30)\n env = EpisodicLifeEnv(env)\n env = ClipRewardEnv(env)\n\n action_names = env.unwrapped.get_action_meanings()\n if 'FIRE' in action_names:\n env = FireResetEnv(env)\n\n env = WarpFrame(env)\n env = FrameStack(env, k=4)\n return env",
"def create(self, name=None, prefix=None, pkgs=None, channels=None):\n \"\"\"Create an environment with a specified set of packages.\"\"\"\n logger.debug(str((prefix, pkgs, channels)))\n\n # TODO: Fix temporal hack\n if (not pkgs or (not isinstance(pkgs, (list, tuple)) and\n not is_text_string(pkgs))):\n raise TypeError('must specify a list of one or more packages to '\n 'install into new environment')\n\n cmd_list = ['create', '--yes', '--json', '--mkdir']\n if name:\n ref = name\n search = [os.path.join(d, name) for d in\n self.info().communicate()[0]['envs_dirs']]\n cmd_list.extend(['--name', name])\n elif prefix:\n ref = prefix\n search = [prefix]\n cmd_list.extend(['--prefix', prefix])\n else:\n raise TypeError('must specify either an environment name or a '\n 'path for new environment')\n\n if any(os.path.exists(prefix) for prefix in search):\n raise CondaEnvExistsError('Conda environment {0} already '\n 'exists'.format(ref))\n\n # TODO: Fix temporal hack\n if isinstance(pkgs, (list, tuple)):\n cmd_list.extend(pkgs)\n elif is_text_string(pkgs):\n cmd_list.extend(['--file', pkgs])\n\n # TODO: Check if correct\n if channels:\n cmd_list.extend(['--override-channels'])\n\n for channel in channels:\n cmd_list.extend(['--channel'])\n cmd_list.extend([channel])\n\n return self._call_and_parse(cmd_list)",
"def new_env(environment):\n \"\"\" Create a new environment in the configuration and ask the\n user for the commands for this specific environment.\n \"\"\"\n if not environment:\n print(\"You need to supply an environment name\")\n return\n\n parser = read_config()\n\n if environment in parser.sections():\n print(\"Environment '%s' already exists\" % environment)\n return\n\n print(\"Please introduce (in order) the commands for '%s'\\n\" % environment)\n print(\"Press RETURN to end command and RETURN with empty line to finish\\n\")\n\n commands = []\n cmd = \"\"\n\n while True:\n try:\n cmd = raw_input(\"> \")\n\n if not cmd:\n break\n\n commands.append(cmd)\n\n except KeyboardInterrupt:\n return\n\n parser.add_section(environment)\n parser.set(environment, \"cmd\", \"\\n\".join(commands))\n\n write_config(parser)\n\n print(\"Added environment '%s'\" % environment)",
"public static EnvironmentType newInstance(String name)\n {\n \tEnvironmentType env = new EnvironmentType();\n \tenv.setName(name);\n return env;\n }",
"def set_environment(self, name, value):\n \"\"\"\n Set environment ``$ tmux set-environment <name> <value>``.\n\n Parameters\n ----------\n name : str\n the environment variable name. such as 'PATH'.\n option : str\n environment value.\n \"\"\"\n args = ['set-environment']\n if self._add_option:\n args += [self._add_option]\n\n args += [name, value]\n\n proc = self.cmd(*args)\n\n if proc.stderr:\n if isinstance(proc.stderr, list) and len(proc.stderr) == int(1):\n proc.stderr = proc.stderr[0]\n raise ValueError('tmux set-environment stderr: %s' % proc.stderr)",
"def create_environment(self, name, default=False, zone=None):\n \"\"\" Creates environment and returns Environment object.\n \"\"\"\n from qubell.api.private.environment import Environment\n return Environment.new(organization=self, name=name, zone_id=zone, default=default, router=self._router)",
"def inserir(self, name):\n \"\"\"Inserts a new Logical Environment and returns its identifier.\n\n :param name: Logical Environment name. String with a minimum 2 and maximum of 80 characters\n\n :return: Dictionary with the following structure:\n\n ::\n\n {'logical_environment': {'id': < id_logical_environment >}}\n\n :raise InvalidParameterError: Name is null and invalid.\n :raise NomeAmbienteLogicoDuplicadoError: There is already a registered Logical Environment with the value of name.\n :raise DataBaseError: Networkapi failed to access the database.\n :raise XMLError: Networkapi failed to generate the XML response.\n \"\"\"\n\n logical_environment_map = dict()\n logical_environment_map['name'] = name\n\n code, xml = self.submit(\n {'logical_environment': logical_environment_map}, 'POST', 'logicalenvironment/')\n\n return self.response(code, xml)",
"def create_from_yaml(self, name, yamlfile):\n \"\"\"\n Create new environment using conda-env via a yaml specification file.\n\n Unlike other methods, this calls conda-env, and requires a named\n environment and uses channels as defined in rcfiles.\n\n Parameters\n ----------\n name : string\n Environment name\n yamlfile : string\n Path to yaml file with package spec (as created by conda env export\n \"\"\"\n logger.debug(str((name, yamlfile)))\n cmd_list = ['env', 'create', '-n', name, '-f', yamlfile, '--json']\n return self._call_and_parse(cmd_list)",
"def change_env(name, val):\n \"\"\"\n Args:\n name(str), val(str):\n\n Returns:\n a context where the environment variable ``name`` being set to\n ``val``. It will be set back after the context exits.\n \"\"\"\n oldval = os.environ.get(name, None)\n os.environ[name] = val\n yield\n if oldval is None:\n del os.environ[name]\n else:\n os.environ[name] = oldval",
"def environment(name)\n yaml_path = yaml_path_if_exists(name)\n rb_path = rb_path_if_exists(name)\n\n raise \"found multiple env files for same env #{name}.\" if !yaml_path.nil? && !rb_path.nil?\n raise \"TODO: implement Ruby environments.\" unless rb_path.nil?\n\n env = Environments::Environment.load_yaml_file(yaml_path) unless yaml_path.nil?\n\n raise \"no env found for '#{name}'.\" if env.nil?\n\n IceNine.deep_freeze(env)\n env\n end",
"def register_env(name, env_creator):\n \"\"\"Register a custom environment for use with RLlib.\n\n Args:\n name (str): Name to register.\n env_creator (obj): Function that creates an env.\n \"\"\"\n\n if not isinstance(env_creator, FunctionType):\n raise TypeError(\"Second argument must be a function.\", env_creator)\n _global_registry.register(ENV_CREATOR, name, env_creator)"
] |
[
0.7197701334953308,
0.7145977020263672,
0.7126750349998474,
0.7057265639305115,
0.7025527954101562,
0.6969571113586426,
0.6964506506919861,
0.6944248676300049,
0.6940590739250183,
0.6897558569908142,
0.6870725154876709,
0.6836482882499695
] |
Returns the environment marked as default.
When a Zone is set, the 'default' mark makes no sense, so a special env with the proper Zone is returned.
|
def default(self):
"""
    Returns the environment marked as default.
    When a Zone is set, the 'default' mark makes no sense, so a special env with the proper Zone is returned.
"""
if ZONE_NAME:
log.info("Getting or creating default environment for zone with name '{0}'".format(DEFAULT_ENV_NAME()))
zone_id = self.organization.zones[ZONE_NAME].id
return self.organization.get_or_create_environment(name=DEFAULT_ENV_NAME(), zone=zone_id)
def_envs = [env_j["id"] for env_j in self.json() if env_j["isDefault"]]
if len(def_envs) > 1:
log.warning('Found more than one default environment. Picking last.')
return self[def_envs[-1]]
elif len(def_envs) == 1:
return self[def_envs[0]]
raise exceptions.NotFoundError('Unable to get default environment')
|
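A minimal sketch of the selection logic in default: collect the ids flagged isDefault and pick the last one, as in the warning branch above; the JSON records are made up:

envs_json = [
    {'id': 'env-1', 'isDefault': False},
    {'id': 'env-2', 'isDefault': True},
    {'id': 'env-3', 'isDefault': True},
]
def_envs = [env_j['id'] for env_j in envs_json if env_j['isDefault']]
assert def_envs[-1] == 'env-3'  # more than one default: the last wins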
[
"public static TimeZone getDefault() {\n // Android patch (http://b/30979219) start.\n // Avoid race condition by copying defaultZone to a local variable.\n TimeZone result = defaultZone;\n if (result == null) {\n // Android patch (http://b/30937209) start.\n // Avoid a deadlock by always acquiring monitors in order (1) java.util.TimeZone.class\n // then (2) icu.util.TimeZone.class and not (2) then (1).\n // Without the synchronized here there is a possible deadlock between threads calling\n // this method and other threads calling methods on java.util.TimeZone. e.g.\n // java.util.TimeZone.setDefault() calls back into\n // icu.util.TimeZone.clearCachedDefault() so always acquires them in order (1) then (2).\n synchronized (java.util.TimeZone.class) {\n synchronized (TimeZone.class) {\n result = defaultZone;\n if (result == null) {\n if (TZ_IMPL == TIMEZONE_JDK) {\n result = new JavaTimeZone();\n } else {\n java.util.TimeZone temp = java.util.TimeZone.getDefault();\n result = getFrozenTimeZone(temp.getID());\n }\n defaultZone = result;\n }\n }\n }\n // Android patch (http://b/30937209) end.\n }\n return result.cloneAsThawed();\n // Android patch (http://b/30979219) end.\n }",
"def get_default_zone\n params = {\n 'command' => 'listZones',\n 'available' => 'true'\n }\n json = send_request(params)\n\n zones = json['zone']\n return nil unless zones\n # zones.sort! # sort zones so we always return the same zone\n # !this gives error in our production environment so need to retest this\n zones.first\n end",
"def dzen\n return @@dzen unless @@dzen.nil?\n\n @@dzen = DZEN::Terminal.new if defined?(::TERMINAL) || !!ENV['TERMINAL']\n @@dzen = DZEN::Default.new unless @@dzen\n\n @@dzen\n end",
"def get_default_ENV(env):\n \"\"\"\n A fiddlin' little function that has an 'import SCons.Environment' which\n can't be moved to the top level without creating an import loop. Since\n this import creates a local variable named 'SCons', it blocks access to\n the global variable, so we move it here to prevent complaints about local\n variables being used uninitialized.\n \"\"\"\n global default_ENV\n try:\n return env['ENV']\n except KeyError:\n if not default_ENV:\n import SCons.Environment\n # This is a hideously expensive way to get a default shell\n # environment. What it really should do is run the platform\n # setup to get the default ENV. Fortunately, it's incredibly\n # rare for an Environment not to have a shell environment, so\n # we're not going to worry about it overmuch.\n default_ENV = SCons.Environment.Environment()['ENV']\n return default_ENV",
"public java.lang.String getEnv() {\n java.lang.Object ref = env_;\n if (ref instanceof java.lang.String) {\n return (java.lang.String) ref;\n } else {\n com.google.protobuf.ByteString bs = \n (com.google.protobuf.ByteString) ref;\n java.lang.String s = bs.toStringUtf8();\n env_ = s;\n return s;\n }\n }",
"def get_region(cls, resource=None):\n \"\"\"Retrieve region from standard environmental variables\n or file name.\n\n More information of the following link: http://goo.gl/Vb9Jky\n \"\"\"\n if resource:\n resource_info = cls.parse_remote(resource)\n if resource_info.region:\n return resource_info.region\n\n return os.environ.get(\"AWS_DEFAULT_REGION\", cls._DEFAULT_REGION)",
"public static RuntimeEnvironmentBuilder getDefaultInMemory() {\n // PATCHED from: RuntimeEnvironmentBuilder builder = new RuntimeEnvironmentBuilder(new DefaultRuntimeEnvironment(null, false));\n RuntimeEnvironmentBuilder builder = new PatchedRuntimeEnvironmentBuilder(new PatchedRuntimeEnvironment());\n builder.addConfiguration(\"drools.processSignalManagerFactory\", DefaultSignalManagerFactory.class.getName()).addConfiguration(\"drools.processInstanceManagerFactory\",\n DefaultProcessInstanceManagerFactory.class\n .getName());\n\n return builder;\n }",
"public static String getEnv(){\n if(ENV == null){\n if(!blank(System.getenv(\"ACTIVE_ENV\"))) {\n ENV = System.getenv(\"ACTIVE_ENV\");\n }\n\n if(!blank(System.getProperty(\"ACTIVE_ENV\"))) {\n ENV = System.getProperty(\"ACTIVE_ENV\");\n }\n\n if(!blank(System.getProperty(\"active_env\"))) {\n ENV = System.getProperty(\"active_env\");\n }\n\n if(blank(ENV)){\n ENV = \"development\";\n LogFilter.log(LOGGER, LogLevel.INFO, \"Environment variable ACTIVE_ENV not provided, defaulting to '\" + ENV + \"'\");\n }\n }\n return ENV;\n }",
"public java.lang.String getEnvironment() {\n java.lang.Object ref = environment_;\n if (ref instanceof java.lang.String) {\n return (java.lang.String) ref;\n } else {\n com.google.protobuf.ByteString bs = \n (com.google.protobuf.ByteString) ref;\n java.lang.String s = bs.toStringUtf8();\n environment_ = s;\n return s;\n }\n }",
"public static String getEnvDefault(String propName, String defaultVal) {\n String ans = defaultVal;\n String value = System.getenv(propName);\n if (value != null) {\n ans = value.trim();\n } else if (defaultVal != null) {\n ans = defaultVal.trim();\n }\n return ans;\n }",
"public static String getEnv(){\n if(ENV == null){\n if(!blank(System.getenv(\"ACTIVE_ENV\"))) {\n ENV = System.getenv(\"ACTIVE_ENV\");\n }\n\n if(!blank(System.getProperty(\"ACTIVE_ENV\"))) {\n ENV = System.getProperty(\"ACTIVE_ENV\");\n }\n\n if(!blank(System.getProperty(\"active_env\"))) {\n ENV = System.getProperty(\"active_env\");\n }\n\n if(blank(ENV)){ \n ENV = \"development\";\n LOGGER.warn(\"Environment variable ACTIVE_ENV not provided, defaulting to '\" + ENV + \"'\");\n }\n }\n return ENV;\n }",
"public static String getEnvironmentString(String name, String defaultValue) {\n if (envVars == null) {\n throw new IllegalStateException(\"The environment vars must be provided before calling getEnvironmentString.\");\n }\n\n return envVars.get(ENV_PREFIX + name) != null ? envVars.get(ENV_PREFIX + name) : defaultValue;\n }"
] |
[
0.7198145389556885,
0.7121028900146484,
0.7038509845733643,
0.7020342350006104,
0.7004169225692749,
0.695034384727478,
0.6945798397064209,
0.6934176087379456,
0.6932082772254944,
0.6929348111152649,
0.6910450458526611,
0.6877166628837585
] |
Builds a mapping of class paths to URLs.
|
def build_urls(self: NodeVisitor, node: inheritance_diagram) -> Mapping[str, str]:
"""
Builds a mapping of class paths to URLs.
"""
current_filename = self.builder.current_docname + self.builder.out_suffix
urls = {}
for child in node:
# Another document
if child.get("refuri") is not None:
uri = child.get("refuri")
package_path = child["reftitle"]
if uri.startswith("http"):
_, _, package_path = uri.partition("#")
else:
uri = (
pathlib.Path("..")
/ pathlib.Path(current_filename).parent
/ pathlib.Path(uri)
)
uri = str(uri).replace(os.path.sep, "/")
urls[package_path] = uri
# Same document
elif child.get("refid") is not None:
urls[child["reftitle"]] = (
"../" + current_filename + "#" + child.get("refid")
)
return urls
|
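A runnable sketch of the same-builder URI rewrite performed in build_urls for non-http refuris; the docname and target file are hypothetical:

import os
import pathlib

current_filename = 'api/uqbar/graphs.html'  # hypothetical docname + out_suffix
uri = 'containers.html'
uri = pathlib.Path('..') / pathlib.Path(current_filename).parent / pathlib.Path(uri)
print(str(uri).replace(os.path.sep, '/'))  # -> ../api/uqbar/containers.html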
[
"List<String> buildClassPath(String appClassPath) throws IOException {\n String sparkHome = getSparkHome();\n\n Set<String> cp = new LinkedHashSet<>();\n addToClassPath(cp, appClassPath);\n\n addToClassPath(cp, getConfDir());\n\n boolean prependClasses = !isEmpty(getenv(\"SPARK_PREPEND_CLASSES\"));\n boolean isTesting = \"1\".equals(getenv(\"SPARK_TESTING\"));\n if (prependClasses || isTesting) {\n String scala = getScalaVersion();\n List<String> projects = Arrays.asList(\n \"common/kvstore\",\n \"common/network-common\",\n \"common/network-shuffle\",\n \"common/network-yarn\",\n \"common/sketch\",\n \"common/tags\",\n \"common/unsafe\",\n \"core\",\n \"examples\",\n \"graphx\",\n \"launcher\",\n \"mllib\",\n \"repl\",\n \"resource-managers/mesos\",\n \"resource-managers/yarn\",\n \"sql/catalyst\",\n \"sql/core\",\n \"sql/hive\",\n \"sql/hive-thriftserver\",\n \"streaming\"\n );\n if (prependClasses) {\n if (!isTesting) {\n System.err.println(\n \"NOTE: SPARK_PREPEND_CLASSES is set, placing locally compiled Spark classes ahead of \" +\n \"assembly.\");\n }\n for (String project : projects) {\n addToClassPath(cp, String.format(\"%s/%s/target/scala-%s/classes\", sparkHome, project,\n scala));\n }\n }\n if (isTesting) {\n for (String project : projects) {\n addToClassPath(cp, String.format(\"%s/%s/target/scala-%s/test-classes\", sparkHome,\n project, scala));\n }\n }\n\n // Add this path to include jars that are shaded in the final deliverable created during\n // the maven build. These jars are copied to this directory during the build.\n addToClassPath(cp, String.format(\"%s/core/target/jars/*\", sparkHome));\n addToClassPath(cp, String.format(\"%s/mllib/target/jars/*\", sparkHome));\n }\n\n // Add Spark jars to the classpath. For the testing case, we rely on the test code to set and\n // propagate the test classpath appropriately. For normal invocation, look for the jars\n // directory under SPARK_HOME.\n boolean isTestingSql = \"1\".equals(getenv(\"SPARK_SQL_TESTING\"));\n String jarsDir = findJarsDir(getSparkHome(), getScalaVersion(), !isTesting && !isTestingSql);\n if (jarsDir != null) {\n addToClassPath(cp, join(File.separator, jarsDir, \"*\"));\n }\n\n addToClassPath(cp, getenv(\"HADOOP_CONF_DIR\"));\n addToClassPath(cp, getenv(\"YARN_CONF_DIR\"));\n addToClassPath(cp, getenv(\"SPARK_DIST_CLASSPATH\"));\n return new ArrayList<>(cp);\n }",
"private static void getClassPathesFromLoader(ClassLoader cl, Map pathes) {\n\tif (cl instanceof URLClassLoader) _getClassPathesFromLoader((URLClassLoader) cl, pathes);\n }",
"private static URL[] mergeClassPath(URL... urls) {\n \tfinal String path = System.getProperty(\"java.class.path\"); //$NON-NLS-1$\n \tfinal String separator = System.getProperty(\"path.separator\"); //$NON-NLS-1$\n \tfinal String[] parts = path.split(Pattern.quote(separator));\n \tfinal URL[] u = new URL[parts.length + urls.length];\n \tfor (int i = 0; i < parts.length; ++i) {\n \t\ttry {\n\t\t\t\tu[i] = new File(parts[i]).toURI().toURL();\n\t\t\t} catch (MalformedURLException exception) {\n\t\t\t\t// ignore exception\n\t\t\t}\n \t}\n \tSystem.arraycopy(urls, 0, u, parts.length, urls.length);\n \treturn u;\n }",
"private URL[] generateClassPathUrls() throws MojoExecutionException {\n List<URL> urls = new ArrayList<URL>();\n URL url;\n try {\n for (Object element : getCompileClasspath()) {\n String path = (String) element;\n if (path.endsWith(\".jar\")) {\n url = new URL(\"jar:\" + new File(path).toURI().toString() + \"!/\");\n } else {\n url = new File(path).toURI().toURL();\n }\n urls.add(url);\n }\n } catch (MalformedURLException e) {\n throw new MojoExecutionException(\"Could not set up classpath\", e);\n }\n return urls.toArray(new URL[urls.size()]);\n }",
"def _build_urlmapping(urls, strict_slashes=False, **kwargs):\n \"\"\"Convers the anillo urlmappings list into\n werkzeug Map instance.\n\n :return: a werkzeug Map instance\n :rtype: Map\n \"\"\"\n\n rules = _build_rules(urls)\n return Map(rules=list(rules), strict_slashes=strict_slashes, **kwargs)",
"public void buildResourcePaths() {\n try {\n if (Boolean.getBoolean(Stapler.class.getName()+\".noResourcePathCache\")) {\n resourcePaths = null;\n return;\n }\n\n Map<String,URL> paths = new HashMap<String,URL>();\n Stack<String> q = new Stack<String>();\n q.push(\"/\");\n while (!q.isEmpty()) {\n String dir = q.pop();\n Set<String> children = context.getResourcePaths(dir);\n if (children!=null) {\n for (String child : children) {\n if (child.endsWith(\"/\"))\n q.push(child);\n else {\n URL v = context.getResource(child);\n if (v==null) {\n resourcePaths = null;\n return; // this can't happen. abort with no cache\n }\n paths.put(child, v);\n }\n }\n }\n }\n\n resourcePaths = Collections.unmodifiableMap(paths);\n } catch (MalformedURLException e) {\n resourcePaths = null; // abort\n }\n }",
"def urlmap(patterns):\n \"\"\"Recursively build a map of (group, name) => url patterns.\n\n Group is either the resolver namespace or app name for the url config.\n\n The urls are joined with any prefixes, and cleaned up of extraneous regex\n specific syntax.\"\"\"\n for pattern in patterns:\n group = getattr(pattern, 'namespace', None)\n if group is None:\n group = getattr(pattern, 'app_name', None)\n path = '/' + get_pattern(pattern).lstrip('^').rstrip('$')\n if isinstance(pattern, PATTERNS):\n yield (group, pattern.name), path\n elif isinstance(pattern, RESOLVERS):\n subpatterns = pattern.url_patterns\n for (_, name), subpath in urlmap(subpatterns):\n yield (group, name), path.rstrip('/') + subpath",
"def relativize_classpath(classpath, root_dir, followlinks=True):\n \"\"\"Convert into classpath relative to a directory.\n\n This is eventually used by a jar file located in this directory as its manifest\n attribute Class-Path. See\n https://docs.oracle.com/javase/7/docs/technotes/guides/extensions/spec.html#bundled\n\n :param list classpath: Classpath to be relativized.\n :param string root_dir: directory to relativize urls in the classpath, does not\n have to exist yet.\n :param bool followlinks: whether to follow symlinks to calculate relative path.\n\n :returns: Converted classpath of the same size as input classpath.\n :rtype: list of strings\n \"\"\"\n def relativize_url(url, root_dir):\n # When symlink is involed, root_dir concatenated with the returned relpath may not exist.\n # Consider on mac `/var` is a symlink of `/private/var`, the relative path of subdirectories\n # under /var to any other directories under `/` computed by os.path.relpath misses one level\n # of `..`. Use os.path.realpath to guarantee returned relpath can always be located.\n # This is not needed only when path are all relative.\n url = os.path.realpath(url) if followlinks else url\n root_dir = os.path.realpath(root_dir) if followlinks else root_dir\n url_in_bundle = os.path.relpath(url, root_dir)\n # Append '/' for directories, those not ending with '/' are assumed to be jars.\n # Note isdir does what we need here to follow symlinks.\n if os.path.isdir(url):\n url_in_bundle += '/'\n return url_in_bundle\n\n return [relativize_url(url, root_dir) for url in classpath]",
"private String buildClassPath()\n {\n String classPath = null;//_classPath;\n\n if (classPath != null) {\n return classPath;\n }\n\n if (classPath == null && _loader instanceof DynamicClassLoader) {\n classPath = ((DynamicClassLoader) _loader).getClassPath();\n }\n else { // if (true || _loader instanceof URLClassLoader) {\n StringBuilder sb = new StringBuilder();\n sb.append(CauchoUtil.getClassPath());\n\n if (_loader != null)\n buildClassPath(sb, _loader);\n\n classPath = sb.toString();\n }\n //else if (classPath == null)\n //classPath = CauchoSystem.getClassPath();\n\n String srcDirName = getSourceDirName();\n String classDirName = getClassDirName();\n\n char sep = CauchoUtil.getPathSeparatorChar();\n\n if (_extraClassPath != null)\n classPath = classPath + sep + _extraClassPath;\n\n // Adding the srcDir lets javac and jikes find source files\n if (! srcDirName.equals(classDirName))\n classPath = srcDirName + sep + classPath;\n classPath = classDirName + sep + classPath;\n\n return classPath;\n }",
"protected URL[] getCompileClasspathElementURLs() throws DependencyResolutionRequiredException {\n // build class loader to get classes to generate resources for\n return project.getCompileClasspathElements().stream()\n .map(path -> {\n try {\n return new File(path).toURI().toURL();\n }\n catch (MalformedURLException ex) {\n throw new RuntimeException(ex);\n }\n })\n .toArray(size -> new URL[size]);\n }",
"def urltool(classqname, filt, reverse):\n \"\"\"\n Dump all urls branching from a class as OpenAPI 3 documentation\n\n The class must be given as a FQPN which points to a Klein() instance.\n\n Apply optional [FILT] as a regular expression searching within urls. For\n example, to match all urls beginning with api, you might use '^/api'\n \"\"\"\n filt = re.compile(filt or '.*')\n\n rootCls = namedAny(classqname)\n rules = list(_iterClass(rootCls))\n arr = []\n for item in sorted(rules):\n if item.subKlein:\n continue\n\n matched = filt.search(item.rulePath)\n matched = not matched if reverse else matched\n if matched:\n arr.append(tuple(item.toOpenAPIPath()))\n\n openapi3 = openapi.OpenAPI()\n for pathPath, pathItem in arr:\n if pathPath in openapi3.paths:\n openapi3.paths[pathPath].merge(pathItem)\n else:\n openapi3.paths[pathPath] = pathItem\n print(yaml.dump(openapi3, default_flow_style=False))",
"def classpath(self, targets, classpath_prefix=None, classpath_product=None, exclude_scopes=None,\n include_scopes=None):\n \"\"\"Builds a transitive classpath for the given targets.\n\n Optionally includes a classpath prefix or building from a non-default classpath product.\n\n :param targets: the targets for which to build the transitive classpath.\n :param classpath_prefix: optional additional entries to prepend to the classpath.\n :param classpath_product: an optional ClasspathProduct from which to build the classpath. if not\n specified, the runtime_classpath will be used.\n :param :class:`pants.build_graph.target_scopes.Scope` exclude_scopes: Exclude targets which\n have at least one of these scopes on the classpath.\n :param :class:`pants.build_graph.target_scopes.Scope` include_scopes: Only include targets which\n have at least one of these scopes on the classpath. Defaults to Scopes.JVM_RUNTIME_SCOPES.\n :return: a list of classpath strings.\n \"\"\"\n include_scopes = Scopes.JVM_RUNTIME_SCOPES if include_scopes is None else include_scopes\n classpath_product = classpath_product or self.context.products.get_data('runtime_classpath')\n closure = BuildGraph.closure(targets, bfs=True, include_scopes=include_scopes,\n exclude_scopes=exclude_scopes, respect_intransitive=True)\n\n classpath_for_targets = ClasspathUtil.classpath(closure, classpath_product, self.confs)\n classpath = list(classpath_prefix or ())\n classpath.extend(classpath_for_targets)\n return classpath"
] |
[
0.7510268688201904,
0.7378577589988708,
0.7305504083633423,
0.7301417589187622,
0.7251906394958496,
0.7251483798027039,
0.7240374684333801,
0.7172022461891174,
0.716221034526825,
0.7134982347488403,
0.7128140330314636,
0.7124380469322205
] |
Builds HTML output from an :py:class:`~uqbar.sphinx.inheritance.inheritance_diagram` node.
|
def html_visit_inheritance_diagram(
self: NodeVisitor, node: inheritance_diagram
) -> None:
"""
Builds HTML output from an :py:class:`~uqbar.sphinx.inheritance.inheritance_diagram` node.
"""
inheritance_graph = node["graph"]
urls = build_urls(self, node)
graphviz_graph = inheritance_graph.build_graph(urls)
dot_code = format(graphviz_graph, "graphviz")
# TODO: We can perform unflattening here
aspect_ratio = inheritance_graph.aspect_ratio
if aspect_ratio:
aspect_ratio = math.ceil(math.sqrt(aspect_ratio[1] / aspect_ratio[0]))
if aspect_ratio > 1:
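            # Pipe the DOT source through Graphviz's ``unflatten`` filter,
            # staggering leaf edges to the depth computed above, to improve
            # the aspect ratio of large inheritance graphs.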
process = subprocess.Popen(
["unflatten", "-l", str(aspect_ratio), "-c", str(aspect_ratio), "-f"],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = process.communicate(dot_code.encode())
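            # stderr is captured but not inspected here; whatever unflatten
            # writes to stdout becomes the DOT code handed to the renderer.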
dot_code = stdout.decode()
render_dot_html(self, node, dot_code, {}, "inheritance", "inheritance")
raise SkipNode
|
[
"def html_visit_inheritance_diagram(self, node):\n # type: (nodes.NodeVisitor, inheritance_diagram) -> None\n \"\"\"\n Output the graph for HTML. This will insert a PNG with clickable\n image map.\n \"\"\"\n graph = node['graph']\n\n graph_hash = get_graph_hash(node)\n name = 'inheritance%s' % graph_hash\n\n # Create a mapping from fully-qualified class names to URLs.\n graphviz_output_format = self.builder.env.config.graphviz_output_format.upper()\n current_filename = self.builder.current_docname + self.builder.out_suffix\n urls = {}\n for child in node:\n if child.get('refuri') is not None:\n if graphviz_output_format == 'SVG':\n urls[child['reftitle']] = os.path.join(\"..\", child.get('refuri'))\n else:\n urls[child['reftitle']] = child.get('refuri')\n elif child.get('refid') is not None:\n if graphviz_output_format == 'SVG':\n urls[child['reftitle']] = os.path.join('..', current_filename + '#' + child.get('refid'))\n else:\n urls[child['reftitle']] = '#' + child.get('refid')\n\n dotcode = graph.generate_dot(name, urls, env=self.builder.env)\n render_dot_html(\n self, node, dotcode, {}, 'inheritance', 'inheritance',\n alt='Inheritance diagram of ' + node['content'],\n link_to_svg='<i class=\"fa fa-external-link\" aria-hidden=\"true\"></i>'' SVG')\n raise nodes.SkipNode",
"def latex_visit_inheritance_diagram(\n self: NodeVisitor, node: inheritance_diagram\n) -> None:\n \"\"\"\n Builds LaTeX output from an :py:class:`~uqbar.sphinx.inheritance.inheritance_diagram` node.\n \"\"\"\n inheritance_graph = node[\"graph\"]\n graphviz_graph = inheritance_graph.build_graph()\n graphviz_graph.attributes[\"size\"] = 6.0\n dot_code = format(graphviz_graph, \"graphviz\")\n render_dot_latex(self, node, dot_code, {}, \"inheritance\")\n raise SkipNode",
"def texinfo_visit_inheritance_diagram(self, node):\n # type: (nodes.NodeVisitor, inheritance_diagram) -> None\n \"\"\"\n Output the graph for Texinfo. This will insert a PNG.\n \"\"\"\n graph = node['graph']\n\n graph_hash = get_graph_hash(node)\n name = 'inheritance%s' % graph_hash\n\n dotcode = graph.generate_dot(name, env=self.builder.env,\n graph_attrs={'size': '\"6.0,6.0\"'})\n render_dot_texinfo(self, node, dotcode, {}, 'inheritance')\n raise nodes.SkipNode",
"def html_output_graph(self, node):\n \"\"\"\n Output the graph for HTML. This will insert a PNG with clickable\n image map.\n \"\"\"\n graph = node['graph']\n parts = node['parts']\n\n graph_hash = get_graph_hash(node)\n name = \"inheritance%s\" % graph_hash\n path = '_images'\n dest_path = os.path.join(setup.app.builder.outdir, path)\n if not os.path.exists(dest_path):\n os.makedirs(dest_path)\n png_path = os.path.join(dest_path, name + \".png\")\n path = setup.app.builder.imgpath\n\n # Create a mapping from fully-qualified class names to URLs.\n urls = {}\n for child in node:\n if child.get('refuri') is not None:\n urls[child['reftitle']] = child.get('refuri')\n elif child.get('refid') is not None:\n urls[child['reftitle']] = '#' + child.get('refid')\n\n # These arguments to dot will save a PNG file to disk and write\n # an HTML image map to stdout.\n image_map = graph.run_dot(['-Tpng', '-o%s' % png_path, '-Tcmapx'],\n name, parts, urls)\n return ('<img src=\"%s/%s.png\" usemap=\"#%s\" class=\"inheritance\"/>%s' %\n (path, name, name, image_map))",
"def visit_inheritance_diagram(inner_func):\n \"\"\"\n This is just a wrapper around html/latex_output_graph to make it\n easier to handle errors and insert warnings.\n \"\"\"\n def visitor(self, node):\n try:\n content = inner_func(self, node)\n except DotException, e:\n # Insert the exception as a warning in the document\n warning = self.document.reporter.warning(str(e), line=node.line)\n warning.parent = node\n node.children = [warning]\n else:\n source = self.document.attributes['source']\n self.body.append(content)\n node.children = []\n return visitor",
"def latex_visit_inheritance_diagram(self, node):\n # type: (nodes.NodeVisitor, inheritance_diagram) -> None\n \"\"\"\n Output the graph for LaTeX. This will insert a PDF.\n \"\"\"\n graph = node['graph']\n\n graph_hash = get_graph_hash(node)\n name = 'inheritance%s' % graph_hash\n\n dotcode = graph.generate_dot(name, env=self.builder.env,\n graph_attrs={'size': '\"6.0,6.0\"'})\n render_dot_latex(self, node, dotcode, {}, 'inheritance')\n raise nodes.SkipNode",
"def _repr_html_(self):\n \"\"\"Build the HTML representation for IPython.\"\"\"\n self.chart_id = '_'.join(['bearcart', uuid4().hex])\n self.template_vars.update({'chart_id': self.chart_id,\n 'y_axis_id': self.y_axis_id,\n 'legend_id': self.legend_id,\n 'slider_id': self.slider_id,\n 'export_json': json.dumps(self.json_data)})\n\n self._build_graph()\n html = self.env.get_template('ipynb_repr.html')\n return html.render(self.template_vars)",
"def inheritance_diagram_directive(name, arguments, options, content, lineno,\n content_offset, block_text, state,\n state_machine):\n \"\"\"\n Run when the inheritance_diagram directive is first encountered.\n \"\"\"\n node = inheritance_diagram()\n\n class_names = arguments\n\n # Create a graph starting with the list of classes\n graph = InheritanceGraph(class_names)\n\n # Create xref nodes for each target of the graph's image map and\n # add them to the doc tree so that Sphinx can resolve the\n # references to real URLs later. These nodes will eventually be\n # removed from the doctree after we're done with them.\n for name in graph.get_all_class_names():\n refnodes, x = xfileref_role(\n 'class', ':class:`%s`' % name, name, 0, state)\n node.extend(refnodes)\n # Store the graph object so we can use it to generate the\n # dot file later\n node['graph'] = graph\n # Store the original content for use as a hash\n node['parts'] = options.get('parts', 0)\n node['content'] = \" \".join(class_names)\n return [node]",
"def setup(app) -> Dict[str, Any]:\n \"\"\"\n Sets up Sphinx extension.\n \"\"\"\n app.setup_extension(\"sphinx.ext.graphviz\")\n app.add_node(\n inheritance_diagram,\n html=(html_visit_inheritance_diagram, None),\n latex=(latex_visit_inheritance_diagram, None),\n man=(skip, None),\n texinfo=(skip, None),\n text=(skip, None),\n )\n app.add_directive(\"inheritance-diagram\", InheritanceDiagram)\n return {\n \"version\": uqbar.__version__,\n \"parallel_read_safe\": True,\n \"parallel_write_safe\": True,\n }",
"def to_html(self, html_file, directed=False, weighted=False, vertex_ids=None,\n vertex_colors=None, vertex_labels=None, width=900, height=600,\n title=None, svg_border='1px solid black'):\n '''Write the graph as a d3 force-directed layout SVG to an HTML file.\n\n html_file : str|file-like, writeable destination for the output HTML.\n vertex_ids : unique IDs for each vertex, defaults to arange(num_vertices).\n vertex_colors : numeric color mapping for vertices, optional.\n vertex_labels : class labels for vertices, optional.\n title : str, written above the SVG as an h1, optional.\n svg_border : str, CSS for the 'border' attribute of the SVG element.\n '''\n if directed:\n raise NotImplementedError('Directed graphs are NYI for HTML output.')\n if (vertex_colors is not None) and (vertex_labels is not None):\n raise ValueError('Supply only one of vertex_colors, vertex_labels')\n\n # set up vertices\n if vertex_ids is None:\n vertex_ids = np.arange(self.num_vertices())\n elif len(vertex_ids) != self.num_vertices():\n raise ValueError('len(vertex_ids) != num vertices.')\n\n if vertex_labels is not None:\n vlabels, vcolors = np.unique(vertex_labels, return_inverse=True)\n if len(vcolors) != len(vertex_ids):\n raise ValueError('len(vertex_labels) != num vertices.')\n elif vertex_colors is not None:\n vcolors = np.array(vertex_colors, dtype=float, copy=False)\n if len(vcolors) != len(vertex_ids):\n raise ValueError('len(vertex_colors) != num vertices.')\n vcolors -= vcolors.min()\n vcolors /= vcolors.max()\n else:\n vcolors = []\n\n node_json = []\n for name, c in zip_longest(vertex_ids, vcolors):\n if c is not None:\n node_json.append('{\"id\": \"%s\", \"color\": %s}' % (name, c))\n else:\n node_json.append('{\"id\": \"%s\"}' % name)\n\n # set up edges\n pairs = self.pairs(directed=directed)\n if weighted:\n weights = self.edge_weights(directed=directed, copy=True).astype(float)\n weights -= weights.min()\n weights /= weights.max()\n else:\n weights = np.zeros(len(pairs)) + 0.5\n\n edge_json = []\n for (i,j), w in zip(pairs, weights):\n edge_json.append('{\"source\": \"%s\", \"target\": \"%s\", \"weight\": %f}' % (\n vertex_ids[i], vertex_ids[j], w))\n\n # emit self-contained HTML\n if not hasattr(html_file, 'write'):\n fh = open(html_file, 'w')\n else:\n fh = html_file\n print(u'<!DOCTYPE html><meta charset=\"utf-8\"><style>', file=fh)\n print(u'svg { border: %s; }' % svg_border, file=fh)\n if weighted:\n print(u'.links line { stroke-width: 2px; }', file=fh)\n else:\n print(u'.links line { stroke: #000; stroke-width: 2px; }', file=fh)\n print(u'.nodes circle { stroke: #fff; stroke-width: 1px; }', file=fh)\n print(u'</style>', file=fh)\n if title:\n print(u'<h1>%s</h1>' % title, file=fh)\n print(u'<svg width=\"%d\" height=\"%d\"></svg>' % (width, height), file=fh)\n print(u'<script src=\"https://d3js.org/d3.v4.min.js\"></script>', file=fh)\n print(u'<script>', LAYOUT_JS, sep=u'\\n', file=fh)\n if vertex_colors is not None:\n print(u'var vcolor=d3.scaleSequential(d3.interpolateViridis);', file=fh)\n elif vertex_labels is not None:\n scale = 'd3.schemeCategory%d' % (10 if len(vlabels) <= 10 else 20)\n print(u'var vcolor = d3.scaleOrdinal(%s);' % scale, file=fh)\n else:\n print(u'function vcolor(){ return \"#1776b6\"; }', file=fh)\n print(u'var sim=layout_graph({\"nodes\": [%s], \"links\": [%s]});</script>' % (\n ',\\n'.join(node_json), ',\\n'.join(edge_json)), file=fh)\n fh.flush()",
"def to_text_diagram(\n self,\n *,\n use_unicode_characters: bool = True,\n transpose: bool = False,\n precision: Optional[int] = 3,\n qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT) -> str:\n \"\"\"Returns text containing a diagram describing the circuit.\n\n Args:\n use_unicode_characters: Determines if unicode characters are\n allowed (as opposed to ascii-only diagrams).\n transpose: Arranges qubit wires vertically instead of horizontally.\n precision: Number of digits to display in text diagram\n qubit_order: Determines how qubits are ordered in the diagram.\n\n Returns:\n The text diagram.\n \"\"\"\n diagram = self.to_text_diagram_drawer(\n use_unicode_characters=use_unicode_characters,\n precision=precision,\n qubit_order=qubit_order,\n transpose=transpose)\n\n return diagram.render(\n crossing_char=(None\n if use_unicode_characters\n else ('-' if transpose else '|')),\n horizontal_spacing=1 if transpose else 3,\n use_unicode_characters=use_unicode_characters)",
"def build(self) -> str:\n \"\"\"Return HTML representation of this document.\"\"\"\n self._set_autoreload()\n return ''.join(child.html for child in self.childNodes)"
] |
[
0.8069048523902893,
0.7841325998306274,
0.7429071664810181,
0.7377827167510986,
0.7345815300941467,
0.7217815518379211,
0.70815509557724,
0.704833447933197,
0.7041767239570618,
0.7005437016487122,
0.6949968934059143,
0.6871033310890198
] |
Builds LaTeX output from an :py:class:`~uqbar.sphinx.inheritance.inheritance_diagram` node.
|
def latex_visit_inheritance_diagram(
self: NodeVisitor, node: inheritance_diagram
) -> None:
"""
Builds LaTeX output from an :py:class:`~uqbar.sphinx.inheritance.inheritance_diagram` node.
"""
inheritance_graph = node["graph"]
graphviz_graph = inheritance_graph.build_graph()
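    # Cap the drawing at six inches so the rendered PDF fits within LaTeX
    # page margins (the same "6.0,6.0" size the stock Sphinx extension uses).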
graphviz_graph.attributes["size"] = 6.0
dot_code = format(graphviz_graph, "graphviz")
render_dot_latex(self, node, dot_code, {}, "inheritance")
raise SkipNode
|
[
"def html_visit_inheritance_diagram(\n self: NodeVisitor, node: inheritance_diagram\n) -> None:\n \"\"\"\n Builds HTML output from an :py:class:`~uqbar.sphinx.inheritance.inheritance_diagram` node.\n \"\"\"\n inheritance_graph = node[\"graph\"]\n urls = build_urls(self, node)\n graphviz_graph = inheritance_graph.build_graph(urls)\n dot_code = format(graphviz_graph, \"graphviz\")\n # TODO: We can perform unflattening here\n aspect_ratio = inheritance_graph.aspect_ratio\n if aspect_ratio:\n aspect_ratio = math.ceil(math.sqrt(aspect_ratio[1] / aspect_ratio[0]))\n if aspect_ratio > 1:\n process = subprocess.Popen(\n [\"unflatten\", \"-l\", str(aspect_ratio), \"-c\", str(aspect_ratio), \"-f\"],\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n stdout, stderr = process.communicate(dot_code.encode())\n dot_code = stdout.decode()\n render_dot_html(self, node, dot_code, {}, \"inheritance\", \"inheritance\")\n raise SkipNode",
"def latex_visit_inheritance_diagram(self, node):\n # type: (nodes.NodeVisitor, inheritance_diagram) -> None\n \"\"\"\n Output the graph for LaTeX. This will insert a PDF.\n \"\"\"\n graph = node['graph']\n\n graph_hash = get_graph_hash(node)\n name = 'inheritance%s' % graph_hash\n\n dotcode = graph.generate_dot(name, env=self.builder.env,\n graph_attrs={'size': '\"6.0,6.0\"'})\n render_dot_latex(self, node, dotcode, {}, 'inheritance')\n raise nodes.SkipNode",
"def texinfo_visit_inheritance_diagram(self, node):\n # type: (nodes.NodeVisitor, inheritance_diagram) -> None\n \"\"\"\n Output the graph for Texinfo. This will insert a PNG.\n \"\"\"\n graph = node['graph']\n\n graph_hash = get_graph_hash(node)\n name = 'inheritance%s' % graph_hash\n\n dotcode = graph.generate_dot(name, env=self.builder.env,\n graph_attrs={'size': '\"6.0,6.0\"'})\n render_dot_texinfo(self, node, dotcode, {}, 'inheritance')\n raise nodes.SkipNode",
"def visit_inheritance_diagram(inner_func):\n \"\"\"\n This is just a wrapper around html/latex_output_graph to make it\n easier to handle errors and insert warnings.\n \"\"\"\n def visitor(self, node):\n try:\n content = inner_func(self, node)\n except DotException, e:\n # Insert the exception as a warning in the document\n warning = self.document.reporter.warning(str(e), line=node.line)\n warning.parent = node\n node.children = [warning]\n else:\n source = self.document.attributes['source']\n self.body.append(content)\n node.children = []\n return visitor",
"def html_visit_inheritance_diagram(self, node):\n # type: (nodes.NodeVisitor, inheritance_diagram) -> None\n \"\"\"\n Output the graph for HTML. This will insert a PNG with clickable\n image map.\n \"\"\"\n graph = node['graph']\n\n graph_hash = get_graph_hash(node)\n name = 'inheritance%s' % graph_hash\n\n # Create a mapping from fully-qualified class names to URLs.\n graphviz_output_format = self.builder.env.config.graphviz_output_format.upper()\n current_filename = self.builder.current_docname + self.builder.out_suffix\n urls = {}\n for child in node:\n if child.get('refuri') is not None:\n if graphviz_output_format == 'SVG':\n urls[child['reftitle']] = os.path.join(\"..\", child.get('refuri'))\n else:\n urls[child['reftitle']] = child.get('refuri')\n elif child.get('refid') is not None:\n if graphviz_output_format == 'SVG':\n urls[child['reftitle']] = os.path.join('..', current_filename + '#' + child.get('refid'))\n else:\n urls[child['reftitle']] = '#' + child.get('refid')\n\n dotcode = graph.generate_dot(name, urls, env=self.builder.env)\n render_dot_html(\n self, node, dotcode, {}, 'inheritance', 'inheritance',\n alt='Inheritance diagram of ' + node['content'],\n link_to_svg='<i class=\"fa fa-external-link\" aria-hidden=\"true\"></i>'' SVG')\n raise nodes.SkipNode",
"def latex_output_graph(self, node):\n \"\"\"\n Output the graph for LaTeX. This will insert a PDF.\n \"\"\"\n graph = node['graph']\n parts = node['parts']\n\n graph_hash = get_graph_hash(node)\n name = \"inheritance%s\" % graph_hash\n dest_path = os.path.abspath(os.path.join(setup.app.builder.outdir, '_images'))\n if not os.path.exists(dest_path):\n os.makedirs(dest_path)\n pdf_path = os.path.abspath(os.path.join(dest_path, name + \".pdf\"))\n\n graph.run_dot(['-Tpdf', '-o%s' % pdf_path],\n name, parts, graph_options={'size': '\"6.0,6.0\"'})\n return '\\n\\\\includegraphics{%s}\\n\\n' % pdf_path",
"def inheritance_diagram_directive(name, arguments, options, content, lineno,\n content_offset, block_text, state,\n state_machine):\n \"\"\"\n Run when the inheritance_diagram directive is first encountered.\n \"\"\"\n node = inheritance_diagram()\n\n class_names = arguments\n\n # Create a graph starting with the list of classes\n graph = InheritanceGraph(class_names)\n\n # Create xref nodes for each target of the graph's image map and\n # add them to the doc tree so that Sphinx can resolve the\n # references to real URLs later. These nodes will eventually be\n # removed from the doctree after we're done with them.\n for name in graph.get_all_class_names():\n refnodes, x = xfileref_role(\n 'class', ':class:`%s`' % name, name, 0, state)\n node.extend(refnodes)\n # Store the graph object so we can use it to generate the\n # dot file later\n node['graph'] = graph\n # Store the original content for use as a hash\n node['parts'] = options.get('parts', 0)\n node['content'] = \" \".join(class_names)\n return [node]",
"def setup(app) -> Dict[str, Any]:\n \"\"\"\n Sets up Sphinx extension.\n \"\"\"\n app.setup_extension(\"sphinx.ext.graphviz\")\n app.add_node(\n inheritance_diagram,\n html=(html_visit_inheritance_diagram, None),\n latex=(latex_visit_inheritance_diagram, None),\n man=(skip, None),\n texinfo=(skip, None),\n text=(skip, None),\n )\n app.add_directive(\"inheritance-diagram\", InheritanceDiagram)\n return {\n \"version\": uqbar.__version__,\n \"parallel_read_safe\": True,\n \"parallel_write_safe\": True,\n }",
"def to_text_diagram(\n self,\n *,\n use_unicode_characters: bool = True,\n transpose: bool = False,\n precision: Optional[int] = 3,\n qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT) -> str:\n \"\"\"Returns text containing a diagram describing the circuit.\n\n Args:\n use_unicode_characters: Determines if unicode characters are\n allowed (as opposed to ascii-only diagrams).\n transpose: Arranges qubit wires vertically instead of horizontally.\n precision: Number of digits to display in text diagram\n qubit_order: Determines how qubits are ordered in the diagram.\n\n Returns:\n The text diagram.\n \"\"\"\n diagram = self.to_text_diagram_drawer(\n use_unicode_characters=use_unicode_characters,\n precision=precision,\n qubit_order=qubit_order,\n transpose=transpose)\n\n return diagram.render(\n crossing_char=(None\n if use_unicode_characters\n else ('-' if transpose else '|')),\n horizontal_spacing=1 if transpose else 3,\n use_unicode_characters=use_unicode_characters)",
"def circuit_to_latex_using_qcircuit(\n circuit: circuits.Circuit,\n qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT) -> str:\n \"\"\"Returns a QCircuit-based latex diagram of the given circuit.\n\n Args:\n circuit: The circuit to represent in latex.\n qubit_order: Determines the order of qubit wires in the diagram.\n\n Returns:\n Latex code for the diagram.\n \"\"\"\n diagram = circuit.to_text_diagram_drawer(\n qubit_namer=qcircuit_qubit_namer,\n qubit_order=qubit_order,\n get_circuit_diagram_info=get_qcircuit_diagram_info)\n return _render(diagram)",
"def to_text_diagram_drawer(\n self,\n *,\n use_unicode_characters: bool = True,\n qubit_namer: Optional[Callable[[ops.Qid], str]] = None,\n transpose: bool = False,\n precision: Optional[int] = 3,\n qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT,\n get_circuit_diagram_info:\n Optional[Callable[[ops.Operation,\n protocols.CircuitDiagramInfoArgs],\n protocols.CircuitDiagramInfo]]=None\n ) -> TextDiagramDrawer:\n \"\"\"Returns a TextDiagramDrawer with the circuit drawn into it.\n\n Args:\n use_unicode_characters: Determines if unicode characters are\n allowed (as opposed to ascii-only diagrams).\n qubit_namer: Names qubits in diagram. Defaults to str.\n transpose: Arranges qubit wires vertically instead of horizontally.\n precision: Number of digits to use when representing numbers.\n qubit_order: Determines how qubits are ordered in the diagram.\n get_circuit_diagram_info: Gets circuit diagram info. Defaults to\n protocol with fallback.\n\n Returns:\n The TextDiagramDrawer instance.\n \"\"\"\n qubits = ops.QubitOrder.as_qubit_order(qubit_order).order_for(\n self.all_qubits())\n qubit_map = {qubits[i]: i for i in range(len(qubits))}\n\n if qubit_namer is None:\n qubit_namer = lambda q: str(q) + ('' if transpose else ': ')\n diagram = TextDiagramDrawer()\n for q, i in qubit_map.items():\n diagram.write(0, i, qubit_namer(q))\n\n moment_groups = [] # type: List[Tuple[int, int]]\n for moment in self._moments:\n _draw_moment_in_diagram(moment,\n use_unicode_characters,\n qubit_map,\n diagram,\n precision,\n moment_groups,\n get_circuit_diagram_info)\n\n w = diagram.width()\n for i in qubit_map.values():\n diagram.horizontal_line(i, 0, w)\n\n if moment_groups:\n _draw_moment_groups_in_diagram(moment_groups,\n use_unicode_characters,\n diagram)\n\n if transpose:\n diagram = diagram.transpose()\n\n return diagram",
"def html_output_graph(self, node):\n \"\"\"\n Output the graph for HTML. This will insert a PNG with clickable\n image map.\n \"\"\"\n graph = node['graph']\n parts = node['parts']\n\n graph_hash = get_graph_hash(node)\n name = \"inheritance%s\" % graph_hash\n path = '_images'\n dest_path = os.path.join(setup.app.builder.outdir, path)\n if not os.path.exists(dest_path):\n os.makedirs(dest_path)\n png_path = os.path.join(dest_path, name + \".png\")\n path = setup.app.builder.imgpath\n\n # Create a mapping from fully-qualified class names to URLs.\n urls = {}\n for child in node:\n if child.get('refuri') is not None:\n urls[child['reftitle']] = child.get('refuri')\n elif child.get('refid') is not None:\n urls[child['reftitle']] = '#' + child.get('refid')\n\n # These arguments to dot will save a PNG file to disk and write\n # an HTML image map to stdout.\n image_map = graph.run_dot(['-Tpng', '-o%s' % png_path, '-Tcmapx'],\n name, parts, urls)\n return ('<img src=\"%s/%s.png\" usemap=\"#%s\" class=\"inheritance\"/>%s' %\n (path, name, name, image_map))"
] |
[
0.8222299814224243,
0.8135387301445007,
0.7848462462425232,
0.7603569626808167,
0.7572928071022034,
0.7462043166160583,
0.7177525758743286,
0.707528293132782,
0.7025811672210693,
0.6949119567871094,
0.6806793808937073,
0.6785973906517029
] |
Sets up Sphinx extension.
|
def setup(app) -> Dict[str, Any]:
"""
Sets up Sphinx extension.
"""
app.setup_extension("sphinx.ext.graphviz")
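    # sphinx.ext.graphviz supplies the render_dot_html/render_dot_latex
    # helpers that the visitor functions registered below depend on.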
app.add_node(
inheritance_diagram,
html=(html_visit_inheritance_diagram, None),
latex=(latex_visit_inheritance_diagram, None),
man=(skip, None),
texinfo=(skip, None),
text=(skip, None),
)
app.add_directive("inheritance-diagram", InheritanceDiagram)
return {
"version": uqbar.__version__,
"parallel_read_safe": True,
"parallel_write_safe": True,
}
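# A minimal usage sketch, assuming the module path ``uqbar.sphinx.inheritance``
# implied by the docstrings above (the diagram target below is a placeholder):
#
#     # conf.py
#     extensions = ["uqbar.sphinx.inheritance"]
#
#     # any reStructuredText file
#     .. inheritance-diagram:: some.module.SomeClass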
|
[
"def setup(app):\n \"Setup function for Sphinx Extension\"\n app.add_config_value(\"sphinx_to_github\", True, '')\n app.add_config_value(\"sphinx_to_github_verbose\", True, '')\n app.connect(\"build-finished\", sphinx_extension)",
"def setup(app):\n \"\"\"Initialize Sphinx extension.\"\"\"\n import sphinx\n from .parser import CommonMarkParser\n\n if sphinx.version_info >= (1, 8):\n app.add_source_suffix('.md', 'markdown')\n app.add_source_parser(CommonMarkParser)\n elif sphinx.version_info >= (1, 4):\n app.add_source_parser('.md', CommonMarkParser)\n\n return {'version': __version__, 'parallel_read_safe': True}",
"def setup(sphinx):\n \"\"\"Setup Sphinx object.\"\"\"\n from flask import has_app_context\n from invenio_base.factory import create_app\n PACKAGES = ['invenio_base', 'invenio.modules.accounts',\n 'invenio.modules.records', 'invenio_knowledge']\n\n if not has_app_context():\n app = create_app(PACKAGES=PACKAGES)\n ctx = app.test_request_context('/')\n ctx.push()",
"def setup(app):\n ''' Required Sphinx extension setup function. '''\n app.add_autodocumenter(ColorDocumenter)\n app.add_autodocumenter(EnumDocumenter)\n app.add_autodocumenter(PropDocumenter)\n app.add_autodocumenter(ModelDocumenter)",
"def setup(app):\n \"\"\"Initialize Sphinx extension.\"\"\"\n app.setup_extension('nbsphinx')\n app.add_source_suffix('.nblink', 'linked_jupyter_notebook')\n app.add_source_parser(LinkedNotebookParser)\n app.add_config_value('nbsphinx_link_target_root', None, rebuild='env')\n\n return {'version': __version__, 'parallel_read_safe': True}",
"def setup(app):\n ''' Required Sphinx extension setup function. '''\n app.connect('html-page-context', html_page_context)\n app.connect('build-finished', build_finished)\n app.sitemap_links = set()",
"def sphinx_extension(app, exception):\n \"Wrapped up as a Sphinx Extension\"\n if not app.builder.name in (\"html\", \"dirhtml\"):\n return\n\n if not app.config.sphinx_to_github:\n if app.config.sphinx_to_github_verbose:\n print(\"Sphinx-to-github: Disabled, doing nothing.\")\n return\n\n if exception:\n if app.config.sphinx_to_github_verbose:\n print(\"Sphinx-to-github: Exception raised in main build, doing nothing.\")\n return\n\n dir_helper = DirHelper(\n os.path.isdir,\n os.listdir,\n os.walk,\n shutil.rmtree\n )\n\n file_helper = FileSystemHelper(\n open,\n os.path.join,\n shutil.move,\n os.path.exists\n )\n\n operations_factory = OperationsFactory()\n handler_factory = HandlerFactory()\n\n layout_factory = LayoutFactory(\n operations_factory,\n handler_factory,\n file_helper,\n dir_helper,\n app.config.sphinx_to_github_verbose,\n sys.stdout,\n force=True\n )\n\n layout = layout_factory.create_layout(app.outdir)\n layout.process()",
"def setup(app):\n \"\"\"Sphinx extension entry point\"\"\"\n app.add_config_value('jsdoc_source_root', '..', 'env')\n app.add_config_value('jsdoc_output_root', 'javascript', 'env')\n app.add_config_value('jsdoc_exclude', [], 'env')\n app.connect('builder-inited', generate_docs)",
"def setup(app):\n \"\"\"Register directives.\n\n When sphinx loads the extension (= imports the extension module) it\n also executes the setup() function. Setup is the way extension\n informs Sphinx about everything that the extension enables: which\n config_values are introduced, which custom nodes/directives/roles\n and which events are defined in extension.\n\n In this case, only one new directive is created. All used nodes are\n constructed from already existing nodes in docutils.nodes package.\n\n \"\"\"\n app.add_config_value('autoprocess_process_dir', '', 'env')\n app.add_config_value('autoprocess_source_base_url', '', 'env')\n app.add_config_value('autoprocess_definitions_uri', '', 'env')\n\n app.add_directive('autoprocess', AutoProcessDirective)\n app.add_directive('autoprocesscategory', AutoProcessCategoryDirective)\n app.add_directive('autoprocesstype', AutoProcessTypesDirective)\n\n # The setup() function can return a dictionary. This is treated by\n # Sphinx as metadata of the extension:\n return {'version': '0.2'}",
"def setup(app):\n \"\"\"Allow this package to be used as Sphinx extension.\n This is also called from the top-level ``__init__.py``.\n\n :type app: sphinx.application.Sphinx\n \"\"\"\n from .patches import patch_django_for_autodoc\n\n # When running, make sure Django doesn't execute querysets\n patch_django_for_autodoc()\n\n # Generate docstrings for Django model fields\n # Register the docstring processor with sphinx\n app.connect('autodoc-process-docstring', improve_model_docstring)\n\n # influence skip rules\n app.connect(\"autodoc-skip-member\", autodoc_skip)",
"def setup(app):\n \"\"\" Set up the plugin \"\"\"\n app.add_config_value('sphinx_tabs_nowarn', False, '')\n app.add_config_value('sphinx_tabs_valid_builders', [], '')\n app.add_directive('tabs', TabsDirective)\n app.add_directive('tab', TabDirective)\n app.add_directive('group-tab', GroupTabDirective)\n app.add_directive('code-tab', CodeTabDirective)\n for path in ['sphinx_tabs/' + f for f in FILES]:\n if path.endswith('.css'):\n if 'add_css_file' in dir(app):\n app.add_css_file(path)\n else:\n app.add_stylesheet(path)\n if path.endswith('.js'):\n if 'add_script_file' in dir(app):\n app.add_script_file(path)\n else:\n app.add_javascript(path)\n app.connect('html-page-context', update_context)\n app.connect('build-finished', copy_assets)",
"def setup(sphinx):\n \"\"\"\n This will be called by sphinx.\n \"\"\"\n create_auto_documentation()\n\n # add the py3status lexer (for code blocks)\n from sphinx.highlighting import lexers\n lexers['py3status'] = Py3statusLexer()\n\n # enable screenshot directive for dynamic screenshots\n sphinx.add_directive('screenshot', ScreenshotDirective)"
] |
[
0.8509041666984558,
0.837480366230011,
0.8208704590797424,
0.8176983594894409,
0.8152626156806946,
0.8129501938819885,
0.8111793398857117,
0.8101816773414612,
0.8052058219909668,
0.8038188815116882,
0.8027165532112122,
0.8027135729789734
] |
Run the daemon and all its services
|
def run(configuration: str, level: str, target: str, short_format: bool):
"""Run the daemon and all its services"""
initialise_logging(level=level, target=target, short_format=short_format)
logger = logging.getLogger(__package__)
logger.info('COBalD %s', cobald.__about__.__version__)
logger.info(cobald.__about__.__url__)
logger.info('%s %s (%s)', platform.python_implementation(), platform.python_version(), sys.executable)
logger.debug(cobald.__file__)
logger.info('Using configuration %s', configuration)
with load(configuration):
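        # ``load`` is presumably a context manager that materialises the
        # configured pipeline and tears it down again on exit;
        # ``runtime.accept()`` then blocks, driving the daemon's services
        # until shutdown.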
logger.info('Starting daemon services...')
runtime.accept()
|
[
"def run(self):\n '''\n Run the master service!\n '''\n self.utils = salt.loader.utils(self.opts, proxy=self.proxy)\n if salt.utils.platform.is_windows():\n # Calculate function references since they can't be pickled.\n if self.opts['__role'] == 'master':\n self.runners = salt.loader.runner(self.opts, utils=self.utils)\n else:\n self.runners = []\n self.funcs = salt.loader.minion_mods(self.opts, utils=self.utils, proxy=self.proxy)\n\n self.engine = salt.loader.engines(self.opts,\n self.funcs,\n self.runners,\n self.utils,\n proxy=self.proxy)\n kwargs = self.config or {}\n try:\n self.engine[self.fun](**kwargs)\n except Exception as exc:\n log.critical(\n 'Engine \\'%s\\' could not be started!',\n self.fun.split('.')[0], exc_info=True\n )",
"def run(ctx, service, args, show_args, daemon, editable, integration):\n \"\"\"Load and run a specific service.\"\"\"\n home = ctx.obj[\"HOME\"]\n service_path = plugin_utils.get_plugin_path(home, SERVICES, service, editable)\n service_log_path = os.path.join(service_path, LOGS_DIR)\n\n logger.debug(\"running command %s (%s)\", ctx.command.name, ctx.params,\n extra={\"command\": ctx.command.name, \"params\": ctx.params})\n\n logger.debug(\"loading {} ({})\".format(service, service_path))\n service = register_service(service_path)\n\n if show_args:\n return plugin_utils.print_plugin_args(service_path)\n\n # get our service class instance\n service_module = get_service_module(service_path)\n service_args = plugin_utils.parse_plugin_args(args, config_utils.get_config_parameters(service_path))\n service_obj = service_module.service_class(alert_types=service.alert_types, service_args=service_args)\n\n if not os.path.exists(service_log_path):\n os.mkdir(service_log_path)\n\n # prepare runner\n if daemon:\n runner = myRunner(service_obj,\n pidfile=service_path + \".pid\",\n stdout=open(os.path.join(service_log_path, STDOUTLOG), \"ab\"),\n stderr=open(os.path.join(service_log_path, STDERRLOG), \"ab\"))\n\n files_preserve = []\n for handler in logging.getLogger().handlers:\n if hasattr(handler, \"stream\"):\n if hasattr(handler.stream, \"fileno\"):\n files_preserve.append(handler.stream.fileno())\n if hasattr(handler, \"socket\"):\n files_preserve.append(handler.socket.fileno())\n\n runner.daemon_context.files_preserve = files_preserve\n runner.daemon_context.signal_map.update({\n signal.SIGTERM: service_obj._on_server_shutdown,\n signal.SIGINT: service_obj._on_server_shutdown,\n })\n logger.debug(\"daemon_context\", extra={\"daemon_context\": vars(runner.daemon_context)})\n\n for integration_name in integration:\n integration_path = plugin_utils.get_plugin_path(home, INTEGRATIONS, integration_name, editable)\n configure_integration(integration_path)\n\n click.secho(\"[+] Launching {} {}\".format(service.name, \"in daemon mode\" if daemon else \"\"))\n try:\n # save service_args for external reference (see test)\n with open(os.path.join(service_path, ARGS_JSON), \"w\") as f:\n f.write(json.dumps(service_args))\n runner._start() if daemon else service_obj.run()\n except KeyboardInterrupt:\n service_obj._on_server_shutdown()\n\n click.secho(\"[*] {} has stopped\".format(service.name))",
"def run(cls):\n \"\"\" Fires up the event loop and starts serving attached services\n \"\"\"\n if cls._tcp_service or cls._http_service or cls._http_views or cls._tcp_views:\n cls._set_host_id()\n cls._setup_logging()\n cls._set_process_name()\n cls._set_signal_handlers()\n cls._start_pubsub()\n cls._start_server()\n else:\n cls._logger.error('No services to host')",
"def run(cls):\n \"\"\" Fires up the event loop and starts serving attached services\n \"\"\"\n if not all(v is None for v in cls._services.values()):\n cls._set_host_id()\n cls._setup_logging()\n cls._set_process_name()\n cls._set_signal_handlers()\n cls._start_server()\n else:\n cls._logger.error('No services to host')",
"public function run()\n {\n\n try {\n // register shutdown handler\n register_shutdown_function($this->getDefaultShutdownMethod());\n\n // bootstrap the daemon\n $this->bootstrap();\n\n // synchronize the application instance and register the class loaders\n $application = $this->application;\n\n // mark the daemon as successfully shutdown\n $this->synchronized(function ($self) {\n $self->state = EnumState::get(EnumState::RUNNING);\n }, $this);\n\n // create local instances of the storages\n $messages = $this->messages;\n $priorityKey = $this->priorityKey;\n $jobsToExecute = $this->jobsToExecute;\n\n // load the maximum number of jobs to process in parallel\n $maximumJobsToProcess = $this->getManagerSettings()->getMaximumJobsToProcess();\n\n // initialize the arrays for the message states and the jobs executing\n $jobsExecuting = array();\n $messagesFailed = array();\n $retryCounter = array();\n $callbacksToExecute = array();\n\n // keep the daemon running\n while ($this->keepRunning()) {\n // iterate over all job wrappers\n foreach ($jobsToExecute as $jobWrapper) {\n try {\n // load the message\n $message = $messages[$jobWrapper->jobId];\n\n // check if we've a message found\n if ($message instanceof MessageInterface) {\n // check the message state\n switch ($message->getState()->getState()) {\n\n // message is active and ready to be processed\n case StateActive::KEY:\n\n // set the new state now\n $message->setState(StateToProcess::get());\n\n break;\n\n // message is paused or in progress\n case StatePaused::KEY:\n\n // invoke the callbacks for the state\n if ($message->hasCallbacks($message->getState())) {\n $callbacksToExecute[] = new Callback(clone $message, $application);\n }\n\n // log a message that we've a message that has been paused\n \\info(sprintf('Message %s has been paused', $message->getMessageId()));\n\n break;\n\n case StateInProgress::KEY:\n\n // query whether or not the job is still available\n if (isset($jobsExecuting[$message->getMessageId()])) {\n // make sure the job has been finished\n if ($jobsExecuting[$message->getMessageId()] instanceof JobInterface && $jobsExecuting[$message->getMessageId()]->isFinished()) {\n // log a message that the job is still in progress\n \\info(sprintf('Job %s has been finished', $message->getMessageId()));\n\n // set the new state now\n $message->setState($jobsExecuting[$message->getMessageId()]->getMessage()->getState());\n\n } else {\n // log a message that the job is still in progress\n \\info(sprintf('Job %s is still in progress', $message->getMessageId()));\n }\n\n } else {\n // log a message that the job is still in progress\n \\critical(sprintf('Message %s has been deleted, but should still be there', $message->getMessageId()));\n }\n\n break;\n\n // message failed\n case StateFailed::KEY:\n\n // remove the old job from the queue\n unset($jobsExecuting[$message->getMessageId()]);\n\n // query whether or not the message has to be processed again\n if (isset($messagesFailed[$message->getMessageId()]) && $message->getRetryCounter() > 0) {\n // query whether or not the message has to be processed now\n if ($messagesFailed[$message->getMessageId()] < time() && $retryCounter[$message->getMessageId()] < $message->getRetryCounter()) {\n // retry to process the message\n $message->setState(StateToProcess::get());\n\n // update the execution time and raise the retry counter\n $messagesFailed[$message->getMessageId()] = time() + $message->getRetryTimeout($retryCounter[$message->getMessageId()]);\n 
$retryCounter[$message->getMessageId()]++;\n\n } elseif ($messagesFailed[$message->getMessageId()] < time() && $retryCounter[$message->getMessageId()] === $message->getRetryCounter()) {\n // log a message that we've a message with a unknown state\n \\critical(sprintf('Message %s finally failed after %d retries', $message->getMessageId(), $retryCounter[$message->getMessageId()]));\n\n // stop executing the job because we've reached the maximum number of retries\n unset($jobsToExecute[$messageId = $message->getMessageId()]);\n unset($messagesFailed[$messageId]);\n unset($retryCounter[$messageId]);\n\n // invoke the callbacks for the state\n if ($message->hasCallbacks($message->getState())) {\n $callbacksToExecute[] = new Callback(clone $message, $application);\n }\n\n } else {\n // wait for the next try here\n }\n\n } elseif (!isset($messagesFailed[$message->getMessageId()]) && $message->getRetryCounter() > 0) {\n // first retry, so we've to initialize the next execution time and the retry counter\n $retryCounter[$message->getMessageId()] = 0;\n $messagesFailed[$message->getMessageId()] = time() + $message->getRetryTimeout($retryCounter[$message->getMessageId()]);\n\n } else {\n // log a message that we've a message with a unknown state\n \\critical(sprintf('Message %s failed with NO retries', $message->getMessageId()));\n\n // stop executing the job because we've reached the maximum number of retries\n unset($jobsToExecute[$messageId = $message->getMessageId()]);\n\n // invoke the callbacks for the state\n if ($message->hasCallbacks($message->getState())) {\n $callbacksToExecute[] = new Callback(clone $message, $application);\n }\n }\n\n break;\n\n case StateToProcess::KEY:\n\n // count messages in queue\n $inQueue = sizeof($jobsExecuting);\n\n // we only process 200 jobs in parallel\n if ($inQueue < $maximumJobsToProcess) {\n // set the new message state now\n $message->setState(StateInProgress::get());\n\n // start the job and add it to the internal array\n $jobsExecuting[$message->getMessageId()] = new Job(clone $message, $application);\n\n } else {\n // log a message that queue is actually full\n \\info(sprintf('Job queue full - (%d jobs/%d msg wait)', $inQueue, sizeof($messages)));\n\n // if the job queue is full, restart iteration to remove processed jobs from queue first\n continue 2;\n }\n\n break;\n\n // message processing has been successfully processed\n case StateProcessed::KEY:\n\n // invoke the callbacks for the state\n if ($message->hasCallbacks($message->getState())) {\n $callbacksToExecute[] = new Callback(clone $message, $application);\n }\n\n // remove the job from the queue with jobs that has to be executed\n unset($jobsToExecute[$messageId = $message->getMessageId()]);\n\n // also remove the job + the message from the queue\n unset($jobsExecuting[$messageId]);\n unset($messages[$messageId]);\n\n break;\n\n // message is in an unknown state -> this is weired and should never happen!\n case StateUnknown::KEY:\n\n // log a message that we've a message with a unknown state\n \\critical(sprintf('Message %s has state %s', $message->getMessageId(), $message->getState()));\n\n // set new state now\n $message->setState(StateFailed::get());\n\n break;\n\n // we don't know the message state -> this is weired and should never happen!\n default:\n\n // set the failed message state\n $message->setState(StateFailed::get());\n\n // log a message that we've a message with an invalid state\n \\critical(sprintf('Message %s has an invalid state', $message->getMessageId()));\n\n break;\n 
}\n }\n\n // add the message back to the stack (because we've a copy here)\n if (isset($messages[$message->getMessageId()])) {\n $messages[$jobWrapper->jobId] = $message;\n }\n\n // catch all exceptions\n } catch (\\Exception $e) {\n $application->getInitialContext()->getSystemLogger()->critical($e->__toString());\n }\n\n // reduce CPU load depending on queue priority\n $this->iterate($this->getQueueTimeout());\n }\n\n // reduce CPU load after each iteration\n $this->iterate($this->getDefaultTimeout());\n\n // profile the size of the session pool\n if ($this->profileLogger) {\n $this->profileLogger->debug(\n sprintf(\n 'Processed queue worker with priority %s, size of queue size is: %d',\n $priorityKey,\n sizeof($jobsToExecute)\n )\n );\n }\n }\n\n // clean up the instances and free memory\n $this->cleanUp();\n\n // mark the daemon as successfully shutdown\n $this->synchronized(function ($self) {\n $self->state = EnumState::get(EnumState::SHUTDOWN);\n }, $this);\n\n } catch (\\Exception $e) {\n \\error($e->__toString());\n }\n }",
"def start(self):\n \"\"\"\n Start the daemon\n \"\"\"\n # Check for a pidfile to see if the daemon already runs\n pid = None\n if os.path.exists(self.pidfile):\n try:\n pf = file(self.pidfile,'r')\n pid = int(pf.read().strip())\n pf.close()\n except IOError:\n pid = None\n\n if pid:\n message = \"pidfile %s already exist. Daemon already running?\\n\"\n sys.stderr.write(message % self.pidfile)\n sys.exit(1)\n\n # Start the daemon\n self.daemonize()\n self.run()",
"def run(self):\n \"\"\"Main service entrypoint. Called via Thread.start() via PantsDaemon.run().\"\"\"\n while not self._state.is_terminating:\n self._maybe_garbage_collect()\n self._maybe_extend_lease()\n # Waiting with a timeout in maybe_pause has the effect of waiting until:\n # 1) we are paused and then resumed\n # 2) we are terminated (which will break the loop)\n # 3) the timeout is reached, which will cause us to wake up and check gc/leases\n self._state.maybe_pause(timeout=10)",
"async function () {\n const oThis = this\n , servicesList = [];\n\n var cmd = \"ps aux | grep dynamo | grep -v grep | tr -s ' ' | cut -d ' ' -f2\";\n let processId = shell.exec(cmd).stdout;\n\n if (processId == '') {\n // Start Dynamo DB in openST env\n let startDynamo = new StartDynamo();\n await startDynamo.perform();\n }\n\n // Start Value Chain\n logger.step(\"** Start value chain\");\n var cmd = \"sh \" + setupHelper.binFolderAbsolutePath() + \"/run-value.sh\";\n servicesList.push(cmd);\n oThis._asyncCommand(cmd);\n\n // Start Utility Chain\n logger.step(\"** Start utility chain\");\n var cmd = \"sh \" + setupHelper.binFolderAbsolutePath() + \"/run-utility.sh\";\n servicesList.push(cmd);\n oThis._asyncCommand(cmd);\n\n // Wait for 5 seconds for geth to come up\n const sleep = function(ms) {\n return new Promise(function(resolve) {setTimeout(resolve, ms)});\n };\n await sleep(5000);\n\n // Check geths are up and running\n logger.step(\"** Check chains are up and responding\");\n const statusObj = new platformStatus()\n , servicesResponse = await statusObj.perform();\n if (servicesResponse.isFailure()) {\n logger.error(\"* Error \", servicesResponse);\n process.exit(1);\n } else {\n logger.info(\"* Value Chain:\", servicesResponse.data.chain.value, \"Utility Chain:\", servicesResponse.data.chain.utility);\n }\n\n // Start intercom processes in openST env\n logger.step(\"** Start stake and mint inter-communication process\");\n var cmd = \"sh \" + setupHelper.binFolderAbsolutePath() + \"/run-stake_and_mint.sh\";\n servicesList.push(cmd);\n oThis._asyncCommand(cmd);\n\n logger.step(\"** Start redeem and unstake inter-communication process\");\n var cmd = \"sh \" + setupHelper.binFolderAbsolutePath() + \"/run-redeem_and_unstake.sh\";\n servicesList.push(cmd);\n oThis._asyncCommand(cmd);\n\n logger.step(\"** Start register branded token inter-communication process\");\n var cmd = \"sh \" + setupHelper.binFolderAbsolutePath() + \"/run-register_branded_token.sh\";\n servicesList.push(cmd);\n oThis._asyncCommand(cmd);\n\n // Start intercom processes in OST env\n logger.step(\"** Start stake and mint processor\");\n var cmd = \"sh \" + setupHelper.binFolderAbsolutePath() + \"/run-stake_and_mint_processor.sh\";\n servicesList.push(cmd);\n oThis._asyncCommand(cmd);\n\n logger.step(\"** Start redeem and unstake processor\");\n var cmd = \"sh \" + setupHelper.binFolderAbsolutePath() + \"/run-redeem_and_unstake_processor.sh\";\n servicesList.push(cmd);\n oThis._asyncCommand(cmd);\n\n logger.win(\"\\n** Congratulation! All services are up and running. \\n\" +\n \"NOTE: We will keep monitoring the services, and notify you if any service stops.\");\n\n // Check all services are running\n oThis._uptime(servicesList);\n }",
"public function run()\n {\n\n try {\n // register shutdown handler\n register_shutdown_function($this->getDefaultShutdownMethod());\n\n // bootstrap the daemon\n $this->bootstrap();\n\n // invoke the execute method\n try {\n $this->execute();\n } catch (\\Exception $e) {\n $this->log(LogLevel::ERROR, $e->__toString());\n }\n\n // clean up the instances and free memory\n $this->cleanUp();\n\n } catch (\\Exception $e) {\n $this->log(LogLevel::ERROR, $e->__toString());\n }\n }",
"def start(self):\n \"\"\"\n Start the daemon\n \"\"\"\n # Check for a pidfile to see if the daemon already runs\n try:\n pf = file(self.pidfile, 'r')\n pid = int(pf.read().strip())\n pf.close()\n os.kill(pid, 0)\n except IOError:\n pid = None\n except OSError:\n pid = None\n\n if pid:\n message = \"pidfile %s already exist. Daemon already running?\\n\"\n sys.stderr.write(message % self.pidfile)\n sys.exit(1)\n\n # Start the daemon\n self._daemonize()",
"def start(self):\n \"\"\"Start the daemon.\"\"\"\n if self.worker is None:\n raise DaemonError('No worker is defined for daemon')\n\n if os.environ.get('DAEMONOCLE_RELOAD'):\n # If this is actually a reload, we need to wait for the\n # existing daemon to exit first\n self._emit_message('Reloading {prog} ... '.format(prog=self.prog))\n # Orhpan this process so the parent can exit\n self._orphan_this_process(wait_for_parent=True)\n pid = self._read_pidfile()\n if (pid is not None and\n self._pid_is_alive(pid, timeout=self.stop_timeout)):\n # The process didn't exit for some reason\n self._emit_failed()\n message = ('Previous process (PID {pid}) did NOT '\n 'exit during reload').format(pid=pid)\n self._emit_error(message)\n self._shutdown(message, 1)\n\n # Check to see if the daemon is already running\n pid = self._read_pidfile()\n if pid is not None:\n # I don't think this should not be a fatal error\n self._emit_warning('{prog} already running with PID {pid}'.format(\n prog=self.prog, pid=pid))\n return\n\n if not self.detach and not os.environ.get('DAEMONOCLE_RELOAD'):\n # This keeps the original parent process open so that we\n # maintain control of the tty\n self._fork_and_supervise_child()\n\n if not os.environ.get('DAEMONOCLE_RELOAD'):\n # A custom message is printed for reloading\n self._emit_message('Starting {prog} ... '.format(prog=self.prog))\n\n self._setup_environment()\n\n if self.detach:\n self._detach_process()\n else:\n self._emit_ok()\n\n if self.pidfile is not None:\n self._write_pidfile()\n\n # Setup signal handlers\n signal.signal(signal.SIGINT, self._handle_terminate)\n signal.signal(signal.SIGQUIT, self._handle_terminate)\n signal.signal(signal.SIGTERM, self._handle_terminate)\n\n self._run()",
"def _run(self):\n \"\"\"Discovers the health of a service.\n\n Runs until it is being killed from main program and is responsible to\n put an item into the queue based on the status of the health check.\n The status of service is consider UP after a number of consecutive\n successful health checks, in that case it asks main program to add the\n IP prefix associated with service to BIRD configuration, otherwise ask\n for a removal.\n Rise and fail options prevent unnecessary configuration changes when\n check is flapping.\n \"\"\"\n up_cnt = 0\n down_cnt = 0\n # The current established state of the service check, it can be\n # either UP or DOWN but only after a number of consecutive successful\n # or unsuccessful health checks.\n check_state = 'Unknown'\n\n for key, value in self.config.items():\n self.log.debug(\"%s=%s:%s\", key, value, type(value))\n\n # Service check will abort if it is disabled.\n if self._check_disabled():\n return\n\n if self.splay_startup is not None:\n sleep_time = float(\"%.3f\" % random.uniform(0, self.splay_startup))\n self.log.info(\"delaying startup for %ssecs\", sleep_time)\n time.sleep(sleep_time)\n\n interval = self.config['check_interval']\n start_offset = time.time() % interval\n # Go in a loop until we are told to stop\n while True:\n timestamp = time.time()\n if not self._ip_assigned():\n up_cnt = 0\n self.extra['status'] = 'down'\n self.log.warning(\"status DOWN because %s isn't assigned to \"\n \"loopback interface.\",\n self.ip_with_prefixlen,\n extra=self.extra)\n if check_state != 'DOWN':\n check_state = 'DOWN'\n self.log.info(\"adding %s in the queue\",\n self.ip_with_prefixlen,\n extra=self.extra)\n self.action.put(self.del_operation)\n elif self._run_check():\n if up_cnt == (self.config['check_rise'] - 1):\n self.extra['status'] = 'up'\n self.log.info(\"status UP\", extra=self.extra)\n # Service exceeded all consecutive checks. Set its state\n # accordingly and put an item in queue. 
But do it only if\n # previous state was different, to prevent unnecessary bird\n # reloads when a service flaps between states.\n if check_state != 'UP':\n check_state = 'UP'\n self.log.info(\"adding %s in the queue\",\n self.ip_with_prefixlen,\n extra=self.extra)\n self.action.put(self.add_operation)\n elif up_cnt < self.config['check_rise']:\n up_cnt += 1\n self.log.info(\"going up %s\", up_cnt, extra=self.extra)\n else:\n self.log.error(\"up_cnt is higher %s, it's a BUG!\",\n up_cnt,\n extra=self.extra)\n down_cnt = 0\n else:\n if down_cnt == (self.config['check_fail'] - 1):\n self.extra['status'] = 'down'\n self.log.info(\"status DOWN\", extra=self.extra)\n # Service exceeded all consecutive checks.\n # Set its state accordingly and put an item in queue.\n # But do it only if previous state was different, to\n # prevent unnecessary bird reloads when a service flaps\n # between states\n if check_state != 'DOWN':\n check_state = 'DOWN'\n self.log.info(\"adding %s in the queue\",\n self.ip_with_prefixlen,\n extra=self.extra)\n self.action.put(self.del_operation)\n elif down_cnt < self.config['check_fail']:\n down_cnt += 1\n self.log.info(\"going down %s\", down_cnt, extra=self.extra)\n else:\n self.log.error(\"up_cnt is higher %s, it's a BUG!\",\n up_cnt,\n extra=self.extra)\n up_cnt = 0\n\n self.log.info(\"wall clock time %.3fms\",\n (time.time() - timestamp) * 1000,\n extra=self.extra)\n\n # calculate sleep time\n sleep = start_offset - time.time() % interval\n if sleep < 0:\n sleep += interval\n self.log.debug(\"sleeping for %.3fsecs\", sleep, extra=self.extra)\n time.sleep(sleep)"
] |
[
0.753221333026886,
0.7473872303962708,
0.7463153600692749,
0.7399263978004456,
0.7397425174713135,
0.7396121025085449,
0.7381593585014343,
0.728173553943634,
0.7244118452072144,
0.7239984273910522,
0.7239532470703125,
0.7209172248840332
] |
Run the daemon from a command line interface
|
def cli_run():
"""Run the daemon from a command line interface"""
options = CLI.parse_args()
run(options.CONFIGURATION, options.log_level, options.log_target, options.log_journal)
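    # The positional arguments map onto run(configuration, level, target,
    # short_format), so options.log_journal is consumed as the short_format
    # flag as written.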
|
[
"def _run():\n \"\"\"Entry point for package and cli uses\"\"\"\n\n args = parse_args()\n\n # parse custom parameters\n custom_meta = None\n if args.custom_meta:\n print \"Adding custom parameters:\"\n custom_meta = {}\n try:\n for item in args.custom_meta.split(','):\n key, value = item.split(':')\n custom_meta[key] = value\n print 'key: %s, value: %s' % (key, value)\n except Exception as e:\n sys.stderr.write(\"ERROR: Can not parse custom meta tags! %s\\n\" % (str(e)))\n\n # we need to store some persistent info, so check if a config file\n # exists (default location is ~/.centinel/config.ini). If the file\n # does not exist, then create a new one at run time\n configuration = centinel.config.Configuration()\n if args.config:\n configuration.parse_config(args.config)\n else:\n # if the file does not exist, then the default config file\n # will be used\n new_configuration = None\n if os.path.exists(DEFAULT_CONFIG_FILE):\n configuration.parse_config(DEFAULT_CONFIG_FILE)\n else:\n print 'Configuration file does not exist. Creating a new one.'\n new_configuration = centinel.config.Configuration()\n\n if not ('version' in configuration.params and\n configuration.params['version']['version'] == centinel.__version__):\n if not args.update_config:\n print ('WARNING: configuration file is from '\n 'a different version (%s) of '\n 'Centinel. Run with --update-config to update '\n 'it.' % (configuration.params['version']['version']))\n else:\n new_configuration = centinel.config.Configuration()\n backup_path = DEFAULT_CONFIG_FILE + \".old\"\n new_configuration.update(configuration, backup_path)\n\n if new_configuration is not None:\n configuration = new_configuration\n configuration.write_out_config(DEFAULT_CONFIG_FILE)\n print 'New configuration written to %s' % (DEFAULT_CONFIG_FILE)\n if args.update_config:\n sys.exit(0)\n\n if args.verbose:\n if 'log' not in configuration.params:\n configuration.params['log'] = dict()\n configuration.params['log']['log_level'] = logging.DEBUG\n\n # add custom meta values from CLI\n if custom_meta is not None:\n if 'custom_meta' in configuration.params:\n configuration.params['custom_meta'].update(custom_meta)\n else:\n configuration.params['custom_meta'] = custom_meta\n\n centinel.conf = configuration.params\n client = centinel.client.Client(configuration.params)\n client.setup_logging()\n # disable cert verification if the flag is set\n if args.no_verify:\n configuration.params['server']['verify'] = False\n\n user = centinel.backend.User(configuration.params)\n # Note: because we have mutually exclusive arguments, we don't\n # have to worry about multiple arguments being called\n if args.sync:\n centinel.backend.sync(configuration.params)\n elif args.consent:\n user.informed_consent()\n elif args.daemonize:\n # if we don't have a valid binary location, then exit\n if not os.path.exists(args.binary):\n print \"Error: no binary found to daemonize\"\n exit(1)\n centinel.daemonize.daemonize(args.auto_update, args.binary,\n args.user)\n else:\n client.run()",
"def run(interface, config, logfile, ros_args):\n \"\"\"\n Start a pyros node.\n :param interface: the interface implementation (ROS, Mock, ZMP, etc.)\n :param config: the config file path, absolute, or relative to working directory\n :param logfile: the logfile path, absolute, or relative to working directory\n :param ros_args: the ros arguments (useful to absorb additional args when launched with roslaunch)\n \"\"\"\n logging.info(\n 'pyros started with : interface {interface} config {config} logfile {logfile} ros_args {ros_args}'.format(\n interface=interface, config=config, logfile=logfile, ros_args=ros_args))\n\n if interface == 'ros':\n node_proc = pyros_rosinterface_launch(node_name='pyros_rosinterface', pyros_config=config, ros_argv=ros_args)\n else:\n node_proc = None # NOT IMPLEMENTED\n\n # node_proc.daemon = True # we do NOT want a daemon(would stop when this main process exits...)\n client_conn = node_proc.start()",
"def run():\n \"\"\"\n CLI entry point. Parses args and starts the gevent-socketio server.\n \"\"\"\n settings.parse_args()\n pid_name = \"gnotty-%s-%s.pid\" % (settings.HTTP_HOST, settings.HTTP_PORT)\n pid_file = settings.PID_FILE or os.path.join(gettempdir(), pid_name)\n if settings.KILL:\n if kill(pid_file):\n print \"Daemon killed\"\n else:\n print \"Could not kill any daemons\"\n return\n elif kill(pid_file):\n print \"Running daemon killed\"\n if settings.DAEMON:\n daemonize(pid_file)\n serve_forever()",
"def main(argv=None, loop=SharedLoop):\n \"\"\"Serve access to a virtual IOTile device using a virtual iotile interface.\"\"\"\n\n if argv is None:\n argv = sys.argv[1:]\n\n list_parser = argparse.ArgumentParser(add_help=False)\n list_parser.add_argument('-l', '--list', action='store_true', help=\"List all known installed interfaces and devices and then exit\")\n list_parser.add_argument('-v', '--verbose', action=\"count\", default=0, help=\"Increase logging level (goes error, warn, info, debug)\")\n\n parser = argparse.ArgumentParser(description=\"Serve acess to a virtual IOTile device using a virtual IOTile interface\")\n\n parser.add_argument('interface', help=\"The name of the virtual device interface to use\")\n parser.add_argument('device', help=\"The name of the virtual device to create\")\n parser.add_argument('-c', '--config', help=\"An optional JSON config file with arguments for the interface and device\")\n parser.add_argument('-l', '--list', action='store_true', help=\"List all known installed interfaces and devices and then exit\")\n parser.add_argument('-n', '--scenario', help=\"Load a test scenario from the given file\")\n parser.add_argument('-s', '--state', help=\"Load a given state into the device before starting to serve it. Only works with emulated devices.\")\n parser.add_argument('-d', '--dump', help=\"Dump the device's state when we exit the program. Only works with emulated devices.\")\n parser.add_argument('-t', '--track', help=\"Track all changes to the device's state. Only works with emulated devices.\")\n parser.add_argument('-v', '--verbose', action=\"count\", default=0, help=\"Increase logging level (goes error, warn, info, debug)\")\n\n args, _rest = list_parser.parse_known_args(argv)\n\n if args.list:\n configure_logging(args.verbose)\n\n reg = ComponentRegistry()\n print(\"Installed Device Servers:\")\n for name, _iface in reg.load_extensions('iotile.device_server', class_filter=AbstractDeviceServer):\n print('- {}'.format(name))\n\n print(\"\\nInstalled Virtual Devices:\")\n for name, dev in reg.load_extensions('iotile.virtual_device', class_filter=VirtualIOTileDevice,\n product_name=\"virtual_device\"):\n print('- {}: {}'.format(name, one_line_desc(dev)))\n\n return 0\n\n args = parser.parse_args(argv)\n\n configure_logging(args.verbose)\n\n config = {}\n if args.config is not None:\n with open(args.config, \"r\") as conf_file:\n config = json.load(conf_file)\n\n started = False\n device = None\n stop_immediately = args.interface == 'null'\n try:\n server = instantiate_interface(args.interface, config, loop)\n device = instantiate_device(args.device, config, loop)\n\n if args.state is not None:\n print(\"Loading device state from file %s\" % args.state)\n device.load_state(args.state)\n\n if args.scenario is not None:\n print(\"Loading scenario from file %s\" % args.scenario)\n\n with open(args.scenario, \"r\") as infile:\n scenario = json.load(infile)\n\n # load_metascenario expects a list of scenarios even when there is only one\n if isinstance(scenario, dict):\n scenario = [scenario]\n\n device.load_metascenario(scenario)\n\n if args.track is not None:\n print(\"Tracking all state changes to device\")\n device.state_history.enable()\n\n adapter = VirtualDeviceAdapter(devices=[device], loop=loop)\n server.adapter = adapter\n\n loop.run_coroutine(adapter.start())\n\n try:\n loop.run_coroutine(server.start())\n except:\n loop.run_coroutine(adapter.stop())\n adapter = None\n raise\n\n started = True\n\n print(\"Starting to serve virtual IOTile 
device\")\n\n if stop_immediately:\n return 0\n\n # We need to periodically process events that are queued up in the interface\n while True:\n time.sleep(0.5)\n\n except KeyboardInterrupt:\n print(\"Break received, cleanly exiting...\")\n finally:\n if args.dump is not None and device is not None:\n print(\"Dumping final device state to %s\" % args.dump)\n device.save_state(args.dump)\n\n if started:\n loop.run_coroutine(server.stop())\n loop.run_coroutine(adapter.stop())\n\n if args.track is not None and device is not None:\n print(\"Saving state history to file %s\" % args.track)\n device.state_history.dump(args.track)\n\n return 0",
"def start(self, *args, **kw):\n \"\"\"Start the daemon.\"\"\"\n pid = None\n if os.path.exists(self.pidfile):\n with open(self.pidfile, 'r') as fp:\n pid = int(fp.read().strip())\n\n if pid:\n msg = 'pidfile (%s) exists. Daemon already running?\\n'\n sys.stderr.write(msg % self.pidfile)\n sys.exit(1)\n\n self.daemonize()\n self.run(*args, **kw)",
"def main(args=None):\n \"\"\"Call the CLI interface and wait for the result.\"\"\"\n retcode = 0\n try:\n ci = CliInterface()\n args = ci.parser.parse_args()\n result = args.func(args)\n if result is not None:\n print(result)\n retcode = 0\n except Exception:\n retcode = 1\n traceback.print_exc()\n sys.exit(retcode)",
"def run(self, check_interval=300):\n \"\"\" Run the daemon\n\n :type check_interval: int\n :param check_interval: Delay in seconds between checks\n \"\"\"\n while True:\n # Read configuration from the config file if present, else fall\n # back to command line options\n if args.config:\n config = config_file_parser.get_configuration(args.config)\n access_key_id = config['access-key-id']\n secret_access_key = config['secret-access-key']\n region = config['region']\n else:\n access_key_id = args.access_key_id\n secret_access_key = args.secret_access_key\n region = args.region\n\n # Connect to AWS\n connection = connection_manager.connect_to_ec2(\n region, access_key_id, secret_access_key)\n\n snapshot_manager.run(connection)\n\n logger.info('Sleeping {} seconds until next check'.format(\n check_interval))\n time.sleep(check_interval)",
"def cli():\n \"\"\"Run the command line interface.\"\"\"\n args = docopt.docopt(__doc__, version=__VERSION__)\n secure = args['--secure']\n numberofwords = int(args['<numberofwords>'])\n\n dictpath = args['--dict']\n if dictpath is not None:\n dictfile = open(dictpath)\n else:\n dictfile = load_stream('words.txt')\n with dictfile:\n wordlist = read_wordlist(dictfile)\n\n words = generate_words(numberofwords, wordlist, secure=secure)\n print(' '.join(words))",
"def execute\n cli_arguments = Telegram::CLIArguments.new(@config)\n command = \"'#{@config.daemon}' #{cli_arguments.to_s}\"\n @stdout = IO.popen(command, 'a+')\n initialize_stdout_reading\n end",
"def main_cli():\n \"\"\"CLI minimal interface.\"\"\"\n # Get params\n args = _cli_argument_parser()\n delta_secs = args.delay\n i2cbus = args.bus\n i2c_address = args.address\n sensor_key = args.sensor\n sensor_params = args.params\n params = {}\n if sensor_params:\n def _parse_param(str_param):\n key, value = str_param.split('=')\n try:\n value = int(value)\n except ValueError:\n pass\n return {key.strip(): value}\n\n [params.update(_parse_param(sp)) for sp in sensor_params]\n\n if sensor_key:\n from time import sleep\n # Bus init\n try:\n # noinspection PyUnresolvedReferences\n import smbus\n bus_handler = smbus.SMBus(i2cbus)\n except ImportError as exc:\n print(exc, \"\\n\", \"Please install smbus-cffi before.\")\n sys.exit(-1)\n\n # Sensor selection\n try:\n sensor_handler, i2c_default_address = SENSORS[sensor_key]\n except KeyError:\n print(\"'%s' is not recognized as an implemented i2c sensor.\"\n % sensor_key)\n sys.exit(-1)\n\n if i2c_address:\n i2c_address = hex(int(i2c_address, 0))\n else:\n i2c_address = i2c_default_address\n\n # Sensor init\n sensor = sensor_handler(bus_handler, i2c_address, **params)\n\n # Infinite loop\n try:\n while True:\n sensor.update()\n if not sensor.sample_ok:\n print(\"An error has occured.\")\n break\n print(sensor.current_state_str)\n sleep(delta_secs)\n except KeyboardInterrupt:\n print(\"Bye!\")\n else:\n # Run detection mode\n from subprocess import check_output\n cmd = '/usr/sbin/i2cdetect -y {}'.format(i2cbus)\n try:\n output = check_output(cmd.split())\n print(\"Running i2cdetect utility in i2c bus {}:\\n\"\n \"The command '{}' has returned:\\n{}\"\n .format(i2cbus, cmd, output.decode()))\n except FileNotFoundError:\n print(\"Please install i2cdetect before.\")\n sys.exit(-1)\n\n # Parse output\n addresses = ['0x' + l for line in output.decode().splitlines()[1:]\n for l in line.split()[1:] if l != '--']\n if addresses:\n print(\"{} sensors detected in {}\"\n .format(len(addresses), ', '.join(addresses)))\n else:\n print(\"No i2c sensors detected.\")",
"def mainRun():\n '''\n This is the primary function for external typical users to run when the Command Line Interface is used\n '''\n #start up the logger\n initLogger()\n \n #attempt to parse the arguments\n p = ap.ArgumentParser(description=util.DESC, formatter_class=ap.RawTextHelpFormatter)\n \n #version data\n p.add_argument('-V', '--version', action='version', version='%(prog)s' + \\\n ' %s in MSBWT %s' % (util.VERSION, util.PKG_VERSION))\n \n #TODO: do we want subparsers groups by type or sorted by name? it's type currently\n \n sp = p.add_subparsers(dest='subparserID')\n p2 = sp.add_parser('cffq', help='create a MSBWT from FASTQ files (pp + cfpp)')\n p2.add_argument('-p', metavar='numProcesses', dest='numProcesses', type=int, default=1, help='number of processes to run (default: 1)')\n p2.add_argument('-u', '--uniform', dest='areUniform', action='store_true', help='the input sequences have uniform length', default=False)\n p2.add_argument('-c', '--compressed', dest='buildCompressed', action='store_true', help='build the RLE BWT (faster, less disk I/O)', default=False)\n p2.add_argument('outBwtDir', type=util.newDirectory, help='the output MSBWT directory')\n p2.add_argument('inputFastqs', nargs='+', type=util.readableFastqFile, help='the input FASTQ files')\n \n p7 = sp.add_parser('pp', help='pre-process FASTQ files before BWT creation')\n p7.add_argument('-u', '--uniform', dest='areUniform', action='store_true', help='the input sequences have uniform length', default=False)\n p7.add_argument('outBwtDir', type=util.newDirectory, help='the output MSBWT directory')\n p7.add_argument('inputFastqs', nargs='+', type=util.readableFastqFile, help='the input FASTQ files')\n \n p3 = sp.add_parser('cfpp', help='create a MSBWT from pre-processed sequences and offsets')\n p3.add_argument('-p', metavar='numProcesses', dest='numProcesses', type=int, default=1, help='number of processes to run (default: 1)')\n p3.add_argument('-u', '--uniform', dest='areUniform', action='store_true', help='the input sequences have uniform length', default=False)\n p3.add_argument('-c', '--compressed', dest='buildCompressed', action='store_true', help='build the RLE BWT (faster, less disk I/O)', default=False)\n p3.add_argument('bwtDir', type=util.existingDirectory, help='the MSBWT directory to process')\n \n p4 = sp.add_parser('merge', help='merge many MSBWTs into a single MSBWT')\n p4.add_argument('-p', metavar='numProcesses', dest='numProcesses', type=int, default=1, help='number of processes to run (default: 1)')\n p4.add_argument('outBwtDir', type=util.newDirectory, help='the output MSBWT directory')\n p4.add_argument('inputBwtDirs', nargs='+', type=util.existingDirectory, help='input BWT directories to merge')\n \n p5 = sp.add_parser('query', help='search for a sequence in an MSBWT, prints sequence and seqID')\n p5.add_argument('inputBwtDir', type=util.existingDirectory, help='the BWT to query')\n p5.add_argument('kmer', type=util.validKmer, help='the input k-mer to search for')\n p5.add_argument('-d', '--dump-seqs', dest='dumpSeqs', action='store_true', help='print all sequences with the given kmer (default=False)', default=False)\n \n p6 = sp.add_parser('massquery', help='search for many sequences in an MSBWT')\n p6.add_argument('inputBwtDir', type=util.existingDirectory, help='the BWT to query')\n p6.add_argument('kmerFile', help='a file with one k-mer per line')\n p6.add_argument('outputFile', help='output file with counts per line')\n p6.add_argument('-r', '--rev-comp', dest='reverseComplement', 
action='store_true', help='also search for each kmer\\'s reverse complement', default=False)\n \n p8 = sp.add_parser('compress', help='compress a MSBWT from byte/base to RLE')\n p8.add_argument('-p', metavar='numProcesses', dest='numProcesses', type=int, default=1, help='number of processes to run (default: 1)')\n p8.add_argument('srcDir', type=util.existingDirectory, help='the source directory for the BWT to compress')\n p8.add_argument('dstDir', type=util.newDirectory, help='the destination directory')\n \n p9 = sp.add_parser('decompress', help='decompress a MSBWT from RLE to byte/base')\n p9.add_argument('-p', metavar='numProcesses', dest='numProcesses', type=int, default=1, help='number of processes to run (default: 1)')\n p9.add_argument('srcDir', type=util.existingDirectory, help='the source directory for the BWT to compress')\n p9.add_argument('dstDir', type=util.newDirectory, help='the destination directory')\n \n p10 = sp.add_parser('convert', help='convert from a raw text input to RLE')\n p10.add_argument('-i', metavar='inputTextFN', dest='inputTextFN', default=None, help='input text filename (default: stdin)')\n p10.add_argument('dstDir', type=util.newDirectory, help='the destination directory')\n \n args = p.parse_args()\n \n if args.subparserID == 'cffq':\n logger.info('Inputs:\\t'+str(args.inputFastqs))\n logger.info('Uniform:\\t'+str(args.areUniform))\n logger.info('Output:\\t'+args.outBwtDir)\n logger.info('Output Compressed:\\t'+str(args.buildCompressed))\n logger.info('Processes:\\t'+str(args.numProcesses))\n if args.numProcesses > 1:\n logger.warning('Using multi-processing with slow disk accesses can lead to slower build times.')\n print\n if args.areUniform:\n #if they are uniform, use the method developed by Bauer et al., it's likely short Illumina seq\n if args.buildCompressed:\n MultiStringBWT.createMSBWTCompFromFastq(args.inputFastqs, args.outBwtDir, args.numProcesses, args.areUniform, logger)\n else:\n MultiStringBWT.createMSBWTFromFastq(args.inputFastqs, args.outBwtDir, args.numProcesses, args.areUniform, logger)\n else:\n #if they aren't uniform, use the merge method by Holt et al., it's likely longer PacBio seq\n if args.buildCompressed:\n logger.error('No compressed builder for non-uniform datasets, compress after creation.')\n else:\n Multimerge.createMSBWTFromFastq(args.inputFastqs, args.outBwtDir, args.numProcesses, args.areUniform, logger)\n \n elif args.subparserID == 'pp':\n logger.info('Inputs:\\t'+str(args.inputFastqs))\n logger.info('Uniform:\\t'+str(args.areUniform))\n logger.info('Output:\\t'+args.outBwtDir)\n if args.areUniform:\n #preprocess for Bauer et al. method\n MultiStringBWT.preprocessFastqs(args.inputFastqs, args.outBwtDir, args.areUniform, logger)\n else:\n #preprocess for Holt et al. method\n numProcs = 1\n Multimerge.preprocessFastqs(args.inputFastqs, args.outBwtDir, numProcs, args.areUniform, logger)\n \n elif args.subparserID == 'cfpp':\n logger.info('BWT dir:\\t'+args.bwtDir)\n logger.info('Uniform:\\t'+str(args.areUniform))\n logger.info('Output Compressed:\\t'+str(args.buildCompressed))\n logger.info('Processes:\\t'+str(args.numProcesses))\n if args.numProcesses > 1:\n logger.warning('Using multi-processing with slow disk accesses can lead to slower build times.')\n print\n seqFN = args.bwtDir+'/seqs.npy'\n offsetFN = args.bwtDir+'/offsets.npy'\n bwtFN = args.bwtDir+'/msbwt.npy'\n \n if args.areUniform:\n #process it using the column-wise Bauer et al. 
method\n if args.buildCompressed:\n MSBWTCompGenCython.createMsbwtFromSeqs(args.bwtDir, args.numProcesses, logger)\n else:\n MSBWTGenCython.createMsbwtFromSeqs(args.bwtDir, args.numProcesses, logger)\n else:\n #process it using the Holt et al. merge method\n if args.buildCompressed:\n logger.error('No compressed builder for non-uniform datasets, compress after creation.')\n else:\n Multimerge.interleaveLevelMerge(args.bwtDir, args.numProcesses, args.areUniform, logger)\n \n elif args.subparserID == 'compress':\n logger.info('Source Directory:'+args.srcDir)\n logger.info('Dest Directory:'+args.dstDir)\n logger.info('Processes:'+str(args.numProcesses))\n if args.srcDir == args.dstDir:\n raise Exception('Source and destination directories cannot be the same directory.')\n print\n MSBWTGen.compressBWT(args.srcDir+'/msbwt.npy', args.dstDir+'/comp_msbwt.npy', args.numProcesses, logger)\n \n elif args.subparserID == 'decompress':\n logger.info('Source Directory: '+args.srcDir)\n logger.info('Dest Directory: '+args.dstDir)\n logger.info('Processes: '+str(args.numProcesses))\n print\n MSBWTGen.decompressBWT(args.srcDir, args.dstDir, args.numProcesses, logger)\n #TODO: remove if srcdir and dstdir are the same?\n \n elif args.subparserID == 'merge':\n logger.info('Inputs:\\t'+str(args.inputBwtDirs))\n logger.info('Output:\\t'+args.outBwtDir)\n logger.info('Processes:\\t'+str(args.numProcesses))\n if args.numProcesses > 1:\n logger.warning('Multi-processing is not supported at this time, but will be included in a future release.')\n numProcs = 1\n #logger.warning('Using multi-processing with slow disk accesses can lead to slower build times.')\n print\n #MSBWTGen.mergeNewMSBWT(args.outBwtDir, args.inputBwtDirs, args.numProcesses, logger)\n if len(args.inputBwtDirs) > 2:\n #this is a deprecated method, it may still work if you feel daring\n #MSBWTGenCython.mergeMsbwts(args.inputBwtDirs, args.outBwtDir, 1, logger)\n logger.error('Merging more than two MSBWTs at once is not currently supported.')\n else:\n GenericMerge.mergeTwoMSBWTs(args.inputBwtDirs[0], args.inputBwtDirs[1], args.outBwtDir, numProcs, logger)\n \n elif args.subparserID == 'query':\n #this is the easiest thing we can do, don't dump the standard info, just do it\n msbwt = MultiStringBWT.loadBWT(args.inputBwtDir, logger=logger)\n \n #always print how many are found, users can parse it out if they want\n r = msbwt.findIndicesOfStr(args.kmer)\n print r[1]-r[0]\n \n #dump the seqs if request\n if args.dumpSeqs:\n for x in xrange(r[0], r[1]):\n dInd = msbwt.getSequenceDollarID(x)\n print msbwt.recoverString(dInd)[1:]+','+str(dInd)\n \n elif args.subparserID == 'massquery':\n logger.info('Input:\\t'+str(args.inputBwtDir))\n logger.info('Queries:\\t'+str(args.kmerFile))\n logger.info('Output:\\t'+args.outputFile)\n logger.info('Rev-comp:\\t'+str(args.reverseComplement))\n print\n msbwt = MultiStringBWT.loadBWT(args.inputBwtDir, logger=logger)\n \n output = open(args.outputFile, 'w+')\n output.write('k-mer,counts')\n if args.reverseComplement:\n output.write(',revCompCounts\\n')\n else:\n output.write('\\n')\n \n logger.info('Beginning queries...')\n for line in open(args.kmerFile, 'r'):\n kmer = line.strip('\\n')\n c = msbwt.countOccurrencesOfSeq(kmer)\n if args.reverseComplement:\n rc = msbwt.countOccurrencesOfSeq(MultiStringBWT.reverseComplement(kmer))\n output.write(kmer+','+str(c)+','+str(rc)+'\\n')\n else:\n output.write(kmer+','+str(c)+'\\n')\n logger.info('Queries complete.')\n \n elif args.subparserID == 'convert':\n if args.inputTextFN 
== None:\n logger.info('Input: stdin')\n else:\n logger.info('Input: '+args.inputTextFN)\n logger.info('Output: '+args.dstDir)\n logger.info('Beginning conversion...')\n CompressToRLE.compressInput(args.inputTextFN, args.dstDir)\n logger.info('Finished conversion.')\n \n else:\n print args.subparserID+\" is currently not implemented, please wait for a future release.\"",
"def main():\n \"\"\"Parse args and run main daemon function\n\n :return: None\n \"\"\"\n try:\n args = parse_daemon_args(True)\n\n # Protect for windows multiprocessing that will RELAUNCH all\n while True:\n daemon = Arbiter(**args.__dict__)\n daemon.main()\n if not daemon.need_config_reload:\n break\n daemon = None\n except Exception as exp: # pylint: disable=broad-except\n sys.stderr.write(\"*** Daemon exited because: %s\" % str(exp))\n traceback.print_exc()\n exit(1)"
] |
[
0.7287781834602356,
0.7014651298522949,
0.6990735530853271,
0.695350706577301,
0.6953243613243103,
0.6948589086532593,
0.6935513615608215,
0.691478431224823,
0.6895577907562256,
0.6894117593765259,
0.6882059574127197,
0.6876498460769653
] |
Note:
Return type is probably ``-> List[AN]``, but can't get it to pass.
|
def build_body(cls: Type[AN], body: List[ast.stmt]) -> List:
"""
Note:
Return type is probably ``-> List[AN]``, but can't get it to pass.
"""
act_nodes = [] # type: List[ActNode]
for child_node in body:
act_nodes += ActNode.build(child_node)
return act_nodes
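A hedged usage sketch: ActNode.build() reads node.first_token, so the tree is assumed to have been annotated with asttokens before build_body() walks it:

import asttokens

source = 'def test_f():\n    result = f()\n'
atok = asttokens.ASTTokens(source, parse=True)  # attaches .first_token to AST nodes
test_func = atok.tree.body[0]
act_nodes = ActNode.build_body(test_func.body)  # expect one result_assignment node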
|
[
"def loadAnns(self, ids=[]):\n \"\"\"\n Load anns with the specified ids.\n :param ids (int array) : integer ids specifying anns\n :return: anns (object array) : loaded ann objects\n \"\"\"\n if type(ids) == list:\n return [self.anns[id] for id in ids]\n elif type(ids) == int:\n return [self.anns[ids]]",
"def ann_default_transformer(fills=ANN_DEFAULT_FILLS):\n \"\"\"\n Return a simple transformer function for parsing ANN annotations. N.B.,\n ignores all but the first effect.\n\n \"\"\"\n def _transformer(vals):\n if len(vals) == 0:\n return fills\n else:\n # ignore all but first effect\n ann = vals[0].split(b'|')\n ann = ann[:11] + _ann_split2(ann[11]) + _ann_split2(ann[12]) + \\\n _ann_split2(ann[13]) + ann[14:]\n result = tuple(\n fill if v == b''\n else int(v.partition(b'/')[0]) if i == 8\n else int(v) if 11 <= i < 18\n else v\n for i, (v, fill) in enumerate(list(zip(ann, fills))[:18])\n )\n return result\n return _transformer",
"def build(cls: Type[AN], node: ast.stmt) -> List[AN]:\n \"\"\"\n Starting at this ``node``, check if it's an act node. If it's a context\n manager, recurse into child nodes.\n\n Returns:\n List of all act nodes found.\n \"\"\"\n if node_is_result_assignment(node):\n return [cls(node, ActNodeType.result_assignment)]\n if node_is_pytest_raises(node):\n return [cls(node, ActNodeType.pytest_raises)]\n if node_is_unittest_raises(node):\n return [cls(node, ActNodeType.unittest_raises)]\n\n token = node.first_token # type: ignore\n # Check if line marked with '# act'\n if token.line.strip().endswith('# act'):\n return [cls(node, ActNodeType.marked_act)]\n\n # Recurse (downwards) if it's a context manager\n if isinstance(node, ast.With):\n return cls.build_body(node.body)\n\n return []",
"def build_ann(N_input=None, N_hidden=2, N_output=1, hidden_layer_type='Linear', verbosity=1):\n \"\"\"Build a neural net with the indicated input, hidden, and outout dimensions\n\n Arguments:\n params (dict or PyBrainParams namedtuple):\n default: {'N_hidden': 6}\n (this is the only parameter that affects the NN build)\n\n Returns:\n FeedForwardNetwork with N_input + N_hidden + N_output nodes in 3 layers\n \"\"\"\n N_input = N_input or 1\n N_output = N_output or 1\n N_hidden = N_hidden or tuple()\n if isinstance(N_hidden, (int, float, basestring)):\n N_hidden = (int(N_hidden),)\n\n hidden_layer_type = hidden_layer_type or tuple()\n hidden_layer_type = tuplify(normalize_layer_type(hidden_layer_type))\n\n if verbosity > 0:\n print(N_hidden, ' layers of type ', hidden_layer_type)\n\n assert(len(N_hidden) == len(hidden_layer_type))\n nn = pb.structure.FeedForwardNetwork()\n\n # layers\n nn.addInputModule(pb.structure.BiasUnit(name='bias'))\n nn.addInputModule(pb.structure.LinearLayer(N_input, name='input'))\n for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden, hidden_layer_type)):\n Nhid = int(Nhid)\n nn.addModule(hidlaytype(Nhid, name=('hidden-{}'.format(i) if i else 'hidden')))\n nn.addOutputModule(pb.structure.LinearLayer(N_output, name='output'))\n\n # connections\n nn.addConnection(pb.structure.FullConnection(nn['bias'], nn['hidden'] if N_hidden else nn['output']))\n nn.addConnection(pb.structure.FullConnection(nn['input'], nn['hidden'] if N_hidden else nn['output']))\n for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden[:-1], hidden_layer_type[:-1])):\n Nhid = int(Nhid)\n nn.addConnection(pb.structure.FullConnection(nn[('hidden-{}'.format(i) if i else 'hidden')],\n nn['hidden-{}'.format(i + 1)]))\n i = len(N_hidden) - 1\n nn.addConnection(pb.structure.FullConnection(nn['hidden-{}'.format(i) if i else 'hidden'], nn['output']))\n\n nn.sortModules()\n if FAST:\n try:\n nn.convertToFastNetwork()\n except:\n if verbosity > 0:\n print('Unable to convert slow PyBrain NN to a fast ARAC network...')\n if verbosity > 0:\n print(nn.connections)\n return nn",
"def loadAnns(self, ids=[]):\n \"\"\"\n Load anns with the specified ids.\n :param ids (int array) : integer ids specifying anns\n :return: anns (object array) : loaded ann objects\n \"\"\"\n if _isArrayLike(ids):\n return [self.anns[id] for id in ids]\n elif type(ids) == int:\n return [self.anns[ids]]",
"def _get_anns_to_remove(in_file):\n \"\"\"Find larger annotations, if present in VCF, that slow down processing.\n \"\"\"\n to_remove = [\"ANN\", \"LOF\"]\n to_remove_str = tuple([\"##INFO=<ID=%s\" % x for x in to_remove])\n cur_remove = []\n with utils.open_gzipsafe(in_file) as in_handle:\n for line in in_handle:\n if not line.startswith(\"#\"):\n break\n elif line.startswith(to_remove_str):\n cur_id = line.split(\"ID=\")[-1].split(\",\")[0]\n cur_remove.append(\"INFO/%s\" % cur_id)\n return \",\".join(cur_remove)",
"def annToMask(self, ann):\n \"\"\"\n Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.\n :return: binary mask (numpy 2D array)\n \"\"\"\n rle = self.annToRLE(ann)\n m = maskUtils.decode(rle)\n return m",
"def resample_ann(resampled_t, ann_sample):\n \"\"\"\n Compute the new annotation indices\n\n Parameters\n ----------\n resampled_t : numpy array\n Array of signal locations as returned by scipy.signal.resample\n ann_sample : numpy array\n Array of annotation locations\n\n Returns\n -------\n resampled_ann_sample : numpy array\n Array of resampled annotation locations\n\n \"\"\"\n tmp = np.zeros(len(resampled_t), dtype='int16')\n j = 0\n tprec = resampled_t[j]\n for i, v in enumerate(ann_sample):\n while True:\n d = False\n if v < tprec:\n j -= 1\n tprec = resampled_t[j]\n\n if j+1 == len(resampled_t):\n tmp[j] += 1\n break\n\n tnow = resampled_t[j+1]\n if tprec <= v and v <= tnow:\n if v-tprec < tnow-v:\n tmp[j] += 1\n else:\n tmp[j+1] += 1\n d = True\n j += 1\n tprec = tnow\n if d:\n break\n\n idx = np.where(tmp>0)[0].astype('int64')\n res = []\n for i in idx:\n for j in range(tmp[i]):\n res.append(i)\n assert len(res) == len(ann_sample)\n\n return np.asarray(res, dtype='int64')",
"def theAn\r\n array = Array.new(11)\r\n\r\n array[0] = 0.598\r\n array[1] = 1.65\r\n array[2] = 3.1\r\n array[3] = 3.87\r\n array[4] = 2.33\r\n array[5] = 1.29\r\n array[6] = 0.462\r\n array[7] = 0.328\r\n array[8] = 0.17\r\n array[9] = 0.0865\r\n array[10] = 0.114\r\n\r\n array\r\n end",
"def convert_to_arn(arns, region=None, key=None, keyid=None, profile=None):\n '''\n Convert a list of strings into actual arns. Converts convenience names such\n as 'scaling_policy:...'\n\n CLI Example::\n\n salt '*' convert_to_arn 'scaling_policy:'\n '''\n results = []\n for arn in arns:\n if arn.startswith(\"scaling_policy:\"):\n _, as_group, scaling_policy_name = arn.split(\":\")\n policy_arn = __salt__[\"boto_asg.get_scaling_policy_arn\"](\n as_group, scaling_policy_name, region, key, keyid, profile\n )\n if policy_arn:\n results.append(policy_arn)\n else:\n log.error('Could not convert: %s', arn)\n else:\n results.append(arn)\n return results",
"def not_right(self, num):\n \"\"\"\n WITH SLICES BEING FLAT, WE NEED A SIMPLE WAY TO SLICE FROM THE LEFT [:-num:]\n \"\"\"\n if not self.list:\n self._build_list()\n\n if num == None:\n return self.list[:-1:]\n if num <= 0:\n return []\n\n return self.list[:-num:]",
"def agent_list(self, deep_sorted=False):\n \"\"\"Get the canonicallized agent list.\"\"\"\n ag_list = []\n for ag_name in self._agent_order:\n ag_attr = getattr(self, ag_name)\n if isinstance(ag_attr, Concept) or ag_attr is None:\n ag_list.append(ag_attr)\n elif isinstance(ag_attr, list):\n if not all([isinstance(ag, Concept) for ag in ag_attr]):\n raise TypeError(\"Expected all elements of list to be Agent \"\n \"and/or Concept, but got: %s\"\n % {type(ag) for ag in ag_attr})\n if deep_sorted:\n ag_attr = sorted_agents(ag_attr)\n ag_list.extend(ag_attr)\n else:\n raise TypeError(\"Expected type Agent, Concept, or list, got \"\n \"type %s.\" % type(ag_attr))\n return ag_list"
] |
[
0.6926379799842834,
0.683552086353302,
0.6813563108444214,
0.680248498916626,
0.6799278259277344,
0.6691052913665771,
0.6649107933044434,
0.660660445690155,
0.6598241925239563,
0.6566042304039001,
0.651421844959259,
0.6494168043136597
] |
Starting at this ``node``, check if it's an act node. If it's a context
manager, recurse into child nodes.
Returns:
List of all act nodes found.
|
def build(cls: Type[AN], node: ast.stmt) -> List[AN]:
"""
Starting at this ``node``, check if it's an act node. If it's a context
manager, recurse into child nodes.
Returns:
List of all act nodes found.
"""
if node_is_result_assignment(node):
return [cls(node, ActNodeType.result_assignment)]
if node_is_pytest_raises(node):
return [cls(node, ActNodeType.pytest_raises)]
if node_is_unittest_raises(node):
return [cls(node, ActNodeType.unittest_raises)]
token = node.first_token # type: ignore
# Check if line marked with '# act'
if token.line.strip().endswith('# act'):
return [cls(node, ActNodeType.marked_act)]
# Recurse (downwards) if it's a context manager
if isinstance(node, ast.With):
return cls.build_body(node.body)
return []
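The node_is_* predicates are defined elsewhere in the linter; as a plausible sketch of the simplest one, assuming the Act line is an assignment to a name literally called result:

import ast

def node_is_result_assignment(node: ast.stmt) -> bool:
    # Sketch only; the real helper may inspect source tokens rather than the AST.
    return (
        isinstance(node, ast.Assign)
        and len(node.targets) == 1
        and isinstance(node.targets[0], ast.Name)
        and node.targets[0].id == 'result'
    )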
|
[
"def load_act_node(self) -> ActNode:\n \"\"\"\n Raises:\n ValidationError: AAA01 when no act block is found and AAA02 when\n multiple act blocks are found.\n \"\"\"\n act_nodes = ActNode.build_body(self.node.body)\n\n if not act_nodes:\n raise ValidationError(self.first_line_no, self.node.col_offset, 'AAA01 no Act block found in test')\n\n # Allow `pytest.raises` and `self.assertRaises()` in assert nodes - if\n # any of the additional nodes are `pytest.raises`, then raise\n for a_n in act_nodes[1:]:\n if a_n.block_type in [ActNodeType.marked_act, ActNodeType.result_assignment]:\n raise ValidationError(\n self.first_line_no,\n self.node.col_offset,\n 'AAA02 multiple Act blocks found in test',\n )\n\n return act_nodes[0]",
"def get_actions(self, parent_environ=None):\n \"\"\"Get the list of rex.Action objects resulting from interpreting this\n context. This is provided mainly for testing purposes.\n\n Args:\n parent_environ Environment to interpret the context within,\n defaults to os.environ if None.\n\n Returns:\n A list of rex.Action subclass instances.\n \"\"\"\n interp = Python(target_environ={}, passive=True)\n executor = self._create_executor(interp, parent_environ)\n self._execute(executor)\n return executor.actions",
"def build_body(cls: Type[AN], body: List[ast.stmt]) -> List:\n \"\"\"\n Note:\n Return type is probably ``-> List[AN]``, but can't get it to pass.\n \"\"\"\n act_nodes = [] # type: List[ActNode]\n for child_node in body:\n act_nodes += ActNode.build(child_node)\n return act_nodes",
"def _get_children(self, node):\n \"\"\"Return not `ast.expr_context` children of `node`\"\"\"\n children = ast.get_children(node)\n return [child for child in children\n if not isinstance(child, ast.expr_context)]",
"def _construct_node_from_actions(self,\n current_node: Tree,\n remaining_actions: List[List[str]]) -> List[List[str]]:\n \"\"\"\n Given a current node in the logical form tree, and a list of actions in an action sequence,\n this method fills in the children of the current node from the action sequence, then\n returns whatever actions are left.\n\n For example, we could get a node with type ``c``, and an action sequence that begins with\n ``c -> [<r,c>, r]``. This method will add two children to the input node, consuming\n actions from the action sequence for nodes of type ``<r,c>`` (and all of its children,\n recursively) and ``r`` (and all of its children, recursively). This method assumes that\n action sequences are produced `depth-first`, so all actions for the subtree under ``<r,c>``\n appear before actions for the subtree under ``r``. If there are any actions in the action\n sequence after the ``<r,c>`` and ``r`` subtrees have terminated in leaf nodes, they will be\n returned.\n \"\"\"\n if not remaining_actions:\n logger.error(\"No actions left to construct current node: %s\", current_node)\n raise ParsingError(\"Incomplete action sequence\")\n left_side, right_side = remaining_actions.pop(0)\n if left_side != current_node.label():\n logger.error(\"Current node: %s\", current_node)\n logger.error(\"Next action: %s -> %s\", left_side, right_side)\n logger.error(\"Remaining actions were: %s\", remaining_actions)\n raise ParsingError(\"Current node does not match next action\")\n if right_side[0] == '[':\n # This is a non-terminal expansion, with more than one child node.\n for child_type in right_side[1:-1].split(', '):\n child_node = Tree(child_type, [])\n current_node.append(child_node) # you add a child to an nltk.Tree with `append`\n # For now, we assume that all children in a list like this are non-terminals, so we\n # recurse on them. I'm pretty sure that will always be true for the way our\n # grammar induction works. We can revisit this later if we need to.\n remaining_actions = self._construct_node_from_actions(child_node, remaining_actions)\n else:\n # The current node is a pre-terminal; we'll add a single terminal child. By\n # construction, the right-hand side of our production rules are only ever terminal\n # productions or lists of non-terminals.\n current_node.append(Tree(right_side, [])) # you add a child to an nltk.Tree with `append`\n return remaining_actions",
"def path(self):\n '''Path (list of nodes and actions) from root to this node.'''\n node = self\n path = []\n while node:\n path.append((node.action, node.state))\n node = node.parent\n return list(reversed(path))",
"private static List<ActionRef> getRefs(Xml node)\r\n {\r\n final Collection<Xml> children = node.getChildren(NODE_ACTION);\r\n final List<ActionRef> actions = new ArrayList<>(children.size());\r\n\r\n for (final Xml action : children)\r\n {\r\n final String path = action.readString(ATT_PATH);\r\n final boolean cancel = action.readBoolean(false, ATT_CANCEL);\r\n actions.add(new ActionRef(path, cancel, getRefs(action)));\r\n }\r\n\r\n return actions;\r\n }",
"def get_children(self):\n \"\"\"Get the child nodes below this node.\n\n :returns: The children.\n :rtype: iterable(NodeNG)\n \"\"\"\n for field in self._astroid_fields:\n attr = getattr(self, field)\n if attr is None:\n continue\n if isinstance(attr, (list, tuple)):\n yield from attr\n else:\n yield attr",
"def select_actions(root, action_space, max_episode_steps):\n \"\"\"\n Select actions from the tree\n\n Normally we select the greedy action that has the highest reward\n associated with that subtree. We have a small chance to select a\n random action based on the exploration param and visit count of the\n current node at each step.\n\n We select actions for the longest possible episode, but normally these\n will not all be used. They will instead be truncated to the length\n of the actual episode and then used to update the tree.\n \"\"\"\n node = root\n\n acts = []\n steps = 0\n while steps < max_episode_steps:\n if node is None:\n # we've fallen off the explored area of the tree, just select random actions\n act = action_space.sample()\n else:\n epsilon = EXPLORATION_PARAM / np.log(node.visits + 2)\n if random.random() < epsilon:\n # random action\n act = action_space.sample()\n else:\n # greedy action\n act_value = {}\n for act in range(action_space.n):\n if node is not None and act in node.children:\n act_value[act] = node.children[act].value\n else:\n act_value[act] = -np.inf\n best_value = max(act_value.values())\n best_acts = [\n act for act, value in act_value.items() if value == best_value\n ]\n act = random.choice(best_acts)\n\n if act in node.children:\n node = node.children[act]\n else:\n node = None\n\n acts.append(act)\n steps += 1\n\n return acts",
"def _construct_node_from_actions(self,\n current_node: Tree,\n remaining_actions: List[List[str]],\n add_var_function: bool) -> List[List[str]]:\n \"\"\"\n Given a current node in the logical form tree, and a list of actions in an action sequence,\n this method fills in the children of the current node from the action sequence, then\n returns whatever actions are left.\n\n For example, we could get a node with type ``c``, and an action sequence that begins with\n ``c -> [<r,c>, r]``. This method will add two children to the input node, consuming\n actions from the action sequence for nodes of type ``<r,c>`` (and all of its children,\n recursively) and ``r`` (and all of its children, recursively). This method assumes that\n action sequences are produced `depth-first`, so all actions for the subtree under ``<r,c>``\n appear before actions for the subtree under ``r``. If there are any actions in the action\n sequence after the ``<r,c>`` and ``r`` subtrees have terminated in leaf nodes, they will be\n returned.\n \"\"\"\n if not remaining_actions:\n logger.error(\"No actions left to construct current node: %s\", current_node)\n raise ParsingError(\"Incomplete action sequence\")\n left_side, right_side = remaining_actions.pop(0)\n if left_side != current_node.label():\n mismatch = True\n multi_match_mapping = {str(key): [str(value) for value in values] for key,\n values in self.get_multi_match_mapping().items()}\n current_label = current_node.label()\n if current_label in multi_match_mapping and left_side in multi_match_mapping[current_label]:\n mismatch = False\n if mismatch:\n logger.error(\"Current node: %s\", current_node)\n logger.error(\"Next action: %s -> %s\", left_side, right_side)\n logger.error(\"Remaining actions were: %s\", remaining_actions)\n raise ParsingError(\"Current node does not match next action\")\n if right_side[0] == '[':\n # This is a non-terminal expansion, with more than one child node.\n for child_type in right_side[1:-1].split(', '):\n if child_type.startswith(\"'lambda\"):\n # We need to special-case the handling of lambda here, because it's handled a\n # bit weirdly in the action sequence. This is stripping off the single quotes\n # around something like `'lambda x'`.\n child_type = child_type[1:-1]\n child_node = Tree(child_type, [])\n current_node.append(child_node) # you add a child to an nltk.Tree with `append`\n if not self.is_terminal(child_type):\n remaining_actions = self._construct_node_from_actions(child_node,\n remaining_actions,\n add_var_function)\n elif self.is_terminal(right_side):\n # The current node is a pre-terminal; we'll add a single terminal child. We need to\n # check first for whether we need to add a (var _) around the terminal node, though.\n if add_var_function and right_side in self._lambda_variables:\n right_side = f\"(var {right_side})\"\n if add_var_function and right_side == 'var':\n raise ParsingError('add_var_function was true, but action sequence already had var')\n current_node.append(Tree(right_side, [])) # you add a child to an nltk.Tree with `append`\n else:\n # The only way this can happen is if you have a unary non-terminal production rule.\n # That is almost certainly not what you want with this kind of grammar, so we'll crash.\n # If you really do want this, open a PR with a valid use case.\n raise ParsingError(f\"Found a unary production rule: {left_side} -> {right_side}. \"\n \"Are you sure you want a unary production rule in your grammar?\")\n return remaining_actions",
"def get_valid_actions(self) -> Dict[str, Tuple[torch.Tensor, torch.Tensor, List[int]]]:\n \"\"\"\n Returns the valid actions in the current grammar state. See the class docstring for a\n description of what we're returning here.\n \"\"\"\n actions = self._valid_actions[self._nonterminal_stack[-1]]\n context_actions = []\n for type_, variable in self._lambda_stacks:\n if self._nonterminal_stack[-1] == type_:\n production_string = f\"{type_} -> {variable}\"\n context_actions.append(self._context_actions[production_string])\n if context_actions:\n input_tensor, output_tensor, action_ids = actions['global']\n new_inputs = [input_tensor] + [x[0] for x in context_actions]\n input_tensor = torch.cat(new_inputs, dim=0)\n new_outputs = [output_tensor] + [x[1] for x in context_actions]\n output_tensor = torch.cat(new_outputs, dim=0)\n new_action_ids = action_ids + [x[2] for x in context_actions]\n # We can't just reassign to actions['global'], because that would modify the state of\n # self._valid_actions. Instead, we need to construct a new actions dictionary.\n new_actions = {**actions}\n new_actions['global'] = (input_tensor, output_tensor, new_action_ids)\n actions = new_actions\n return actions",
"def build_act(cls: Type[_Block], node: ast.stmt, test_func_node: ast.FunctionDef) -> _Block:\n \"\"\"\n Act block is a single node - either the act node itself, or the node\n that wraps the act node.\n \"\"\"\n add_node_parents(test_func_node)\n # Walk up the parent nodes of the parent node to find test's definition.\n act_block_node = node\n while act_block_node.parent != test_func_node: # type: ignore\n act_block_node = act_block_node.parent # type: ignore\n return cls([act_block_node], LineType.act)"
] |
[
0.7258774638175964,
0.7241926193237305,
0.7147367000579834,
0.6918389201164246,
0.691047191619873,
0.6856587529182434,
0.6754450798034668,
0.670809805393219,
0.6692724227905273,
0.6687043309211731,
0.6680272221565247,
0.6679202318191528
] |
Checks if the organization was properly created.
Note: A new organization must have a 'default' environment and two default services
running there. Cannot use DEFAULT_ENV_NAME, because a zone could be added there.
:rtype: bool
|
def ready(self):
"""
    Checks if the organization was properly created.
    Note: A new organization must have a 'default' environment and two default services
    running there. Cannot use DEFAULT_ENV_NAME, because a zone could be added there.
:rtype: bool
"""
    @retry(tries=3, retry_exception=exceptions.NotFoundError) # org init, takes some time
def check_init():
env = self.environments['default']
return env.services['Default workflow service'].running(timeout=1) and \
env.services['Default credentials service'].running(timeout=1)
return check_init()
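Typical call site, taken from the create_organization snippet among the negatives below: create the organization, then block on ready() before handing it out.

org = Organization.new(name, self._router)
assert org.ready(), "Organization {} hasn't got ready after creation".format(name)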
|
[
"def _create_in_progress(self):\n \"\"\"\n Creating this service is handled asynchronously so this method will\n simply check if the create is in progress. If it is not in progress,\n we could probably infer it either failed or succeeded.\n \"\"\"\n instance = self.service.service.get_instance(self.service.name)\n if (instance['last_operation']['state'] == 'in progress' and\n instance['last_operation']['type'] == 'create'):\n return True\n\n return False",
"def create_with_organization(global_options, options)\n create(global_options, options)\n\n if Organization.new.create(global_options, name: options[:username],\n key: options[:username])\n save_message('Organization': ['created!'])\n true\n else\n save_message('Organization': ['not created!'])\n false\n end\n end",
"def launched():\n \"\"\"Test whether the current python environment is the correct lore env.\n\n :return: :any:`True` if the environment is launched\n :rtype: bool\n \"\"\"\n if not PREFIX:\n return False\n\n return os.path.realpath(sys.prefix) == os.path.realpath(PREFIX)",
"def prepare_env(org):\n \"\"\" Example shows how to configure environment from scratch \"\"\"\n\n # Add services\n key_service = org.service(type='builtin:cobalt_secure_store', name='Keystore')\n wf_service = org.service(type='builtin:workflow_service', name='Workflow', parameters='{}')\n\n # Add services to environment\n env = org.environment(name='default')\n env.clean()\n env.add_service(key_service)\n env.add_service(wf_service)\n env.add_policy(\n {\"action\": \"provisionVms\",\n \"parameter\": \"publicKeyId\",\n \"value\": key_service.regenerate()['id']})\n\n # Add cloud provider account\n access = {\n \"provider\": \"aws-ec2\",\n \"usedEnvironments\": [],\n \"ec2SecurityGroup\": \"default\",\n \"providerCopy\": \"aws-ec2\",\n \"name\": \"test-provider\",\n \"jcloudsIdentity\": KEY,\n \"jcloudsCredential\": SECRET_KEY,\n \"jcloudsRegions\": \"us-east-1\"\n }\n prov = org.provider(access)\n env.add_provider(prov)\n return org.organizationId",
"def create_environment(self, name, default=False, zone=None):\n \"\"\" Creates environment and returns Environment object.\n \"\"\"\n from qubell.api.private.environment import Environment\n return Environment.new(organization=self, name=name, zone_id=zone, default=default, router=self._router)",
"def init_common_services(self, with_cloud_account=True, zone_name=None):\n \"\"\"\n Initialize common service,\n When 'zone_name' is defined \" at $zone_name\" is added to service names\n :param bool with_cloud_account:\n :param str zone_name:\n :return: OR tuple(Workflow, Vault), OR tuple(Workflow, Vault, CloudAccount) with services\n \"\"\"\n zone_names = ZoneConstants(zone_name)\n type_to_app = lambda t: self.organization.applications[system_application_types.get(t, t)]\n wf_service = self.organization.service(name=zone_names.DEFAULT_WORKFLOW_SERVICE,\n application=type_to_app(WORKFLOW_SERVICE_TYPE),\n environment=self)\n key_service = self.organization.service(name=zone_names.DEFAULT_CREDENTIAL_SERVICE,\n application=type_to_app(COBALT_SECURE_STORE_TYPE),\n environment=self)\n assert wf_service.running()\n assert key_service.running()\n if not with_cloud_account:\n with self as env:\n env.add_service(wf_service, force=True)\n env.add_service(key_service, force=True)\n return wf_service, key_service\n\n cloud_account_service = self.organization.instance(name=zone_names.DEFAULT_CLOUD_ACCOUNT_SERVICE,\n application=type_to_app(CLOUD_ACCOUNT_TYPE),\n environment=self,\n parameters=PROVIDER_CONFIG,\n destroyInterval=0)\n # Imidiate adding to env cause CA not to drop destroy interval. Known issue 6132. So, add service as instance with\n # destroyInterval set to 'never'\n assert cloud_account_service.running()\n\n with self as env:\n env.add_service(wf_service, force=True)\n env.add_service(key_service, force=True)\n env.add_service(cloud_account_service, force=True)\n return wf_service, key_service, cloud_account_service",
"def create_missing(self):\n \"\"\"Automatically populate additional instance attributes.\n\n When a new lifecycle environment is created, it must either:\n\n * Reference a parent lifecycle environment in the tree of lifecycle\n environments via the ``prior`` field, or\n * have a name of \"Library\".\n\n Within a given organization, there can only be a single lifecycle\n environment with a name of 'Library'. This lifecycle environment is at\n the root of a tree of lifecycle environments, so its ``prior`` field is\n blank.\n\n This method finds the 'Library' lifecycle environment within the\n current organization and points to it via the ``prior`` field. This is\n not done if the current lifecycle environment has a name of 'Library'.\n\n \"\"\"\n # We call `super` first b/c it populates `self.organization`, and we\n # need that field to perform a search a little later.\n super(LifecycleEnvironment, self).create_missing()\n if (self.name != 'Library' and # pylint:disable=no-member\n not hasattr(self, 'prior')):\n results = self.search({'organization'}, {u'name': u'Library'})\n if len(results) != 1:\n raise APIResponseError(\n u'Could not find the \"Library\" lifecycle environment for '\n u'organization {0}. Search results: {1}'\n .format(self.organization, results) # pylint:disable=E1101\n )\n self.prior = results[0]",
"def default(self):\n \"\"\"\n Returns environment marked as default.\n When Zone is set marked default makes no sense, special env with proper Zone is returned.\n \"\"\"\n if ZONE_NAME:\n log.info(\"Getting or creating default environment for zone with name '{0}'\".format(DEFAULT_ENV_NAME()))\n zone_id = self.organization.zones[ZONE_NAME].id\n return self.organization.get_or_create_environment(name=DEFAULT_ENV_NAME(), zone=zone_id)\n\n def_envs = [env_j[\"id\"] for env_j in self.json() if env_j[\"isDefault\"]]\n\n if len(def_envs) > 1:\n log.warning('Found more than one default environment. Picking last.')\n return self[def_envs[-1]]\n elif len(def_envs) == 1:\n return self[def_envs[0]]\n raise exceptions.NotFoundError('Unable to get default environment')",
"def check( state_engine, nameop, block_id, checked_ops ):\n \"\"\"\n Verify the validity of a NAMESPACE_READY operation.\n It is only valid if it has been imported by the same sender as\n the corresponding NAMESPACE_REVEAL, and the namespace is still\n in the process of being imported.\n \"\"\"\n\n namespace_id = nameop['namespace_id']\n sender = nameop['sender']\n\n # must have been revealed\n if not state_engine.is_namespace_revealed( namespace_id ):\n log.warning(\"Namespace '%s' is not revealed\" % namespace_id )\n return False\n\n # must have been sent by the same person who revealed it\n revealed_namespace = state_engine.get_namespace_reveal( namespace_id )\n if revealed_namespace['recipient'] != sender:\n log.warning(\"Namespace '%s' is not owned by '%s' (but by %s)\" % (namespace_id, sender, revealed_namespace['recipient']))\n return False\n\n # can't be ready yet\n if state_engine.is_namespace_ready( namespace_id ):\n # namespace already exists\n log.warning(\"Namespace '%s' is already registered\" % namespace_id )\n return False\n\n # preserve from revealed \n nameop['sender_pubkey'] = revealed_namespace['sender_pubkey']\n nameop['address'] = revealed_namespace['address']\n\n # can commit imported nameops\n return True",
"def create_organization(self, name):\n \"\"\"\n Creates new organization\n :rtype: Organization\n \"\"\"\n org = Organization.new(name, self._router)\n assert org.ready(), \"Organization {} hasn't got ready after creation\".format(name)\n return org",
"def _check_zone(self, name, create=False):\n '''Checks whether a zone specified in a source exist in Azure server.\n\n Note that Azure zones omit end '.' eg: contoso.com vs contoso.com.\n Returns the name if it exists.\n\n :param name: Name of a zone to checks\n :type name: str\n :param create: If True, creates the zone of that name.\n :type create: bool\n\n :type return: str or None\n '''\n self.log.debug('_check_zone: name=%s', name)\n try:\n if name in self._azure_zones:\n return name\n self._dns_client.zones.get(self._resource_group, name)\n self._azure_zones.add(name)\n return name\n except CloudError as err:\n msg = 'The Resource \\'Microsoft.Network/dnszones/{}\\''.format(name)\n msg += ' under resource group \\'{}\\''.format(self._resource_group)\n msg += ' was not found.'\n if msg == err.message:\n # Then the only error is that the zone doesn't currently exist\n if create:\n self.log.debug('_check_zone:no matching zone; creating %s',\n name)\n create_zone = self._dns_client.zones.create_or_update\n create_zone(self._resource_group, name,\n Zone(location='global'))\n return name\n else:\n return\n raise",
"def __verify_ready(self, creating=False):\n '''Some cleanup, ensures that everything is set up properly to avoid\n random errors during execution\n\n Args:\n creating (bool): True if currently creating employer bees, False\n for checking all other operations\n '''\n\n if len(self._value_ranges) == 0:\n self._logger.log(\n 'crit',\n 'Attribute value_ranges must have at least one value'\n )\n raise RuntimeWarning(\n 'Attribute value_ranges must have at least one value'\n )\n if len(self._employers) == 0 and creating is False:\n self._logger.log('crit', 'Need to create employers')\n raise RuntimeWarning('Need to create employers')"
] |
[
0.6964225172996521,
0.6787546873092651,
0.6773152947425842,
0.6708482503890991,
0.6707577705383301,
0.6669090986251831,
0.6666747331619263,
0.6664408445358276,
0.6601459980010986,
0.6560268998146057,
0.6540591716766357,
0.6535741090774536
] |
Creates application and returns Application object.
|
def create_application(self, name=None, manifest=None):
""" Creates application and returns Application object.
"""
if not manifest:
raise exceptions.NotEnoughParams('Manifest not set')
if not name:
name = 'auto-generated-name'
from qubell.api.private.application import Application
return Application.new(self, name, manifest, self._router)
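A hedged usage sketch; Manifest is assumed here to be the package's manifest helper taking a local file path, and all names are illustrative:

from qubell.api.private.manifest import Manifest

org = ...  # an existing Organization instance
app = org.create_application(name='my-app', manifest=Manifest(file='manifest.yml'))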
|
[
"def create_application(self):\n \"\"\"Create and return a new instance of tinman.application.Application\"\"\"\n return application.Application(self.settings,\n self.namespace.routes,\n self.port)",
"def _create_app(self):\n \"\"\"\n Create `Application` instance for this .\n \"\"\"\n pymux = self.pymux\n\n def on_focus_changed():\n \"\"\" When the focus changes to a read/write buffer, make sure to go\n to insert mode. This happens when the ViState was set to NAVIGATION\n in the copy buffer. \"\"\"\n vi_state = app.vi_state\n\n if app.current_buffer.read_only():\n vi_state.input_mode = InputMode.NAVIGATION\n else:\n vi_state.input_mode = InputMode.INSERT\n\n app = Application(\n output=self.output,\n input=self.input,\n color_depth=self.color_depth,\n\n layout=Layout(container=self.layout_manager.layout),\n key_bindings=pymux.key_bindings_manager.key_bindings,\n mouse_support=Condition(lambda: pymux.enable_mouse_support),\n full_screen=True,\n style=self.pymux.style,\n style_transformation=ConditionalStyleTransformation(\n SwapLightAndDarkStyleTransformation(),\n Condition(lambda: self.pymux.swap_dark_and_light),\n ),\n on_invalidate=(lambda _: pymux.invalidate()))\n\n # Synchronize the Vi state with the CLI object.\n # (This is stored in the current class, but expected to be in the\n # CommandLineInterface.)\n def sync_vi_state(_):\n VI = EditingMode.VI\n EMACS = EditingMode.EMACS\n\n if self.confirm_text or self.prompt_command or self.command_mode:\n app.editing_mode = VI if pymux.status_keys_vi_mode else EMACS\n else:\n app.editing_mode = VI if pymux.mode_keys_vi_mode else EMACS\n\n app.key_processor.before_key_press += sync_vi_state\n app.key_processor.after_key_press += sync_vi_state\n app.key_processor.after_key_press += self.sync_focus\n\n # Set render postpone time. (.1 instead of 0).\n # This small change ensures that if for a split second a process\n # outputs a lot of information, we don't give the highest priority to\n # rendering output. (Nobody reads that fast in real-time.)\n app.max_render_postpone_time = .1 # Second.\n\n # Hide message when a key has been pressed.\n def key_pressed(_):\n self.message = None\n app.key_processor.before_key_press += key_pressed\n\n # The following code needs to run with the application active.\n # Especially, `create_window` needs to know what the current\n # application is, in order to focus the new pane.\n with set_app(app):\n # Redraw all CLIs. (Adding a new client could mean that the others\n # change size, so everything has to be redrawn.)\n pymux.invalidate()\n\n pymux.startup()\n\n return app",
"def create(self, **kwargs):\n \"\"\"Create a new Application.\n\n Args:\n **kwargs: Arbitrary keyword arguments, including:\n name (str): A name for the new Application.\n\n Returns:\n A round.Application object if successful.\n \"\"\"\n resource = self.resource.create(kwargs)\n if 'admin_token' in kwargs:\n resource.context.authorize('Gem-Application',\n api_token=resource.api_token,\n admin_token=kwargs['admin_token'])\n app = self.wrap(resource)\n return self.add(app)",
"def create_app(**config):\n \"\"\"Application Factory\n\n You can create a new He-Man application with::\n\n from heman.config import create_app\n\n app = create_app() # app can be uses as WSGI application\n app.run() # Or you can run as a simple web server\n \"\"\"\n app = Flask(\n __name__, static_folder=None\n )\n\n if 'MONGO_URI' in os.environ:\n app.config['MONGO_URI'] = os.environ['MONGO_URI']\n\n app.config['LOG_LEVEL'] = 'DEBUG'\n app.config['SECRET_KEY'] = '2205552d13b5431bb537732bbb051f1214414f5ab34d47'\n\n configure_logging(app)\n configure_sentry(app)\n configure_api(app)\n configure_mongodb(app)\n configure_login(app)\n\n return app",
"def create_app(name, config=None, flask_params=None):\n \"\"\"\n Create app\n Generalized way of creating a flask app. Use it in your concrete apps and\n do further configuration there: add app-specific options, extensions,\n listeners and other features.\n\n Note: application name should be its fully qualified __name__, something\n like project.api.app. This is how we fetch routing settings.\n \"\"\"\n from boiler.config import DefaultConfig\n if config is None:\n config = DefaultConfig()\n\n # get flask parameters\n options = dict(import_name=name)\n if flask_params is not None:\n options.update(flask_params)\n if config.get('FLASK_STATIC_URL') is not None:\n options['static_url_path'] = config.get('FLASK_STATIC_URL')\n if config.get('FLASK_STATIC_PATH') is not None:\n options['static_folder'] = config.get('FLASK_STATIC_PATH')\n\n # create an app\n app = Flask(**options)\n\n # configure app\n if config.__class__ is type:\n raise Exception('Config must be an object, got class instead.')\n\n app.config.from_object(DefaultConfig())\n app.config.from_object(config)\n\n # register error handler\n register_error_handler(app)\n\n # use kernel templates\n kernel_templates_path = path.realpath(path.dirname(__file__)+'/templates')\n fallback_loader = FileSystemLoader([kernel_templates_path])\n custom_loader = ChoiceLoader([app.jinja_loader, fallback_loader])\n app.jinja_loader = custom_loader\n\n # time restarts?\n if app.config.get('TIME_RESTARTS'):\n restart_timer.time_restarts(os.path.join(os.getcwd(), 'var', 'data'))\n\n # detect browsersync proxy\n @app.before_request\n def detect_browsersync():\n g.dev_proxy = False\n proxy_header = app.config.get('DEV_PROXY_HEADER')\n if proxy_header:\n g.dev_proxy = bool(request.headers.get(proxy_header))\n\n return app",
"def application(self, id=None, manifest=None, name=None):\n \"\"\" Smart method. Creates, picks or modifies application.\n If application found by name or id and manifest not changed: return app.\n If app found by id, but other parameters differs: change them.\n If no application found, create.\n \"\"\"\n\n modify = False\n found = False\n\n # Try to find application by name or id\n if name and id:\n found = self.get_application(id=id)\n if not found.name == name:\n modify = True\n elif id:\n found = self.get_application(id=id)\n name = found.name\n elif name:\n try:\n found = self.get_application(name=name)\n except exceptions.NotFoundError:\n pass\n\n # If found - compare parameters\n if found:\n if manifest and not manifest == found.manifest:\n modify = True\n\n # We need to update application\n if found and modify:\n found.update(name=name, manifest=manifest)\n if not found:\n created = self.create_application(name=name, manifest=manifest)\n\n return found or created",
"def _create_application(self):\n \"\"\"\n Create an `Application` instance.\n \"\"\"\n return Application(\n input=self.input,\n output=self.output,\n layout=self.ptpython_layout.layout,\n key_bindings=merge_key_bindings([\n load_python_bindings(self),\n load_auto_suggest_bindings(),\n load_sidebar_bindings(self),\n load_confirm_exit_bindings(self),\n ConditionalKeyBindings(\n load_open_in_editor_bindings(),\n Condition(lambda: self.enable_open_in_editor)),\n # Extra key bindings should not be active when the sidebar is visible.\n ConditionalKeyBindings(\n self.extra_key_bindings,\n Condition(lambda: not self.show_sidebar))\n ]),\n color_depth=lambda: self.color_depth,\n paste_mode=Condition(lambda: self.paste_mode),\n mouse_support=Condition(lambda: self.enable_mouse_support),\n style=DynamicStyle(lambda: self._current_style),\n style_transformation=self.style_transformation,\n include_default_pygments_style=False,\n reverse_vi_search_direction=True)",
"def new(self, name=None, stack='cedar', region=None):\n \"\"\"Creates a new app.\"\"\"\n\n payload = {}\n\n if name:\n payload['app[name]'] = name\n\n if stack:\n payload['app[stack]'] = stack\n\n if region:\n payload['app[region]'] = region\n\n r = self._h._http_resource(\n method='POST',\n resource=('apps',),\n data=payload\n )\n\n name = json.loads(r.content).get('name')\n return self._h.apps.get(name)",
"def get_app(self):\n '''Eliminate the builder by producing a new Bottle application.\n\n This should be the final call in your method chain. It uses all\n of the built up options to create a new Bottle application.\n\n :rtype: :class:`bottle.Bottle`\n '''\n if self.config is None:\n # If the user never sets a config instance, then just create\n # a default.\n self.config = Config()\n if self.mount_prefix is None:\n self.mount_prefix = self.config.config.get('url_prefix')\n\n self.inject('config', lambda: self.config)\n self.inject('kvlclient', lambda: self.config.kvlclient)\n self.inject('store', lambda: self.config.store)\n self.inject('label_store', lambda: self.config.label_store)\n self.inject('tags', lambda: self.config.tags)\n self.inject('search_engines', lambda: self.search_engines)\n self.inject('filters', lambda: self.filters)\n self.inject('request', lambda: bottle.request)\n self.inject('response', lambda: bottle.response)\n\n # DEPRECATED. Remove. ---AG\n self.inject('visid_to_dbid', lambda: self.visid_to_dbid)\n self.inject('dbid_to_visid', lambda: self.dbid_to_visid)\n\n # Also DEPRECATED.\n self.inject('label_hooks', lambda: [])\n\n # Load routes defined in entry points.\n for extroute in self.config.config.get('external_routes', []):\n mod, fun_name = extroute.split(':')\n logger.info('Loading external route: %s', extroute)\n fun = getattr(__import__(mod, fromlist=[fun_name]), fun_name)\n self.add_routes(fun())\n\n # This adds the `json=True` feature on routes, which always coerces\n # the output to JSON. Bottle, by default, only permits dictionaries\n # to be JSON, which is the correct behavior. (Because returning JSON\n # arrays is a hazard.)\n #\n # So we should fix the routes and then remove this. ---AG\n self.app.install(JsonPlugin())\n\n # Throw away the app and return it. Because this is elimination!\n app = self.app\n self.app = None\n if self.mount_prefix is not None:\n root = bottle.Bottle()\n root.mount(self.mount_prefix, app)\n return root\n else:\n return app",
"def get_or_create_application(self, id=None, manifest=None, name=None):\n \"\"\" Get application by id or name.\n If not found: create with given or generated parameters\n \"\"\"\n if id:\n return self.get_application(id=id)\n elif name:\n try:\n app = self.get_application(name=name)\n except exceptions.NotFoundError:\n app = self.create_application(name=name, manifest=manifest)\n return app\n raise exceptions.NotEnoughParams('Not enough parameters')",
"def create_app():\n \"\"\"Flask application factory function.\"\"\"\n app = Flask(__name__)\n app.config_from_envvar = app.config.from_envvar\n app.config_from_object = app.config.from_object\n configure_app(app)\n init_core(app)\n register_blueprints(app)\n return app",
"def make_app(global_conf, full_stack=True, static_files=True, **app_conf):\n \"\"\"Create a Pylons WSGI application and return it\n\n ``global_conf``\n The inherited configuration for this application. Normally from\n the [DEFAULT] section of the Paste ini file.\n\n ``full_stack``\n Whether this application provides a full WSGI stack (by default,\n meaning it handles its own exceptions and errors). Disable\n full_stack when this application is \"managed\" by another WSGI\n middleware.\n\n ``static_files``\n Whether this application serves its own static files; disable\n when another web server is responsible for serving them.\n\n ``app_conf``\n The application's local configuration. Normally specified in\n the [app:<name>] section of the Paste ini file (where <name>\n defaults to main).\n\n \"\"\"\n # Configure the Pylons environment\n config = load_environment(global_conf, app_conf)\n\n # The Pylons WSGI app\n app = PylonsApp(config=config)\n\n # Routing/Session Middleware\n app = RoutesMiddleware(app, config['routes.map'], singleton=False)\n app = SessionMiddleware(app, config)\n\n # CUSTOM MIDDLEWARE HERE (filtered by error handling middlewares)\n\n if asbool(full_stack):\n # Handle Python exceptions\n app = ErrorHandler(app, global_conf, **config['pylons.errorware'])\n\n # Display error documents for 401, 403, 404 status codes (and\n # 500 when debug is disabled)\n if asbool(config['debug']):\n app = StatusCodeRedirect(app)\n else:\n app = StatusCodeRedirect(app, [400, 401, 403, 404, 500])\n\n # Establish the Registry for this application\n app = RegistryManager(app)\n\n if asbool(static_files):\n # Serve static files\n static_app = StaticURLParser(config['pylons.paths']['static_files'])\n app = Cascade([static_app, app])\n app.config = config\n return app"
] |
[
0.8407135009765625,
0.7970953583717346,
0.7849531769752502,
0.7834771275520325,
0.7799986004829407,
0.7793078422546387,
0.7776985764503479,
0.773426353931427,
0.7722349166870117,
0.7700160145759583,
0.7698983550071716,
0.7687246799468994
] |
Get application object by name or id.
|
def get_application(self, id=None, name=None):
""" Get application object by name or id.
"""
    log.info("Picking application: %s (%s)", name, id)
return self.applications[id or name]
|
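A minimal usage sketch for the lookup above; `org` is assumed to be an already-authenticated organization object exposing this method, and the application name is a placeholder:

app = org.get_application(name="my-app")             # lookup by name
same = org.get_application(id=app.applicationId)     # or by id, via the same mapping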
[
"def get_or_create_application(self, id=None, manifest=None, name=None):\n \"\"\" Get application by id or name.\n If not found: create with given or generated parameters\n \"\"\"\n if id:\n return self.get_application(id=id)\n elif name:\n try:\n app = self.get_application(name=name)\n except exceptions.NotFoundError:\n app = self.create_application(name=name, manifest=manifest)\n return app\n raise exceptions.NotEnoughParams('Not enough parameters')",
"def get_application(name):\n \"\"\"Fetch an :class:`Application` associated with ``name`` if available.\n\n This function may return an :ref:`asynchronous component <coroutine>`.\n The application name is set during initialisation. Check the\n :attr:`Configurator.name` attribute for more information.\n \"\"\"\n actor = get_actor()\n\n if actor:\n if actor.is_arbiter():\n return _get_app(actor, name, False)\n else:\n return _get_remote_app(actor, name)",
"def application(self, id=None, manifest=None, name=None):\n \"\"\" Smart method. Creates, picks or modifies application.\n If application found by name or id and manifest not changed: return app.\n If app found by id, but other parameters differs: change them.\n If no application found, create.\n \"\"\"\n\n modify = False\n found = False\n\n # Try to find application by name or id\n if name and id:\n found = self.get_application(id=id)\n if not found.name == name:\n modify = True\n elif id:\n found = self.get_application(id=id)\n name = found.name\n elif name:\n try:\n found = self.get_application(name=name)\n except exceptions.NotFoundError:\n pass\n\n # If found - compare parameters\n if found:\n if manifest and not manifest == found.manifest:\n modify = True\n\n # We need to update application\n if found and modify:\n found.update(name=name, manifest=manifest)\n if not found:\n created = self.create_application(name=name, manifest=manifest)\n\n return found or created",
"def get(self, key, value):\n \"\"\"Get single app by one of id or name\n\n Supports resource cache\n\n Keyword Args:\n id (str): Full app id\n name (str): App name\n\n Returns:\n App: Corresponding App resource instance\n\n Raises:\n TypeError: No or multiple keyword arguments provided\n ValueError: No matching app found on server\n \"\"\"\n if key == 'id':\n # Server returns 204 instead of 404 for a non-existent app id\n response = self._swimlane.request('get', 'app/{}'.format(value))\n if response.status_code == 204:\n raise ValueError('No app with id \"{}\"'.format(value))\n\n return App(\n self._swimlane,\n response.json()\n )\n else:\n # Workaround for lack of support for get by name\n # Holdover from previous driver support, to be fixed as part of 3.x\n for app in self.list():\n if value and value == app.name:\n return app\n\n # No matching app found\n raise ValueError('No app with name \"{}\"'.format(value))",
"public static Application get(final BandwidthClient client, final String id) throws Exception {\n assert(id != null);\n final String applicationUri = client.getUserResourceInstanceUri(BandwidthConstants.APPLICATIONS_URI_PATH, id);\n final JSONObject applicationObj = toJSONObject( client.get(applicationUri, null) );\n \n final Application application = new Application(client, applicationObj);\n return application;\n }",
"def get_instance(self, id=None, name=None):\n \"\"\" Get instance object by name or id.\n If application set, search within the application.\n \"\"\"\n log.info(\"Picking instance: %s (%s)\" % (name, id))\n if id: # submodule instances are invisible for lists\n return Instance(id=id, organization=self).init_router(self._router)\n return Instance.get(self._router, self, name)",
"def get_application_by_name(self, team_name, application_name):\n \"\"\"\n Retrieves an application using the given team name and application name.\n :param team_name: The name of the team of the application to be retrieved.\n :param application_name: The name of the application to be retrieved.\n \"\"\"\n return self._request('GET', 'rest/applications/' + str(team_name) + '/lookup?name=' + str(application_name))",
"def _get_by_id(cls, id, parent=None, **ctx_options):\n \"\"\"Returns an instance of Model class by ID.\n\n This is really just a shorthand for Key(cls, id, ...).get().\n\n Args:\n id: A string or integer key ID.\n parent: Optional parent key of the model to get.\n namespace: Optional namespace.\n app: Optional app ID.\n **ctx_options: Context options.\n\n Returns:\n A model instance or None if not found.\n \"\"\"\n return cls._get_by_id_async(id, parent=parent, **ctx_options).get_result()",
"public static Application get(final String id) throws Exception {\n assert(id != null);\n\n final BandwidthClient client = BandwidthClient.getInstance();\n return Application.get(client ,id);\n }",
"def get_app(self, reference_app=None):\n \"\"\"Helper method that implements the logic to look up an\n application.\"\"\"\n\n if reference_app is not None:\n return reference_app\n\n if current_app:\n return current_app._get_current_object()\n\n if self.app is not None:\n return self.app\n\n raise RuntimeError(\n 'No application found. Either work inside a view function or push'\n ' an application context. See'\n ' http://flask-sqlalchemy.pocoo.org/contexts/.'\n )",
"def application_detail(request, application_id, state=None, label=None):\n \"\"\" A authenticated used is trying to access an application. \"\"\"\n application = base.get_application(pk=application_id)\n state_machine = base.get_state_machine(application)\n return state_machine.process(request, application, state, label)",
"def _get_app(self, appname):\n \"\"\"\n returns app object or None\n \"\"\"\n try:\n app = APPS.get_app_config(appname)\n except Exception as e:\n self.err(e)\n return\n return app"
] |
[
0.7887581586837769,
0.7730582356452942,
0.7647751569747925,
0.7630671262741089,
0.7434857487678528,
0.7432634830474854,
0.7394784092903137,
0.738411009311676,
0.737677276134491,
0.7369981408119202,
0.7270782589912415,
0.7241693139076233
] |
Get application by id or name.
If not found: create with given or generated parameters
|
def get_or_create_application(self, id=None, manifest=None, name=None):
""" Get application by id or name.
If not found: create with given or generated parameters
"""
if id:
return self.get_application(id=id)
elif name:
try:
app = self.get_application(name=name)
except exceptions.NotFoundError:
app = self.create_application(name=name, manifest=manifest)
return app
raise exceptions.NotEnoughParams('Not enough parameters')
|
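A hedged sketch of both paths through get_or_create_application; `org` and `manifest` are assumptions, not part of the record above:

app = org.get_or_create_application(name="my-app", manifest=manifest)  # fetch, or create on NotFoundError
app = org.get_or_create_application(id=app.applicationId)              # plain fetch by id
# With neither id nor name, the call raises exceptions.NotEnoughParams.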
[
"def application(self, id=None, manifest=None, name=None):\n \"\"\" Smart method. Creates, picks or modifies application.\n If application found by name or id and manifest not changed: return app.\n If app found by id, but other parameters differs: change them.\n If no application found, create.\n \"\"\"\n\n modify = False\n found = False\n\n # Try to find application by name or id\n if name and id:\n found = self.get_application(id=id)\n if not found.name == name:\n modify = True\n elif id:\n found = self.get_application(id=id)\n name = found.name\n elif name:\n try:\n found = self.get_application(name=name)\n except exceptions.NotFoundError:\n pass\n\n # If found - compare parameters\n if found:\n if manifest and not manifest == found.manifest:\n modify = True\n\n # We need to update application\n if found and modify:\n found.update(name=name, manifest=manifest)\n if not found:\n created = self.create_application(name=name, manifest=manifest)\n\n return found or created",
"def get_application(self, id=None, name=None):\n \"\"\" Get application object by name or id.\n \"\"\"\n log.info(\"Picking application: %s (%s)\" % (name, id))\n return self.applications[id or name]",
"def find_application(app_id=None, app_name=None):\n \"\"\"\n find the application according application id (prioritary) or application name\n :param app_id: the application id\n :param app_name: the application name\n :return: found application or None if not found\n \"\"\"\n LOGGER.debug(\"ApplicationService.find_application\")\n if (app_id is None or not app_id) and (app_name is None or not app_name):\n raise exceptions.ArianeCallParametersError('id and name')\n\n if (app_id is not None and app_id) and (app_name is not None and app_name):\n LOGGER.warn('ApplicationService.find_application - Both id and name are defined. '\n 'Will give you search on id.')\n app_name = None\n\n params = None\n if app_id is not None and app_id:\n params = {'id': app_id}\n elif app_name is not None and app_name:\n params = {'name': app_name}\n\n ret = None\n if params is not None:\n args = {'http_operation': 'GET', 'operation_path': 'get', 'parameters': params}\n response = ApplicationService.requester.call(args)\n if response.rc == 0:\n ret = Application.json_2_application(response.response_content)\n elif response.rc != 404:\n err_msg = 'ApplicationService.find_application - Problem while finding application (id:' + \\\n str(app_id) + ', name:' + str(app_name) + '). ' + \\\n 'Reason: ' + str(response.response_content) + '-' + str(response.error_message) + \\\n \" (\" + str(response.rc) + \")\"\n LOGGER.warning(\n err_msg\n )\n\n return ret",
"def get(self, key, value):\n \"\"\"Get single app by one of id or name\n\n Supports resource cache\n\n Keyword Args:\n id (str): Full app id\n name (str): App name\n\n Returns:\n App: Corresponding App resource instance\n\n Raises:\n TypeError: No or multiple keyword arguments provided\n ValueError: No matching app found on server\n \"\"\"\n if key == 'id':\n # Server returns 204 instead of 404 for a non-existent app id\n response = self._swimlane.request('get', 'app/{}'.format(value))\n if response.status_code == 204:\n raise ValueError('No app with id \"{}\"'.format(value))\n\n return App(\n self._swimlane,\n response.json()\n )\n else:\n # Workaround for lack of support for get by name\n # Holdover from previous driver support, to be fixed as part of 3.x\n for app in self.list():\n if value and value == app.name:\n return app\n\n # No matching app found\n raise ValueError('No app with name \"{}\"'.format(value))",
"def get_or_create_instance(self, id=None, application=None, revision=None, environment=None, name=None, parameters=None, submodules=None,\n destroyInterval=None):\n \"\"\" Get instance by id or name.\n If not found: create with given parameters\n \"\"\"\n try:\n instance = self.get_instance(id=id, name=name)\n if name and name != instance.name:\n instance.rename(name)\n instance.ready()\n return instance\n except exceptions.NotFoundError:\n return self.create_instance(application, revision, environment, name, parameters, submodules, destroyInterval)",
"def create_application(self, name=None, manifest=None):\n \"\"\" Creates application and returns Application object.\n \"\"\"\n if not manifest:\n raise exceptions.NotEnoughParams('Manifest not set')\n if not name:\n name = 'auto-generated-name'\n from qubell.api.private.application import Application\n return Application.new(self, name, manifest, self._router)",
"def get_or_create_environment(self, id=None, name=None, zone=None, default=False):\n \"\"\" Get environment by id or name.\n If not found: create with given or generated parameters\n \"\"\"\n if id:\n return self.get_environment(id=id)\n elif name:\n try:\n env = self.get_environment(name=name)\n self._assert_env_and_zone(env, zone)\n except exceptions.NotFoundError:\n env = self.create_environment(name=name, zone=zone, default=default)\n return env\n else:\n name = 'auto-generated-env'\n return self.create_environment(name=name, zone=zone, default=default)",
"function getOrCreateApplication(name, owner_uuid, files, extra_opts, cb) {\n var self = this;\n var log = this.log;\n\n assert.string(name, 'name');\n assert.string(owner_uuid, 'owner_uuid');\n assert.ok(files, 'files'); // string or list of strings\n\n if (arguments.length === 4) {\n cb = extra_opts;\n extra_opts = null;\n }\n\n assert.func(cb, 'cb');\n\n log.info({\n name: name,\n owner_uuid: owner_uuid\n }, 'getting or creating application');\n\n async.waterfall([\n function (subcb) {\n var search = {};\n search.name = name;\n search.owner_uuid = owner_uuid;\n\n if (extra_opts && extra_opts.include_master)\n search.include_master = true;\n\n self.listApplications(search, function (err, apps) {\n if (err) {\n log.error(err, 'failed to list applications');\n return (subcb(err));\n }\n\n if (apps.length > 0) {\n log.debug({ app: apps[0] }, 'found application %s', name);\n return (cb(null, apps[0]));\n }\n\n return (subcb(null));\n });\n },\n function (subcb) {\n readAndMergeFiles(files, function (err, obj) {\n if (err)\n return (subcb(err));\n\n return (subcb(null, mergeOptions(obj, extra_opts)));\n });\n },\n function (opts, subcb) {\n assert.object(opts, 'opts');\n assert.func(subcb, 'subcb');\n\n self.createApplication(name, owner_uuid, opts,\n function (err, app) {\n if (err) {\n log.error(err, 'failed to create ' +\n 'application %s', name);\n return (subcb(err));\n }\n\n log.info('created application %s', app.uuid);\n return (subcb(null, app));\n });\n }\n ], cb);\n}",
"def __get_or_create_app(self, url, cache_key):\n \"\"\" Get the app from cache or generate a new one if required\n\n Because app object doesn't have etag/expiry, we have to make\n a head() call before, to have these informations first... \"\"\"\n headers = {\"Accept\": \"application/json\"}\n app_url = '%s?datasource=%s' % (url, self.datasource)\n\n cached = self.cache.get(cache_key, (None, None, 0))\n if cached is None or len(cached) != 3:\n self.cache.invalidate(cache_key)\n cached_app, cached_headers, cached_expiry = (cached, None, 0)\n else:\n cached_app, cached_headers, cached_expiry = cached\n\n if cached_app is not None and cached_headers is not None:\n # we didn't set custom expire, use header expiry\n expires = cached_headers.get('expires', None)\n cache_timeout = -1\n if self.expire is None and expires is not None:\n cache_timeout = get_cache_time_left(\n cached_headers['expires']\n )\n if cache_timeout >= 0:\n return cached_app\n\n # we set custom expire, check this instead\n else:\n if self.expire == 0 or cached_expiry >= time.time():\n return cached_app\n\n # if we have etags, add the header to use them\n etag = cached_headers.get('etag', None)\n if etag is not None:\n headers['If-None-Match'] = etag\n\n # if nothing makes us use the cache, invalidate it\n if ((expires is None or cache_timeout < 0 or\n cached_expiry < time.time()) and etag is None):\n self.cache.invalidate(cache_key)\n\n # set timeout value in case we have to cache it later\n timeout = 0\n if self.expire is not None and self.expire > 0:\n timeout = time.time() + self.expire\n\n # we are here, we know we have to make a head call...\n res = requests.head(app_url, headers=headers)\n if res.status_code == 304 and cached_app is not None:\n self.cache.set(\n cache_key,\n (cached_app, res.headers, timeout)\n )\n return cached_app\n\n # ok, cache is not accurate, make the full stuff\n app = App.create(app_url)\n if self.caching:\n self.cache.set(cache_key, (app, res.headers, timeout))\n\n return app",
"def get_application(name):\n \"\"\"Fetch an :class:`Application` associated with ``name`` if available.\n\n This function may return an :ref:`asynchronous component <coroutine>`.\n The application name is set during initialisation. Check the\n :attr:`Configurator.name` attribute for more information.\n \"\"\"\n actor = get_actor()\n\n if actor:\n if actor.is_arbiter():\n return _get_app(actor, name, False)\n else:\n return _get_remote_app(actor, name)",
"def new(self, name=None, stack='cedar', region=None):\n \"\"\"Creates a new app.\"\"\"\n\n payload = {}\n\n if name:\n payload['app[name]'] = name\n\n if stack:\n payload['app[stack]'] = stack\n\n if region:\n payload['app[region]'] = region\n\n r = self._h._http_resource(\n method='POST',\n resource=('apps',),\n data=payload\n )\n\n name = json.loads(r.content).get('name')\n return self._h.apps.get(name)",
"def create_app(name, config=None, flask_params=None):\n \"\"\"\n Create app\n Generalized way of creating a flask app. Use it in your concrete apps and\n do further configuration there: add app-specific options, extensions,\n listeners and other features.\n\n Note: application name should be its fully qualified __name__, something\n like project.api.app. This is how we fetch routing settings.\n \"\"\"\n from boiler.config import DefaultConfig\n if config is None:\n config = DefaultConfig()\n\n # get flask parameters\n options = dict(import_name=name)\n if flask_params is not None:\n options.update(flask_params)\n if config.get('FLASK_STATIC_URL') is not None:\n options['static_url_path'] = config.get('FLASK_STATIC_URL')\n if config.get('FLASK_STATIC_PATH') is not None:\n options['static_folder'] = config.get('FLASK_STATIC_PATH')\n\n # create an app\n app = Flask(**options)\n\n # configure app\n if config.__class__ is type:\n raise Exception('Config must be an object, got class instead.')\n\n app.config.from_object(DefaultConfig())\n app.config.from_object(config)\n\n # register error handler\n register_error_handler(app)\n\n # use kernel templates\n kernel_templates_path = path.realpath(path.dirname(__file__)+'/templates')\n fallback_loader = FileSystemLoader([kernel_templates_path])\n custom_loader = ChoiceLoader([app.jinja_loader, fallback_loader])\n app.jinja_loader = custom_loader\n\n # time restarts?\n if app.config.get('TIME_RESTARTS'):\n restart_timer.time_restarts(os.path.join(os.getcwd(), 'var', 'data'))\n\n # detect browsersync proxy\n @app.before_request\n def detect_browsersync():\n g.dev_proxy = False\n proxy_header = app.config.get('DEV_PROXY_HEADER')\n if proxy_header:\n g.dev_proxy = bool(request.headers.get(proxy_header))\n\n return app"
] |
[
0.8237614035606384,
0.7673298716545105,
0.7662159204483032,
0.7539357542991638,
0.7464581727981567,
0.7427664399147034,
0.7407477498054504,
0.727322518825531,
0.7265774011611938,
0.7235315442085266,
0.7223343253135681,
0.7066302299499512
] |
Smart method. Creates, picks or modifies application.
If application found by name or id and manifest not changed: return app.
If app found by id, but other parameters differ: change them.
If no application found, create.
|
def application(self, id=None, manifest=None, name=None):
""" Smart method. Creates, picks or modifies application.
If application found by name or id and manifest not changed: return app.
    If app found by id, but other parameters differ: change them.
If no application found, create.
"""
modify = False
found = False
# Try to find application by name or id
if name and id:
found = self.get_application(id=id)
        if found.name != name:
modify = True
elif id:
found = self.get_application(id=id)
name = found.name
elif name:
try:
found = self.get_application(name=name)
except exceptions.NotFoundError:
pass
# If found - compare parameters
if found:
        if manifest and manifest != found.manifest:
modify = True
# We need to update application
if found and modify:
found.update(name=name, manifest=manifest)
if not found:
created = self.create_application(name=name, manifest=manifest)
return found or created
|
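To illustrate the pick-or-modify behaviour, a hypothetical call sequence; `org`, `manifest` and `new_manifest` are assumptions:

app = org.application(name="my-app", manifest=manifest)   # not found by name: created
app = org.application(name="my-app", manifest=manifest)   # found, manifest unchanged: returned as-is
app = org.application(id=app.applicationId, manifest=new_manifest)  # found, manifest differs: update() runs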
[
"def get_or_create_application(self, id=None, manifest=None, name=None):\n \"\"\" Get application by id or name.\n If not found: create with given or generated parameters\n \"\"\"\n if id:\n return self.get_application(id=id)\n elif name:\n try:\n app = self.get_application(name=name)\n except exceptions.NotFoundError:\n app = self.create_application(name=name, manifest=manifest)\n return app\n raise exceptions.NotEnoughParams('Not enough parameters')",
"def create_application(self, name=None, manifest=None):\n \"\"\" Creates application and returns Application object.\n \"\"\"\n if not manifest:\n raise exceptions.NotEnoughParams('Manifest not set')\n if not name:\n name = 'auto-generated-name'\n from qubell.api.private.application import Application\n return Application.new(self, name, manifest, self._router)",
"def create_app(self, app_id, app, minimal=True):\n \"\"\"Create and start an app.\n\n :param str app_id: application ID\n :param :class:`marathon.models.app.MarathonApp` app: the application to create\n :param bool minimal: ignore nulls and empty collections\n\n :returns: the created app (on success)\n :rtype: :class:`marathon.models.app.MarathonApp` or False\n \"\"\"\n app.id = app_id\n data = app.to_json(minimal=minimal)\n response = self._do_request('POST', '/v2/apps', data=data)\n if response.status_code == 201:\n return self._parse_response(response, MarathonApp)\n else:\n return False",
"def create_app_id(self, app_id, policies, display_name=None, mount_point='app-id', **kwargs):\n \"\"\"POST /auth/<mount point>/map/app-id/<app_id>\n\n :param app_id:\n :type app_id:\n :param policies:\n :type policies:\n :param display_name:\n :type display_name:\n :param mount_point:\n :type mount_point:\n :param kwargs:\n :type kwargs:\n :return:\n :rtype:\n \"\"\"\n\n # app-id can have more than 1 policy. It is easier for the user to pass in the\n # policies as a list so if they do, we need to convert to a , delimited string.\n if isinstance(policies, (list, set, tuple)):\n policies = ','.join(policies)\n\n params = {\n 'value': policies\n }\n\n # Only use the display_name if it has a value. Made it a named param for user\n # convienence instead of leaving it as part of the kwargs\n if display_name:\n params['display_name'] = display_name\n\n params.update(kwargs)\n\n return self._adapter.post('/v1/auth/{}/map/app-id/{}'.format(mount_point, app_id), json=params)",
"function getOrCreateApplication(name, owner_uuid, files, extra_opts, cb) {\n var self = this;\n var log = this.log;\n\n assert.string(name, 'name');\n assert.string(owner_uuid, 'owner_uuid');\n assert.ok(files, 'files'); // string or list of strings\n\n if (arguments.length === 4) {\n cb = extra_opts;\n extra_opts = null;\n }\n\n assert.func(cb, 'cb');\n\n log.info({\n name: name,\n owner_uuid: owner_uuid\n }, 'getting or creating application');\n\n async.waterfall([\n function (subcb) {\n var search = {};\n search.name = name;\n search.owner_uuid = owner_uuid;\n\n if (extra_opts && extra_opts.include_master)\n search.include_master = true;\n\n self.listApplications(search, function (err, apps) {\n if (err) {\n log.error(err, 'failed to list applications');\n return (subcb(err));\n }\n\n if (apps.length > 0) {\n log.debug({ app: apps[0] }, 'found application %s', name);\n return (cb(null, apps[0]));\n }\n\n return (subcb(null));\n });\n },\n function (subcb) {\n readAndMergeFiles(files, function (err, obj) {\n if (err)\n return (subcb(err));\n\n return (subcb(null, mergeOptions(obj, extra_opts)));\n });\n },\n function (opts, subcb) {\n assert.object(opts, 'opts');\n assert.func(subcb, 'subcb');\n\n self.createApplication(name, owner_uuid, opts,\n function (err, app) {\n if (err) {\n log.error(err, 'failed to create ' +\n 'application %s', name);\n return (subcb(err));\n }\n\n log.info('created application %s', app.uuid);\n return (subcb(null, app));\n });\n }\n ], cb);\n}",
"def update_app(self, app_id, app, force=False, minimal=True):\n \"\"\"Update an app.\n\n Applies writable settings in `app` to `app_id`\n Note: this method can not be used to rename apps.\n\n :param str app_id: target application ID\n :param app: application settings\n :type app: :class:`marathon.models.app.MarathonApp`\n :param bool force: apply even if a deployment is in progress\n :param bool minimal: ignore nulls and empty collections\n\n :returns: a dict containing the deployment id and version\n :rtype: dict\n \"\"\"\n # Changes won't take if version is set - blank it for convenience\n app.version = None\n\n params = {'force': force}\n data = app.to_json(minimal=minimal)\n\n response = self._do_request(\n 'PUT', '/v2/apps/{app_id}'.format(app_id=app_id), params=params, data=data)\n return response.json()",
"function(request, response, next){\n\t\tvar apptkn;\n\t\tif (!utils.Misc.isNullOrUndefined(request.get(X_BOLT_APP_TOKEN))) {\n\t\t\tapptkn = request.get(X_BOLT_APP_TOKEN);\n\t\t}\n\t\telse {\n\t\t\tvar error = new Error(errors['110']);\n\t\t\tresponse.end(utils.Misc.createResponse(null, error, 110));\n\t\t\treturn;\n\t\t}\n\n\t\tvar name = __getAppFromAppToken(apptkn, request);\n\t\tif (utils.Misc.isNullOrUndefined(name)) {\n\t\t\tvar error = new Error(errors['113']);\n\t\t\tresponse.end(utils.Misc.createResponse(null, error, 113));\n\t\t\treturn;\n\t\t}\n\t\tvar appnm = utils.String.trim(name.toLowerCase());\n\t\trequest.appName = appnm;\n\n\t\tnext();\n\t}",
"def new_app(self, App, prefix=None, callable=None, **params):\n \"\"\"Invoke this method in the :meth:`build` method as many times\n as the number of :class:`Application` required by this\n :class:`MultiApp`.\n\n :param App: an :class:`Application` class.\n :param prefix: The prefix to use for the application,\n the prefix is appended to\n the application :ref:`config parameters <settings>` and to the\n application name. Each call to this method must use a different\n value of for this parameter. It can be ``None``.\n :param callable: optional callable (function of object) used during\n initialisation of *App* (the :class:`Application.callable`).\n :param params: additional key-valued parameters used when creating\n an instance of *App*.\n :return: a tuple used by the :meth:`apps` method.\n \"\"\"\n params.update(self.cfg.params)\n params.pop('name', None) # remove the name\n prefix = prefix or ''\n if not prefix and '' in self._apps:\n prefix = App.name or App.__name__.lower()\n if not prefix:\n name = self.name\n cfg = App.create_config(params, name=name)\n else:\n name = '%s_%s' % (prefix, self.name)\n cfg = App.create_config(params, prefix=prefix, name=name)\n # Add the config entry to the multi app config if not available\n for k in cfg.settings:\n if k not in self.cfg.settings:\n self.cfg.settings[k] = cfg.settings[k]\n return new_app(prefix, (App, name, callable, cfg))",
"def _create_or_update_version(app_name, version, app_spec, try_update=True):\n \"\"\"\n Creates a new version of the app. Returns an app_id, or None if the app has\n already been created and published.\n \"\"\"\n # This has a race condition since the app could have been created or\n # published since we last looked.\n try:\n app_id = dxpy.api.app_new(app_spec)[\"id\"]\n return app_id\n except dxpy.exceptions.DXAPIError as e:\n # TODO: detect this error more reliably\n if e.name == 'InvalidInput' and e.msg == 'Specified name and version conflict with an existing alias':\n print('App %s/%s already exists' % (app_spec[\"name\"], version), file=sys.stderr)\n # The version number was already taken, so app/new doesn't work.\n # However, maybe it hasn't been published yet, so we might be able\n # to app-xxxx/update it.\n app_describe = dxpy.api.app_describe(\"app-\" + app_name, alias=version)\n if app_describe.get(\"published\", 0) > 0:\n return None\n return _update_version(app_name, version, app_spec, try_update=try_update)\n raise e",
"def find_application(options = {})\n raise \"Expecting :application_id\" unless options[:application_id]\n \n if @current_application && @current_application.application_id != options[:application_id]\n @current_application = KynetxAmApi::Application.new(self, options[:application_id])\n else\n @current_application ||= KynetxAmApi::Application.new(self, options[:application_id])\n end\n return @current_application\n end",
"private ManagedApplication createNewApplication(\n\t\t\tString name,\n\t\t\tString description,\n\t\t\tApplicationTemplate tpl,\n\t\t\tFile configurationDirectory )\n\tthrows AlreadyExistingException, IOException {\n\n\t\tthis.logger.info( \"Creating application \" + name + \" from template \" + tpl + \"...\" );\n\t\tif( Utils.isEmptyOrWhitespaces( name ))\n\t\t\tthrow new IOException( \"An application name cannot be empty.\" );\n\n\t\tApplication app = new Application( name, tpl ).description( description );\n\t\tif( ! app.getName().matches( ParsingConstants.PATTERN_APP_NAME ))\n\t\t\tthrow new IOException( \"Application names cannot contain invalid characters. Letters, digits, dots, underscores, brackets, spaces and the minus symbol are allowed.\" );\n\n\t\tif( this.nameToManagedApplication.containsKey( name ))\n\t\t\tthrow new AlreadyExistingException( name );\n\n\t\t// Create the application's directory\n\t\tFile targetDirectory = ConfigurationUtils.findApplicationDirectory( app.getName(), configurationDirectory );\n\t\tUtils.createDirectory( targetDirectory );\n\t\tapp.setDirectory( targetDirectory );\n\n\t\t// Create a descriptor\n\t\tFile descFile = new File( targetDirectory, Constants.PROJECT_DIR_DESC + \"/\" + Constants.PROJECT_FILE_DESCRIPTOR );\n\t\tUtils.createDirectory( descFile.getParentFile());\n\t\tApplicationDescriptor.save( descFile, app );\n\n\t\t// Copy all the templates's directories, except the descriptor, graph and instances\n\t\tList<File> tplDirectories = Utils.listDirectories( tpl.getDirectory());\n\t\tList<String> toSkip = Arrays.asList( Constants.PROJECT_DIR_DESC, Constants.PROJECT_DIR_GRAPH, Constants.PROJECT_DIR_INSTANCES );\n\t\tfor( File dir : tplDirectories ) {\n\t\t\tif( toSkip.contains( dir.getName().toLowerCase()))\n\t\t\t\tcontinue;\n\n\t\t\tFile newDir = new File( targetDirectory, dir.getName());\n\t\t\tUtils.copyDirectory( dir, newDir );\n\t\t}\n\n\t\t// Update the application name in all the root instances\n\t\tfor( Instance rootInstance : app.getRootInstances())\n\t\t\trootInstance.data.put( Instance.APPLICATION_NAME, app.getName());\n\n\t\t// Read application bindings.\n\t\t// They are not supposed to exist for new applications, but let's be flexible about it.\n\t\tConfigurationUtils.loadApplicationBindings( app );\n\n\t\t// Register the application\n\t\tManagedApplication ma = new ManagedApplication( app );\n\t\tthis.nameToManagedApplication.put( app.getName(), ma );\n\n\t\t// Save the instances!\n\t\tConfigurationUtils.saveInstances( ma );\n\n\t\tthis.logger.info( \"Application \" + name + \" was successfully created from the template \" + tpl + \".\" );\n\t\treturn ma;\n\t}",
"def create_app(**config):\n \"\"\"Application Factory\n\n You can create a new He-Man application with::\n\n from heman.config import create_app\n\n app = create_app() # app can be uses as WSGI application\n app.run() # Or you can run as a simple web server\n \"\"\"\n app = Flask(\n __name__, static_folder=None\n )\n\n if 'MONGO_URI' in os.environ:\n app.config['MONGO_URI'] = os.environ['MONGO_URI']\n\n app.config['LOG_LEVEL'] = 'DEBUG'\n app.config['SECRET_KEY'] = '2205552d13b5431bb537732bbb051f1214414f5ab34d47'\n\n configure_logging(app)\n configure_sentry(app)\n configure_api(app)\n configure_mongodb(app)\n configure_login(app)\n\n return app"
] |
[
0.835046112537384,
0.7747156023979187,
0.7421285510063171,
0.7291101813316345,
0.7210679650306702,
0.7196460366249084,
0.7195618152618408,
0.7153690457344055,
0.7129143476486206,
0.7119056582450867,
0.7084406614303589,
0.7074099183082581
] |
Launches instance in application and returns Instance object.
|
def create_instance(self, application, revision=None, environment=None, name=None, parameters=None, submodules=None,
destroyInterval=None, manifestVersion=None):
""" Launches instance in application and returns Instance object.
"""
from qubell.api.private.instance import Instance
return Instance.new(self._router, application, revision, environment, name,
parameters, submodules, destroyInterval, manifestVersion=manifestVersion)
|
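A minimal launch sketch; `org` and `app` are assumptions, and the parameters dict is a placeholder rather than a real manifest contract:

from qubell.api.private.instance import Instance  # as imported lazily above

instance = org.create_instance(application=app,
                               name="my-instance",
                               parameters={"input.param": "value"})  # placeholder keys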
[
"def instance(self, id=None, application=None, name=None, revision=None, environment=None, parameters=None, submodules=None, destroyInterval=None):\n \"\"\" Smart method. It does everything, to return Instance with given parameters within the application.\n If instance found running and given parameters are actual: return it.\n If instance found, but parameters differs - reconfigure instance with new parameters.\n If instance not found: launch instance with given parameters.\n Return: Instance object.\n \"\"\"\n instance = self.get_or_create_instance(id, application, revision, environment, name, parameters, submodules, destroyInterval)\n\n reconfigure = False\n # if found:\n # if revision and revision is not found.revision:\n # reconfigure = True\n # if parameters and parameters is not found.parameters:\n # reconfigure = True\n\n # We need to reconfigure instance\n if reconfigure:\n instance.reconfigure(revision=revision, parameters=parameters)\n\n return instance",
"def launchApp(self, **kwargs):\n \"\"\"Launch Starcraft2 process in the background using this configuration.\n WARNING: if the same IP address and port are specified between multiple\n SC2 process instances, all subsequent processes after the first\n will fail to initialize and crash.\n \"\"\"\n app = self.installedApp\n # TODO -- launch host in window minimized/headless mode\n vers = self.getVersion()\n return app.start(version=vers,#game_version=vers.baseVersion, data_version=vers.dataHash,\n full_screen=self.fullscreen, verbose=self.debug, **kwargs)",
"def create(self, ami, count, config=None):\n \"\"\"Create an instance using the launcher.\"\"\"\n return self.Launcher(config=config).launch(ami, count)",
"def Launcher(self, config=None):\n \"\"\"Provides a configurable launcher for EC2 instances.\"\"\"\n class _launcher(EC2ApiClient):\n \"\"\"Configurable launcher for EC2 instances. Create the Launcher\n (passing an optional dict of its attributes), set its attributes\n (as described in the RunInstances API docs), then launch().\n \"\"\"\n def __init__(self, aws, config):\n super(_launcher, self).__init__(aws)\n self.config = config\n self._attr = list(self.__dict__.keys()) + ['_attr']\n\n def launch(self, ami, min_count, max_count=0):\n \"\"\"Use given AMI to launch min_count instances with the\n current configuration. Returns instance info list.\n \"\"\"\n params = config.copy()\n params.update(dict([i for i in self.__dict__.items()\n if i[0] not in self._attr]))\n return self.call(\"RunInstances\",\n ImageId=ami,\n MinCount=min_count,\n MaxCount=max_count or min_count,\n response_data_key=\"Instances\",\n **params)\n\n if not config:\n config = {}\n return _launcher(self._aws, config)",
"def _app(self):\n \"\"\"The application object to work with; this is either the app\n that we have been bound to, or the current application.\n \"\"\"\n if self.app is not None:\n return self.app\n\n ctx = _request_ctx_stack.top\n if ctx is not None:\n return ctx.app\n\n try:\n from flask import _app_ctx_stack\n app_ctx = _app_ctx_stack.top\n if app_ctx is not None:\n return app_ctx.app\n except ImportError:\n pass\n\n raise RuntimeError('assets instance not bound to an application, '+\n 'and no application in current context')",
"def _launch_instance(self):\n \"\"\"\n Create new test instance in a resource group with the same name.\n \"\"\"\n self.running_instance_id = ipa_utils.generate_instance_name(\n 'azure-ipa-test'\n )\n self.logger.debug('ID of instance: %s' % self.running_instance_id)\n self._set_default_resource_names()\n\n try:\n # Try block acts as a transaction. If an exception is raised\n # attempt to cleanup the resource group and all created resources.\n\n # Create resource group.\n self._create_resource_group(self.region, self.running_instance_id)\n\n if self.subnet_id:\n # Use existing vnet/subnet.\n subnet = self.network.subnets.get(\n self.vnet_resource_group, self.vnet_name, self.subnet_id\n )\n else:\n self.subnet_id = ''.join([self.running_instance_id, '-subnet'])\n self.vnet_name = ''.join([self.running_instance_id, '-vnet'])\n\n # Create new vnet\n self._create_virtual_network(\n self.region, self.running_instance_id, self.vnet_name\n )\n\n # Create new subnet in new vnet\n subnet = self._create_subnet(\n self.running_instance_id, self.subnet_id, self.vnet_name\n )\n\n # Setup interface and public ip in resource group.\n public_ip = self._create_public_ip(\n self.public_ip_name, self.running_instance_id, self.region\n )\n interface = self._create_network_interface(\n self.ip_config_name, self.nic_name, public_ip, self.region,\n self.running_instance_id, subnet, self.accelerated_networking\n )\n\n # Get dictionary of VM parameters and create instance.\n vm_config = self._create_vm_config(interface)\n self._create_vm(vm_config)\n except Exception:\n try:\n self._terminate_instance()\n except Exception:\n pass\n raise\n else:\n # Ensure VM is in the running state.\n self._wait_on_instance('VM running', timeout=self.timeout)",
"def launchApplication(self, pchAppKey):\n \"\"\"\n Launches the application. The existing scene application will exit and then the new application will start.\n This call is not valid for dashboard overlay applications.\n \"\"\"\n\n fn = self.function_table.launchApplication\n result = fn(pchAppKey)\n return result",
"def instance(cls, *args, **kwargs):\n \"\"\" Singleton getter \"\"\"\n if cls._instance is None:\n cls._instance = cls(*args, **kwargs)\n loaded = cls._instance.reload()\n logging.getLogger('luigi-interface').info('Loaded %r', loaded)\n\n return cls._instance",
"def get_app(self):\n '''Eliminate the builder by producing a new Bottle application.\n\n This should be the final call in your method chain. It uses all\n of the built up options to create a new Bottle application.\n\n :rtype: :class:`bottle.Bottle`\n '''\n if self.config is None:\n # If the user never sets a config instance, then just create\n # a default.\n self.config = Config()\n if self.mount_prefix is None:\n self.mount_prefix = self.config.config.get('url_prefix')\n\n self.inject('config', lambda: self.config)\n self.inject('kvlclient', lambda: self.config.kvlclient)\n self.inject('store', lambda: self.config.store)\n self.inject('label_store', lambda: self.config.label_store)\n self.inject('tags', lambda: self.config.tags)\n self.inject('search_engines', lambda: self.search_engines)\n self.inject('filters', lambda: self.filters)\n self.inject('request', lambda: bottle.request)\n self.inject('response', lambda: bottle.response)\n\n # DEPRECATED. Remove. ---AG\n self.inject('visid_to_dbid', lambda: self.visid_to_dbid)\n self.inject('dbid_to_visid', lambda: self.dbid_to_visid)\n\n # Also DEPRECATED.\n self.inject('label_hooks', lambda: [])\n\n # Load routes defined in entry points.\n for extroute in self.config.config.get('external_routes', []):\n mod, fun_name = extroute.split(':')\n logger.info('Loading external route: %s', extroute)\n fun = getattr(__import__(mod, fromlist=[fun_name]), fun_name)\n self.add_routes(fun())\n\n # This adds the `json=True` feature on routes, which always coerces\n # the output to JSON. Bottle, by default, only permits dictionaries\n # to be JSON, which is the correct behavior. (Because returning JSON\n # arrays is a hazard.)\n #\n # So we should fix the routes and then remove this. ---AG\n self.app.install(JsonPlugin())\n\n # Throw away the app and return it. Because this is elimination!\n app = self.app\n self.app = None\n if self.mount_prefix is not None:\n root = bottle.Bottle()\n root.mount(self.mount_prefix, app)\n return root\n else:\n return app",
"def launch(url, wait=False, locate=False):\n \"\"\"This function launches the given URL (or filename) in the default\n viewer application for this file type. If this is an executable, it\n might launch the executable in a new session. The return value is\n the exit code of the launched application. Usually, ``0`` indicates\n success.\n\n Examples::\n\n click.launch('https://click.palletsprojects.com/')\n click.launch('/my/downloaded/file', locate=True)\n\n .. versionadded:: 2.0\n\n :param url: URL or filename of the thing to launch.\n :param wait: waits for the program to stop.\n :param locate: if this is set to `True` then instead of launching the\n application associated with the URL it will attempt to\n launch a file manager with the file located. This\n might have weird effects if the URL does not point to\n the filesystem.\n \"\"\"\n from ._termui_impl import open_url\n return open_url(url, wait=wait, locate=locate)",
"def launch_app(self, app_id, force_launch=False, callback_function=False):\n \"\"\" Launches an app on the Chromecast.\n\n Will only launch if it is not currently running unless\n force_launch=True. \"\"\"\n\n if not force_launch and self.app_id is None:\n self.update_status(lambda response:\n self._send_launch_message(app_id, force_launch,\n callback_function))\n else:\n self._send_launch_message(app_id, force_launch, callback_function)",
"def launch_app(self, timeout=10):\n \"\"\"\n Launch Spotify application.\n\n Will raise a LaunchError exception if there is no response from the\n Spotify app within timeout seconds.\n \"\"\"\n\n def callback():\n \"\"\"Callback function\"\"\"\n self.send_message({\"type\": TYPE_STATUS,\n \"credentials\": self.access_token,\n \"expiresIn\": self.expires})\n\n self.launch(callback_function=callback)\n\n # Need to wait for Spotify to be launched on Chromecast completely\n while not self.is_launched and timeout:\n time.sleep(1)\n timeout -= 1\n\n if not self.is_launched:\n raise LaunchError(\n \"Timeout when waiting for status response from Spotify app\")"
] |
[
0.7488821148872375,
0.7323926091194153,
0.7287603616714478,
0.7244489789009094,
0.7187560796737671,
0.7177624702453613,
0.7174997329711914,
0.7119090557098389,
0.7052464485168457,
0.704052746295929,
0.703783392906189,
0.7011165022850037
] |
Get instance object by name or id.
If application set, search within the application.
|
def get_instance(self, id=None, name=None):
""" Get instance object by name or id.
If application set, search within the application.
"""
    log.info("Picking instance: %s (%s)", name, id)
    if id:  # submodule instances are invisible to list endpoints
return Instance(id=id, organization=self).init_router(self._router)
return Instance.get(self._router, self, name)
|
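Usage sketch for the two lookup paths; `org` is an assumption and the id string is a placeholder. Lookup by id builds the object directly, which also covers submodule instances that list endpoints omit:

inst = org.get_instance(name="my-instance")   # resolved via Instance.get()
sub = org.get_instance(id="INSTANCE-ID")      # direct construction, no list lookup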
[
"def get_application(self, id=None, name=None):\n \"\"\" Get application object by name or id.\n \"\"\"\n log.info(\"Picking application: %s (%s)\" % (name, id))\n return self.applications[id or name]",
"def _get_by_id(cls, id, parent=None, **ctx_options):\n \"\"\"Returns an instance of Model class by ID.\n\n This is really just a shorthand for Key(cls, id, ...).get().\n\n Args:\n id: A string or integer key ID.\n parent: Optional parent key of the model to get.\n namespace: Optional namespace.\n app: Optional app ID.\n **ctx_options: Context options.\n\n Returns:\n A model instance or None if not found.\n \"\"\"\n return cls._get_by_id_async(id, parent=parent, **ctx_options).get_result()",
"def get(self, key, value):\n \"\"\"Get single app by one of id or name\n\n Supports resource cache\n\n Keyword Args:\n id (str): Full app id\n name (str): App name\n\n Returns:\n App: Corresponding App resource instance\n\n Raises:\n TypeError: No or multiple keyword arguments provided\n ValueError: No matching app found on server\n \"\"\"\n if key == 'id':\n # Server returns 204 instead of 404 for a non-existent app id\n response = self._swimlane.request('get', 'app/{}'.format(value))\n if response.status_code == 204:\n raise ValueError('No app with id \"{}\"'.format(value))\n\n return App(\n self._swimlane,\n response.json()\n )\n else:\n # Workaround for lack of support for get by name\n # Holdover from previous driver support, to be fixed as part of 3.x\n for app in self.list():\n if value and value == app.name:\n return app\n\n # No matching app found\n raise ValueError('No app with name \"{}\"'.format(value))",
"def get(self, arg):\n \"\"\"\n Return instance object with given EC2 ID or nametag.\n \"\"\"\n try:\n reservations = self.get_all_instances(filters={'tag:Name': [arg]})\n instance = reservations[0].instances[0]\n except IndexError:\n try:\n instance = self.get_all_instances([arg])[0].instances[0]\n except (_ResponseError, IndexError):\n # TODO: encapsulate actual exception for debugging\n err = \"Can't find any instance with name or ID '%s'\" % arg\n raise ValueError(err)\n return instance",
"def get_application(name):\n \"\"\"Fetch an :class:`Application` associated with ``name`` if available.\n\n This function may return an :ref:`asynchronous component <coroutine>`.\n The application name is set during initialisation. Check the\n :attr:`Configurator.name` attribute for more information.\n \"\"\"\n actor = get_actor()\n\n if actor:\n if actor.is_arbiter():\n return _get_app(actor, name, False)\n else:\n return _get_remote_app(actor, name)",
"def get_or_create_application(self, id=None, manifest=None, name=None):\n \"\"\" Get application by id or name.\n If not found: create with given or generated parameters\n \"\"\"\n if id:\n return self.get_application(id=id)\n elif name:\n try:\n app = self.get_application(name=name)\n except exceptions.NotFoundError:\n app = self.create_application(name=name, manifest=manifest)\n return app\n raise exceptions.NotEnoughParams('Not enough parameters')",
"def _get_by_id_async(cls, id, parent=None, app=None, namespace=None,\n **ctx_options):\n \"\"\"Returns an instance of Model class by ID (and app, namespace).\n\n This is the asynchronous version of Model._get_by_id().\n \"\"\"\n key = Key(cls._get_kind(), id, parent=parent, app=app, namespace=namespace)\n return key.get_async(**ctx_options)",
"def locate_item(ident, stateless=False, cache_id=None):\n '''Locate a dash application, given either the\n slug of an instance or the name for a stateless app'''\n if stateless:\n dash_app = find_stateless_by_name(ident)\n else:\n dash_app = get_object_or_404(DashApp, slug=ident)\n\n app = dash_app.as_dash_instance(cache_id=cache_id)\n return dash_app, app",
"def get(cls, name=None):\n \"\"\"Gets the application wide depot instance.\n\n Might return ``None`` if :meth:`configure` has not been\n called yet.\n\n \"\"\"\n if name is None:\n name = cls._default_depot\n\n name = cls.resolve_alias(name) # resolve alias\n return cls._depots.get(name)",
"def find_application(app_id=None, app_name=None):\n \"\"\"\n find the application according application id (prioritary) or application name\n :param app_id: the application id\n :param app_name: the application name\n :return: found application or None if not found\n \"\"\"\n LOGGER.debug(\"ApplicationService.find_application\")\n if (app_id is None or not app_id) and (app_name is None or not app_name):\n raise exceptions.ArianeCallParametersError('id and name')\n\n if (app_id is not None and app_id) and (app_name is not None and app_name):\n LOGGER.warn('ApplicationService.find_application - Both id and name are defined. '\n 'Will give you search on id.')\n app_name = None\n\n params = None\n if app_id is not None and app_id:\n params = {'id': app_id}\n elif app_name is not None and app_name:\n params = {'name': app_name}\n\n ret = None\n if params is not None:\n args = {'http_operation': 'GET', 'operation_path': 'get', 'parameters': params}\n response = ApplicationService.requester.call(args)\n if response.rc == 0:\n ret = Application.json_2_application(response.response_content)\n elif response.rc != 404:\n err_msg = 'ApplicationService.find_application - Problem while finding application (id:' + \\\n str(app_id) + ', name:' + str(app_name) + '). ' + \\\n 'Reason: ' + str(response.response_content) + '-' + str(response.error_message) + \\\n \" (\" + str(response.rc) + \")\"\n LOGGER.warning(\n err_msg\n )\n\n return ret",
"def get_or_create_instance(self, id=None, application=None, revision=None, environment=None, name=None, parameters=None, submodules=None,\n destroyInterval=None):\n \"\"\" Get instance by id or name.\n If not found: create with given parameters\n \"\"\"\n try:\n instance = self.get_instance(id=id, name=name)\n if name and name != instance.name:\n instance.rename(name)\n instance.ready()\n return instance\n except exceptions.NotFoundError:\n return self.create_instance(application, revision, environment, name, parameters, submodules, destroyInterval)",
"def application(self, id=None, manifest=None, name=None):\n \"\"\" Smart method. Creates, picks or modifies application.\n If application found by name or id and manifest not changed: return app.\n If app found by id, but other parameters differs: change them.\n If no application found, create.\n \"\"\"\n\n modify = False\n found = False\n\n # Try to find application by name or id\n if name and id:\n found = self.get_application(id=id)\n if not found.name == name:\n modify = True\n elif id:\n found = self.get_application(id=id)\n name = found.name\n elif name:\n try:\n found = self.get_application(name=name)\n except exceptions.NotFoundError:\n pass\n\n # If found - compare parameters\n if found:\n if manifest and not manifest == found.manifest:\n modify = True\n\n # We need to update application\n if found and modify:\n found.update(name=name, manifest=manifest)\n if not found:\n created = self.create_application(name=name, manifest=manifest)\n\n return found or created"
] |
[
0.8040432929992676,
0.7904948592185974,
0.7791473865509033,
0.7608667612075806,
0.7448980808258057,
0.7438936233520508,
0.7370529770851135,
0.7324503064155579,
0.7296027541160583,
0.7280678153038025,
0.7261772155761719,
0.7245195508003235
] |
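The application-lookup snippets in the row above share one contract: an id lookup is authoritative (it never falls back), a name lookup may miss and fall back to creation, and id wins when both are given. A minimal standalone sketch of that contract; every name below is hypothetical and belongs to none of the quoted libraries.

class NotEnoughParams(Exception):
    """Raised when neither id nor name is supplied, mirroring the snippets."""

def resolve_application(by_id, by_name, id=None, name=None):
    """Resolve by id first (id wins when both are given), then by name."""
    if id is not None:
        return by_id[id]            # authoritative: an unknown id raises KeyError
    if name is not None:
        return by_name.get(name)    # a name lookup may miss and return None
    raise NotEnoughParams('id or name required')

apps_by_id = {'a1': {'id': 'a1', 'name': 'crm'}}
apps_by_name = {'crm': apps_by_id['a1']}
assert resolve_application(apps_by_id, apps_by_name, id='a1')['name'] == 'crm'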
Get the list of instances in JSON format, converted to a list
|
def list_instances_json(self, application=None, show_only_destroyed=False):
""" Get list of instances in json format converted to list"""
    # TODO: application should not be a parameter here; Application should expose its own list. Kept only for code reuse.
q_filter = {'sortBy': 'byCreation', 'descending': 'true',
'mode': 'short',
'from': '0', 'to': '10000'}
if not show_only_destroyed:
q_filter['showDestroyed'] = 'false'
else:
q_filter['showDestroyed'] = 'true'
q_filter['showRunning'] = 'false'
q_filter['showError'] = 'false'
q_filter['showLaunching'] = 'false'
if application:
q_filter["applicationFilterId"] = application.applicationId
resp_json = self._router.get_instances(org_id=self.organizationId, params=q_filter).json()
    if isinstance(resp_json, dict):
        instances = [instance for g in resp_json['groups'] for instance in g['records']]
    else:  # TODO: compatibility fix for platforms < 37.1, which return a plain list
        instances = resp_json
return instances
|
[
"def to_list(self):\n \"\"\"\n To a list of dicts (each dict is an instances)\n \"\"\"\n ret = []\n for instance in self.instances:\n ret.append(instance.to_dict())\n return ret",
"def from_json_to_list(cls, data: str,\n force_snake_case=True, force_cast: bool=False, restrict: bool=False) -> TList[T]:\n \"\"\"From json string to list of instance\n\n :param data: Json string\n :param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True\n :param force_cast: Cast forcibly if True\n :param restrict: Prohibit extra parameters if True\n :return: List of instance\n\n Usage:\n\n >>> from owlmixin.samples import Human\n >>> humans: TList[Human] = Human.from_json_to_list('''[\n ... {\"id\": 1, \"name\": \"Tom\", \"favorites\": [{\"name\": \"Apple\"}]},\n ... {\"id\": 2, \"name\": \"John\", \"favorites\": [{\"name\": \"Orange\"}]}\n ... ]''')\n >>> humans[0].name\n 'Tom'\n >>> humans[1].name\n 'John'\n \"\"\"\n return cls.from_dicts(util.load_json(data),\n force_snake_case=force_snake_case,\n force_cast=force_cast,\n restrict=restrict)",
"def get(self, name, return_json=False, quiet=False):\n '''get is a list for a single instance. It is assumed to be running,\n and we need to look up the PID, etc.\n '''\n from spython.utils import check_install\n check_install()\n\n # Ensure compatible for singularity prior to 3.0, and after 3.0\n subgroup = \"instance.list\"\n\n if 'version 3' in self.version():\n subgroup = [\"instance\", \"list\"]\n\n cmd = self._init_command(subgroup)\n\n cmd.append(name)\n output = run_command(cmd, quiet=True)\n\n # Success, we have instances\n\n if output['return_code'] == 0:\n\n # Only print the table if we are returning json\n if quiet is False:\n print(''.join(output['message']))\n\n # Prepare json result from table\n\n header = ['daemon_name','pid','container_image']\n instances = parse_table(output['message'][0], header)\n\n # Does the user want instance objects instead?\n listing = []\n if return_json is False:\n for i in instances:\n new_instance = Instance(pid=i['pid'],\n name=i['daemon_name'],\n image=i['container_image'],\n start=False)\n\n listing.append(new_instance)\n instances = listing\n\n # Couldn't get UID\n\n elif output['return_code'] == 255:\n bot.error(\"Couldn't get UID\")\n \n # Return code of 0\n else:\n bot.info('No instances found.')\n\n # If we are given a name, return just one\n if name is not None and len(instances) == 1:\n instances = instances[0]\n\n return instances",
"def from_jsonf_to_list(cls, fpath: str, encoding: str='utf8',\n force_snake_case=True, force_cast: bool=False, restrict: bool=False) -> TList[T]:\n \"\"\"From json file path to list of instance\n\n :param fpath: Json file path\n :param encoding: Json file encoding\n :param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True\n :param force_cast: Cast forcibly if True\n :param restrict: Prohibit extra parameters if True\n :return: List of instance\n \"\"\"\n return cls.from_dicts(util.load_jsonf(fpath, encoding),\n force_snake_case=force_snake_case,\n force_cast=force_cast,\n restrict=restrict)",
"def get(self, instance_ids=None, filters=None):\n \"\"\"List instance info.\"\"\"\n params = {}\n if filters:\n params[\"filters\"] = make_filters(filters)\n if instance_ids:\n params['InstanceIds'] = instance_ids\n reservations = self.call(\"DescribeInstances\",\n response_data_key=\"Reservations\",\n **params)\n if reservations:\n return list(chain(*(r[\"Instances\"] for r in reservations)))\n return []",
"def parse_list(cls, api, json_list):\n \"\"\"\n Parse a list of JSON objects into\n a result set of model instances.\n \"\"\"\n results = []\n for json_obj in json_list:\n if json_obj:\n obj = cls.parse(api, json_obj)\n results.append(obj)\n\n return results",
"def _batch_json_to_instances(self, json_dicts: List[JsonDict]) -> List[Instance]:\n \"\"\"\n Converts a list of JSON objects into a list of :class:`~allennlp.data.instance.Instance`s.\n By default, this expects that a \"batch\" consists of a list of JSON blobs which would\n individually be predicted by :func:`predict_json`. In order to use this method for\n batch prediction, :func:`_json_to_instance` should be implemented by the subclass, or\n if the instances have some dependency on each other, this method should be overridden\n directly.\n \"\"\"\n instances = []\n for json_dict in json_dicts:\n instances.append(self._json_to_instance(json_dict))\n return instances",
"def json_data(self, instance, default=None):\n \"\"\"Get a JSON compatible value\n \"\"\"\n value = self.get(instance)\n\n out = []\n for rel in value:\n if rel.isBroken():\n logger.warn(\"Skipping broken relation {}\".format(repr(rel)))\n continue\n obj = rel.to_object\n out.append(api.get_url_info(obj))\n return out",
"def list_instances(self):\n \"\"\"\n Lists the instances.\n\n Instances are returned in lexicographical order.\n\n :rtype: ~collections.Iterable[.Instance]\n \"\"\"\n # Server does not do pagination on listings of this resource.\n # Return an iterator anyway for similarity with other API methods\n response = self.get_proto(path='/instances')\n message = rest_pb2.ListInstancesResponse()\n message.ParseFromString(response.content)\n instances = getattr(message, 'instance')\n return iter([Instance(instance) for instance in instances])",
"async def get_instances(self, **kwargs) -> List[ApiResource]:\n \"\"\"Returns a list of resource instances.\n\n :raises PvApiError when a hub problem occurs.\"\"\"\n raw_resources = await self.get_resources(**kwargs)\n _instances = [\n self._resource_factory(_raw)\n for _raw in self._loop_raw(raw_resources)\n ]\n return _instances",
"private function getInstances()\n {\n $response = $this->call($this->tyrUrl . '/' . $this->version . '/instances/');\n\n $instancesN = json_decode($response);\n if (is_array($instancesN)) {\n $instances = array();\n foreach ($instancesN as $instance) {\n $instances[$instance->id] = $instance->name;\n }\n }\n\n return $instances;\n }",
"def list_to_instance_list(_self, _list, _Class):\n \"\"\"\n Takes a list of resource dicts and returns a list\n of resource instances, defined by the _Class param.\n\n :param _self: Original resource calling the method\n :type _self: core.MarvelObject\n :param _list: List of dicts describing a Resource.\n :type _list: list\n :param _Class: The Resource class to create a list of (Comic, Creator, etc).\n :type _Class: core.MarvelObject\n \n :returns: list -- List of Resource instances (Comic, Creator, etc).\n \"\"\"\n items = []\n for item in _list:\n items.append(_Class(_self.marvel, item))\n return items"
] |
[
0.7765924334526062,
0.7641329169273376,
0.746208667755127,
0.7422376275062561,
0.7317209839820862,
0.7262202501296997,
0.7249684929847717,
0.7231140732765198,
0.7216655015945435,
0.7160393595695496,
0.7155121564865112,
0.7132692933082581
] |
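The crux of list_instances_json above is the compatibility branch: newer platforms return a dict of groups, while platforms before 37.1 return the list directly. A minimal sketch of that normalization step in isolation, with made-up sample payloads.

def normalize_instances(resp_json):
    """Flatten {'groups': [{'records': [...]}, ...]} into a plain list;
    pass a pre-37.1 plain-list payload through unchanged."""
    if isinstance(resp_json, dict):
        return [record for group in resp_json['groups'] for record in group['records']]
    return resp_json

grouped = {'groups': [{'records': [{'id': 1}, {'id': 2}]}, {'records': [{'id': 3}]}]}
flat = [{'id': 1}, {'id': 2}, {'id': 3}]
assert normalize_instances(grouped) == normalize_instances(flat) == flat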
Get instance by id or name.
If not found: create with given parameters
|
def get_or_create_instance(self, id=None, application=None, revision=None, environment=None, name=None, parameters=None, submodules=None,
destroyInterval=None):
""" Get instance by id or name.
If not found: create with given parameters
"""
try:
instance = self.get_instance(id=id, name=name)
if name and name != instance.name:
instance.rename(name)
instance.ready()
return instance
except exceptions.NotFoundError:
return self.create_instance(application, revision, environment, name, parameters, submodules, destroyInterval)
|
[
"def get_or_create_application(self, id=None, manifest=None, name=None):\n \"\"\" Get application by id or name.\n If not found: create with given or generated parameters\n \"\"\"\n if id:\n return self.get_application(id=id)\n elif name:\n try:\n app = self.get_application(name=name)\n except exceptions.NotFoundError:\n app = self.create_application(name=name, manifest=manifest)\n return app\n raise exceptions.NotEnoughParams('Not enough parameters')",
"def get_instance(self, id=None, name=None):\n \"\"\" Get instance object by name or id.\n If application set, search within the application.\n \"\"\"\n log.info(\"Picking instance: %s (%s)\" % (name, id))\n if id: # submodule instances are invisible for lists\n return Instance(id=id, organization=self).init_router(self._router)\n return Instance.get(self._router, self, name)",
"def get(self, arg):\n \"\"\"\n Return instance object with given EC2 ID or nametag.\n \"\"\"\n try:\n reservations = self.get_all_instances(filters={'tag:Name': [arg]})\n instance = reservations[0].instances[0]\n except IndexError:\n try:\n instance = self.get_all_instances([arg])[0].instances[0]\n except (_ResponseError, IndexError):\n # TODO: encapsulate actual exception for debugging\n err = \"Can't find any instance with name or ID '%s'\" % arg\n raise ValueError(err)\n return instance",
"def get_or_create_environment(self, id=None, name=None, zone=None, default=False):\n \"\"\" Get environment by id or name.\n If not found: create with given or generated parameters\n \"\"\"\n if id:\n return self.get_environment(id=id)\n elif name:\n try:\n env = self.get_environment(name=name)\n self._assert_env_and_zone(env, zone)\n except exceptions.NotFoundError:\n env = self.create_environment(name=name, zone=zone, default=default)\n return env\n else:\n name = 'auto-generated-env'\n return self.create_environment(name=name, zone=zone, default=default)",
"def _get_by_id(cls, id, parent=None, **ctx_options):\n \"\"\"Returns an instance of Model class by ID.\n\n This is really just a shorthand for Key(cls, id, ...).get().\n\n Args:\n id: A string or integer key ID.\n parent: Optional parent key of the model to get.\n namespace: Optional namespace.\n app: Optional app ID.\n **ctx_options: Context options.\n\n Returns:\n A model instance or None if not found.\n \"\"\"\n return cls._get_by_id_async(id, parent=parent, **ctx_options).get_result()",
"def instance(instance_group_name, index_or_id, options = { deployment_name: Deployments::DEFAULT_DEPLOYMENT_NAME })\n find_instance(instances(options), instance_group_name, index_or_id)\n end",
"def get_by_id_or_404(self, id, **kwargs):\n \"\"\"Gets by a instance instance r raises a 404 is one isn't found.\"\"\"\n obj = self.get_by_id(id=id, **kwargs)\n\n if obj:\n return obj\n\n raise Http404",
"def instance(self, name=None, *args, **kwargs):\n \"\"\"Create a new instance using key ``name``.\n\n :param name: the name of the class (by default) or the key name of the\n class used to find the class\n :param args: given to the ``__init__`` method\n :param kwargs: given to the ``__init__`` method\n\n \"\"\"\n logger.info(f'new instance of {name}')\n t0 = time()\n name = self.default_name if name is None else name\n logger.debug(f'creating instance of {name}')\n class_name, params = self._class_name_params(name)\n cls = self._find_class(class_name)\n params.update(kwargs)\n if self._has_init_config(cls):\n logger.debug(f'found config parameter')\n params['config'] = self.config\n if self._has_init_name(cls):\n logger.debug(f'found name parameter')\n params['name'] = name\n if logger.level >= logging.DEBUG:\n for k, v in params.items():\n logger.debug(f'populating {k} -> {v} ({type(v)})')\n inst = self._instance(cls, *args, **params)\n logger.info(f'created {name} instance of {cls.__name__} ' +\n f'in {(time() - t0):.2f}s')\n return inst",
"def instance(self, id=None, application=None, name=None, revision=None, environment=None, parameters=None, submodules=None, destroyInterval=None):\n \"\"\" Smart method. It does everything, to return Instance with given parameters within the application.\n If instance found running and given parameters are actual: return it.\n If instance found, but parameters differs - reconfigure instance with new parameters.\n If instance not found: launch instance with given parameters.\n Return: Instance object.\n \"\"\"\n instance = self.get_or_create_instance(id, application, revision, environment, name, parameters, submodules, destroyInterval)\n\n reconfigure = False\n # if found:\n # if revision and revision is not found.revision:\n # reconfigure = True\n # if parameters and parameters is not found.parameters:\n # reconfigure = True\n\n # We need to reconfigure instance\n if reconfigure:\n instance.reconfigure(revision=revision, parameters=parameters)\n\n return instance",
"def get(cls, parent, name):\n \"\"\"Get an instance matching the parent and name\"\"\"\n return cls.query.filter_by(parent=parent, name=name).one_or_none()",
"def _get_by_id_async(cls, id, parent=None, app=None, namespace=None,\n **ctx_options):\n \"\"\"Returns an instance of Model class by ID (and app, namespace).\n\n This is the asynchronous version of Model._get_by_id().\n \"\"\"\n key = Key(cls._get_kind(), id, parent=parent, app=app, namespace=namespace)\n return key.get_async(**ctx_options)",
"def get_or_create(cls, name):\n \"\"\"\n Return the instance of the class with the specified name. If it doesn't\n already exist, create it.\n \"\"\"\n obj = cls.query.filter_by(name=name).one_or_none()\n if obj:\n return obj\n try:\n with session.begin_nested():\n obj = cls(name=name)\n session.add(obj)\n session.flush()\n return obj\n except IntegrityError:\n log.debug('Collision when adding %s(name=\"%s\"), returning existing object',\n cls.__name__, name)\n return cls.query.filter_by(name=name).one()"
] |
[
0.7630710601806641,
0.7392506003379822,
0.7307499051094055,
0.7299924492835999,
0.7236011028289795,
0.7102996706962585,
0.7102484107017517,
0.7079905867576599,
0.7051061987876892,
0.7030153870582581,
0.6990081071853638,
0.695988655090332
] |
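get_or_create_instance above is the classic lookup-then-create idiom: try the read path and treat only the library's NotFoundError as the signal to create. A toy, self-contained sketch of the same shape; the Registry class and its storage are invented for illustration, not part of the quoted client.

class NotFoundError(Exception):
    """Stand-in for the client library's not-found signal."""

class Registry:
    def __init__(self):
        self._items = {}

    def get_instance(self, name):
        try:
            return self._items[name]
        except KeyError:
            raise NotFoundError(name)

    def create_instance(self, name, **params):
        self._items[name] = dict(name=name, **params)
        return self._items[name]

    def get_or_create_instance(self, name, **params):
        # Read first; fall through to create only on the not-found signal.
        try:
            return self.get_instance(name)
        except NotFoundError:
            return self.create_instance(name, **params)

r = Registry()
a = r.get_or_create_instance('web', revision='1.0')  # created
b = r.get_or_create_instance('web', revision='2.0')  # found; params ignored
assert a is b and b['revision'] == '1.0'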
Smart method. Does everything needed to return an Instance with the given parameters within the application.
If the instance is found running and the given parameters are current: return it.
If the instance is found but the parameters differ: reconfigure it with the new parameters.
If no instance is found: launch one with the given parameters.
Return: Instance object.
|
def instance(self, id=None, application=None, name=None, revision=None, environment=None, parameters=None, submodules=None, destroyInterval=None):
""" Smart method. It does everything, to return Instance with given parameters within the application.
If instance found running and given parameters are actual: return it.
If instance found, but parameters differs - reconfigure instance with new parameters.
If instance not found: launch instance with given parameters.
Return: Instance object.
"""
instance = self.get_or_create_instance(id, application, revision, environment, name, parameters, submodules, destroyInterval)
reconfigure = False
# if found:
# if revision and revision is not found.revision:
# reconfigure = True
# if parameters and parameters is not found.parameters:
# reconfigure = True
# We need to reconfigure instance
if reconfigure:
instance.reconfigure(revision=revision, parameters=parameters)
return instance
|
[
"def get_or_create_instance(self, id=None, application=None, revision=None, environment=None, name=None, parameters=None, submodules=None,\n destroyInterval=None):\n \"\"\" Get instance by id or name.\n If not found: create with given parameters\n \"\"\"\n try:\n instance = self.get_instance(id=id, name=name)\n if name and name != instance.name:\n instance.rename(name)\n instance.ready()\n return instance\n except exceptions.NotFoundError:\n return self.create_instance(application, revision, environment, name, parameters, submodules, destroyInterval)",
"def create_instance(self, application, revision=None, environment=None, name=None, parameters=None, submodules=None,\n destroyInterval=None, manifestVersion=None):\n \"\"\" Launches instance in application and returns Instance object.\n \"\"\"\n from qubell.api.private.instance import Instance\n return Instance.new(self._router, application, revision, environment, name,\n parameters, submodules, destroyInterval, manifestVersion=manifestVersion)",
"def Launcher(self, config=None):\n \"\"\"Provides a configurable launcher for EC2 instances.\"\"\"\n class _launcher(EC2ApiClient):\n \"\"\"Configurable launcher for EC2 instances. Create the Launcher\n (passing an optional dict of its attributes), set its attributes\n (as described in the RunInstances API docs), then launch().\n \"\"\"\n def __init__(self, aws, config):\n super(_launcher, self).__init__(aws)\n self.config = config\n self._attr = list(self.__dict__.keys()) + ['_attr']\n\n def launch(self, ami, min_count, max_count=0):\n \"\"\"Use given AMI to launch min_count instances with the\n current configuration. Returns instance info list.\n \"\"\"\n params = config.copy()\n params.update(dict([i for i in self.__dict__.items()\n if i[0] not in self._attr]))\n return self.call(\"RunInstances\",\n ImageId=ami,\n MinCount=min_count,\n MaxCount=max_count or min_count,\n response_data_key=\"Instances\",\n **params)\n\n if not config:\n config = {}\n return _launcher(self._aws, config)",
"def start_instance(self,\n # these are common to any\n # CloudProvider.start_instance() call\n key_name, public_key_path, private_key_path,\n security_group, flavor, image_id, image_userdata,\n username=None,\n # these params are specific to the\n # GoogleCloudProvider\n node_name=None,\n boot_disk_type='pd-standard',\n boot_disk_size=10,\n tags=None,\n scheduling=None,\n **kwargs):\n \"\"\"Starts a new instance with the given properties and returns\n the instance id.\n\n :param str key_name: name of the ssh key to connect\n :param str public_key_path: path to ssh public key\n :param str private_key_path: path to ssh private key\n :param str security_group: firewall rule definition to apply on the\n instance\n :param str flavor: machine type to use for the instance\n :param str image_id: image type (os) to use for the instance\n :param str image_userdata: command to execute after startup\n :param str username: username for the given ssh key, default None\n :param str node_name: name of the instance\n :param str tags: comma-separated list of \"tags\" to label the instance\n :param str scheduling: scheduling option to use for the instance (\"preemptible\")\n :param str|Sequence tags: \"Tags\" to label the instance.\n\n Can be either a single string (individual tags are comma-separated),\n or a sequence of strings (each string being a single tag).\n\n :return: str - instance id of the started instance\n \"\"\"\n # construct URLs\n project_url = '%s%s' % (GCE_URL, self._project_id)\n machine_type_url = '%s/zones/%s/machineTypes/%s' \\\n % (project_url, self._zone, flavor)\n boot_disk_type_url = '%s/zones/%s/diskTypes/%s' \\\n % (project_url, self._zone, boot_disk_type)\n # FIXME: `conf.py` should ensure that `boot_disk_size` has the right\n # type, so there would be no need to convert here\n boot_disk_size_gb = int(boot_disk_size)\n network_url = '%s/global/networks/%s' % (project_url, self._network)\n if image_id.startswith('http://') or image_id.startswith('https://'):\n image_url = image_id\n else:\n # The image names and full resource URLs for several Google-\n # provided images (debian, centos, etc.) 
follow a consistent\n # pattern, and so elasticluster supports a short-hand of just\n # an image name, such as\n # \"debian-7-wheezy-v20150526\".\n # The cloud project in this case is then \"debian-cloud\".\n #\n # Several images do not follow this convention, and so are\n # special-cased here:\n # backports-debian -> debian-cloud\n # ubuntu -> ubuntu-os-cloud\n # containter-vm -> google-containers\n if image_id.startswith('container-vm-'):\n os_cloud = 'google-containers'\n elif image_id.startswith('backports-debian-'):\n os_cloud = 'debian-cloud'\n elif image_id.startswith('ubuntu-'):\n os_cloud = 'ubuntu-os-cloud'\n else:\n os = image_id.split(\"-\")[0]\n os_cloud = \"%s-cloud\" % os\n\n image_url = '%s%s/global/images/%s' % (\n GCE_URL, os_cloud, image_id)\n\n if scheduling is None:\n # use GCE's default\n scheduling_option = {}\n elif scheduling == 'preemptible':\n scheduling_option = {\n 'preemptible': True\n }\n else:\n raise InstanceError(\"Unknown scheduling option: '%s'\" % scheduling)\n\n if isinstance(tags, types.StringTypes):\n tags = tags.split(',')\n elif isinstance(tags, collections.Sequence):\n # ok, nothing to do\n pass\n elif tags is not None:\n raise TypeError(\n \"The `tags` argument to `gce.start_instance`\"\n \" should be a string or a list, got {T} instead\"\n .format(T=type(tags)))\n\n # construct the request body\n if node_name:\n instance_id = node_name.lower().replace('_', '-') # GCE doesn't allow \"_\"\n else:\n instance_id = 'elasticluster-%s' % uuid.uuid4()\n\n with open(public_key_path, 'r') as f:\n public_key_content = f.read()\n\n instance = {\n 'name': instance_id,\n 'machineType': machine_type_url,\n 'tags': {\n 'items': tags,\n },\n 'scheduling': scheduling_option,\n 'disks': [{\n 'autoDelete': 'true',\n 'boot': 'true',\n 'type': 'PERSISTENT',\n 'initializeParams' : {\n 'diskName': \"%s-disk\" % instance_id,\n 'diskType': boot_disk_type_url,\n 'diskSizeGb': boot_disk_size_gb,\n 'sourceImage': image_url\n }\n }],\n 'networkInterfaces': [\n {'accessConfigs': [\n {'type': 'ONE_TO_ONE_NAT',\n 'name': 'External NAT'\n }],\n 'network': network_url\n }],\n 'serviceAccounts': [\n {'email': self._email,\n 'scopes': GCE_DEFAULT_SCOPES\n }],\n \"metadata\": {\n \"kind\": \"compute#metadata\",\n \"items\": [\n {\n \"key\": \"sshKeys\",\n \"value\": \"%s:%s\" % (username, public_key_content)\n }\n ]\n }\n }\n\n # create the instance\n gce = self._connect()\n request = gce.instances().insert(\n project=self._project_id, body=instance, zone=self._zone)\n try:\n response = self._execute_request(request)\n response = self._wait_until_done(response)\n self._check_response(response)\n return instance_id\n except (HttpError, CloudProviderError) as e:\n log.error(\"Error creating instance `%s`\" % e)\n raise InstanceError(\"Error creating instance `%s`\" % e)",
"def create(cls, config_file=None, logical_volume = None, cfg = None, **params):\n \"\"\"\n Create a new instance based on the specified configuration file or the specified\n configuration and the passed in parameters.\n \n If the config_file argument is not None, the configuration is read from there. \n Otherwise, the cfg argument is used.\n \n The config file may include other config files with a #import reference. The included\n config files must reside in the same directory as the specified file. \n \n The logical_volume argument, if supplied, will be used to get the current physical \n volume ID and use that as an override of the value specified in the config file. This \n may be useful for debugging purposes when you want to debug with a production config \n file but a test Volume. \n \n The dictionary argument may be used to override any EC2 configuration values in the \n config file. \n \"\"\"\n if config_file:\n cfg = Config(path=config_file)\n if cfg.has_section('EC2'):\n # include any EC2 configuration values that aren't specified in params:\n for option in cfg.options('EC2'):\n if option not in params:\n params[option] = cfg.get('EC2', option)\n getter = CommandLineGetter()\n getter.get(cls, params)\n region = params.get('region')\n ec2 = region.connect()\n cls.add_credentials(cfg, ec2.aws_access_key_id, ec2.aws_secret_access_key)\n ami = params.get('ami')\n kp = params.get('keypair')\n group = params.get('group')\n zone = params.get('zone')\n # deal with possibly passed in logical volume:\n if logical_volume != None:\n cfg.set('EBS', 'logical_volume_name', logical_volume.name) \n cfg_fp = StringIO.StringIO()\n cfg.write(cfg_fp)\n # deal with the possibility that zone and/or keypair are strings read from the config file:\n if isinstance(zone, Zone):\n zone = zone.name\n if isinstance(kp, KeyPair):\n kp = kp.name\n reservation = ami.run(min_count=1,\n max_count=params.get('quantity', 1),\n key_name=kp,\n security_groups=[group],\n instance_type=params.get('instance_type'),\n placement = zone,\n user_data = cfg_fp.getvalue())\n l = []\n i = 0\n elastic_ip = params.get('elastic_ip')\n instances = reservation.instances\n if elastic_ip != None and instances.__len__() > 0:\n instance = instances[0]\n print 'Waiting for instance to start so we can set its elastic IP address...'\n # Sometimes we get a message from ec2 that says that the instance does not exist.\n # Hopefully the following delay will giv eec2 enough time to get to a stable state:\n time.sleep(5) \n while instance.update() != 'running':\n time.sleep(1)\n instance.use_ip(elastic_ip)\n print 'set the elastic IP of the first instance to %s' % elastic_ip\n for instance in instances:\n s = cls()\n s.ec2 = ec2\n s.name = params.get('name') + '' if i==0 else str(i)\n s.description = params.get('description')\n s.region_name = region.name\n s.instance_id = instance.id\n if elastic_ip and i == 0:\n s.elastic_ip = elastic_ip\n s.put()\n l.append(s)\n i += 1\n return l",
"def launch_instance(self, ami_id, key_name, subnet_id, security_group_id=None, security_group_list=None,\n user_data_script_path=None, instance_type='t2.small', root_device_name='/dev/xvda'):\n \"\"\"Launches an EC2 instance with the specified parameters, intended to launch\n an instance for creation of a CONS3RT template.\n\n :param ami_id: (str) ID of the AMI to launch from\n :param key_name: (str) Name of the key-pair to use\n :param subnet_id: (str) IF of the VPC subnet to attach the instance to\n :param security_group_id: (str) ID of the security group, of not provided the default will be applied\n appended to security_group_list if provided\n :param security_group_id_list: (list) of IDs of the security group, if not provided the default will be applied\n :param user_data_script_path: (str) Path to the user-data script to run\n :param instance_type: (str) Instance Type (e.g. t2.micro)\n :param root_device_name: (str) The device name for the root volume\n :return:\n \"\"\"\n log = logging.getLogger(self.cls_logger + '.launch_instance')\n log.info('Launching with AMI ID: {a}'.format(a=ami_id))\n log.info('Launching with Key Pair: {k}'.format(k=key_name))\n\n if security_group_list:\n if not isinstance(security_group_list, list):\n raise EC2UtilError('security_group_list must be a list')\n\n if security_group_id and security_group_list:\n security_group_list.append(security_group_id)\n elif security_group_id and not security_group_list:\n security_group_list = [security_group_id]\n log.info('Launching with security group list: {s}'.format(s=security_group_list))\n user_data = None\n if user_data_script_path is not None:\n if os.path.isfile(user_data_script_path):\n with open(user_data_script_path, 'r') as f:\n user_data = f.read()\n monitoring = {'Enabled': False}\n block_device_mappings = [\n {\n 'DeviceName': root_device_name,\n 'Ebs': {\n 'VolumeSize': 100,\n 'DeleteOnTermination': True\n }\n }\n ]\n log.info('Attempting to launch the EC2 instance now...')\n try:\n response = self.client.run_instances(\n DryRun=False,\n ImageId=ami_id,\n MinCount=1,\n MaxCount=1,\n KeyName=key_name,\n SecurityGroupIds=security_group_list,\n UserData=user_data,\n InstanceType=instance_type,\n Monitoring=monitoring,\n SubnetId=subnet_id,\n InstanceInitiatedShutdownBehavior='stop',\n BlockDeviceMappings=block_device_mappings\n )\n except ClientError:\n _, ex, trace = sys.exc_info()\n msg = '{n}: There was a problem launching the EC2 instance\\n{e}'.format(n=ex.__class__.__name__, e=str(ex))\n raise EC2UtilError, msg, trace\n instance_id = response['Instances'][0]['InstanceId']\n output = {\n 'InstanceId': instance_id,\n 'InstanceInfo': response['Instances'][0]\n }\n return output",
"def launch_instance(instance_name,\n command,\n existing_ip=None,\n cpu=1,\n mem=4,\n code_dir=None,\n setup_command=None):\n \"\"\"Launch a GCE instance.\"\"\"\n # Create instance\n ip = existing_ip or create_instance(instance_name, cpu=cpu, mem=mem)\n tf.logging.info(\"Waiting for SSH %s\", instance_name)\n ready = wait_for_ssh(ip)\n if not ready:\n raise ValueError(\"Instance %s never ready for SSH\" % instance_name)\n\n # Copy code\n if code_dir:\n shell_run_with_retry(COPY_CODE, retries=2,\n local_dir=code_dir, instance_name=instance_name)\n\n # Run setup\n if setup_command:\n tf.logging.info(\"Running setup on %s\", instance_name)\n remote_run(setup_command, instance_name)\n\n # Run command\n tf.logging.info(\"Running command on %s\", instance_name)\n remote_run(command, instance_name, detach=True)",
"def startup(self):\n \"\"\"Startup the ec2 instance\n \"\"\"\n import boto.ec2\n\n if not self.browser_config.get('launch'):\n self.warning_log(\"Skipping launch\")\n return True\n\n self.info_log(\"Starting up\")\n\n instance = None\n try:\n\n # KEY NAME\n key_name = self.browser_config.get(\n \"ssh_key_path\"\n ).split(os.sep)[-1][:-4]\n # SECURITY GROUP\n\n if type(self.browser_config.get(\"security_group_ids\")) == str:\n security_group_ids = [\n self.browser_config.get(\"security_group_ids\")\n ]\n\n elif type(self.browser_config.get(\"security_group_ids\")) == list:\n security_group_ids = self.browser_config.get(\n \"security_group_ids\"\n )\n\n else:\n msg = \"The config security_group_ids must be a string or a list of string\" # noqa\n self.critial_log(msg)\n raise Exception(msg)\n\n # LAUNCH INSTANCE\n ec2 = boto.ec2.connect_to_region(self.browser_config.get(\"region\"))\n reservation = ec2.run_instances(\n self.browser_config.get('amiid'),\n key_name=key_name,\n instance_type=self.browser_config.get(\"instance_type\"),\n security_group_ids=security_group_ids\n )\n\n wait_after_instance_launched = BROME_CONFIG['ec2']['wait_after_instance_launched'] # noqa\n if wait_after_instance_launched:\n self.info_log(\n \"Waiting after instance launched: %s seconds...\" %\n wait_after_instance_launched\n )\n sleep(wait_after_instance_launched)\n\n else:\n self.warning_log(\"Skipping waiting after instance launched\")\n\n try:\n instance = reservation.instances[0]\n\n except Exception as e:\n self.critical_log(\n 'Instance reservation exception: %s' % str(e)\n )\n raise\n\n self.instance_id = instance.id\n\n self.info_log('Waiting for the instance to start...')\n\n for i in range(60*5):\n try:\n status = instance.update()\n if status == 'running':\n break\n\n else:\n sleep(1)\n except Exception as e:\n self.error_log(\n 'Exception while wait pending: %s' % str(e)\n )\n sleep(1)\n\n # Wait until instance is running\n status = instance.update()\n if status == 'running':\n instance.add_tag(\n \"Name\",\n \"%s-selenium-node-%s-%s\" %\n (\n self.browser_config.get('platform'),\n self.browser_config.get('browserName'),\n self.index\n )\n )\n\n self.info_log(\n \"New instance (%s) public ip (%s) private ip (%s)\" % (\n instance.id,\n instance.ip_address,\n instance.private_ip_address\n )\n )\n else:\n msg = \"Instance status is %s and should be (running)\" % status\n self.error_log(msg)\n raise Exception(msg)\n\n if BROME_CONFIG['ec2']['wait_until_system_and_instance_check_performed']: # noqa\n check_successful = False\n\n for i in range(5*60):\n\n try:\n\n if not i % 60:\n if not type(status) == str:\n self.info_log(\n 'System_status: %s, instance_status: %s' %\n (\n status.system_status,\n status.instance_status\n )\n )\n\n status = ec2.get_all_instance_status(\n instance_ids=[instance.id]\n )[0]\n if status.system_status.status == u'ok' and status.instance_status.status == u'ok': # noqa\n\n self.info_log('system_status: %s, instance_status: %s' % (status.system_status, status.instance_status)) # noqa\n check_successful = True\n break\n\n except Exception as e:\n self.error_log('Waiting instance ready exception: %s' % str(e)) # noqa\n sleep(1)\n\n if not check_successful:\n msg = \"System and instance check were not successful\"\n self.warning_log(msg)\n raise Exception(msg)\n else:\n self.warning_log(\"Skipping wait until system and instance check performed\") # noqa\n\n self.info_log('Starting the selenium node server')\n\n self.private_ip = instance.private_ip_address\n self.public_dns = 
instance.public_dns_name\n self.private_dns = instance.private_dns_name\n self.public_ip = instance.ip_address\n\n # LINUX\n if self.browser_config.get('platform').lower() == \"linux\":\n command = self.browser_config.get(\n \"selenium_command\"\n ).format(**self.browser_config.config)\n self.execute_command(command)\n\n elif self.browser_config.get('platform').upper() == \"windows\":\n\n # TODO this code is out of date\n config = self.browser_config.config.copy()\n config['instance_ip_address'] = instance.ip_address\n command = self.browser_config(\n \"selenium_command\"\n ).format(**config)\n process = Popen(\n command.split(\" \"),\n stdout=devnull,\n stderr=devnull\n )\n self.runner.xvfb_pids.append(process.pid)\n\n else:\n\n msg = \"The provided platform name is not supported: select either (WINDOWS) or (LINUX)\" # noqa\n self.critical_log(msg)\n raise Exception(msg)\n\n return True\n\n except Exception as e:\n self.error_log('Startup exception: %s' % str(e))\n raise",
"def run(image_id, name=None, tags=None, key_name=None, security_groups=None,\n user_data=None, instance_type='m1.small', placement=None,\n kernel_id=None, ramdisk_id=None, monitoring_enabled=None, vpc_id=None,\n vpc_name=None, subnet_id=None, subnet_name=None, private_ip_address=None,\n block_device_map=None, disable_api_termination=None,\n instance_initiated_shutdown_behavior=None, placement_group=None,\n client_token=None, security_group_ids=None, security_group_names=None,\n additional_info=None, tenancy=None, instance_profile_arn=None,\n instance_profile_name=None, ebs_optimized=None,\n network_interface_id=None, network_interface_name=None,\n region=None, key=None, keyid=None, profile=None, network_interfaces=None):\n #TODO: support multi-instance reservations\n '''\n Create and start an EC2 instance.\n\n Returns True if the instance was created; otherwise False.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt myminion boto_ec2.run ami-b80c2b87 name=myinstance\n\n image_id\n (string) – The ID of the image to run.\n name\n (string) - The name of the instance.\n tags\n (dict of key: value pairs) - tags to apply to the instance.\n key_name\n (string) – The name of the key pair with which to launch instances.\n security_groups\n (list of strings) – The names of the EC2 classic security groups with\n which to associate instances\n user_data\n (string) – The Base64-encoded MIME user data to be made available to the\n instance(s) in this reservation.\n instance_type\n (string) – The type of instance to run. Note that some image types\n (e.g. hvm) only run on some instance types.\n placement\n (string) – The Availability Zone to launch the instance into.\n kernel_id\n (string) – The ID of the kernel with which to launch the instances.\n ramdisk_id\n (string) – The ID of the RAM disk with which to launch the instances.\n monitoring_enabled\n (bool) – Enable detailed CloudWatch monitoring on the instance.\n vpc_id\n (string) - ID of a VPC to bind the instance to. Exclusive with vpc_name.\n vpc_name\n (string) - Name of a VPC to bind the instance to. Exclusive with vpc_id.\n subnet_id\n (string) – The subnet ID within which to launch the instances for VPC.\n subnet_name\n (string) – The name of a subnet within which to launch the instances for VPC.\n private_ip_address\n (string) – If you’re using VPC, you can optionally use this parameter to\n assign the instance a specific available IP address from the subnet\n (e.g. 10.0.0.25).\n block_device_map\n (boto.ec2.blockdevicemapping.BlockDeviceMapping) – A BlockDeviceMapping\n data structure describing the EBS volumes associated with the Image.\n (string) - A string representation of a BlockDeviceMapping structure\n (dict) - A dict describing a BlockDeviceMapping structure\n\n YAML example:\n\n .. code-block:: yaml\n\n device-maps:\n /dev/sdb:\n ephemeral_name: ephemeral0\n /dev/sdc:\n ephemeral_name: ephemeral1\n /dev/sdd:\n ephemeral_name: ephemeral2\n /dev/sde:\n ephemeral_name: ephemeral3\n /dev/sdf:\n size: 20\n volume_type: gp2\n\n disable_api_termination\n (bool) – If True, the instances will be locked and will not be able to\n be terminated via the API.\n instance_initiated_shutdown_behavior\n (string) – Specifies whether the instance stops or terminates on\n instance-initiated shutdown. 
Valid values are: stop, terminate\n placement_group\n (string) – If specified, this is the name of the placement group in\n which the instance(s) will be launched.\n client_token\n (string) – Unique, case-sensitive identifier you provide to ensure\n idempotency of the request. Maximum 64 ASCII characters.\n security_group_ids\n (list of strings) – The ID(s) of the VPC security groups with which to\n associate instances.\n security_group_names\n (list of strings) – The name(s) of the VPC security groups with which to\n associate instances.\n additional_info\n (string) – Specifies additional information to make available to the\n instance(s).\n tenancy\n (string) – The tenancy of the instance you want to launch. An instance\n with a tenancy of ‘dedicated’ runs on single-tenant hardware and can\n only be launched into a VPC. Valid values are:”default” or “dedicated”.\n NOTE: To use dedicated tenancy you MUST specify a VPC subnet-ID as well.\n instance_profile_arn\n (string) – The Amazon resource name (ARN) of the IAM Instance Profile\n (IIP) to associate with the instances.\n instance_profile_name\n (string) – The name of the IAM Instance Profile (IIP) to associate with\n the instances.\n ebs_optimized\n (bool) – Whether the instance is optimized for EBS I/O. This\n optimization provides dedicated throughput to Amazon EBS and an\n optimized configuration stack to provide optimal EBS I/O performance.\n This optimization isn’t available with all instance types.\n network_interfaces\n (boto.ec2.networkinterface.NetworkInterfaceCollection) – A\n NetworkInterfaceCollection data structure containing the ENI\n specifications for the instance.\n network_interface_id\n (string) - ID of the network interface to attach to the instance\n network_interface_name\n (string) - Name of the network interface to attach to the instance\n\n '''\n if all((subnet_id, subnet_name)):\n raise SaltInvocationError('Only one of subnet_name or subnet_id may be '\n 'provided.')\n if subnet_name:\n r = __salt__['boto_vpc.get_resource_id']('subnet', subnet_name,\n region=region, key=key,\n keyid=keyid, profile=profile)\n if 'id' not in r:\n log.warning('Couldn\\'t resolve subnet name %s.', subnet_name)\n return False\n subnet_id = r['id']\n\n if all((security_group_ids, security_group_names)):\n raise SaltInvocationError('Only one of security_group_ids or '\n 'security_group_names may be provided.')\n if security_group_names:\n security_group_ids = []\n for sgn in security_group_names:\n r = __salt__['boto_secgroup.get_group_id'](sgn, vpc_name=vpc_name,\n region=region, key=key,\n keyid=keyid, profile=profile)\n if not r:\n log.warning('Couldn\\'t resolve security group name %s', sgn)\n return False\n security_group_ids += [r]\n\n network_interface_args = list(map(int, [network_interface_id is not None,\n network_interface_name is not None,\n network_interfaces is not None]))\n\n if sum(network_interface_args) > 1:\n raise SaltInvocationError('Only one of network_interface_id, '\n 'network_interface_name or '\n 'network_interfaces may be provided.')\n if network_interface_name:\n result = get_network_interface_id(network_interface_name,\n region=region, key=key,\n keyid=keyid,\n profile=profile)\n network_interface_id = result['result']\n if not network_interface_id:\n log.warning(\n \"Given network_interface_name '%s' cannot be mapped to an \"\n \"network_interface_id\", network_interface_name\n )\n\n if network_interface_id:\n interface = NetworkInterfaceSpecification(\n network_interface_id=network_interface_id,\n 
device_index=0)\n else:\n interface = NetworkInterfaceSpecification(\n subnet_id=subnet_id,\n groups=security_group_ids,\n device_index=0)\n\n if network_interfaces:\n interfaces_specs = [NetworkInterfaceSpecification(**x) for x in network_interfaces]\n interfaces = NetworkInterfaceCollection(*interfaces_specs)\n else:\n interfaces = NetworkInterfaceCollection(interface)\n\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n reservation = conn.run_instances(image_id, key_name=key_name, security_groups=security_groups,\n user_data=user_data, instance_type=instance_type,\n placement=placement, kernel_id=kernel_id, ramdisk_id=ramdisk_id,\n monitoring_enabled=monitoring_enabled,\n private_ip_address=private_ip_address,\n block_device_map=_to_blockdev_map(block_device_map),\n disable_api_termination=disable_api_termination,\n instance_initiated_shutdown_behavior=instance_initiated_shutdown_behavior,\n placement_group=placement_group, client_token=client_token,\n additional_info=additional_info,\n tenancy=tenancy, instance_profile_arn=instance_profile_arn,\n instance_profile_name=instance_profile_name, ebs_optimized=ebs_optimized,\n network_interfaces=interfaces)\n if not reservation:\n log.warning('Instance could not be reserved')\n return False\n\n instance = reservation.instances[0]\n\n status = 'pending'\n while status == 'pending':\n time.sleep(5)\n status = instance.update()\n if status == 'running':\n if name:\n instance.add_tag('Name', name)\n if tags:\n instance.add_tags(tags)\n return {'instance_id': instance.id}\n else:\n log.warning(\n 'Instance could not be started -- status is \"%s\"',\n status\n )",
"async def runInstance(self, *args, **kwargs):\n \"\"\"\n Run an instance\n\n Request an instance of a worker type\n\n This method takes input: ``v1/run-instance-request.json#``\n\n This method is ``experimental``\n \"\"\"\n\n return await self._makeApiCall(self.funcinfo[\"runInstance\"], *args, **kwargs)",
"def _instance(self):\n global clients\n global runtimeInstances\n \"\"\"\n This method is used to get the Application instance previously created\n managing on this, it is possible to switch to \"single instance for\n multiple clients\" or \"multiple instance for multiple clients\" execution way\n \"\"\"\n\n self.session = 0\n #checking previously defined session\n if 'cookie' in self.headers:\n self.session = parse_session_cookie(self.headers['cookie'])\n #if not a valid session id\n if self.session == None:\n self.session = 0\n if not self.session in clients.keys():\n self.session = 0\n\n #if no session id\n if self.session == 0:\n if self.server.multiple_instance:\n self.session = int(time.time()*1000)\n #send session to browser\n del self.headers['cookie']\n\n #if the client instance doesn't exist\n if not(self.session in clients):\n self.update_interval = self.server.update_interval\n\n from remi import gui\n \n head = gui.HEAD(self.server.title)\n # use the default css, but append a version based on its hash, to stop browser caching\n head.add_child('internal_css', \"<link href='/res:style.css' rel='stylesheet' />\\n\")\n \n body = gui.BODY()\n body.onload.connect(self.onload)\n body.onerror.connect(self.onerror)\n body.ononline.connect(self.ononline)\n body.onpagehide.connect(self.onpagehide)\n body.onpageshow.connect(self.onpageshow)\n body.onresize.connect(self.onresize)\n self.page = gui.HTML()\n self.page.add_child('head', head)\n self.page.add_child('body', body)\n\n if not hasattr(self, 'websockets'):\n self.websockets = []\n\n self.update_lock = threading.RLock()\n\n if not hasattr(self, '_need_update_flag'):\n self._need_update_flag = False\n self._stop_update_flag = False\n if self.update_interval > 0:\n self._update_thread = threading.Thread(target=self._idle_loop)\n self._update_thread.setDaemon(True)\n self._update_thread.start()\n\n runtimeInstances[str(id(self))] = self\n clients[self.session] = self\n else:\n #restore instance attributes\n client = clients[self.session]\n\n self.websockets = client.websockets\n self.page = client.page\n\n self.update_lock = client.update_lock\n\n self.update_interval = client.update_interval\n self._need_update_flag = client._need_update_flag\n if hasattr(client, '_update_thread'):\n self._update_thread = client._update_thread\n \n net_interface_ip = self.headers.get('Host', \"%s:%s\"%(self.connection.getsockname()[0],self.server.server_address[1]))\n websocket_timeout_timer_ms = str(self.server.websocket_timeout_timer_ms)\n pending_messages_queue_length = str(self.server.pending_messages_queue_length)\n self.page.children['head'].set_internal_js(net_interface_ip, pending_messages_queue_length, websocket_timeout_timer_ms)",
"def _get_instance(self):\n \"\"\"Retrieve instance matching instance_id.\"\"\"\n resource = self._connect()\n\n try:\n instance = resource.Instance(self.running_instance_id)\n except Exception:\n raise EC2CloudException(\n 'Instance with ID: {instance_id} not found.'.format(\n instance_id=self.running_instance_id\n )\n )\n return instance"
] |
[
0.741840660572052,
0.711973249912262,
0.7039064168930054,
0.6844360828399658,
0.6764575839042664,
0.670925498008728,
0.6708016395568848,
0.667149007320404,
0.665036678314209,
0.6630590558052063,
0.6624376177787781,
0.6600350737571716
] |
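In the positive above the drift check is stubbed out: reconfigure stays False and the commented block never runs. Note that the commented version compares with `is not`, which tests object identity rather than equality. A minimal sketch of what an equality-based drift check could look like; the attribute names are assumptions, not the qubell API.

from collections import namedtuple

def needs_reconfigure(instance, revision=None, parameters=None):
    """True when a requested value is set and differs from the live one."""
    # != compares values; the snippet's commented-out `is not` would compare
    # identity and report drift even for equal values.
    if revision is not None and revision != instance.revision:
        return True
    if parameters is not None and parameters != instance.parameters:
        return True
    return False

Live = namedtuple('Live', 'revision parameters')  # hypothetical live-instance view
assert needs_reconfigure(Live('1.0', {}), revision='2.0')
assert not needs_reconfigure(Live('1.0', {}), revision='1.0')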
Creates environment and returns Environment object.
|
def create_environment(self, name, default=False, zone=None):
""" Creates environment and returns Environment object.
"""
from qubell.api.private.environment import Environment
return Environment.new(organization=self, name=name, zone_id=zone, default=default, router=self._router)
|
[
"def environment(**options):\n \"\"\"\n Add ``static`` and ``url`` functions to the ``environment`` context\n processor and return as a Jinja2 ``Environment`` object.\n \"\"\"\n env = Environment(**options)\n env.globals.update({\n 'static': staticfiles_storage.url,\n 'url': reverse,\n })\n env.globals.update(context_processors.environment())\n return env",
"def _create_environment(config, outdir):\n \"\"\"Constructor for an instance of the environment.\n\n Args:\n config: Object providing configurations via attributes.\n outdir: Directory to store videos in.\n\n Raises:\n NotImplementedError: For action spaces other than Box and Discrete.\n\n Returns:\n Wrapped OpenAI Gym environment.\n \"\"\"\n if isinstance(config.env, str):\n env = gym.make(config.env)\n else:\n env = config.env()\n # Ensure that the environment has the specification attribute set as expected\n # by the monitor wrapper.\n if not hasattr(env, 'spec'):\n setattr(env, 'spec', getattr(env, 'spec', None))\n if config.max_length:\n env = tools.wrappers.LimitDuration(env, config.max_length)\n env = gym.wrappers.Monitor(\n env, outdir, lambda unused_episode_number: True)\n if isinstance(env.action_space, gym.spaces.Box):\n env = tools.wrappers.RangeNormalize(env)\n env = tools.wrappers.ClipAction(env)\n elif isinstance(env.action_space, gym.spaces.Discrete):\n env = tools.wrappers.RangeNormalize(env, action=False)\n else:\n message = \"Unsupported action space '{}'\".format(type(env.action_space))\n raise NotImplementedError(message)\n env = tools.wrappers.ConvertTo32Bit(env)\n env = tools.wrappers.CacheSpaces(env)\n return env",
"def create_ambiente(self):\n \"\"\"Get an instance of ambiente services facade.\"\"\"\n return Ambiente(\n self.networkapi_url,\n self.user,\n self.password,\n self.user_ldap)",
"def create_environment(self, **kwargs):\n \"\"\"\n Return a new Jinja environment.\n \n Derived classes may override method to pass additional parameters or to change the template\n loader type.\n \"\"\"\n return jinja2.Environment(\n loader=jinja2.FileSystemLoader(self.templates_path),\n **kwargs\n )",
"def create_environment(self, **kwargs):\n \"\"\"\n Return a new Jinja environment.\n\n Derived classes may override method to pass additional parameters or to change the template\n loader type.\n \"\"\"\n environment = super().create_environment(**kwargs)\n environment.tests.update({\n 'type': self.test_type,\n 'kind': self.test_kind,\n 'opposite_before_self': self.test_opposite_before_self,\n })\n environment.filters.update({\n 'docstringline': self.filter_docstringline,\n 'pyquotesingle': self.filter_pyquotesingle,\n 'derivedname': self.filter_derived_name,\n 'refqualifiers': self.filter_refqualifiers,\n 'attrqualifiers': self.filter_attrqualifiers,\n 'supertypes': self.filter_supertypes,\n 'all_contents': self.filter_all_contents,\n 'pyfqn': self.filter_pyfqn,\n 're_sub': lambda v, p, r: re.sub(p, r, v),\n 'set': self.filter_set,\n })\n\n from pyecore import ecore\n environment.globals.update({'ecore': ecore})\n\n return environment",
"def environment(self, id=None, name=None, zone=None, default=False):\n \"\"\" Smart method. Creates, picks or modifies environment.\n If environment found by name or id parameters not changed: return env.\n If env found by id, but other parameters differs: change them.\n If no environment found, create with given parameters.\n \"\"\"\n\n found = False\n\n # Try to find environment by name or id\n if name and id:\n found = self.get_environment(id=id)\n elif id:\n found = self.get_environment(id=id)\n name = found.name\n elif name:\n try:\n found = self.get_environment(name=name)\n except exceptions.NotFoundError:\n pass\n\n # If found - compare parameters\n if found:\n self._assert_env_and_zone(found, zone)\n if default and not found.isDefault:\n found.set_as_default()\n # TODO: add abilities to change name.\n if not found:\n created = self.create_environment(name=name, zone=zone, default=default)\n return found or created",
"def _create_environment(config):\n \"\"\"Constructor for an instance of the environment.\n\n Args:\n config: Object providing configurations via attributes.\n\n Raises:\n NotImplementedError: For action spaces other than Box and Discrete.\n\n Returns:\n Wrapped OpenAI Gym environment.\n \"\"\"\n if isinstance(config.env, str):\n env = gym.make(config.env)\n else:\n env = config.env()\n if config.max_length:\n env = tools.wrappers.LimitDuration(env, config.max_length)\n if isinstance(env.action_space, gym.spaces.Box):\n if config.normalize_ranges:\n env = tools.wrappers.RangeNormalize(env)\n env = tools.wrappers.ClipAction(env)\n elif isinstance(env.action_space, gym.spaces.Discrete):\n if config.normalize_ranges:\n env = tools.wrappers.RangeNormalize(env, action=False)\n else:\n message = \"Unsupported action space '{}'\".format(type(env.action_space))\n raise NotImplementedError(message)\n env = tools.wrappers.ConvertTo32Bit(env)\n env = tools.wrappers.CacheSpaces(env)\n return env",
"def make(self):\n \"\"\"Instantiates an instance of the environment with appropriate kwargs\"\"\"\n if self._entry_point is None:\n raise error.Error('Attempting to make deprecated env {}. (HINT: is there a newer registered version of this env?)'.format(self.id))\n\n cls = load(self._entry_point)\n env = cls(**self._kwargs)\n\n # Make the enviroment aware of which spec it came from.\n env.spec = self\n env = env.build(extra_wrappers=self._wrappers)\n\n return env",
"def create(opts):\n \"\"\"Create a new environment\n\nUsage:\n datacats create [-bin] [--interactive] [-s NAME] [--address=IP] [--syslog]\n [--ckan=CKAN_VERSION] [--no-datapusher] [--site-url SITE_URL]\n [--no-init-db] ENVIRONMENT_DIR [PORT]\n\nOptions:\n --address=IP Address to listen on (Linux-only)\n --ckan=CKAN_VERSION Use CKAN version CKAN_VERSION [default: 2.4]\n -b --bare Bare CKAN site with no example extension\n -i --image-only Create the environment but don't start containers\n --interactive Doesn't detach from the web container\n --no-datapusher Don't install/enable ckanext-datapusher\n --no-init-db Don't initialize the database. Useful for importing CKANs.\n -n --no-sysadmin Don't prompt for an initial sysadmin user account\n -s --site=NAME Pick a site to create [default: primary]\n --site-url SITE_URL The site_url to use in API responses (e.g. http://example.org:{port}/)\n --syslog Log to the syslog\n\nENVIRONMENT_DIR is a path for the new environment directory. The last\npart of this path will be used as the environment name.\n\"\"\"\n if opts['--address'] and is_boot2docker():\n raise DatacatsError('Cannot specify address on boot2docker.')\n return create_environment(\n environment_dir=opts['ENVIRONMENT_DIR'],\n port=opts['PORT'],\n create_skin=not opts['--bare'],\n start_web=not opts['--image-only'],\n create_sysadmin=not opts['--no-sysadmin'],\n site_name=opts['--site'],\n ckan_version=opts['--ckan'],\n address=opts['--address'],\n log_syslog=opts['--syslog'],\n datapusher=not opts['--no-datapusher'],\n site_url=opts['--site-url'],\n interactive=opts['--interactive'],\n init_db=not opts['--no-init-db'],\n )",
"def create_jinja_environment(self) -> Environment:\n \"\"\"Create and return the jinja environment.\n\n This will create the environment based on the\n :attr:`jinja_options` and configuration settings. The\n environment will include the Quart globals by default.\n \"\"\"\n options = dict(self.jinja_options)\n if 'autoescape' not in options:\n options['autoescape'] = self.select_jinja_autoescape\n if 'auto_reload' not in options:\n options['auto_reload'] = self.config['TEMPLATES_AUTO_RELOAD'] or self.debug\n jinja_env = self.jinja_environment(self, **options)\n jinja_env.globals.update({\n 'config': self.config,\n 'g': g,\n 'get_flashed_messages': get_flashed_messages,\n 'request': request,\n 'session': session,\n 'url_for': url_for,\n })\n jinja_env.filters['tojson'] = tojson_filter\n return jinja_env",
"def new(cls, path, ckan_version, site_name, **kwargs):\n \"\"\"\n Return a Environment object with settings for a new project.\n No directories or containers are created by this call.\n\n :params path: location for new project directory, may be relative\n :params ckan_version: release of CKAN to install\n :params site_name: The name of the site to install database and solr \\\n eventually.\n\n For additional keyword arguments see the __init__ method.\n\n Raises DatcatsError if directories or project with same\n name already exits.\n \"\"\"\n if ckan_version == 'master':\n ckan_version = 'latest'\n name, datadir, srcdir = task.new_environment_check(path, site_name, ckan_version)\n environment = cls(name, srcdir, datadir, site_name, ckan_version, **kwargs)\n environment._generate_passwords()\n return environment",
"def make_env(*args, **kwargs):\n \"\"\"Creates an :py:class:`~jinja2.Environment` with different defaults.\n\n Per default, ``autoescape`` will be disabled and ``trim_blocks`` enabled.\n All start/end/prefix strings will be changed for a more LaTeX-friendly\n version (see the docs for details).\n\n Any arguments will be passed on to the :py:class:`~jinja2.Environment`\n constructor and override new values.\n\n Finally, the ``|e``, ``|escape`` and ``|forceescape`` filters will be\n replaced with a call to :func:`latex.escape`.\"\"\"\n ka = ENV_ARGS.copy()\n ka.update(kwargs)\n\n env = Environment(*args, **ka)\n env.filters['e'] = LatexMarkup.escape\n env.filters['escape'] = LatexMarkup.escape\n env.filters['forceescape'] = LatexMarkup.escape # FIXME: this is a bug\n return env"
] |
[
0.7870877981185913,
0.7774751782417297,
0.7761436700820923,
0.7749801874160767,
0.7747286558151245,
0.7666007876396179,
0.7661817669868469,
0.7641677260398865,
0.7624807357788086,
0.7603580951690674,
0.7593848705291748,
0.7571864128112793
] |
Get environment object by name or id.
|
def get_environment(self, id=None, name=None):
""" Get environment object by name or id.
"""
log.info("Picking environment: %s (%s)" % (name, id))
return self.environments[id or name]
|
[
"def get_environment(id=None, name=None):\n \"\"\"\n Get a specific Environment by name or ID\n \"\"\"\n data = get_environment_raw(id, name)\n if data:\n return utils.format_json(data)",
"def environment(self, id=None, name=None, zone=None, default=False):\n \"\"\" Smart method. Creates, picks or modifies environment.\n If environment found by name or id parameters not changed: return env.\n If env found by id, but other parameters differs: change them.\n If no environment found, create with given parameters.\n \"\"\"\n\n found = False\n\n # Try to find environment by name or id\n if name and id:\n found = self.get_environment(id=id)\n elif id:\n found = self.get_environment(id=id)\n name = found.name\n elif name:\n try:\n found = self.get_environment(name=name)\n except exceptions.NotFoundError:\n pass\n\n # If found - compare parameters\n if found:\n self._assert_env_and_zone(found, zone)\n if default and not found.isDefault:\n found.set_as_default()\n # TODO: add abilities to change name.\n if not found:\n created = self.create_environment(name=name, zone=zone, default=default)\n return found or created",
"def get_or_create_environment(self, id=None, name=None, zone=None, default=False):\n \"\"\" Get environment by id or name.\n If not found: create with given or generated parameters\n \"\"\"\n if id:\n return self.get_environment(id=id)\n elif name:\n try:\n env = self.get_environment(name=name)\n self._assert_env_and_zone(env, zone)\n except exceptions.NotFoundError:\n env = self.create_environment(name=name, zone=zone, default=default)\n return env\n else:\n name = 'auto-generated-env'\n return self.create_environment(name=name, zone=zone, default=default)",
"def envGet(self, name, default=None, conv=None):\n \"\"\"Return value for environment variable or None. \n \n @param name: Name of environment variable.\n @param default: Default value if variable is undefined.\n @param conv: Function for converting value to desired type.\n @return: Value of environment variable.\n \n \"\"\"\n if self._env.has_key(name):\n if conv is not None:\n return conv(self._env.get(name))\n else:\n return self._env.get(name)\n else:\n return default",
"def get(self, request, bot_id, id, format=None):\n \"\"\"\n Get environment variable by id\n ---\n serializer: EnvironmentVarSerializer\n responseMessages:\n - code: 401\n message: Not authenticated\n \"\"\" \n return super(EnvironmentVarDetail, self).get(request, bot_id, id, format)",
"def getenv(name, **kwargs):\n \"\"\"\n Retrieves environment variable by name and casts the value to desired type.\n If desired type is list or tuple - uses separator to split the value.\n \"\"\"\n default_value = kwargs.pop('default', None)\n desired_type = kwargs.pop('type', str)\n list_separator = kwargs.pop('separator', ',')\n\n value = os.getenv(name, None)\n\n if value is None:\n if default_value is None:\n return None\n else:\n return default_value\n\n if desired_type is bool:\n if value.lower() in ['false', '0']:\n return False\n else:\n return bool(value)\n\n if desired_type is list or desired_type is tuple:\n value = value.split(list_separator)\n return desired_type(value)\n\n if desired_type is dict:\n return dict(literal_eval(value))\n\n return desired_type(value)",
"def get(name, required=False, default=empty, type=None):\n \"\"\"Generic getter for environment variables. Handles defaults,\n required-ness, and what type to expect.\n\n :param name: The name of the environment variable be pulled\n :type name: str\n\n :param required: Whether the environment variable is required. If ``True``\n and the variable is not present, a ``KeyError`` is raised.\n :type required: bool\n\n :param default: The value to return if the environment variable is not\n present. (Providing a default alongside setting ``required=True`` will raise\n a ``ValueError``)\n :type default: bool\n\n :param type: The type of variable expected.\n :param type: str or type\n \"\"\"\n fn = {\n 'int': env_int,\n int: env_int,\n\n 'bool': env_bool,\n bool: env_bool,\n\n 'string': env_string,\n str: env_string,\n\n 'list': env_list,\n list: env_list,\n\n 'timestamp': env_timestamp,\n datetime.time: env_timestamp,\n\n 'datetime': env_iso8601,\n datetime.datetime: env_iso8601,\n }.get(type, env_string)\n return fn(name, default=default, required=required)",
"def get(self, name, default=None):\n \"\"\"Get value.\"\"\"\n default = default if default is not None else self.default\n try:\n value = getattr(_settings, name)\n except AttributeError:\n value = os.environ.get(name, default) if self.from_env else default\n # Convert env variable.\n if value != default:\n value = self.env_clean(value)\n\n return self.validate(name, value)",
"def get_item(env, name, default=None):\n \"\"\" Get an item from a dictionary, handling nested lookups with dotted notation.\n\n Args:\n env: the environment (dictionary) to use to look up the name.\n name: the name to look up, in dotted notation.\n default: the value to return if the name if not found.\n\n Returns:\n The result of looking up the name, if found; else the default.\n \"\"\"\n # TODO: handle attributes\n for key in name.split('.'):\n if isinstance(env, dict) and key in env:\n env = env[key]\n elif isinstance(env, types.ModuleType) and key in env.__dict__:\n env = env.__dict__[key]\n else:\n return default\n return env",
"def get(self, key, namespace=None):\n \"\"\"Retrieve value for key.\"\"\"\n full_key = generate_uppercase_key(key, namespace)\n logger.debug('Searching %s for %s', self, full_key)\n return get_key_from_envs(os.environ, full_key)",
"def get_environment(self, environment_ids):\n \"\"\"\n Method to get environment\n \"\"\"\n\n uri = 'api/v3/environment/%s/' % environment_ids\n\n return super(ApiEnvironment, self).get(uri)",
"def get(name: str,\n required: bool=False,\n default: Union[Type[empty], T]=empty,\n type: Type[T]=None) -> T:\n \"\"\"Generic getter for environment variables. Handles defaults,\n required-ness, and what type to expect.\n\n :param name: The name of the environment variable be pulled\n :type name: str\n\n :param required: Whether the environment variable is required. If ``True``\n and the variable is not present, a ``KeyError`` is raised.\n :type required: bool\n\n :param default: The value to return if the environment variable is not\n present. (Providing a default alongside setting ``required=True`` will raise\n a ``ValueError``)\n :type default: bool\n\n :param type: The type of variable expected.\n :param type: str or type\n \"\"\"\n fns = {\n 'int': env_int,\n int: env_int,\n\n # 'float': env_float,\n # float: env_float,\n\n 'bool': env_bool,\n bool: env_bool,\n\n 'string': env_string,\n str: env_string,\n\n 'list': env_list,\n list: env_list,\n } # type: Dict[Union[str, Type[Any]], Callable[..., Any]]\n\n fn = fns.get(type, env_string)\n return fn(name, default=default, required=required)"
] |
[
0.8362326622009277,
0.7800267934799194,
0.779765784740448,
0.7632917761802673,
0.7480002045631409,
0.7470816969871521,
0.735478401184082,
0.7275164127349854,
0.7215942740440369,
0.7150940299034119,
0.7124724984169006,
0.7107515335083008
] |
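A note on the lookup in the positive snippet above: self.environments[id or name] assumes a mapping that can be resolved by either key, and it silently falls back to the name whenever the id is falsy. A minimal self-contained sketch of that pattern (the IdNameMap class and all values here are illustrative, not part of the dataset):

import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("organization")

class IdNameMap(dict):
    """Toy mapping keyed by both id and name, as the snippet assumes."""
    def add(self, id, name, value):
        self[id] = value
        self[name] = value

def get_environment(environments, id=None, name=None):
    # Same semantics as the positive snippet: id wins, name is the fallback.
    log.info("Picking environment: %s (%s)" % (name, id))
    return environments[id or name]

envs = IdNameMap()
envs.add("env-42", "primary", {"name": "primary"})
assert get_environment(envs, name="primary") is get_environment(envs, id="env-42")

The same id-or-name indexing recurs in the zone, role, and user lookups later in this section, so it is not repeated there.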
Get environment by id or name.
If not found: create with given or generated parameters
|
def get_or_create_environment(self, id=None, name=None, zone=None, default=False):
""" Get environment by id or name.
If not found: create with given or generated parameters
"""
if id:
return self.get_environment(id=id)
elif name:
try:
env = self.get_environment(name=name)
self._assert_env_and_zone(env, zone)
except exceptions.NotFoundError:
env = self.create_environment(name=name, zone=zone, default=default)
return env
else:
name = 'auto-generated-env'
return self.create_environment(name=name, zone=zone, default=default)
|
[
"def environment(self, id=None, name=None, zone=None, default=False):\n \"\"\" Smart method. Creates, picks or modifies environment.\n If environment found by name or id parameters not changed: return env.\n If env found by id, but other parameters differs: change them.\n If no environment found, create with given parameters.\n \"\"\"\n\n found = False\n\n # Try to find environment by name or id\n if name and id:\n found = self.get_environment(id=id)\n elif id:\n found = self.get_environment(id=id)\n name = found.name\n elif name:\n try:\n found = self.get_environment(name=name)\n except exceptions.NotFoundError:\n pass\n\n # If found - compare parameters\n if found:\n self._assert_env_and_zone(found, zone)\n if default and not found.isDefault:\n found.set_as_default()\n # TODO: add abilities to change name.\n if not found:\n created = self.create_environment(name=name, zone=zone, default=default)\n return found or created",
"def get_environment(self, id=None, name=None):\n \"\"\" Get environment object by name or id.\n \"\"\"\n log.info(\"Picking environment: %s (%s)\" % (name, id))\n return self.environments[id or name]",
"def get_environment(id=None, name=None):\n \"\"\"\n Get a specific Environment by name or ID\n \"\"\"\n data = get_environment_raw(id, name)\n if data:\n return utils.format_json(data)",
"def find_environment(env_id=None, env_name=None):\n \"\"\"\n find the environment according environment id (prioritary) or environment name\n :param env_id: the environment id\n :param env_name: the environment name\n :return: found environment or None if not found\n \"\"\"\n LOGGER.debug(\"EnvironmentService.find_environment\")\n if (env_id is None or not env_id) and (env_name is None or not env_name):\n raise exceptions.ArianeCallParametersError('id and name')\n\n if (env_id is not None and env_id) and (env_name is not None and env_name):\n LOGGER.warn('Both id and name are defined. Will give you search on id.')\n env_name = None\n\n params = None\n if env_id is not None and env_id:\n params = {'id': env_id}\n elif env_name is not None and env_name:\n params = {'name': env_name}\n\n ret = None\n if params is not None:\n args = {'http_operation': 'GET', 'operation_path': 'get', 'parameters': params}\n response = EnvironmentService.requester.call(args)\n if response.rc == 0:\n ret = Environment.json_2_environment(response.response_content)\n elif response.rc != 404:\n err_msg = 'EnvironmentService.find_environment - Problem while finding environment (id:' + \\\n str(env_id) + ', name:' + str(env_name) + '). ' + \\\n 'Reason: ' + str(response.response_content) + '-' + str(response.error_message) + \\\n \" (\" + str(response.rc) + \")\"\n LOGGER.warning(\n err_msg\n )\n\n return ret",
"def get_or_create_application(self, id=None, manifest=None, name=None):\n \"\"\" Get application by id or name.\n If not found: create with given or generated parameters\n \"\"\"\n if id:\n return self.get_application(id=id)\n elif name:\n try:\n app = self.get_application(name=name)\n except exceptions.NotFoundError:\n app = self.create_application(name=name, manifest=manifest)\n return app\n raise exceptions.NotEnoughParams('Not enough parameters')",
"def create(self, name=None, prefix=None, pkgs=None, channels=None):\n \"\"\"Create an environment with a specified set of packages.\"\"\"\n logger.debug(str((prefix, pkgs, channels)))\n\n # TODO: Fix temporal hack\n if (not pkgs or (not isinstance(pkgs, (list, tuple)) and\n not is_text_string(pkgs))):\n raise TypeError('must specify a list of one or more packages to '\n 'install into new environment')\n\n cmd_list = ['create', '--yes', '--json', '--mkdir']\n if name:\n ref = name\n search = [os.path.join(d, name) for d in\n self.info().communicate()[0]['envs_dirs']]\n cmd_list.extend(['--name', name])\n elif prefix:\n ref = prefix\n search = [prefix]\n cmd_list.extend(['--prefix', prefix])\n else:\n raise TypeError('must specify either an environment name or a '\n 'path for new environment')\n\n if any(os.path.exists(prefix) for prefix in search):\n raise CondaEnvExistsError('Conda environment {0} already '\n 'exists'.format(ref))\n\n # TODO: Fix temporal hack\n if isinstance(pkgs, (list, tuple)):\n cmd_list.extend(pkgs)\n elif is_text_string(pkgs):\n cmd_list.extend(['--file', pkgs])\n\n # TODO: Check if correct\n if channels:\n cmd_list.extend(['--override-channels'])\n\n for channel in channels:\n cmd_list.extend(['--channel'])\n cmd_list.extend([channel])\n\n return self._call_and_parse(cmd_list)",
"def get_or_create_instance(self, id=None, application=None, revision=None, environment=None, name=None, parameters=None, submodules=None,\n destroyInterval=None):\n \"\"\" Get instance by id or name.\n If not found: create with given parameters\n \"\"\"\n try:\n instance = self.get_instance(id=id, name=name)\n if name and name != instance.name:\n instance.rename(name)\n instance.ready()\n return instance\n except exceptions.NotFoundError:\n return self.create_instance(application, revision, environment, name, parameters, submodules, destroyInterval)",
"def create_environment(self, topology_name, topology={}, id=None, **kwargs):\n '''\n create_environment(self, topology_name, topology={}, id=None, **kwargs)\n\n Create a new environment\n\n :Parameters:\n\n * *topology_name* (`string`) -- The topology identifier. Must be provided to create an environment.\n * *topology* (`object`) -- Topology data (must match the topology json schema)\n * *id* (`object`) -- The environment identifier. If none provided when creating environment, Opereto will automatically assign a unique identifier.\n\n :return: id of the created environment\n\n '''\n request_data = {'topology_name': topology_name,'id': id, 'topology':topology, 'add_only':True}\n request_data.update(**kwargs)\n return self._call_rest_api('post', '/environments', data=request_data, error='Failed to create environment')",
"def environment(name)\n yaml_path = yaml_path_if_exists(name)\n rb_path = rb_path_if_exists(name)\n\n raise \"found multiple env files for same env #{name}.\" if !yaml_path.nil? && !rb_path.nil?\n raise \"TODO: implement Ruby environments.\" unless rb_path.nil?\n\n env = Environments::Environment.load_yaml_file(yaml_path) unless yaml_path.nil?\n\n raise \"no env found for '#{name}'.\" if env.nil?\n\n IceNine.deep_freeze(env)\n env\n end",
"def getenv(name, **kwargs):\n \"\"\"\n Retrieves environment variable by name and casts the value to desired type.\n If desired type is list or tuple - uses separator to split the value.\n \"\"\"\n default_value = kwargs.pop('default', None)\n desired_type = kwargs.pop('type', str)\n list_separator = kwargs.pop('separator', ',')\n\n value = os.getenv(name, None)\n\n if value is None:\n if default_value is None:\n return None\n else:\n return default_value\n\n if desired_type is bool:\n if value.lower() in ['false', '0']:\n return False\n else:\n return bool(value)\n\n if desired_type is list or desired_type is tuple:\n value = value.split(list_separator)\n return desired_type(value)\n\n if desired_type is dict:\n return dict(literal_eval(value))\n\n return desired_type(value)",
"def make(env_name, *args, **kwargs):\n \"\"\"Try to get the equivalent functionality of gym.make in a sloppy way.\"\"\"\n if env_name not in REGISTERED_ENVS:\n raise Exception(\n \"Environment {} not found. Make sure it is a registered environment among: {}\".format(\n env_name, \", \".join(REGISTERED_ENVS)\n )\n )\n return REGISTERED_ENVS[env_name](*args, **kwargs)",
"public EnvironmentInner get(String resourceGroupName, String labAccountName, String labName, String environmentSettingName, String environmentName, String expand) {\n return getWithServiceResponseAsync(resourceGroupName, labAccountName, labName, environmentSettingName, environmentName, expand).toBlocking().single().body();\n }"
] |
[
0.8424941301345825,
0.7962385416030884,
0.7916843891143799,
0.7641311287879944,
0.7430555820465088,
0.7369561791419983,
0.731006920337677,
0.7135235071182251,
0.7063538432121277,
0.7017119526863098,
0.6990259885787964,
0.6980457901954651
] |
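The get-or-create flow in the positive snippet hinges on the client's exceptions.NotFoundError. A library-agnostic sketch of the same control flow, with a stubbed store and exception (all names here are hypothetical stand-ins):

class NotFoundError(KeyError):
    """Stand-in for the client's exceptions.NotFoundError."""

STORE = {}

def get_env(name):
    try:
        return STORE[name]
    except KeyError:
        raise NotFoundError(name)

def create_env(name, zone=None, default=False):
    STORE[name] = {"name": name, "zone": zone, "default": default}
    return STORE[name]

def get_or_create_env(name=None, zone=None, default=False):
    # Mirror the snippet: prefer lookup, create on NotFoundError,
    # and fall back to a generated name when none is given.
    name = name or "auto-generated-env"
    try:
        return get_env(name)
    except NotFoundError:
        return create_env(name, zone=zone, default=default)

assert get_or_create_env("staging") is get_or_create_env("staging")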
Smart method. Creates, picks or modifies environment.
If environment found by name or id parameters not changed: return env.
If env found by id, but other parameters differs: change them.
If no environment found, create with given parameters.
|
def environment(self, id=None, name=None, zone=None, default=False):
""" Smart method. Creates, picks or modifies environment.
If environment found by name or id parameters not changed: return env.
If env found by id, but other parameters differs: change them.
If no environment found, create with given parameters.
"""

    found = False

    # Try to find environment by name or id
    if name and id:
        found = self.get_environment(id=id)
    elif id:
        found = self.get_environment(id=id)
        name = found.name
    elif name:
        try:
            found = self.get_environment(name=name)
        except exceptions.NotFoundError:
            pass

    # If found - compare parameters
if found:
self._assert_env_and_zone(found, zone)
if default and not found.isDefault:
found.set_as_default()
# TODO: add abilities to change name.
if not found:
created = self.create_environment(name=name, zone=zone, default=default)
return found or created
|
[
"def get_or_create_environment(self, id=None, name=None, zone=None, default=False):\n \"\"\" Get environment by id or name.\n If not found: create with given or generated parameters\n \"\"\"\n if id:\n return self.get_environment(id=id)\n elif name:\n try:\n env = self.get_environment(name=name)\n self._assert_env_and_zone(env, zone)\n except exceptions.NotFoundError:\n env = self.create_environment(name=name, zone=zone, default=default)\n return env\n else:\n name = 'auto-generated-env'\n return self.create_environment(name=name, zone=zone, default=default)",
"def modify_environment(self, environment_id, **kwargs):\n '''\n modify_environment(self, environment_id, **kwargs)\n\n Modifies an existing environment\n\n :Parameters:\n * *environment_id* (`string`) -- The environment identifier\n\n Keywords args:\n The variables to change in the environment\n\n :return: id of the created environment\n\n '''\n request_data = {'id': environment_id}\n request_data.update(**kwargs)\n return self._call_rest_api('post', '/environments', data=request_data, error='Failed to modify environment')",
"def create(self, name=None, prefix=None, pkgs=None, channels=None):\n \"\"\"Create an environment with a specified set of packages.\"\"\"\n logger.debug(str((prefix, pkgs, channels)))\n\n # TODO: Fix temporal hack\n if (not pkgs or (not isinstance(pkgs, (list, tuple)) and\n not is_text_string(pkgs))):\n raise TypeError('must specify a list of one or more packages to '\n 'install into new environment')\n\n cmd_list = ['create', '--yes', '--json', '--mkdir']\n if name:\n ref = name\n search = [os.path.join(d, name) for d in\n self.info().communicate()[0]['envs_dirs']]\n cmd_list.extend(['--name', name])\n elif prefix:\n ref = prefix\n search = [prefix]\n cmd_list.extend(['--prefix', prefix])\n else:\n raise TypeError('must specify either an environment name or a '\n 'path for new environment')\n\n if any(os.path.exists(prefix) for prefix in search):\n raise CondaEnvExistsError('Conda environment {0} already '\n 'exists'.format(ref))\n\n # TODO: Fix temporal hack\n if isinstance(pkgs, (list, tuple)):\n cmd_list.extend(pkgs)\n elif is_text_string(pkgs):\n cmd_list.extend(['--file', pkgs])\n\n # TODO: Check if correct\n if channels:\n cmd_list.extend(['--override-channels'])\n\n for channel in channels:\n cmd_list.extend(['--channel'])\n cmd_list.extend([channel])\n\n return self._call_and_parse(cmd_list)",
"def get_environment(self, id=None, name=None):\n \"\"\" Get environment object by name or id.\n \"\"\"\n log.info(\"Picking environment: %s (%s)\" % (name, id))\n return self.environments[id or name]",
"def find_environment(env_id=None, env_name=None):\n \"\"\"\n find the environment according environment id (prioritary) or environment name\n :param env_id: the environment id\n :param env_name: the environment name\n :return: found environment or None if not found\n \"\"\"\n LOGGER.debug(\"EnvironmentService.find_environment\")\n if (env_id is None or not env_id) and (env_name is None or not env_name):\n raise exceptions.ArianeCallParametersError('id and name')\n\n if (env_id is not None and env_id) and (env_name is not None and env_name):\n LOGGER.warn('Both id and name are defined. Will give you search on id.')\n env_name = None\n\n params = None\n if env_id is not None and env_id:\n params = {'id': env_id}\n elif env_name is not None and env_name:\n params = {'name': env_name}\n\n ret = None\n if params is not None:\n args = {'http_operation': 'GET', 'operation_path': 'get', 'parameters': params}\n response = EnvironmentService.requester.call(args)\n if response.rc == 0:\n ret = Environment.json_2_environment(response.response_content)\n elif response.rc != 404:\n err_msg = 'EnvironmentService.find_environment - Problem while finding environment (id:' + \\\n str(env_id) + ', name:' + str(env_name) + '). ' + \\\n 'Reason: ' + str(response.response_content) + '-' + str(response.error_message) + \\\n \" (\" + str(response.rc) + \")\"\n LOGGER.warning(\n err_msg\n )\n\n return ret",
"def env_maker(environment_id):\n \"\"\" Create a relatively raw atari environment \"\"\"\n env = gym.make(environment_id)\n assert 'NoFrameskip' in env.spec.id\n\n # Wait for between 1 and 30 rounds doing nothing on start\n env = NoopResetEnv(env, noop_max=30)\n\n # Do the same action for k steps. Return max of last 2 frames. Return sum of rewards\n env = MaxAndSkipEnv(env, skip=4)\n\n return env",
"public EnvironmentInner createOrUpdate(String resourceGroupName, String labAccountName, String labName, String environmentSettingName, String environmentName, EnvironmentInner environment) {\n return createOrUpdateWithServiceResponseAsync(resourceGroupName, labAccountName, labName, environmentSettingName, environmentName, environment).toBlocking().single().body();\n }",
"def env_maker(environment_id, seed, serial_id, monitor=False, allow_early_resets=False, normalize_observations=False,\n normalize_returns=False, normalize_gamma=0.99):\n \"\"\" Create a relatively raw atari environment \"\"\"\n env = gym.make(environment_id)\n env.seed(seed + serial_id)\n\n # Monitoring the env\n if monitor:\n logdir = logger.get_dir() and os.path.join(logger.get_dir(), str(serial_id))\n else:\n logdir = None\n\n env = Monitor(env, logdir, allow_early_resets=allow_early_resets)\n\n if normalize_observations or normalize_returns:\n env = EnvNormalize(\n env,\n normalize_observations=normalize_observations,\n normalize_returns=normalize_returns,\n gamma=normalize_gamma\n )\n\n return env",
"def get_or_create_instance(self, id=None, application=None, revision=None, environment=None, name=None, parameters=None, submodules=None,\n destroyInterval=None):\n \"\"\" Get instance by id or name.\n If not found: create with given parameters\n \"\"\"\n try:\n instance = self.get_instance(id=id, name=name)\n if name and name != instance.name:\n instance.rename(name)\n instance.ready()\n return instance\n except exceptions.NotFoundError:\n return self.create_instance(application, revision, environment, name, parameters, submodules, destroyInterval)",
"def create_environment(self, topology_name, topology={}, id=None, **kwargs):\n '''\n create_environment(self, topology_name, topology={}, id=None, **kwargs)\n\n Create a new environment\n\n :Parameters:\n\n * *topology_name* (`string`) -- The topology identifier. Must be provided to create an environment.\n * *topology* (`object`) -- Topology data (must match the topology json schema)\n * *id* (`object`) -- The environment identifier. If none provided when creating environment, Opereto will automatically assign a unique identifier.\n\n :return: id of the created environment\n\n '''\n request_data = {'topology_name': topology_name,'id': id, 'topology':topology, 'add_only':True}\n request_data.update(**kwargs)\n return self._call_rest_api('post', '/environments', data=request_data, error='Failed to create environment')",
"def instantiate(self, seed=0, serial_id=0, preset='default', extra_args=None) -> gym.Env:\n \"\"\" Make a single environment compatible with the experiments \"\"\"\n settings = self.get_preset(preset)\n return wrapped_env_maker(self.envname, seed, serial_id, **settings)",
"def env_maker(environment_id, seed, serial_id, monitor=False, allow_early_resets=False):\n \"\"\" Create a classic control environment with basic set of wrappers \"\"\"\n env = gym.make(environment_id)\n env.seed(seed + serial_id)\n\n # Monitoring the env\n if monitor:\n logdir = logger.get_dir() and os.path.join(logger.get_dir(), str(serial_id))\n else:\n logdir = None\n\n env = Monitor(env, logdir, allow_early_resets=allow_early_resets)\n\n return env"
] |
[
0.8086185455322266,
0.7347423434257507,
0.7233126759529114,
0.7075681686401367,
0.6985051035881042,
0.6982787251472473,
0.6969988346099854,
0.6945491433143616,
0.6899809837341309,
0.6873162388801575,
0.684951901435852,
0.6849428415298462
] |
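The "smart" method above folds three outcomes into one call: pick an existing environment, flip its default flag if asked, or create it. A condensed, runnable sketch of that flow (zone and id handling omitted; Env and ENVS are stand-ins, not dataset names):

class Env:
    def __init__(self, name, default=False):
        self.name = name
        self.isDefault = default
    def set_as_default(self):
        self.isDefault = True

ENVS = {}

def smart_environment(name, default=False):
    # Condensed version of the snippet's flow: pick if present,
    # promote to default if asked, otherwise create.
    found = ENVS.get(name)
    if found and default and not found.isDefault:
        found.set_as_default()
    if not found:
        found = ENVS.setdefault(name, Env(name, default))
    return found

e1 = smart_environment("qa")
e2 = smart_environment("qa", default=True)
assert e1 is e2 and e1.isDefault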
Get zone object by name or id.
|
def get_zone(self, id=None, name=None):
""" Get zone object by name or id.
"""
log.info("Picking zone: %s (%s)" % (name, id))
return self.zones[id or name]
|
[
"async def get(self, zone_id: int, *, details: bool = False) -> dict:\n \"\"\"Return a specific zone.\"\"\"\n endpoint = 'zone/{0}'.format(zone_id)\n if details:\n endpoint += '/properties'\n return await self._request('get', endpoint)",
"def _get_zone_id_from_name(self, name):\n \"\"\"Return zone ID based on a zone.\"\"\"\n results = self.client['Account'].getDomains(\n filter={\"domains\": {\"name\": utils.query_filter(name)}})\n return [x['id'] for x in results]",
"def _find_zone_by_id(self, zone_id):\n \"\"\"Return zone by id.\"\"\"\n if not self.zones:\n return None\n\n zone = list(filter(\n lambda zone: zone.id == zone_id, self.zones))\n\n return zone[0] if zone else None",
"def get(self, zone_id):\n \"\"\"Retrieve the information for a zone entity.\"\"\"\n path = '/'.join(['zone', zone_id])\n return self.rachio.get(path)",
"def get_zone(self, zone_id, records=True):\n \"\"\"Get a zone and its records.\n\n :param zone: the zone name\n :returns: A dictionary containing a large amount of information about\n the specified zone.\n\n \"\"\"\n mask = None\n if records:\n mask = 'resourceRecords'\n return self.service.getObject(id=zone_id, mask=mask)",
"def get_zone(name)\n params = {\n 'command' => 'listZones',\n 'available' => 'true'\n }\n json = send_request(params)\n\n networks = json['zone']\n return nil unless networks\n\n networks.each { |z|\n if name.is_uuid? then\n return z if z['id'] == name\n else\n return z if z['name'] == name\n end\n }\n nil\n end",
"def _get_zone(self, domain, domain_id):\n \"\"\"\n Pulls the zone for the current domain from authenticated Hetzner account and\n returns it as an zone object.\n \"\"\"\n api = self.api[self.account]\n for request in api['zone']['GET']:\n url = (request.copy()).get('url', '/').replace('<id>', domain_id)\n params = request.get('params', {}).copy()\n for param in params:\n params[param] = params[param].replace('<id>', domain_id)\n response = self._get(url, query_params=params)\n dom = Provider._filter_dom(response.text, api['filter'])\n zone_file_filter = [{'name': 'textarea', 'attrs': {'name': api['zone']['file']}}]\n zone_file = Provider._filter_dom(dom, zone_file_filter).renderContents().decode('UTF-8')\n hidden = Provider._extract_hidden_data(dom)\n zone = {'data': dns.zone.from_text(zone_file, origin=domain, relativize=False),\n 'hidden': hidden}\n LOGGER.info('Hetzner => Get zone for domain %s', domain)\n return zone",
"def get_zone(self, zone_name):\n \"\"\"\n Get the information about a particular zone\n \"\"\"\n for zone in self.get_zones():\n if zone_name == zone['name']:\n return zone\n\n raise RuntimeError(\"Unknown zone\")",
"def get_record(self, zone_name, record_id):\n \"\"\"\n Get record with given id\n :param zone_name: Name of the zone\n :param record_id: Id of the record\n :return: Value of the record\n \"\"\"\n return self._client.get(\n '/domain/zone/{}/record/{}'.format(zone_name, record_id))",
"public OvhRecord zone_zoneName_record_id_GET(String zoneName, Long id) throws IOException {\n\t\tString qPath = \"/domain/zone/{zoneName}/record/{id}\";\n\t\tStringBuilder sb = path(qPath, zoneName, id);\n\t\tString resp = exec(qPath, \"GET\", sb.toString(), null);\n\t\treturn convertTo(resp, OvhRecord.class);\n\t}",
"def zone_for_name(name, rdclass=dns.rdataclass.IN, tcp=False, resolver=None):\n \"\"\"Find the name of the zone which contains the specified name.\n\n @param name: the query name\n @type name: absolute dns.name.Name object or string\n @param rdclass: The query class\n @type rdclass: int\n @param tcp: use TCP to make the query (default is False).\n @type tcp: bool\n @param resolver: the resolver to use\n @type resolver: dns.resolver.Resolver object or None\n @rtype: dns.name.Name\"\"\"\n\n if isinstance(name, basestring):\n name = dns.name.from_text(name, dns.name.root)\n if resolver is None:\n resolver = get_default_resolver()\n if not name.is_absolute():\n raise NotAbsolute(name)\n while 1:\n try:\n answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)\n if answer.rrset.name == name:\n return name\n # otherwise we were CNAMEd or DNAMEd and need to look higher\n except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):\n pass\n try:\n name = name.parent()\n except dns.name.NoParent:\n raise NoRootSOA",
"public OvhDynHostRecord zone_zoneName_dynHost_record_id_GET(String zoneName, Long id) throws IOException {\n\t\tString qPath = \"/domain/zone/{zoneName}/dynHost/record/{id}\";\n\t\tStringBuilder sb = path(qPath, zoneName, id);\n\t\tString resp = exec(qPath, \"GET\", sb.toString(), null);\n\t\treturn convertTo(resp, OvhDynHostRecord.class);\n\t}"
] |
[
0.7900980710983276,
0.7834318280220032,
0.7769157886505127,
0.7728655338287354,
0.7601249814033508,
0.743982195854187,
0.7381396889686584,
0.7289514541625977,
0.7282195091247559,
0.7281316518783569,
0.7177662253379822,
0.7134367227554321
] |
Creates role
|
def create_role(self, name=None, permissions=""):
""" Creates role """
name = name or "autocreated-role"
from qubell.api.private.role import Role
return Role.new(self._router, organization=self, name=name, permissions=permissions)
|
[
"def create_role(*_, **kwargs):\n \"\"\" Create user role \"\"\"\n click.echo(green('\\nCreating new role:'))\n click.echo(green('-' * 40))\n\n with get_app().app_context():\n role = Role(**kwargs)\n result = role_service.save(role)\n if not isinstance(result, Role):\n print_validation_errors(result)\n\n click.echo(green('Created: ') + str(role) + '\\n')",
"def post(self):\n \"\"\"Create a new role\"\"\"\n self.reqparse.add_argument('name', type=str, required=True)\n self.reqparse.add_argument('color', type=str, required=True)\n args = self.reqparse.parse_args()\n\n role = Role()\n role.name = args['name']\n role.color = args['color']\n\n db.session.add(role)\n db.session.commit()\n auditlog(event='role.create', actor=session['user'].username, data=args)\n\n return self.make_response('Role {} has been created'.format(role.role_id), HTTP.CREATED)",
"def create(self, handle, title=None, description=None):\n \"\"\" Create a role \"\"\"\n role = Role(handle=handle, title=title, description=description)\n schema = RoleSchema()\n valid = schema.process(role)\n if not valid:\n return valid\n\n db.session.add(role)\n db.session.commit()\n\n events.role_created_event.send(role)\n return role",
"def createRole(self, name, description):\n \"\"\"\n creates a role for a portal/agol site.\n Inputs:\n names - name of the role\n description - brief text string stating the nature of this\n role.\n Ouput:\n dictionary\n \"\"\"\n params = {\n \"name\" : name,\n \"description\" : description,\n \"f\" : \"json\"\n }\n url = self.root + \"/createRole\"\n return self._post(url=url,\n param_dict=params,\n securityHandler=self._securityHandler,\n proxy_url=self._proxy_url,\n proxy_port=self._proxy_port)",
"public function postCreateRole()\n {\n $response = (object)array(\n 'method' => 'createrole',\n 'success' => false,\n 'status' => 200,\n 'error_code' => 0,\n 'error_message' => ''\n );\n // decode json data\n $data = json_decode(file_get_contents(\"php://input\"));\n if (empty($data)) {\n $data = (object) $_POST;\n }\n $requiredParams = array('name', 'active');\n try {\n AuthorizerHelper::can(RoleValidator::ROLE_CAN_CREATE);\n\n $data = (array) $data;\n foreach ($requiredParams as $param){\n if (empty($data[$param])) {\n throw new \\Exception(ucfirst($param) .' is required.');\n }\n }\n $data[] = array('active' => $data['active'],\n 'name' => strtolower($data['name'])\n );\n\n $roleModel = new \\erdiko\\users\\models\\Role();\n $roleId = $roleModel->create($data);\n if ($roleId === 0) {\n throw new \\Exception('Could not create Role.');\n }\n $role = $roleModel->findById($roleId);\n $responseRole = array('id' => $role->getId(),\n 'active' => (boolean) $role->getActive(),\n 'name' => $role->getName()\n );\n $response->success = true;\n $response->role = $responseRole;\n unset($response->error_code);\n unset($response->error_message);\n } catch (\\Exception $e) {\n $response->success = false;\n $response->error_code = $e->getCode();\n $response->error_message = $e->getMessage();\n }\n $this->setContent($response);\n }",
"public function fire()\n {\n $roleName = $this->argument('role');\n $description= $this->argument('description');\n $roleKey = snake_case($roleName);\n $lockdown = $this->lockdown;\n $values = [\n 'role' => $roleName,\n ];\n\n try {\n $roleCheck = $lockdown->findRoleById($roleKey);\n } catch (RoleNotFound $e) {\n }\n\n if (isset($roleCheck) && $roleCheck) {\n $this->error('Role [%(role)s] already exists', $values);\n return;\n }\n\n try {\n $result = $lockdown->createRole(\n $roleName,\n $roleKey,\n $description\n );\n } catch (QueryException $e) {\n $this->error(\n \"The role [%(role)s] couldn't be created due to a \" .\n \"'QueryException', please check your error log.\",\n $values\n );\n return;\n }\n\n if ($result) {\n $this->info(\n \"The role [%(role)s] (%(key)s) has been created!\",\n $values\n );\n return;\n }\n\n $this->error(\"The role [%(role)s] couldn't be created\", $values);\n }",
"def create_role(name):\n \"\"\"\n Create a new role.\n \"\"\"\n role = role_manager.create(name=name)\n if click.confirm(f'Are you sure you want to create {role!r}?'):\n role_manager.save(role, commit=True)\n click.echo(f'Successfully created {role!r}')\n else:\n click.echo('Cancelled.')",
"def create_role(self, **kwargs):\n \"\"\"Creates and returns a new role from the given parameters.\"\"\"\n\n role = self.role_model(**kwargs)\n return self.put(role)",
"def create_role(name: 'new role', colour: 0, hoist: false, mentionable: false, permissions: 104_324_161, reason: nil)\n colour = colour.respond_to?(:combined) ? colour.combined : colour\n\n permissions = if permissions.is_a?(Array)\n Permissions.bits(permissions)\n elsif permissions.respond_to?(:bits)\n permissions.bits\n else\n permissions\n end\n\n response = API::Server.create_role(@bot.token, @id, name, colour, hoist, mentionable, permissions, reason)\n\n role = Role.new(JSON.parse(response), @bot, self)\n @roles << role\n role\n end",
"def create_role(resource_root,\n service_name,\n role_type,\n role_name,\n host_id,\n cluster_name=\"default\"):\n \"\"\"\n Create a role\n @param resource_root: The root Resource object.\n @param service_name: Service name\n @param role_type: Role type\n @param role_name: Role name\n @param cluster_name: Cluster name\n @return: An ApiRole object\n \"\"\"\n apirole = ApiRole(resource_root, role_name, role_type,\n ApiHostRef(resource_root, host_id))\n return call(resource_root.post,\n _get_roles_path(cluster_name, service_name),\n ApiRole, True, data=[apirole])[0]",
"def create_role(self, role_name, role_type, host_id):\n \"\"\"\n Create a role.\n\n @param role_name: Role name\n @param role_type: Role type\n @param host_id: ID of the host to assign the role to\n @return: An ApiRole object\n \"\"\"\n return roles.create_role(self._get_resource_root(), self.name, role_type,\n role_name, host_id, self._get_cluster_name())",
"def createRole(self, *args, **kwargs):\n \"\"\"\n Create Role\n\n Create a new role.\n\n The caller's scopes must satisfy the new role's scopes.\n\n If there already exists a role with the same `roleId` this operation\n will fail. Use `updateRole` to modify an existing role.\n\n Creation of a role that will generate an infinite expansion will result\n in an error response.\n\n This method takes input: ``v1/create-role-request.json#``\n\n This method gives output: ``v1/get-role-response.json#``\n\n This method is ``stable``\n \"\"\"\n\n return self._makeApiCall(self.funcinfo[\"createRole\"], *args, **kwargs)"
] |
[
0.841355562210083,
0.8247339725494385,
0.8023505806922913,
0.7976750135421753,
0.7924614548683167,
0.7875912189483643,
0.7864711880683899,
0.7849196195602417,
0.7841896414756775,
0.7808602452278137,
0.7768216729164124,
0.7726830840110779
] |
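Two details of create_role are worth noting: the name or "autocreated-role" defaulting idiom, and the import of Role inside the function body, which in clients like this is typically done to dodge a circular import. A tiny sketch of the defaulting behavior (the returned dict is a stand-in for the real Role.new call):

def create_role(router, organization, name=None, permissions=""):
    # Mirrors the snippet's defaulting; a dict stands in for Role.new().
    name = name or "autocreated-role"
    return {"organization": organization, "name": name, "permissions": permissions}

role = create_role(router=None, organization="acme")
assert role["name"] == "autocreated-role"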
Get role object by name or id.
|
def get_role(self, id=None, name=None):
""" Get role object by name or id.
"""
log.info("Picking role: %s (%s)" % (name, id))
return self.roles[id or name]
|
[
"def get_role(role_id,**kwargs):\n \"\"\"\n Get a role by its ID.\n \"\"\"\n try:\n role = db.DBSession.query(Role).filter(Role.id==role_id).one()\n return role\n except NoResultFound:\n raise HydraError(\"Role not found (role_id={})\".format(role_id))",
"def get_role(self, key):\n \"\"\"Return id of named role.\"\"\"\n\n json_resp = self.get_roles()\n for item in json_resp:\n if key in item[\"name\"]:\n return item[\"id\"]\n raise CerberusClientException(\"Key '%s' not found\" % key)",
"def get(self, name_or_uri):\n \"\"\"\n Get the role by its URI or Name.\n\n Args:\n name_or_uri:\n Can be either the Name or the URI.\n\n Returns:\n dict: Role\n \"\"\"\n name_or_uri = quote(name_or_uri)\n return self._client.get(name_or_uri)",
"def get_role(self, name):\n \"\"\"\n Lookup a role by name.\n\n @param name: Role name\n @return: An ApiRole object\n \"\"\"\n return roles.get_role(self._get_resource_root(), self.name, name,\n self._get_cluster_name())",
"def get_role(self, name):\n \"\"\"Get a single Role by name.\n\n Args:\n name (str): The name of the Role.\n\n Returns:\n (:obj:`Role`): The Role that matches the name or None.\n \"\"\"\n\n address = _create_role_address(name)\n role_list_bytes = None\n\n try:\n role_list_bytes = self._state_view.get(address=address)\n except KeyError:\n return None\n\n if role_list_bytes is not None:\n role_list = _create_from_bytes(role_list_bytes,\n identity_pb2.RoleList)\n for role in role_list.roles:\n if role.name == name:\n return role\n return None",
"def role_get(role_id=None, name=None, profile=None, **connection_args):\n '''\n Return a specific roles (keystone role-get)\n\n CLI Examples:\n\n .. code-block:: bash\n\n salt '*' keystone.role_get c965f79c4f864eaaa9c3b41904e67082\n salt '*' keystone.role_get role_id=c965f79c4f864eaaa9c3b41904e67082\n salt '*' keystone.role_get name=nova\n '''\n kstone = auth(profile, **connection_args)\n ret = {}\n if name:\n for role in kstone.roles.list():\n if role.name == name:\n role_id = role.id\n break\n if not role_id:\n return {'Error': 'Unable to resolve role id'}\n role = kstone.roles.get(role_id)\n\n ret[role.name] = {'id': role.id,\n 'name': role.name}\n return ret",
"def get(self, roleId):\n \"\"\"Get a specific role information\"\"\"\n role = db.Role.find_one(Role.role_id == roleId)\n\n if not role:\n return self.make_response('No such role found', HTTP.NOT_FOUND)\n\n return self.make_response({'role': role})",
"def get_single_role(self, id, role_id, account_id, role=None):\r\n \"\"\"\r\n Get a single role.\r\n\r\n Retrieve information about a single role\r\n \"\"\"\r\n path = {}\r\n data = {}\r\n params = {}\r\n\r\n # REQUIRED - PATH - id\r\n \"\"\"ID\"\"\"\r\n path[\"id\"] = id\r\n\r\n # REQUIRED - PATH - account_id\r\n \"\"\"The id of the account containing the role\"\"\"\r\n path[\"account_id\"] = account_id\r\n\r\n # REQUIRED - role_id\r\n \"\"\"The unique identifier for the role\"\"\"\r\n params[\"role_id\"] = role_id\r\n\r\n # OPTIONAL - role\r\n \"\"\"The name for the role\"\"\"\r\n if role is not None:\r\n params[\"role\"] = role\r\n\r\n self.logger.debug(\"GET /api/v1/accounts/{account_id}/roles/{id} with query params: {params} and form data: {data}\".format(params=params, data=data, **path))\r\n return self.generic_request(\"GET\", \"/api/v1/accounts/{account_id}/roles/{id}\".format(**path), data=data, params=params, single_item=True)",
"def findRoleID(self, name):\n \"\"\"searches the roles by name and returns the role's ID\"\"\"\n for r in self:\n if r['name'].lower() == name.lower():\n return r['id']\n del r\n return None",
"def get_role_by_code(role_code,**kwargs):\n \"\"\"\n Get a role by its code\n \"\"\"\n try:\n role = db.DBSession.query(Role).filter(Role.code==role_code).one()\n return role\n except NoResultFound:\n raise ResourceNotFoundError(\"Role not found (role_code={})\".format(role_code))",
"def get_role(resource_root, service_name, name, cluster_name=\"default\"):\n \"\"\"\n Lookup a role by name\n @param resource_root: The root Resource object.\n @param service_name: Service name\n @param name: Role name\n @param cluster_name: Cluster name\n @return: An ApiRole object\n \"\"\"\n return _get_role(resource_root, _get_role_path(cluster_name, service_name, name))",
"async def role(self, *args, **kwargs):\n \"\"\"\n Get Role\n\n Get information about a single role, including the set of scopes that the\n role expands to.\n\n This method gives output: ``v1/get-role-response.json#``\n\n This method is ``stable``\n \"\"\"\n\n return await self._makeApiCall(self.funcinfo[\"role\"], *args, **kwargs)"
] |
[
0.7889066934585571,
0.7750076651573181,
0.763481616973877,
0.7608851790428162,
0.7590010762214661,
0.7550540566444397,
0.7545921802520752,
0.7423298954963684,
0.7422281503677368,
0.7390575408935547,
0.7315366268157959,
0.7311252951622009
] |
Get user object by email or id.
|
def get_user(self, id=None, name=None, email=None):
""" Get user object by email or id.
"""
log.info("Picking user: %s (%s) (%s)" % (name, email, id))
from qubell.api.private.user import User
if email:
user = User.get(self._router, organization=self, email=email)
else:
user = self.users[id or name]
return user
|
[
"def get_user_by_email(self, email):\n\t\t\"\"\"This function gets a user's data field and info\"\"\"\n\n\t\tcall = \"/api/users/\"+ str(email)\n\n\t\treturn self.api_call(call=call, method=\"GET\")",
"def user(self, email: str) -> models.User:\n \"\"\"Fetch a user from the database.\"\"\"\n return self.User.query.filter_by(email=email).first()",
"def get_by_email(cls, email):\n \"\"\"\n Return a User by email address\n \"\"\"\n return cls.query().filter(cls.email == email).first()",
"def get_user_by_id(self, id):\n \"\"\"Retrieve a User object by ID.\"\"\"\n return self.db_adapter.get_object(self.UserClass, id=id)",
"def get(self, request, id):\n \"\"\" Get one user or all users \"\"\"\n if id:\n return self._get_one(id)\n else:\n return self._get_all()",
"def get_user(self, user_id=None, username=None, email=None):\n \"\"\"\n Returns the user specified by either ID, username or email.\n\n Since more than user can have the same email address, searching by that\n term will return a list of 1 or more User objects. Searching by\n username or ID will return a single User.\n\n If a user_id that doesn't belong to the current account is searched\n for, a Forbidden exception is raised. When searching by username or\n email, a NotFound exception is raised if there is no matching user.\n \"\"\"\n if user_id:\n uri = \"/users/%s\" % user_id\n elif username:\n uri = \"/users?name=%s\" % username\n elif email:\n uri = \"/users?email=%s\" % email\n else:\n raise ValueError(\"You must include one of 'user_id', \"\n \"'username', or 'email' when calling get_user().\")\n resp, resp_body = self.method_get(uri)\n if resp.status_code == 404:\n raise exc.NotFound(\"No such user exists.\")\n users = resp_body.get(\"users\", [])\n if users:\n return [User(self, user) for user in users]\n else:\n user = resp_body.get(\"user\", {})\n if user:\n return User(self, user)\n else:\n raise exc.NotFound(\"No such user exists.\")",
"def get_by_id(self, id):\n \"\"\"Find user by his id and return user model.\"\"\"\n return self.user_cls(id=id, password='secret' + str(id))",
"def by_email_address(cls, email):\n \"\"\"Return the user object whose email address is ``email``.\"\"\"\n return DBSession.query(cls).filter_by(email_address=email).first()",
"def get_user_and_user_email_by_id(self, user_or_user_email_id):\n \"\"\"Retrieve the User and UserEmail object by ID.\"\"\"\n if self.UserEmailClass:\n user_email = self.db_adapter.get_object(self.UserEmailClass, user_or_user_email_id)\n user = user_email.user if user_email else None\n else:\n user = self.db_adapter.get_object(self.UserClass, user_or_user_email_id)\n user_email = user\n return (user, user_email)",
"def GetUserByEmail(self, email):\n \"\"\"Gets user info by email.\n\n Args:\n email: string, the user email.\n\n Returns:\n GitkitUser, containing the user info.\n \"\"\"\n user = self.rpc_helper.GetAccountInfoByEmail(email)\n return GitkitUser.FromApiResponse(user)",
"def get_user_by_email(self, email):\n \"\"\"\n Returns details for user with the given email address.\n\n If there is more than one match will only return the first. Use\n get_users() for full result set.\n \"\"\"\n results = self.get_users(filter='email eq \"%s\"' % (email))\n if results['totalResults'] == 0:\n logging.warning(\"Found no matches for given email.\")\n return\n elif results['totalResults'] > 1:\n logging.warning(\"Found %s matches for email %s\" %\n (results['totalResults'], email))\n\n return results['resources'][0]",
"def get_user(uid, **kwargs):\n \"\"\"\n Get a user by ID\n \"\"\"\n user_id=kwargs.get('user_id')\n if uid is None:\n uid = user_id\n user_i = _get_user(uid)\n return user_i"
] |
[
0.7941586971282959,
0.7891509532928467,
0.7855108380317688,
0.761310338973999,
0.7595750093460083,
0.7590205073356628,
0.7588252425193787,
0.7587990164756775,
0.7583810687065125,
0.7540221810340881,
0.7507094144821167,
0.750545859336853
] |
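get_user branches on email before falling back to the same id-or-name mapping used elsewhere in this section. A self-contained sketch of that dispatch (the module-level dicts are illustrative stand-ins for the client's user registry):

USERS = {"u-1": "alice", "alice": "alice"}
BY_EMAIL = {"alice@example.com": "alice"}

def get_user(id=None, name=None, email=None):
    # Mirrors the snippet: an email triggers a dedicated lookup,
    # anything else goes through the id-or-name mapping.
    if email:
        return BY_EMAIL[email]
    return USERS[id or name]

assert get_user(email="alice@example.com") == get_user(id="u-1") == get_user(name="alice")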
Send invitation to email with a list of roles
:param email:
:param roles: None or "ALL" or list of role_names
:return:
|
def invite(self, email, roles=None):
"""
Send invitation to email with a list of roles
:param email:
:param roles: None or "ALL" or list of role_names
:return:
"""
if roles is None:
role_ids = [self.roles['Guest'].roleId]
elif roles == "ALL":
role_ids = list([i.id for i in self.roles])
else:
if "Guest" not in roles:
roles.append('Guest')
role_ids = list([i.id for i in self.roles if i.name in roles])
self._router.invite_user(data=json.dumps({
"organizationId": self.organizationId,
"email": email,
"roles": role_ids}))
|
[
"def inviteByEmail(self,\n emails,\n subject,\n text,\n html,\n role=\"org_user\",\n mustApprove=True,\n expiration=1440):\n \"\"\"Invites a user or users to a site.\n\n Inputs:\n emails - comma seperated list of emails\n subject - title of email\n text - email text\n html - email text in html\n role - site role (can't be administrator)\n mustApprove - verifies if user that is join must be approved by\n an administrator\n expiration - time in seconds. Default is 1 day 1440\n \"\"\"\n url = self.root + \"/inviteByEmail\"\n params = {\n \"f\" : \"json\",\n \"emails\": emails,\n \"subject\": subject,\n \"text\": text,\n \"html\" : html,\n \"role\" : role,\n \"mustApprove\": mustApprove,\n \"expiration\" : expiration\n }\n return self._post(url=url, param_dict=params,\n securityHandler=self._securityHandler,\n proxy_url=self._proxy_url,\n proxy_port=self._proxy_port)",
"def invite_user(self, email, role):\n \"\"\"\n Send an invitation to email with a link to join your team\n :param email: Email to add to your team\n :param role: Can be admin or member\n \"\"\"\n parameters = {\n 'email': email,\n 'role': role\n }\n\n connection = Connection(self.token)\n connection.set_url(self.production, self.TEAM_USERS_URL)\n connection.add_params(parameters)\n\n return connection.post_request()",
"def invite_by_email(self, email, user, organization, **kwargs):\n # type: (Text, AbstractUser, AbstractBaseOrganization) -> OrganizationInvitationBase\n \"\"\"\n Primary interface method by which one user invites another to join\n\n Args:\n email:\n request:\n **kwargs:\n\n Returns:\n an invitation instance\n\n Raises:\n MultipleObjectsReturned if multiple matching users are found\n\n \"\"\"\n try:\n invitee = self.user_model.objects.get(email__iexact=email)\n except self.user_model.DoesNotExist:\n invitee = None\n\n # TODO allow sending just the OrganizationUser instance\n user_invitation = self.invitation_model.objects.create(\n invitee=invitee,\n invitee_identifier=email.lower(),\n invited_by=user,\n organization=organization,\n )\n self.send_invitation(user_invitation)\n return user_invitation",
"def send_email(recipients: list, subject: str, text: str, html: str='', sender: str='', files: list=[], exceptions: bool=False):\n \"\"\"\n :param recipients: List of recipients; or single email (str); or comma-separated email list (str); or list of name-email pairs (e.g. settings.ADMINS)\n :param subject: Subject of the email\n :param text: Body (text)\n :param html: Body (html)\n :param sender: Sender email, or settings.DEFAULT_FROM_EMAIL if missing\n :param files: Paths to files to attach\n :param exceptions: Raise exception if email sending fails\n :return: Status code 202 if all emails were sent successfully, error status code otherwise\n \"\"\"\n import sendgrid\n from sendgrid.helpers.mail import Content, Mail, Attachment\n from django.conf import settings\n from base64 import b64encode\n from os.path import basename\n from django.utils.timezone import now\n from jutil.logs import log_event\n\n try:\n # default sender to settings.DEFAULT_FROM_EMAIL\n if not sender:\n sender = settings.DEFAULT_FROM_EMAIL\n\n # support multiple recipient list styles\n if isinstance(recipients, str): # allow single email and comma-separated list as input\n recipients = [str(r).strip() for r in recipients.split(',')]\n\n sg = sendgrid.SendGridAPIClient(apikey=settings.EMAIL_SENDGRID_API_KEY)\n from_email = sendgrid.Email(sender or settings.DEFAULT_FROM_EMAIL)\n content = Content('text/plain', text) if not html else Content('text/html', html)\n\n attachments = []\n for filename in files:\n with open(filename, 'rb') as fp:\n attachment = Attachment()\n attachment.content = b64encode(fp.read()).decode()\n attachment.type = \"application/octet-stream\"\n attachment.filename = basename(filename)\n attachment.content_id = basename(filename)\n attachment.disposition = \"attachment\"\n attachments.append(attachment)\n except Exception as e:\n logger.error(e)\n if exceptions:\n raise\n return -1\n\n status_codes = []\n for recipient in recipients:\n try:\n t = now()\n\n to_email = sendgrid.Email()\n if isinstance(recipient, str):\n to_email.email = recipient\n elif (isinstance(recipient, list) or isinstance(recipient, tuple)) and len(recipient) == 2:\n to_email.name = recipient[0]\n to_email.email = recipient[1]\n else:\n raise Exception('Invalid recipient format: {}'.format(recipient))\n\n mail = Mail(from_email=from_email, subject=subject, to_email=to_email, content=content)\n for attachment in attachments:\n mail.add_attachment(attachment)\n res = sg.client.mail.send.post(request_body=mail.get())\n\n send_dt = (now()-t).total_seconds()\n if res.status_code == 202:\n log_event('EMAIL_SENT', data={'time': send_dt, 'to': recipient, 'subject': subject, 'status': res.status_code})\n else:\n log_event('EMAIL_ERROR', data={'time': send_dt, 'to': recipient, 'subject': subject, 'status': res.status_code, 'body': res.body})\n\n status_codes.append(res.status_code)\n except Exception as e:\n logger.error(e)\n if exceptions:\n raise\n status_codes.append(-1)\n\n for status in status_codes:\n if status != 202:\n return status\n return 202",
"def invite_by_email(self, email, sender=None, request=None, **kwargs):\n \"\"\"Creates an inactive user with the information we know and then sends\n an invitation email for that user to complete registration.\n\n If your project uses email in a different way then you should make to\n extend this method as it only checks the `email` attribute for Users.\n \"\"\"\n try:\n user = self.user_model.objects.get(email=email)\n except self.user_model.DoesNotExist:\n # TODO break out user creation process\n if \"username\" in inspect.getargspec(\n self.user_model.objects.create_user\n ).args:\n user = self.user_model.objects.create(\n username=self.get_username(),\n email=email,\n password=self.user_model.objects.make_random_password(),\n )\n else:\n user = self.user_model.objects.create(\n email=email, password=self.user_model.objects.make_random_password()\n )\n user.is_active = False\n user.save()\n self.send_invitation(user, sender, **kwargs)\n return user",
"public function inviteNewUser($email, $roles)\n {\n $user = static::firstOrCreate(compact('email'));\n $user->assignRole($roles);\n\n /** @var Invitation $invites */\n $invites = app(Invitation::class);\n $invites->generateToken($user->id);\n\n Detail::firstOrCreate([\n 'user_id' => $user->id,\n 'display_name' => $email,\n ]);\n\n return $user;\n }",
"public function sendNewUserActivation($email, $roles)\n {\n $user = static::firstOrCreate(compact('email'));\n $user->assignRole($roles);\n\n /** @var Activation $invites */\n $invites = app(Activation::class);\n $invites->generateToken($user->id);\n\n Detail::firstOrCreate([\n 'user_id' => $user->id,\n 'display_name' => $email,\n ]);\n\n return $user;\n }",
"def invite(self, users, role, expiration=1440):\n \"\"\"\n A group administrator can invite users to join their group using\n the Invite to Group operation. This creates a new user invitation,\n which the users accept or decline. The role of the user and the\n invitation expiration date can be set in the invitation.\n A notification is created for the user indicating that they were\n invited to join the group. Available only to authenticated users.\n\n Inputs:\n users - A comma separated list of usernames to be invited to the\n group. If a user is already a member of the group or an\n invitation has already been sent, the call still returns\n a success.\n Example: users=regularusername1,regularusername2\n role\t- Allows administrators to set the user's role in the group\n Roles are:\n group_member: Ability to view and share items with\n group.\n group_admin: In addition to viewing and sharing items,\n the group_admin has the same capabilities\n as the group owner-invite users to the\n group, accept or decline group\n applications, delete content, and remove\n users.\n expiration - Expiration date on the invitation can be set for\n one day, three days, one week, or two weeks, in\n minutes. Default is 1440\n \"\"\"\n params = {\n \"f\" : \"json\",\n \"users\" : users,\n \"role\" : role,\n \"expiration\" : expiration\n }\n return self._post(url=self._url + \"/invite\",\n securityHandler=self._securityHandler,\n param_dict=params,\n proxy_url=self._proxy_url,\n proxy_port=self._proxy_port)",
"def invite_others_to_group(self, group_id, invitees):\r\n \"\"\"\r\n Invite others to a group.\r\n\r\n Sends an invitation to all supplied email addresses which will allow the\r\n receivers to join the group.\r\n \"\"\"\r\n path = {}\r\n data = {}\r\n params = {}\r\n\r\n # REQUIRED - PATH - group_id\r\n \"\"\"ID\"\"\"\r\n path[\"group_id\"] = group_id\r\n\r\n # REQUIRED - invitees\r\n \"\"\"An array of email addresses to be sent invitations.\"\"\"\r\n data[\"invitees\"] = invitees\r\n\r\n self.logger.debug(\"POST /api/v1/groups/{group_id}/invite with query params: {params} and form data: {data}\".format(params=params, data=data, **path))\r\n return self.generic_request(\"POST\", \"/api/v1/groups/{group_id}/invite\".format(**path), data=data, params=params, no_data=True)",
"def invite_user(self, user=github.GithubObject.NotSet, email=github.GithubObject.NotSet, role=github.GithubObject.NotSet, teams=github.GithubObject.NotSet):\n \"\"\"\n :calls: `POST /orgs/:org/invitations <http://developer.github.com/v3/orgs/members>`_\n :param user: :class:`github.NamedUser.NamedUser`\n :param email: string\n :param role: string\n :param teams: array of :class:`github.Team.Team`\n :rtype: None\n \"\"\"\n assert user is github.GithubObject.NotSet or isinstance(user, github.NamedUser.NamedUser), user\n assert email is github.GithubObject.NotSet or isinstance(email, (str, unicode)), email\n assert (email is github.GithubObject.NotSet) ^ (user is github.GithubObject.NotSet), \"specify only one of email or user\"\n parameters = {}\n if user is not github.GithubObject.NotSet:\n parameters[\"invitee_id\"] = user.id\n elif email is not github.GithubObject.NotSet:\n parameters[\"email\"] = email\n if role is not github.GithubObject.NotSet:\n assert isinstance(role, (str, unicode)), role\n assert role in ['admin', 'direct_member', 'billing_manager']\n parameters[\"role\"] = role\n if teams is not github.GithubObject.NotSet:\n assert all(isinstance(team, github.Team.Team) for team in teams)\n parameters[\"team_ids\"] = [t.id for t in teams]\n headers, data = self._requester.requestJsonAndCheck(\n \"POST\",\n self.url + \"/invitations\",\n headers={'Accept': Consts.mediaTypeOrganizationInvitationPreview},\n input=parameters\n )",
"def send_invite(self, **kwargs):\n \"\"\"\n Invite new subaccount.\n Returns True if success.\n\n :Example:\n\n s = client.subaccounts.create(email=\"johndoe@yahoo.com\", role=\"A\")\n\n :param str email: Subaccount email. Required.\n :param str role: Subaccount role: `A` for administrator or `U` for regular user. Required.\n \"\"\"\n resp, _ = self.request(\"POST\", self.uri, data=kwargs)\n return resp.status == 204",
"def invite(name, emails, perm):\n \"\"\"Invite other users to access the virtual folder.\n\n \\b\n NAME: Name of a virtual folder.\n EMAIL: Emails to invite.\n \"\"\"\n with Session() as session:\n try:\n assert perm in ['rw', 'ro'], \\\n 'Invalid permission: {}'.format(perm)\n result = session.VFolder(name).invite(perm, emails)\n invited_ids = result.get('invited_ids', [])\n if len(invited_ids) > 0:\n print('Invitation sent to:')\n for invitee in invited_ids:\n print('\\t- ' + invitee)\n else:\n print('No users found. Invitation was not sent.')\n except Exception as e:\n print_error(e)\n sys.exit(1)"
] |
[
0.7545664310455322,
0.746834397315979,
0.7035861015319824,
0.7022272348403931,
0.6953301429748535,
0.6830317378044128,
0.6827439069747925,
0.6780673265457153,
0.6774584054946899,
0.6759794354438782,
0.6752433776855469,
0.6739144921302795
] |
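The invite snippets above all reduce to a single HTTP POST carrying the invitees plus role and expiry metadata. A minimal sketch of that shared pattern, assuming a hypothetical base_url and reusing the parameter names from the first snippet (everything else is illustrative, not any one library's API):

import requests

# Hedged sketch of the group-invite pattern shared by the snippets above.
# The endpoint path and parameter names mirror the first snippet; the
# base_url and JSON return shape are assumptions for illustration only.
def invite_to_group(base_url, users, role='group_member', expiration=1440):
    params = {'f': 'json', 'users': users, 'role': role,
              'expiration': expiration}
    resp = requests.post(base_url + '/invite', data=params)
    resp.raise_for_status()
    return resp.json()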
Mimics wizard's environment preparation
|
def init(self, access_key=None, secret_key=None):
"""
Mimics wizard's environment preparation
"""
if not access_key and not secret_key:
self._router.post_init(org_id=self.organizationId, data='{"initCloudAccount": true}')
else:
self._router.post_init(org_id=self.organizationId, data='{}')
ca_data = dict(accessKey=access_key, secretKey=secret_key)
self._router.post_init_custom_cloud_account(org_id=self.organizationId, data=json.dumps(ca_data))
|
[
"def prepare_env(org):\n \"\"\" Example shows how to configure environment from scratch \"\"\"\n\n # Add services\n key_service = org.service(type='builtin:cobalt_secure_store', name='Keystore')\n wf_service = org.service(type='builtin:workflow_service', name='Workflow', parameters='{}')\n\n # Add services to environment\n env = org.environment(name='default')\n env.clean()\n env.add_service(key_service)\n env.add_service(wf_service)\n env.add_policy(\n {\"action\": \"provisionVms\",\n \"parameter\": \"publicKeyId\",\n \"value\": key_service.regenerate()['id']})\n\n # Add cloud provider account\n access = {\n \"provider\": \"aws-ec2\",\n \"usedEnvironments\": [],\n \"ec2SecurityGroup\": \"default\",\n \"providerCopy\": \"aws-ec2\",\n \"name\": \"test-provider\",\n \"jcloudsIdentity\": KEY,\n \"jcloudsCredential\": SECRET_KEY,\n \"jcloudsRegions\": \"us-east-1\"\n }\n prov = org.provider(access)\n env.add_provider(prov)\n return org.organizationId",
"def prepare_env(app, env, docname):\n \"\"\"\n Prepares the sphinx environment to store sphinx-needs internal data.\n \"\"\"\n if not hasattr(env, 'needs_all_needs'):\n # Used to store all needed information about all needs in document\n env.needs_all_needs = {}\n\n if not hasattr(env, 'needs_functions'):\n # Used to store all registered functions for supporting dynamic need values.\n env.needs_functions = {}\n\n # needs_functions = getattr(app.config, 'needs_functions', [])\n needs_functions = app.needs_functions\n if needs_functions is None:\n needs_functions = []\n if not isinstance(needs_functions, list):\n raise SphinxError('Config parameter needs_functions must be a list!')\n\n # Register built-in functions\n for need_common_func in needs_common_functions:\n register_func(env, need_common_func)\n\n # Register functions configured by user\n for needs_func in needs_functions:\n register_func(env, needs_func)\n\n app.config.needs_hide_options += ['hidden']\n app.config.needs_extra_options['hidden'] = directives.unchanged\n\n if not hasattr(env, 'needs_workflow'):\n # Used to store workflow status information for already executed tasks.\n # Some tasks like backlink_creation need be be performed only once.\n # But most sphinx-events get called several times (for each single document file), which would also\n # execute our code several times...\n env.needs_workflow = {\n 'backlink_creation': False,\n 'dynamic_values_resolved': False\n }",
"function wizardSetup (program) {\n const welcome = multiline.stripIndent(function () { /*\n Welcome to the Electron-Windows-Store tool!\n\n This tool will assist you with turning your Electron app into\n a swanky Windows Store app.\n\n We need to know some settings. We will ask you only once and store\n your answers in your profile folder in a .electron-windows-store\n file.\n\n */\n })\n const complete = multiline.stripIndent(function () { /*\n\n Setup complete, moving on to package your app!\n\n */\n })\n\n let questions = [\n {\n name: 'desktopConverter',\n type: 'input',\n message: 'Please enter the path to your Desktop App Converter (DesktopAppConverter.ps1): ',\n validate: (input) => pathExists.sync(input),\n when: () => (!program.desktopConverter)\n },\n {\n name: 'expandedBaseImage',\n type: 'input',\n message: 'Please enter the path to your Expanded Base Image: ',\n default: 'C:\\\\ProgramData\\\\Microsoft\\\\Windows\\\\Images\\\\BaseImage-14316\\\\',\n validate: (input) => pathExists.sync(input),\n when: () => (!program.expandedBaseImage)\n },\n {\n name: 'devCert',\n type: 'input',\n message: 'Please enter the path to your development PFX certficate: ',\n default: null,\n when: () => (!dotfile.get().makeCertificate || !program.devCert)\n },\n {\n name: 'publisher',\n type: 'input',\n message: 'Please enter your publisher identity: ',\n default: 'CN=developmentca',\n when: () => (!program.publisher)\n },\n {\n name: 'windowsKit',\n type: 'input',\n message: \"Please enter the location of your Windows Kit's bin folder: \",\n default: utils.getDefaultWindowsKitLocation(),\n when: () => (!program.windowsKit)\n }\n ]\n\n if (!program.isModuleUse) {\n utils.log(welcome)\n }\n\n // Remove the Desktop Converter Questions if not installed\n if (program.didInstallDesktopAppConverter === false) {\n questions = questions.slice(3)\n }\n\n if (program.isModuleUse) {\n program.windowsKit = program.windowsKit || utils.getDefaultWindowsKitLocation()\n\n return Promise.resolve(program)\n }\n\n return inquirer.prompt(questions)\n .then((answers) => {\n dotfile.set({\n desktopConverter: answers.desktopConverter || false,\n expandedBaseImage: answers.expandedBaseImage || false,\n devCert: answers.devCert,\n publisher: answers.publisher,\n windowsKit: answers.windowsKit,\n makeCertificate: dotfile.get().makeCertificate\n })\n\n program.desktopConverter = answers.desktopConverter\n program.expandedBaseImage = answers.expandedBaseImage\n program.devCert = answers.devCert\n program.publisher = answers.publisher\n program.windowsKit = answers.windowsKit\n\n if (program.makeCertificate) {\n utils.log(chalk.bold.green('Creating Certficate'))\n let publisher = dotfile.get().publisher.split('=')[1]\n let certFolder = path.join(process.env.APPDATA, 'electron-windows-store', publisher)\n\n return sign.makeCert({ publisherName: publisher, certFilePath: certFolder, program: program })\n .then(pfxFile => {\n utils.log('Created and installed certificate:')\n utils.log(pfxFile)\n dotfile.set({ devCert: pfxFile })\n })\n }\n\n utils.log(complete)\n })\n}",
"def prepare_environment(work_dir):\n \"\"\"\n Performs a few maintenance tasks before the Honeypot is run. Copies the data directory,\n and the config file to the cwd. The config file copied here is overwritten if\n the __init__ method is called with a configuration URL.\n\n :param work_dir: The directory to copy files to.\n \"\"\"\n package_directory = os.path.dirname(os.path.abspath(beeswarm.__file__))\n\n logger.info('Copying data files to workdir.')\n shutil.copytree(os.path.join(package_directory, 'drones/honeypot/data'), os.path.join(work_dir, 'data/'),\n ignore=Honeypot._ignore_copy_files)",
"def prepare_env(self):\n \"\"\"\n Manages reading environment metadata files under ``private_data_dir`` and merging/updating\n with existing values so the :py:class:`ansible_runner.runner.Runner` object can read and use them easily\n \"\"\"\n try:\n passwords = self.loader.load_file('env/passwords', Mapping)\n self.expect_passwords = {\n re.compile(pattern, re.M): password\n for pattern, password in iteritems(passwords)\n }\n except ConfigurationError:\n output.debug('Not loading passwords')\n self.expect_passwords = dict()\n self.expect_passwords[pexpect.TIMEOUT] = None\n self.expect_passwords[pexpect.EOF] = None\n\n try:\n # seed env with existing shell env\n self.env = os.environ.copy()\n envvars = self.loader.load_file('env/envvars', Mapping)\n if envvars:\n self.env.update({k:six.text_type(v) for k, v in envvars.items()})\n if self.envvars and isinstance(self.envvars, dict):\n self.env.update({k:six.text_type(v) for k, v in self.envvars.items()})\n except ConfigurationError:\n output.debug(\"Not loading environment vars\")\n # Still need to pass default environment to pexpect\n self.env = os.environ.copy()\n\n try:\n self.settings = self.loader.load_file('env/settings', Mapping)\n except ConfigurationError:\n output.debug(\"Not loading settings\")\n self.settings = dict()\n\n try:\n self.ssh_key_data = self.loader.load_file('env/ssh_key', string_types)\n except ConfigurationError:\n output.debug(\"Not loading ssh key\")\n self.ssh_key_data = None\n\n self.idle_timeout = self.settings.get('idle_timeout', None)\n self.job_timeout = self.settings.get('job_timeout', None)\n self.pexpect_timeout = self.settings.get('pexpect_timeout', 5)\n\n self.process_isolation = self.settings.get('process_isolation', self.process_isolation)\n self.process_isolation_executable = self.settings.get('process_isolation_executable', self.process_isolation_executable)\n self.process_isolation_path = self.settings.get('process_isolation_path', self.process_isolation_path)\n self.process_isolation_hide_paths = self.settings.get('process_isolation_hide_paths', self.process_isolation_hide_paths)\n self.process_isolation_show_paths = self.settings.get('process_isolation_show_paths', self.process_isolation_show_paths)\n self.process_isolation_ro_paths = self.settings.get('process_isolation_ro_paths', self.process_isolation_ro_paths)\n\n self.pexpect_use_poll = self.settings.get('pexpect_use_poll', True)\n self.suppress_ansible_output = self.settings.get('suppress_ansible_output', self.quiet)\n self.directory_isolation_cleanup = bool(self.settings.get('directory_isolation_cleanup', True))\n\n if 'AD_HOC_COMMAND_ID' in self.env or not os.path.exists(self.project_dir):\n self.cwd = self.private_data_dir\n else:\n if self.directory_isolation_path is not None:\n self.cwd = self.directory_isolation_path\n else:\n self.cwd = self.project_dir\n\n if 'fact_cache' in self.settings:\n if 'fact_cache_type' in self.settings:\n if self.settings['fact_cache_type'] == 'jsonfile':\n self.fact_cache = os.path.join(self.artifact_dir, self.settings['fact_cache'])\n else:\n self.fact_cache = os.path.join(self.artifact_dir, self.settings['fact_cache'])",
"def act(environment)\n environment.objects.each do |object|\n if (object.location.x < @left) then\n object.location.x = @left\n object.vector.pitch = Utility.find_reflection_angle(90, object.vector.pitch)\n elsif (object.location.x > @right) then\n object.location.x = @right\n object.vector.pitch = Utility.find_reflection_angle(270, object.vector.pitch)\n end\n if (object.location.y > @top) then\n object.location.y = @top\n object.vector.pitch = Utility.find_reflection_angle(0, object.vector.pitch)\n elsif (object.location.y < @bottom) then\n object.location.y = @bottom\n object.vector.pitch = Utility.find_reflection_angle(180, object.vector.pitch)\n end\n end\n end",
"def _prepare_env(self): # pragma: no cover\n \"\"\"Setup the document's environment, if necessary.\"\"\"\n env = self.state.document.settings.env\n if not hasattr(env, self.directive_name):\n # Track places where we use this directive, so we can check for\n # outdated documents in the future.\n state = DirectiveState()\n setattr(env, self.directive_name, state)\n else:\n state = getattr(env, self.directive_name)\n return env, state",
"def prepare_context(env)\n context = Roger::Template::TemplateContext.new(self, env)\n\n # Extend context with all helpers\n self.class.helpers.each do |mod|\n context.extend(mod)\n end\n\n context\n end",
"private static List<String> prepareBashCommand(List<String> cmd, Map<String, String> childEnv) {\n if (childEnv.isEmpty()) {\n return cmd;\n }\n\n List<String> newCmd = new ArrayList<>();\n newCmd.add(\"env\");\n\n for (Map.Entry<String, String> e : childEnv.entrySet()) {\n newCmd.add(String.format(\"%s=%s\", e.getKey(), e.getValue()));\n }\n newCmd.addAll(cmd);\n return newCmd;\n }",
"def cat(*wizards):\n \"\"\"A higher-order wizard which is the concatenation of a number of other\n wizards.\n\n The resulting data is the union of all wizard outputs.\n \"\"\"\n data = {}\n\n for wizard in wizards:\n try:\n response = None\n while True:\n response = yield wizard.send(response)\n except Success as s:\n data.update(s.data)\n\n raise Success(data)",
"function applyEnvironmentVariables(opts) {\n // if we have a custom CouchDB url\n if (typeof process.env.COUCH_URL !== 'undefined') {\n opts.url = process.env.COUCH_URL;\n }\n\n // if we have a specified databases\n if (typeof process.env.COUCH_DATABASE !== 'undefined') {\n opts.db = process.env.COUCH_DATABASE;\n }\n\n // if we have a specified buffer size\n if (typeof process.env.COUCH_BUFFER_SIZE !== 'undefined') {\n opts.bufferSize = parseInt(process.env.COUCH_BUFFER_SIZE);\n }\n\n // if we have a specified parallelism\n if (typeof process.env.COUCH_PARALLELISM !== 'undefined') {\n opts.parallelism = parseInt(process.env.COUCH_PARALLELISM);\n }\n\n // if we have a specified request timeout\n if (typeof process.env.COUCH_REQUEST_TIMEOUT !== 'undefined') {\n opts.requestTimeout = parseInt(process.env.COUCH_REQUEST_TIMEOUT);\n }\n\n // if we have a specified log file\n if (typeof process.env.COUCH_LOG !== 'undefined') {\n opts.log = path.normalize(process.env.COUCH_LOG);\n }\n\n // if we are instructed to resume\n if (typeof process.env.COUCH_RESUME !== 'undefined' && process.env.COUCH_RESUME === 'true') {\n opts.resume = true;\n }\n\n // if we are given an output filename\n if (typeof process.env.COUCH_OUTPUT !== 'undefined') {\n opts.output = path.normalize(process.env.COUCH_OUTPUT);\n }\n\n // if we only want a shallow copy\n if (typeof process.env.COUCH_MODE !== 'undefined' && process.env.COUCH_MODE === 'shallow') {\n opts.mode = 'shallow';\n }\n\n // if we have a specified API key\n if (typeof process.env.CLOUDANT_IAM_API_KEY !== 'undefined') {\n opts.iamApiKey = process.env.CLOUDANT_IAM_API_KEY;\n }\n\n // if we have a specified IAM token endpoint\n if (typeof process.env.CLOUDANT_IAM_TOKEN_URL !== 'undefined') {\n opts.iamTokenUrl = process.env.CLOUDANT_IAM_TOKEN_URL;\n }\n}",
"private void setupWizard() {\n\t\tsetupComponents();\n\t\tlayoutComponents();\n\n\t\tsetMinimumSize(defaultminimumSize);\n\n\t\t// Center on screen\n\t\tDimension screenSize = Toolkit.getDefaultToolkit().getScreenSize();\n\t\tint xPosition = (screenSize.width / 2) - (defaultminimumSize.width / 2);\n\t\tint yPosition = (screenSize.height / 2) - (defaultminimumSize.height / 2);\n\t\tsetLocation(xPosition, yPosition);\n\n\t\tsetDefaultCloseOperation(JFrame.DISPOSE_ON_CLOSE);\n\t}"
] |
[
0.6536383628845215,
0.6476923823356628,
0.6427945494651794,
0.6405804753303528,
0.6359236240386963,
0.6320371031761169,
0.6313691139221191,
0.6308788061141968,
0.6306089162826538,
0.6291554570198059,
0.6277564764022827,
0.6275478601455688
] |
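A minimal sketch of the two payload shapes produced by the branching in the init method above; the router method names come from the snippet itself, while the helper and its return format are purely illustrative:

import json

# Illustrative reconstruction of init's two branches: with no credentials
# the platform bootstraps its own cloud account, otherwise a custom cloud
# account is registered in a second call.
def build_init_calls(access_key=None, secret_key=None):
    if not access_key and not secret_key:
        return [('post_init', '{"initCloudAccount": true}')]
    return [
        ('post_init', '{}'),
        ('post_init_custom_cloud_account',
         json.dumps({'accessKey': access_key, 'secretKey': secret_key})),
    ]

assert build_init_calls() == [('post_init', '{"initCloudAccount": true}')]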
Parses meta and updates or creates each application
:param str metadata: path or url to meta.yml
:param list[str] exclude: List of application names to exclude from the meta.
    This might be needed when you use the meta as a list of dependencies
|
def set_applications_from_meta(self, metadata, exclude=None):
"""
        Parses meta and updates or creates each application
        :param str metadata: path or url to meta.yml
        :param list[str] exclude: List of application names to exclude from the meta.
               This might be needed when you use the meta as a list of dependencies
"""
if not exclude:
exclude = []
if metadata.startswith('http'):
meta = yaml.safe_load(requests.get(url=metadata).content)
else:
        with open(metadata) as meta_file:
            meta = yaml.safe_load(meta_file)
applications = []
for app in meta['kit']['applications']:
if app['name'] not in exclude:
applications.append({
'name': app['name'],
'url': app['manifest']})
self.restore({'applications': applications})
|
[
"def update_application_metadata(template, application_id, sar_client=None):\n \"\"\"\n Update the application metadata.\n\n :param template: Content of a packaged YAML or JSON SAM template\n :type template: str_or_dict\n :param application_id: The Amazon Resource Name (ARN) of the application\n :type application_id: str\n :param sar_client: The boto3 client used to access SAR\n :type sar_client: boto3.client\n :raises ValueError\n \"\"\"\n if not template or not application_id:\n raise ValueError('Require SAM template and application ID to update application metadata')\n\n if not sar_client:\n sar_client = boto3.client('serverlessrepo')\n\n template_dict = _get_template_dict(template)\n app_metadata = get_app_metadata(template_dict)\n request = _update_application_request(app_metadata, application_id)\n sar_client.update_application(**request)",
"def process_metadata(meta):\n \"\"\"\n Merge metadata of run on multiple grid districts\n\n Parameters\n ----------\n meta: list of dict\n Metadata of run of each MV grid district\n\n Returns\n -------\n dict\n Single metadata dict including merge metadata\n \"\"\"\n mvgds = []\n\n metadata = meta[0]\n\n for mvgd in meta:\n if isinstance(mvgd['mv_grid_districts'], list):\n mvgds.extend(mvgd['mv_grid_districts'])\n else:\n mvgds.append(mvgd['mv_grid_districts'])\n\n metadata['mv_grid_districts'] = mvgds\n\n return metadata",
"def get_apps(exclude=(), append=(), current={'apps': INSTALLED_APPS}):\n \"\"\"\n Returns INSTALLED_APPS without the apps listed in exclude and with the apps\n listed in append.\n\n The use of a mutable dict is intentional, in order to preserve the state of\n the INSTALLED_APPS tuple across multiple settings files.\n \"\"\"\n\n current['apps'] = tuple(\n [a for a in current['apps'] if a not in exclude]\n ) + tuple(append)\n return current['apps']",
"def projects_from_metadata(metadata):\n \"\"\"Extract the project dependencies from a metadata spec.\"\"\"\n projects = []\n for data in metadata:\n meta = distlib.metadata.Metadata(fileobj=io.StringIO(data))\n projects.extend(pypi.just_name(project) for project in meta.run_requires)\n return frozenset(map(packaging.utils.canonicalize_name, projects))",
"def exclude(self, d, item):\n \"\"\" check metadata for excluded items \"\"\"\n try:\n md = d.__metadata__\n pmd = getattr(md, '__print__', None)\n if pmd is None:\n return False\n excludes = getattr(pmd, 'excludes', [])\n return ( item[0] in excludes ) \n except:\n pass\n return False",
"def apply_metadata_filters(user_filter, default_filter, actual_keys):\n \"\"\"Apply the filter and replace 'all' with the actual or filtered keys\"\"\"\n\n default_filter = metadata_filter_as_dict(default_filter) or {}\n user_filter = metadata_filter_as_dict(user_filter) or {}\n\n for key in ['additional', 'excluded']:\n default_filter.setdefault(key, [])\n user_filter.setdefault(key, [])\n\n if user_filter.get('excluded') == 'all':\n default_filter['additional'] = []\n if user_filter.get('additional') == 'all':\n default_filter['excluded'] = []\n\n # notebook default filter = only few metadata\n if default_filter.get('additional'):\n if user_filter.get('additional') == 'all':\n return actual_keys.difference(user_filter.get('excluded'))\n\n return (actual_keys\n .intersection(set(user_filter.get('additional')).union(default_filter.get('additional')))\n .difference(user_filter.get('excluded')))\n\n # cell default filter = all metadata but removed ones\n if user_filter.get('excluded') == 'all':\n return actual_keys.intersection(user_filter.get('additional'))\n\n return (actual_keys.difference(\n set(user_filter.get('excluded')).union(set(default_filter.get('excluded'))\n .difference(user_filter.get('additional')))))",
"def get_manifest_from_meta(metaurl, name):\n \"\"\"\n Extact manifest url from metadata url\n :param metaurl: Url to metadata\n :param name: Name of application to extract\n :return:\n \"\"\"\n if 'http' in metaurl:\n kit = yaml.safe_load(requests.get(url=metaurl).content)['kit']['applications']\n else:\n kit = yaml.safe_load(open(metaurl).read())['kit']['applications']\n app_urls = [x['manifest'] for x in kit if x['name'] == name]\n assert len(app_urls) == 1\n return app_urls[0]",
"def strip_app_metadata(template_dict):\n \"\"\"\n Strip the \"AWS::ServerlessRepo::Application\" metadata section from template.\n\n :param template_dict: SAM template as a dictionary\n :type template_dict: dict\n :return: stripped template content\n :rtype: str\n \"\"\"\n if SERVERLESS_REPO_APPLICATION not in template_dict.get(METADATA, {}):\n return template_dict\n\n template_dict_copy = copy.deepcopy(template_dict)\n\n # strip the whole metadata section if SERVERLESS_REPO_APPLICATION is the only key in it\n if not [k for k in template_dict_copy.get(METADATA) if k != SERVERLESS_REPO_APPLICATION]:\n template_dict_copy.pop(METADATA, None)\n else:\n template_dict_copy.get(METADATA).pop(SERVERLESS_REPO_APPLICATION, None)\n\n return template_dict_copy",
"def modify_meta(uid, data_dic, extinfo=None):\n '''\n update meta of the rec.\n '''\n if extinfo is None:\n extinfo = {}\n title = data_dic['title'].strip()\n if len(title) < 2:\n return False\n\n cur_info = MPost.get_by_uid(uid)\n if cur_info:\n # ToDo: should not do this. Not for 's'\n if DB_CFG['kind'] == 's':\n entry = TabPost.update(\n title=title,\n user_name=data_dic['user_name'],\n keywords='',\n time_update=tools.timestamp(),\n date=datetime.now(),\n cnt_md=data_dic['cnt_md'],\n memo=data_dic['memo'] if 'memo' in data_dic else '',\n logo=data_dic['logo'],\n order=data_dic['order'],\n cnt_html=tools.markdown2html(data_dic['cnt_md']),\n valid=data_dic['valid']\n ).where(TabPost.uid == uid)\n entry.execute()\n else:\n cur_extinfo = cur_info.extinfo\n # Update the extinfo, Not replace\n for key in extinfo:\n cur_extinfo[key] = extinfo[key]\n\n entry = TabPost.update(\n title=title,\n user_name=data_dic['user_name'],\n keywords='',\n time_update=tools.timestamp(),\n date=datetime.now(),\n cnt_md=data_dic['cnt_md'],\n memo=data_dic['memo'] if 'memo' in data_dic else '',\n logo=data_dic['logo'],\n order=data_dic['order'] if 'order' in data_dic else '',\n cnt_html=tools.markdown2html(data_dic['cnt_md']),\n extinfo=cur_extinfo,\n valid=data_dic['valid']\n ).where(TabPost.uid == uid)\n entry.execute()\n else:\n return MPost.add_meta(uid, data_dic, extinfo)\n return uid",
"def update_meta_data(meta=None):\n \"\"\"\n Modify the metadata dictionary.\n DATE, PROGRAM, and PROGVER are added/modified.\n\n Parameters\n ----------\n meta : dict\n The dictionary to be modified, default = None (empty)\n\n Returns\n -------\n An updated dictionary.\n \"\"\"\n if meta is None:\n meta = {}\n if 'DATE' not in meta:\n meta['DATE'] = strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n if 'PROGRAM' not in meta:\n meta['PROGRAM'] = \"AegeanTools.catalogs\"\n meta['PROGVER'] = \"{0}-({1})\".format(__version__, __date__)\n return meta",
"def add_excludes(self, excludes):\n # type: (_BaseSourcePaths, list) -> None\n \"\"\"Add a list of excludes\n :param _BaseSourcePaths self: this\n :param list excludes: list of excludes\n \"\"\"\n if not isinstance(excludes, list):\n if isinstance(excludes, tuple):\n excludes = list(excludes)\n else:\n excludes = [excludes]\n # remove any starting rglob spec\n excl = []\n for exc in excludes:\n tmp = pathlib.Path(exc).parts\n if tmp[0] == '**':\n if len(tmp) == 1:\n continue\n else:\n excl.append(str(pathlib.Path(*tmp[1:])))\n else:\n excl.append(exc)\n # check for any remaining rglob specs\n if any(['**' in x for x in excl]):\n raise ValueError('invalid exclude specification containing \"**\"')\n if self._exclude is None:\n self._exclude = excl\n else:\n self._exclude.extend(excl)",
"function ignoredMetas(cfgs, metas){\n\tcfgs = cfgs || {};\n\tObject.keys(metas).forEach(function(name){\n\t\tvar cfg = metas[name];\n\t\tif(cfg && cfg.bundle === false) {\n\t\t\tcfgs[name] = cfg;\n\t\t}\n\t});\n\treturn { meta: cfgs };\n}"
] |
[
0.6643194556236267,
0.6590691804885864,
0.6588473916053772,
0.6521787047386169,
0.6484628319740295,
0.6427765488624573,
0.6378247141838074,
0.634354829788208,
0.6340119242668152,
0.6330169439315796,
0.6315110921859741,
0.6309704780578613
] |
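The parser above only touches meta['kit']['applications'] and each entry's 'name' and 'manifest' keys. A hedged sketch of a matching meta.yml and the exclusion step, with made-up application names and URLs:

import yaml

# Hypothetical meta.yml shaped to match the keys the snippet reads.
META = """
kit:
  applications:
    - name: app-a
      manifest: http://example.com/app-a.yml
    - name: app-b
      manifest: http://example.com/app-b.yml
"""

meta = yaml.safe_load(META)
exclude = ['app-b']
# Same name/url mapping as set_applications_from_meta above.
applications = [{'name': app['name'], 'url': app['manifest']}
                for app in meta['kit']['applications']
                if app['name'] not in exclude]
assert applications == [{'name': 'app-a',
                         'url': 'http://example.com/app-a.yml'}]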
Mimics the "get starter kit" and wizard functionality to create components
Note: may create duplicate components; this operation is not idempotent
:type metadata: str
:type category: Category
:param metadata: url to meta.yml
:param category: category
|
def upload_applications(self, metadata, category=None):
"""
        Mimics the "get starter kit" and wizard functionality to create components
        Note: may create duplicate components; this operation is not idempotent
:type metadata: str
:type category: Category
:param metadata: url to meta.yml
:param category: category
"""
upload_json = self._router.get_upload(params=dict(metadataUrl=metadata)).json()
manifests = [dict(name=app['name'], manifest=app['url']) for app in upload_json['applications']]
if not category:
category = self.categories['Application']
data = {'categoryId': category.id, 'applications': manifests}
self._router.post_application_kits(org_id=self.organizationId, data=json.dumps(data))
|
[
"def _to_add_with_category(self, catid):\n '''\n Used for info2.\n :param catid: the uid of category\n '''\n\n catinfo = MCategory.get_by_uid(catid)\n kwd = {\n 'uid': self._gen_uid(),\n 'userid': self.userinfo.user_name if self.userinfo else '',\n 'gcat0': catid,\n 'parentname': MCategory.get_by_uid(catinfo.pid).name,\n 'catname': MCategory.get_by_uid(catid).name,\n }\n\n self.render('autogen/add/add_{0}.html'.format(catid),\n userinfo=self.userinfo,\n kwd=kwd)",
"def create_child_item_for_category(category)\n category.child_items.find_or_create_by(categorizable_type: self.categorizable.class.base_class.name, categorizable_id: self.categorizable.id, master_item_id: self.id)\n end",
"def get_starter_kit_meta(name):\n \"\"\"\n Extract metadata link for starter kit from platform configs. Starter kit available on add component - starter kit menu.\n Beware, config could be changed by deploy scripts during deploy.\n :param name: Name of starter kit\n :return: Link to metadata\n \"\"\"\n kits = yaml.safe_load(requests.get(url=starter_kits_url).content)['kits']\n kits_meta_url = [x['metaUrl'] for x in kits if x['name'] == name]\n\n assert len(kits_meta_url)==1, \"No component %s found in meta:\\n %s\" % (name, kits)\n meta = yaml.safe_load(requests.get(url=kits_meta_url[0]).content)['download_url']\n return meta",
"def new_category(blog_id, username, password, category_struct):\n \"\"\"\n wp.newCategory(blog_id, username, password, category)\n => category_id\n \"\"\"\n authenticate(username, password, 'zinnia.add_category')\n category_dict = {'title': category_struct['name'],\n 'description': category_struct['description'],\n 'slug': category_struct['slug']}\n if int(category_struct['parent_id']):\n category_dict['parent'] = Category.objects.get(\n pk=category_struct['parent_id'])\n category = Category.objects.create(**category_dict)\n\n return category.pk",
"def create_category(self, category):\n \"\"\"CreateCategory.\n [Preview API]\n :param :class:`<ExtensionCategory> <azure.devops.v5_1.gallery.models.ExtensionCategory>` category:\n :rtype: :class:`<ExtensionCategory> <azure.devops.v5_1.gallery.models.ExtensionCategory>`\n \"\"\"\n content = self._serialize.body(category, 'ExtensionCategory')\n response = self._send(http_method='POST',\n location_id='476531a3-7024-4516-a76a-ed64d3008ad6',\n version='5.1-preview.1',\n content=content)\n return self._deserialize('ExtensionCategory', response)",
"def help_center_category_create(self, data, locale=None, **kwargs):\n \"https://developer.zendesk.com/rest_api/docs/help_center/categories#create-category\"\n api_path = \"/api/v2/help_center/categories.json\"\n if locale:\n api_opt_path = \"/api/v2/help_center/{locale}/categories.json\"\n api_path = api_opt_path.format(locale=locale)\n return self.call(api_path, method=\"POST\", data=data, **kwargs)",
"public static void mixin(MetaClass self, Class categoryClass) {\n mixin(self, Collections.singletonList(categoryClass));\n }",
"def create_meta_main(create_path, config, role, categories):\n \"\"\"\n Create a meta template.\n \"\"\"\n meta_file = c.DEFAULT_META_FILE.replace(\n \"%author_name\", config[\"author_name\"])\n meta_file = meta_file.replace(\n \"%author_company\", config[\"author_company\"])\n meta_file = meta_file.replace(\"%license_type\", config[\"license_type\"])\n meta_file = meta_file.replace(\"%role_name\", role)\n\n # Normalize the category so %categories always gets replaced.\n if not categories:\n categories = \"\"\n\n meta_file = meta_file.replace(\"%categories\", categories)\n\n string_to_file(create_path, meta_file)",
"def clone(self, **kw):\n \"\"\"Copy this distribution, substituting in any changed keyword args\"\"\"\n names = 'project_name version py_version platform location precedence'\n for attr in names.split():\n kw.setdefault(attr, getattr(self, attr, None))\n kw.setdefault('metadata', self._provider)\n return self.__class__(**kw)",
"def use_categories_as_metadata(self):\n '''\n Returns a TermDocMatrix which is identical to self except the metadata values are now identical to the\n categories present.\n\n :return: TermDocMatrix\n '''\n new_metadata_factory = CSRMatrixFactory()\n for i, category_idx in enumerate(self.get_category_ids()):\n new_metadata_factory[i, category_idx] = 1\n new_metadata = new_metadata_factory.get_csr_matrix()\n new_tdm = self._make_new_term_doc_matrix(self._X,\n new_metadata,\n self._y,\n self._term_idx_store,\n self._category_idx_store,\n copy(self._category_idx_store),\n self._y == self._y)\n return new_tdm",
"def _create_orchestrated_cat(self, foreign_catalog_id, db_name, cat_name):\n \"\"\"Creates a catalog in the current service orchestrated with a foreign service Id.\"\"\"\n if (foreign_catalog_id.identifier_namespace == db_name + '.' + cat_name and\n foreign_catalog_id.authority == self._authority):\n raise errors.NotFound() # This is not a foreign catalog\n foreign_service_name = foreign_catalog_id.get_identifier_namespace().split('.')[0]\n # foreign_cat_name = inflection.underscore(foreign_catalog_id.namespace.split('.')[1])\n # catalog_name = foreign_cat_name.lower()\n catalog_name = camel_to_under(foreign_catalog_id.namespace.split('.')[1])\n manager = self._get_provider_manager(foreign_service_name.upper())\n lookup_session = getattr(manager, 'get_{0}_lookup_session'.format(catalog_name))(proxy=self._proxy)\n getattr(lookup_session, 'get_{0}'.format(catalog_name))(foreign_catalog_id) # Raises NotFound\n collection = JSONClientValidated(db_name,\n collection=cat_name,\n runtime=self._runtime)\n foreign_identifier = ObjectId(foreign_catalog_id.get_identifier())\n default_text = 'Orchestrated ' + foreign_service_name\n catalog_map = make_catalog_map(cat_name, identifier=foreign_identifier, default_text=default_text)\n collection.insert_one(catalog_map)\n alias_id = Id(identifier=foreign_catalog_id.identifier,\n namespace=db_name + '.' + cat_name,\n authority=self._authority)\n try:\n admin_session = getattr(manager, 'get_{0}_admin_session'.format(catalog_name))(proxy=self._proxy)\n getattr(admin_session, 'alias_{0}'.format(catalog_name))(foreign_catalog_id, alias_id)\n except (errors.Unimplemented, AttributeError):\n pass\n return catalog_map",
"def create_device_from_category(self, plm, addr, cat, subcat,\n product_key=0x00):\n \"\"\"Create a new device from the cat, subcat and product_key data.\"\"\"\n saved_device = self._saved_devices.get(Address(addr).id, {})\n cat = saved_device.get('cat', cat)\n subcat = saved_device.get('subcat', subcat)\n product_key = saved_device.get('product_key', product_key)\n\n device_override = self._overrides.get(Address(addr).id, {})\n cat = device_override.get('cat', cat)\n subcat = device_override.get('subcat', subcat)\n product_key = device_override.get('firmware', product_key)\n product_key = device_override.get('product_key', product_key)\n\n return insteonplm.devices.create(plm, addr, cat, subcat, product_key)"
] |
[
0.6669116020202637,
0.6542688608169556,
0.6495332717895508,
0.6467118263244629,
0.6392396092414856,
0.6365825533866882,
0.6346891522407532,
0.6271485090255737,
0.6263657212257385,
0.6260377168655396,
0.6258659958839417,
0.625115692615509
] |
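For clarity, this is the request body that upload_applications above assembles before posting to post_application_kits. The upload response shape is inferred from the snippet; the concrete names and the category id are placeholders:

import json

# Inferred from the snippet: get_upload returns
# {'applications': [{'name': ..., 'url': ...}, ...]}.
upload_json = {'applications': [
    {'name': 'wordpress', 'url': 'http://example.com/wordpress.yml'},
]}
manifests = [dict(name=app['name'], manifest=app['url'])
             for app in upload_json['applications']]
# categoryId 42 stands in for category.id from the snippet.
data = json.dumps({'categoryId': 42, 'applications': manifests})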
Commits and leaves transaction management.
|
def process_response(self, request, response):
"""Commits and leaves transaction management."""
if tldap.transaction.is_managed():
tldap.transaction.commit()
tldap.transaction.leave_transaction_management()
return response
|
[
"def leave_transaction_management(self) -> None:\n \"\"\"\n End a transaction. Must not be dirty when doing so. ie. commit() or\n rollback() must be called if changes made. If dirty, changes will be\n discarded.\n \"\"\"\n if len(self._transactions) == 0:\n raise RuntimeError(\"leave_transaction_management called outside transaction\")\n elif len(self._transactions[-1]) > 0:\n raise RuntimeError(\"leave_transaction_management called with uncommited rollbacks\")\n else:\n self._transactions.pop()",
"@Override\n public void commit() throws DataCorruptedException, IOException, IllegalStateException{\n try {\n tranManager.commit(t1);\n } catch (RollbackException e) {\n rollback();\n throw new IOException(\"Error trying to commit transaction.\", e);\n } catch (TransactionException e) {\n rollback();\n throw new IOException(\"Error trying to commit transaction.\", e);\n }\n }",
"@Override\n\tpublic void commit() throws TransactionManagementException {\n\t\tlogger.entering(CLASSNAME, \"commit\");\n\t\ttry {\n\t\t\tuserTran.commit();\n\t\t\tlogger.log(Level.FINE, \"javax.transaction.Status: {0}\", userTran.getStatus());\n\t\t} catch (SecurityException e) {\n\t\t\tthrow new TransactionManagementException(e);\n\t\t} catch (IllegalStateException e) {\n\t\t\tthrow new TransactionManagementException(e);\n\t\t} catch (RollbackException e) {\n\t\t\tthrow new TransactionManagementException(e);\n\t\t} catch (HeuristicMixedException e) {\n\t\t\tthrow new TransactionManagementException(e);\n\t\t} catch (HeuristicRollbackException e) {\n\t\t\tthrow new TransactionManagementException(e);\n\t\t} catch (SystemException e) {\n\t\t\tthrow new TransactionManagementException(e);\n\t\t}\n\t\tlogger.exiting(CLASSNAME, \"commit\");\n\t}",
"private void leaveTransaction(EntityManager em, Object transaction) {\n if( isJTA ) { \n try { \n if( transaction != null ) { \n // There's a tx running, close it.\n ((UserTransaction) transaction).commit();\n }\n } catch(Exception e) { \n logger.error(\"Unable to commit transaction: \", e);\n }\n } else { \n if( transaction != null ) { \n ((EntityTransaction) transaction).commit();\n }\n }\n \n\n if (!sharedEM) {\n try { \n em.flush();\n em.close(); \n } catch( Exception e ) { \n logger.error(\"Unable to close created EntityManager: {}\", e.getMessage(), e);\n }\n }\n }",
"@Override\n public void commit()\n throws HeuristicMixedException,\n HeuristicRollbackException,\n RollbackException,\n SystemException\n {\n try {\n if (!Context.isTMNoTransaction()) {\n if (Context.isTMActive()) {\n Context.commit();\n } else {\n Context.rollback();\n }\n }\n } catch (final EFapsException e) {\n throw new SystemException(e.getMessage());\n }\n }",
"@Override\n\tpublic void commit() {\n\t\tOptional<LocalTransactionContext> txContext = currentTxContext();\n\t\tif (txContext.isPresent()) {\n\t\t\ttxContext.get().commit();\n\t\t} else {\n\t\t\tthis.unmanagedTransaction.ifPresent(LocalTransactionContext::commit);\n\t\t}\n\t}",
"def commit(self) -> None:\n \"\"\"\n Attempt to commit all changes to LDAP database. i.e. forget all\n rollbacks. However stay inside transaction management.\n \"\"\"\n if len(self._transactions) == 0:\n raise RuntimeError(\"commit called outside transaction\")\n\n # If we have nested transactions, we don't actually commit, but push\n # rollbacks up to previous transaction.\n if len(self._transactions) > 1:\n for on_rollback in reversed(self._transactions[-1]):\n self._transactions[-2].insert(0, on_rollback)\n\n _debug(\"commit\")\n self.reset()",
"def leave_transaction_management(using=None):\n \"\"\"\n Leaves transaction management for a running thread. A dirty flag is carried\n over to the surrounding block, as a commit will commit all changes, even\n those from outside. (Commits are on connection level.)\n \"\"\"\n if using is None:\n for using in tldap.backend.connections:\n connection = tldap.backend.connections[using]\n connection.leave_transaction_management()\n return\n connection = tldap.backend.connections[using]\n connection.leave_transaction_management()",
"@Override\n public void commit()\n {\n if (!getRollbackOnly())\n {\n onTransaction(TxAction.COMMIT);\n ((EntityManagerImpl) entityManager).getPersistenceDelegator().commit();\n }\n }",
"@Override\n public void commit() throws RollbackException, HeuristicMixedException, HeuristicRollbackException,\n SecurityException, IllegalStateException, SystemException\n {\n if (!setRollBackOnly)\n {\n for (ResourceManager implementor : implementors)\n {\n if (implementor != null)\n {\n implementor.doCommit();\n }\n }\n status = Status.STATUS_COMMITTED;\n }\n else\n {\n if (log.isDebugEnabled())\n log.debug(\"Transaction is set for rollback only, processing rollback.\");\n\n for (ResourceManager implementor : implementors)\n {\n if (implementor != null)\n {\n implementor.doRollback();\n status = Status.STATUS_ROLLEDBACK;\n }\n }\n }\n\n }",
"public final void commit() throws IllegalStateException, RepositoryException\n {\n checkIfOpened();\n try\n {\n closeStatements();\n\n if (!readOnly)\n {\n try\n {\n for (ValueIOChannel vo : valueChanges)\n {\n vo.twoPhaseCommit();\n }\n }\n catch (IOException e)\n {\n throw new RepositoryException(e);\n }\n finally\n {\n valueChanges.clear();\n }\n if (getDbConnectionTotalUsed() == 1)\n {\n // We don't commit as long as it is used\n dbConnection.commit();\n }\n }\n }\n catch (SQLException e)\n {\n throw new RepositoryException(e);\n }\n finally\n {\n try\n {\n if (release() == 0)\n {\n // We don't close the connection as long as it is used\n dbConnection.close();\n }\n }\n catch (SQLException e)\n {\n if (LOG.isWarnEnabled())\n {\n LOG.warn(\"Could not close the connection\", e);\n }\n }\n }\n }",
"public void commit()\r\n {\r\n checkOpen();\r\n try\r\n {\r\n prepareCommit();\r\n checkForCommit();\r\n\r\n txStatus = Status.STATUS_COMMITTING;\r\n if (log.isDebugEnabled()) log.debug(\"Commit transaction \" + this);\r\n // now do real commit on broker\r\n if(hasBroker()) getBroker().commitTransaction();\r\n\r\n // Now, we notify everything the commit is done.\r\n performTransactionAwareAfterCommit();\r\n\r\n doClose();\r\n txStatus = Status.STATUS_COMMITTED;\r\n }\r\n catch(Exception ex)\r\n {\r\n log.error(\"Error while commit objects, do abort tx \" + this + \", \" + ex.getMessage(), ex);\r\n txStatus = Status.STATUS_MARKED_ROLLBACK;\r\n abort();\r\n if(!(ex instanceof ODMGRuntimeException))\r\n {\r\n throw new TransactionAbortedExceptionOJB(\"Can't commit objects: \" + ex.getMessage(), ex);\r\n }\r\n else\r\n {\r\n throw (ODMGRuntimeException) ex;\r\n }\r\n }\r\n }"
] |
[
0.7870027422904968,
0.7715346813201904,
0.7684056758880615,
0.7661560773849487,
0.7623730301856995,
0.7611523866653442,
0.759559690952301,
0.75840824842453,
0.7467091083526611,
0.7364818453788757,
0.7344239950180054,
0.7322505712509155
] |
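A natural companion to process_response above is an exception hook that rolls back instead of committing. This is only a sketch: it assumes tldap.transaction exposes a rollback() counterpart to the commit() and is_managed() calls used in the snippet.

def process_exception(self, request, exception):
    """Rolls back and leaves transaction management on unhandled errors.

    Hypothetical companion middleware hook; assumes
    tldap.transaction.rollback() exists alongside commit().
    """
    if tldap.transaction.is_managed():
        tldap.transaction.rollback()
        tldap.transaction.leave_transaction_management()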
Format a report as per InfluxDB line protocol
:param name: name of the report
:param tags: tags identifying the specific report
:param fields: measurements of the report
:param timestamp: when the measurement was taken, in **seconds** since the epoch
|
def line_protocol(name, tags: dict = None, fields: dict = None, timestamp: float = None) -> str:
"""
Format a report as per InfluxDB line protocol
:param name: name of the report
:param tags: tags identifying the specific report
:param fields: measurements of the report
:param timestamp: when the measurement was taken, in **seconds** since the epoch
"""
output_str = name
if tags:
output_str += ','
output_str += ','.join('%s=%s' % (key, value) for key, value in sorted(tags.items()))
output_str += ' '
output_str += ','.join(('%s=%r' % (key, value)).replace("'", '"') for key, value in sorted(fields.items()))
if timestamp is not None:
# line protocol requires nanosecond precision, python uses seconds
output_str += ' %d' % (timestamp * 1E9)
return output_str + '\n'
|
[
"def metric(self, measurement_name, values, tags=None, timestamp=None):\n \"\"\"\n Append global tags configured for the client to the tags given then\n converts the data into InfluxDB Line protocol and sends to to socket\n \"\"\"\n if not measurement_name or values in (None, {}):\n # Don't try to send empty data\n return\n\n tags = tags or {}\n\n # Do a shallow merge of the metric tags and global tags\n all_tags = dict(self.tags, **tags)\n\n # Create a metric line from the input and then send it to socket\n line = Line(measurement_name, values, all_tags, timestamp)\n self.send(line.to_line_protocol())",
"def _tag_and_field_maker(self, event):\n '''\n >>> idbf = InfluxDBForwarder('no_host', '8086', 'deadpool',\n ... 'chimichanga', 'logs', 'collection')\n >>> log = {u'data': {u'_': {u'file': u'log.py',\n ... u'fn': u'start',\n ... u'ln': 8,\n ... u'name': u'__main__'},\n ... u'a': 1,\n ... u'b': 2,\n ... u'__ignore_this': 'some_string',\n ... u'msg': u'this is a dummy log'},\n ... u'error': False,\n ... u'error_tb': u'',\n ... u'event': u'some_log',\n ... u'file': u'/var/log/sample.log',\n ... u'formatter': u'logagg.formatters.basescript',\n ... u'host': u'deepcompute',\n ... u'id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e',\n ... u'level': u'info',\n ... u'raw': u'{...}',\n ... u'timestamp': u'2018-04-09T09:59:24.733945Z',\n ... u'type': u'metric'}\n\n >>> tags, fields = idbf._tag_and_field_maker(log)\n >>> from pprint import pprint\n >>> pprint(tags)\n {u'data.msg': u'this is a dummy log',\n u'error_tb': u'',\n u'file': u'/var/log/sample.log',\n u'formatter': u'logagg.formatters.basescript',\n u'host': u'deepcompute',\n u'level': u'info'}\n >>> pprint(fields)\n {u'data._': \"{u'ln': 8, u'fn': u'start', u'file': u'log.py', u'name': u'__main__'}\",\n u'data.a': 1,\n u'data.b': 2}\n\n '''\n data = event.pop('data')\n data = flatten_dict({'data': data})\n\n t = dict((k, event[k]) for k in event if k not in self.EXCLUDE_TAGS)\n f = dict()\n\n for k in data:\n v = data[k]\n\n if is_number(v) or isinstance(v, MarkValue):\n f[k] = v\n else:\n #if v.startswith('_'): f[k] = eval(v.split('_', 1)[1])\n t[k] = v\n\n return t, f",
"def serialize(df, measurement, tag_columns=None, **extra_tags) -> bytes:\n \"\"\"Converts a Pandas DataFrame into line protocol format\"\"\"\n # Pre-processing\n if measurement is None:\n raise ValueError(\"Missing 'measurement'\")\n if not isinstance(df.index, pd.DatetimeIndex):\n raise ValueError('DataFrame index is not DatetimeIndex')\n tag_columns = set(tag_columns or [])\n isnull = df.isnull().any(axis=1)\n\n # Make parser function\n tags = []\n fields = []\n for k, v in extra_tags.items():\n tags.append(f\"{k}={escape(v, key_escape)}\")\n for i, (k, v) in enumerate(df.dtypes.items()):\n k = k.translate(key_escape)\n if k in tag_columns:\n tags.append(f\"{k}={{p[{i+1}]}}\")\n elif issubclass(v.type, np.integer):\n fields.append(f\"{k}={{p[{i+1}]}}i\")\n elif issubclass(v.type, (np.float, np.bool_)):\n fields.append(f\"{k}={{p[{i+1}]}}\")\n else:\n # String escaping is skipped for performance reasons\n # Strings containing double-quotes can cause strange write errors\n # and should be sanitized by the user.\n # e.g., df[k] = df[k].astype('str').str.translate(str_escape)\n fields.append(f\"{k}=\\\"{{p[{i+1}]}}\\\"\")\n fmt = (f'{measurement}', f'{\",\" if tags else \"\"}', ','.join(tags),\n ' ', ','.join(fields), ' {p[0].value}')\n f = eval(\"lambda p: f'{}'\".format(''.join(fmt)))\n\n # Map/concat\n if isnull.any():\n lp = map(f, _itertuples(df[~isnull]))\n rep = _replace(df)\n lp_nan = (reduce(lambda a, b: re.sub(*b, a), rep, f(p))\n for p in _itertuples(df[isnull]))\n return '\\n'.join(chain(lp, lp_nan)).encode('utf-8')\n else:\n return '\\n'.join(map(f, _itertuples(df))).encode('utf-8')",
"def marshall(self):\n \"\"\"Return the measurement in the line protocol format.\n\n :rtype: str\n\n \"\"\"\n return '{},{} {} {}'.format(\n self._escape(self.name),\n ','.join(['{}={}'.format(self._escape(k), self._escape(v))\n for k, v in self.tags.items()]),\n self._marshall_fields(),\n int(self.timestamp * 1000))",
"def write(name, values, tags={}, timestamp=None, database=None):\n \"\"\" Method to be called via threading module. \"\"\"\n point = {\n 'measurement': name,\n 'tags': tags,\n 'fields': values\n }\n if isinstance(timestamp, datetime):\n timestamp = timestamp.strftime('%Y-%m-%dT%H:%M:%SZ')\n if timestamp:\n point['time'] = timestamp\n try:\n get_db().write({'points': [point]},\n {'db': database or settings.INFLUXDB_DATABASE})\n except Exception, e:\n if settings.INFLUXDB_FAIL_SILENTLY:\n pass\n else:\n raise e",
"def _normalize(self, name, columns, points):\n \"\"\"Normalize data for the InfluxDB's data model.\"\"\"\n\n for i, _ in enumerate(points):\n # Supported type:\n # https://docs.influxdata.com/influxdb/v1.5/write_protocols/line_protocol_reference/\n if points[i] is None:\n # Ignore points with None value\n del(points[i])\n del(columns[i])\n continue\n try:\n points[i] = float(points[i])\n except (TypeError, ValueError):\n pass\n else:\n continue\n try:\n points[i] = str(points[i])\n except (TypeError, ValueError):\n pass\n else:\n continue\n\n return [{'measurement': name,\n 'tags': self.parse_tags(self.tags),\n 'fields': dict(zip(columns, points))}]",
"def make_lines(data, precision=None):\n \"\"\"Extract points from given dict.\n\n Extracts the points from the given dict and returns a Unicode string\n matching the line protocol introduced in InfluxDB 0.9.0.\n \"\"\"\n lines = []\n static_tags = data.get('tags')\n for point in data['points']:\n elements = []\n\n # add measurement name\n measurement = _escape_tag(_get_unicode(\n point.get('measurement', data.get('measurement'))))\n key_values = [measurement]\n\n # add tags\n if static_tags:\n tags = dict(static_tags) # make a copy, since we'll modify\n tags.update(point.get('tags') or {})\n else:\n tags = point.get('tags') or {}\n\n # tags should be sorted client-side to take load off server\n for tag_key, tag_value in sorted(iteritems(tags)):\n key = _escape_tag(tag_key)\n value = _escape_tag_value(tag_value)\n\n if key != '' and value != '':\n key_values.append(key + \"=\" + value)\n\n elements.append(','.join(key_values))\n\n # add fields\n field_values = []\n for field_key, field_value in sorted(iteritems(point['fields'])):\n key = _escape_tag(field_key)\n value = _escape_value(field_value)\n\n if key != '' and value != '':\n field_values.append(key + \"=\" + value)\n\n elements.append(','.join(field_values))\n\n # add timestamp\n if 'time' in point:\n timestamp = _get_unicode(str(int(\n _convert_timestamp(point['time'], precision))))\n elements.append(timestamp)\n\n line = ' '.join(elements)\n lines.append(line)\n\n return '\\n'.join(lines) + '\\n'",
"def log(self, name, val, **tags):\n \"\"\"Log metric name with value val. You must include at least one tag as a kwarg\"\"\"\n global _last_timestamp, _last_metrics\n\n # do not allow .log after closing\n assert not self.done.is_set(), \"worker thread has been closed\"\n # check if valid metric name\n assert all(c in _valid_metric_chars for c in name), \"invalid metric name \" + name\n\n val = float(val) #Duck type to float/int, if possible.\n if int(val) == val:\n val = int(val)\n\n if self.host_tag and 'host' not in tags:\n tags['host'] = self.host_tag\n\n # get timestamp from system time, unless it's supplied as a tag\n timestamp = int(tags.pop('timestamp', time.time()))\n\n assert not self.done.is_set(), \"tsdb object has been closed\"\n assert tags != {}, \"Need at least one tag\"\n\n tagvals = ' '.join(['%s=%s' % (k, v) for k, v in tags.items()])\n\n # OpenTSDB has major problems if you insert a data point with the same\n # metric, timestamp and tags. So we keep a temporary set of what points\n # we have sent for the last timestamp value. If we encounter a duplicate,\n # it is dropped.\n unique_str = \"%s, %s, %s, %s, %s\" % (name, timestamp, tagvals, self.host, self.port)\n if timestamp == _last_timestamp or _last_timestamp == None:\n if unique_str in _last_metrics:\n return # discard duplicate metrics\n else:\n _last_metrics.add(unique_str)\n else:\n _last_timestamp = timestamp\n _last_metrics.clear()\n\n line = \"put %s %d %s %s\\n\" % (name, timestamp, val, tagvals)\n\n try:\n self.q.put(line, False)\n self.queued += 1\n except queue.Full:\n print(\"potsdb - Warning: dropping oldest metric because Queue is full. Size: %s\" % self.q.qsize(), file=sys.stderr)\n self.q.get() #Drop the oldest metric to make room\n self.q.put(line, False)\n return line",
"def record(self, tags, measurement_map, timestamp, attachments=None):\n \"\"\"records stats with a set of tags\"\"\"\n assert all(vv >= 0 for vv in measurement_map.values())\n for measure, value in measurement_map.items():\n if measure != self._registered_measures.get(measure.name):\n return\n view_datas = []\n for measure_name, view_data_list \\\n in self._measure_to_view_data_list_map.items():\n if measure_name == measure.name:\n view_datas.extend(view_data_list)\n for view_data in view_datas:\n view_data.record(\n context=tags, value=value, timestamp=timestamp,\n attachments=attachments)\n self.export(view_datas)",
"def write_summaries(self, tagged_data, experiment_name, run_name):\n \"\"\"Transactionally writes the given tagged summary data to the DB.\n\n Args:\n tagged_data: map from tag to TagData instances.\n experiment_name: name of experiment.\n run_name: name of run.\n \"\"\"\n logger.debug('Writing summaries for %s tags', len(tagged_data))\n # Connection used as context manager for auto commit/rollback on exit.\n # We still need an explicit BEGIN, because it doesn't do one on enter,\n # it waits until the first DML command - which is totally broken.\n # See: https://stackoverflow.com/a/44448465/1179226\n with self._db:\n self._db.execute('BEGIN TRANSACTION')\n run_id = self._maybe_init_run(experiment_name, run_name)\n tag_to_metadata = {\n tag: tagdata.metadata for tag, tagdata in six.iteritems(tagged_data)\n }\n tag_to_id = self._maybe_init_tags(run_id, tag_to_metadata)\n tensor_values = []\n for tag, tagdata in six.iteritems(tagged_data):\n tag_id = tag_to_id[tag]\n for step, wall_time, tensor_proto in tagdata.values:\n dtype = tensor_proto.dtype\n shape = ','.join(str(d.size) for d in tensor_proto.tensor_shape.dim)\n # Use tensor_proto.tensor_content if it's set, to skip relatively\n # expensive extraction into intermediate ndarray.\n data = self._make_blob(\n tensor_proto.tensor_content or\n tensor_util.make_ndarray(tensor_proto).tobytes())\n tensor_values.append((tag_id, step, wall_time, dtype, shape, data))\n self._db.executemany(\n \"\"\"\n INSERT OR REPLACE INTO Tensors (\n series, step, computed_time, dtype, shape, data\n ) VALUES (?, ?, ?, ?, ?, ?)\n \"\"\",\n tensor_values)",
"def _format_line(headers, fields):\n \"\"\"Format a line of a table.\n\n Arguments:\n headers: A list of strings that are used as the table headers.\n fields: A list of the same length as `headers` where `fields[i]` is\n the entry for `headers[i]` in this row. Elements can be of\n arbitrary types. Pass `headers` to print the header row.\n\n Returns:\n A pretty string.\n \"\"\"\n assert len(fields) == len(headers), (fields, headers)\n fields = [\"%2.4f\" % field if isinstance(field, float) else str(field)\n for field in fields]\n return ' '.join(' ' * max(0, len(header) - len(field)) + field\n for (header, field) in zip(headers, fields))",
"def format(self, filename, line, timestamp, **kwargs):\n \"\"\"Returns a formatted log line\"\"\"\n line = unicode(line.encode(\"utf-8\"), \"utf-8\", errors=\"ignore\")\n formatter = self._beaver_config.get_field('format', filename)\n if formatter not in self._formatters:\n formatter = self._default_formatter\n\n data = {\n self._fields.get('type'): kwargs.get('type'),\n self._fields.get('tags'): kwargs.get('tags'),\n '@timestamp': timestamp,\n self._fields.get('host'): self._current_host,\n self._fields.get('file'): filename,\n self._fields.get('message'): line\n }\n\n if self._logstash_version == 0:\n data['@source'] = 'file://{0}'.format(filename)\n data['@fields'] = kwargs.get('fields')\n else:\n data['@version'] = self._logstash_version\n fields = kwargs.get('fields')\n for key in fields:\n data[key] = fields.get(key)\n\n return self._formatters[formatter](data)"
] |
[
0.7091605067253113,
0.7001076340675354,
0.6801954507827759,
0.6764403581619263,
0.6736587285995483,
0.6688472628593445,
0.6525717973709106,
0.6519296765327454,
0.6441308856010437,
0.6438611745834351,
0.6385225057601929,
0.6380767822265625
] |
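An example invocation of line_protocol above; the output shown follows directly from the snippet's formatting rules (sorted tags, repr-formatted fields, seconds scaled to nanoseconds):

line = line_protocol(
    'cpu',
    tags={'host': 'server01', 'region': 'us-west'},
    fields={'value': 0.64},
    timestamp=1434055562.0,
)
# -> 'cpu,host=server01,region=us-west value=0.64 1434055562000000000\n'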
This gets displayed on the block header.
 |
def block_type(self):
""" This gets display on the block header. """
return capfirst(force_text(
self.content_block.content_type.model_class()._meta.verbose_name
))
|
[
"def header(self):\n \"\"\"\n Returns the header of the block\n \"\"\"\n if self._block_header is None:\n self._block_header = BlockHeader()\n self._block_header.ParseFromString(self.block.header)\n return self._block_header",
"function(showHeader){\n\t\t\tthis.options.showHeader = showHeader;\n\t\t\tif(showHeader){\n\t\t\t\t$('#' + this.options.id + '_header').css('display',\"block\");\n\t\t\t}else{\n\t\t\t\t$('#' + this.options.id + '_header').css('display',\"none\");\n\t\t\t}\n\t\t}",
"def to_s\n description = []\n if block_name\n block_name = self.block_name.to_s\n if block_name.include?(\" \")\n block_name = \":\\\"#{block_name}\\\"\"\n else\n block_name = \":#{block_name}\"\n end\n description << \"Block Name: #{block_name}\"\n end\n\n if render_item.is_a?(String)\n description << \"Renders with partial \\\"#{render_item}\\\"\"\n elsif render_item.is_a?(Proc)\n description << \"Renders with block defined at #{render_item.source_location}\"\n end\n\n\n CONTROL_VARIABLES.each do |control_variable, *|\n if value = send(control_variable)\n description << \"#{control_variable}: #{value} [#{callers[control_variable]}]\"\n end\n end\n\n description << super\n description.join(\"\\n\")\n end",
"def Header(self):\n \"\"\"\n Get the block header.\n\n Returns:\n neo.Core.Header:\n \"\"\"\n if not self._header:\n self._header = Header(self.PrevHash, self.MerkleRoot, self.Timestamp,\n self.Index, self.ConsensusData, self.NextConsensus, self.Script)\n\n return self._header",
"def header(self):\n \"\"\" Show summary header. \"\"\"\n # Show question mark instead of count when errors encountered\n count = \"? (error encountered)\" if self._error else len(self.stats)\n utils.item(\"{0}: {1}\".format(self.name, count), options=self.options)",
"def format_blocks_section(self):\n \"\"\"format blocks section.\n assign_vertexid() should be called before this method, because\n vertices reffered by blocks should have valid index.\n \"\"\"\n buf = io.StringIO()\n buf.write('blocks\\n')\n buf.write('(\\n')\n for b in self.blocks.values():\n buf.write(' ' + b.format(self.vertices) + '\\n')\n buf.write(');')\n return buf.getvalue()",
"def __display_header(self, stat_display):\n \"\"\"Display the firsts lines (header) in the Curses interface.\n\n system + ip + uptime\n (cloud)\n \"\"\"\n # First line\n self.new_line()\n self.space_between_column = 0\n l_uptime = (self.get_stats_display_width(stat_display[\"system\"]) +\n self.get_stats_display_width(stat_display[\"ip\"]) +\n self.get_stats_display_width(stat_display[\"uptime\"]) + 1)\n self.display_plugin(\n stat_display[\"system\"],\n display_optional=(self.screen.getmaxyx()[1] >= l_uptime))\n self.space_between_column = 3\n self.new_column()\n self.display_plugin(stat_display[\"ip\"])\n self.new_column()\n self.display_plugin(\n stat_display[\"uptime\"],\n add_space=-(self.get_stats_display_width(stat_display[\"cloud\"]) != 0))\n # Second line (optional)\n self.init_column()\n self.new_line()\n self.display_plugin(stat_display[\"cloud\"])",
"def show(self):\n \"\"\" Display indented statistics. \"\"\"\n if not self._error and not self.stats:\n return\n self.header()\n for stat in self.stats:\n utils.item(stat, level=1, options=self.options)",
"private void printBlockMasterInfo() throws IOException {\n Set<BlockMasterInfoField> blockMasterInfoFilter = new HashSet<>(Arrays\n .asList(BlockMasterInfoField.LIVE_WORKER_NUM, BlockMasterInfoField.LOST_WORKER_NUM,\n BlockMasterInfoField.CAPACITY_BYTES, BlockMasterInfoField.USED_BYTES,\n BlockMasterInfoField.FREE_BYTES, BlockMasterInfoField.CAPACITY_BYTES_ON_TIERS,\n BlockMasterInfoField.USED_BYTES_ON_TIERS));\n BlockMasterInfo blockMasterInfo = mBlockMasterClient.getBlockMasterInfo(blockMasterInfoFilter);\n\n print(\"Live Workers: \" + blockMasterInfo.getLiveWorkerNum());\n print(\"Lost Workers: \" + blockMasterInfo.getLostWorkerNum());\n\n print(\"Total Capacity: \"\n + FormatUtils.getSizeFromBytes(blockMasterInfo.getCapacityBytes()));\n\n mIndentationLevel++;\n Map<String, Long> totalCapacityOnTiers = new TreeMap<>((a, b)\n -> (FileSystemAdminShellUtils.compareTierNames(a, b)));\n totalCapacityOnTiers.putAll(blockMasterInfo.getCapacityBytesOnTiers());\n for (Map.Entry<String, Long> capacityBytesTier : totalCapacityOnTiers.entrySet()) {\n print(\"Tier: \" + capacityBytesTier.getKey()\n + \" Size: \" + FormatUtils.getSizeFromBytes(capacityBytesTier.getValue()));\n }\n\n mIndentationLevel--;\n print(\"Used Capacity: \"\n + FormatUtils.getSizeFromBytes(blockMasterInfo.getUsedBytes()));\n\n mIndentationLevel++;\n Map<String, Long> usedCapacityOnTiers = new TreeMap<>((a, b)\n -> (FileSystemAdminShellUtils.compareTierNames(a, b)));\n usedCapacityOnTiers.putAll(blockMasterInfo.getUsedBytesOnTiers());\n for (Map.Entry<String, Long> usedBytesTier: usedCapacityOnTiers.entrySet()) {\n print(\"Tier: \" + usedBytesTier.getKey()\n + \" Size: \" + FormatUtils.getSizeFromBytes(usedBytesTier.getValue()));\n }\n\n mIndentationLevel--;\n print(\"Free Capacity: \"\n + FormatUtils.getSizeFromBytes(blockMasterInfo.getFreeBytes()));\n }",
"def _get_head_block(self):\n \"\"\"Get head block header.\n\n :return:\n \"\"\"\n if not self.head_block_header:\n block_hash = self.db.get(head_header_key)\n num = self._get_block_number(block_hash)\n self.head_block_header = self._get_block_header(block_hash, num)\n # find header with valid state\n while (\n not self.db.get(self.head_block_header.state_root)\n and self.head_block_header.prevhash is not None\n ):\n block_hash = self.head_block_header.prevhash\n num = self._get_block_number(block_hash)\n self.head_block_header = self._get_block_header(block_hash, num)\n\n return self.head_block_header",
"def get_block_info(self, block):\n \"\"\"\n Args:\n block: block number (eg: 223212)\n block hash (eg: 0000000000000000210b10d620600dc1cc2380bb58eb2408f9767eb792ed31fa)\n word \"last\" - this will always return the latest block\n word \"first\" - this will always return the first block\n Returns:\n basic block data\n\n \"\"\"\n url = '{}/block/info/{}'.format(self._url, block)\n return self.make_request(url)",
"String showDetail() {\n if (isDefault() || isEmpty()) {\n return \"\";\n } else {\n return entries.stream()\n .map(sue -> \"---- \" + sue.name\n + (sue.timeStamp.isEmpty()\n ? \"\"\n : \" @ \" + sue.timeStamp)\n + \" ----\\n\" + sue.content)\n .collect(joining());\n }\n }"
] |
[
0.7495452761650085,
0.7194896936416626,
0.7102897763252258,
0.7064126133918762,
0.7062720060348511,
0.701840877532959,
0.7010632753372192,
0.6981439590454102,
0.6946882605552673,
0.6943727135658264,
0.6923321485519409,
0.6914297342300415
] |
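A quick aside on what block_type actually produces: it leans entirely on Django's model metadata. Below is a minimal sketch of the same transformation, where content_block stands in for the wrapper object from the snippet above and ImageBlock is a hypothetical block model whose verbose_name is "image block" (note that force_text was renamed force_str in Django 3.0):

from django.utils.encoding import force_text  # force_str on Django >= 3.0
from django.utils.text import capfirst

# content_type.model_class() resolves the ContentType row back to the
# model class; verbose_name is lowercase by default, so capfirst()
# yields a display-ready header label such as 'Image block'.
model_class = content_block.content_type.model_class()
header = capfirst(force_text(model_class._meta.verbose_name))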
Return a list of column default block tuples (URL, verbose name).
Used for quick add block buttons.
|
def get_default_blocks(self, top=False):
"""
Return a list of column default block tuples (URL, verbose name).
Used for quick add block buttons.
"""
default_blocks = []
for block_model, block_name in self.glitter_page.default_blocks:
block = apps.get_model(block_model)
base_url = reverse('block_admin:{}_{}_add'.format(
block._meta.app_label, block._meta.model_name,
), kwargs={
'version_id': self.glitter_page.version.id,
})
block_qs = {
'column': self.name,
'top': top,
}
block_url = '{}?{}'.format(base_url, urlencode(block_qs))
block_text = capfirst(force_text(block._meta.verbose_name))
default_blocks.append((block_url, block_text))
return default_blocks
|
[
"def default_blocks(self):\n \"\"\"\n Return a list of default block tuples (appname.ModelName, verbose name).\n\n Next to the dropdown list of block types, a small number of common blocks which are\n frequently used can be added immediately to a column with one click. This method defines\n the list of default blocks.\n \"\"\"\n # Use the block list provided by settings if it's defined\n block_list = getattr(settings, 'GLITTER_DEFAULT_BLOCKS', None)\n\n if block_list is not None:\n return block_list\n\n # Try and auto fill in default blocks if the apps are installed\n block_list = []\n\n for block in GLITTER_FALLBACK_BLOCKS:\n app_name, model_name = block.split('.')\n\n try:\n model_class = apps.get_model(app_name, model_name)\n verbose_name = capfirst(model_class._meta.verbose_name)\n block_list.append((block, verbose_name))\n except LookupError:\n # Block isn't installed - don't add it as a quick add default\n pass\n\n return block_list",
"def add_block_options(self, top):\n \"\"\"\n Return a list of URLs and titles for blocks which can be added to this column.\n\n All available blocks are grouped by block category.\n \"\"\"\n from .blockadmin import blocks\n\n block_choices = []\n\n # Group all block by category\n for category in sorted(blocks.site.block_list):\n category_blocks = blocks.site.block_list[category]\n category_choices = []\n\n for block in category_blocks:\n base_url = reverse('block_admin:{}_{}_add'.format(\n block._meta.app_label, block._meta.model_name,\n ), kwargs={\n 'version_id': self.glitter_page.version.id,\n })\n block_qs = {\n 'column': self.name,\n 'top': top,\n }\n block_url = '{}?{}'.format(base_url, urlencode(block_qs))\n block_text = capfirst(force_text(block._meta.verbose_name))\n\n category_choices.append((block_url, block_text))\n\n category_choices = sorted(category_choices, key=lambda x: x[1])\n block_choices.append((category, category_choices))\n\n return block_choices",
"def defaults(self):\n \"\"\" component default component\n\n .. Note:: default components is just an indication for user and the\n views, except if the Block is required. If required then default is\n selected if nothing explisitely selected.\n \"\"\"\n default = self._defaults\n # if require and no default, the first component as default\n if not len(default) and self.required and len(self._components):\n default = [six.next(six.itervalues(self._components)).name]\n return default",
"def all_blocks(self):\n\n status = OrderedDict.fromkeys(parameters.BLOCKS.keys())\n status['13AE'] = ['discovery complete', '50', '24.05']\n status['13AO'] = ['discovery complete', '36', '24.40']\n status['13BL'] = ['discovery complete', '79', '24.48']\n status['14BH'] = ['discovery running', '-', '-']\n status['15AP'] = ['discovery running', '-', '-']\n status['15AM'] = ['discovery running', '-', '-']\n\n '''Overview tal table is expecting:\n ID observations processing status discoveries m_r 40%\n '''\n bks = []\n for block in status.iterkeys():\n bk = [block, self.num_block_images(block)] # if set in the .fromkeys(), doesn't give a unique list\n if status[block] is not None:\n bk = bk + status[block]\n else:\n bk = bk + ['awaiting triplets', '-', '-']\n bks.append(bk)\n\n retval = {'blocks': bks, 'status': status}\n\n return retval",
"function getColumnBlocks(tableGrid) {\n var cols = range(0, tableGrid.grid.maxCols);\n var rows = range(0, tableGrid.grid.maxRows);\n\n return Tools.map(cols, function (col) {\n function getBlock() {\n var details = [];\n for (var i = 0; i < rows.length; i++) {\n var detail = tableGrid.getAt(i, col);\n if (detail && detail.colIndex === col) {\n details.push(detail);\n }\n }\n\n return details;\n }\n\n function isSingle(detail) {\n return detail.colspan === 1;\n }\n\n function getFallback() {\n var item;\n\n for (var i = 0; i < rows.length; i++) {\n item = tableGrid.getAt(i, col);\n if (item) {\n return item;\n }\n }\n\n return null;\n }\n\n return decide(getBlock, isSingle, getFallback);\n });\n }",
"def default_entities(self):\n \"\"\"Return range from 0 to rows + columns.\"\"\"\n return [str(i) for i in range(self.rows + self.columns)]",
"def add_block_widget(self, top=False):\n \"\"\"\n Return a select widget for blocks which can be added to this column.\n \"\"\"\n widget = AddBlockSelect(attrs={\n 'class': 'glitter-add-block-select',\n }, choices=self.add_block_options(top=top))\n\n return widget.render(name='', value=None)",
"def getColumnsClasses(self, view=None):\n \"\"\"Determine whether a column should be shown. The left column is\n called plone.leftcolumn; the right column is called\n plone.rightcolumn.\n \"\"\"\n\n plone_view = getMultiAdapter(\n (self.context, self.request), name=u'plone')\n portal_state = getMultiAdapter(\n (self.context, self.request), name=u'plone_portal_state')\n\n sl = plone_view.have_portlets('plone.leftcolumn', view=view)\n sr = plone_view.have_portlets('plone.rightcolumn', view=view)\n\n isRTL = portal_state.is_rtl()\n\n # pre-fill dictionary\n columns = dict(one=\"\", content=\"\", two=\"\")\n\n if not sl and not sr:\n # we don't have columns, thus conten takes the whole width\n columns['content'] = \"col-md-12\"\n\n elif sl and sr:\n # In case we have both columns, content takes 50% of the whole\n # width and the rest 50% is spread between the columns\n columns['one'] = \"col-xs-12 col-md-2\"\n columns['content'] = \"col-xs-12 col-md-8\"\n columns['two'] = \"col-xs-12 col-md-2\"\n\n elif (sr and not sl) and not isRTL:\n # We have right column and we are NOT in RTL language\n columns['content'] = \"col-xs-12 col-md-10\"\n columns['two'] = \"col-xs-12 col-md-2\"\n\n elif (sl and not sr) and isRTL:\n # We have left column and we are in RTL language\n columns['one'] = \"col-xs-12 col-md-2\"\n columns['content'] = \"col-xs-12 col-md-10\"\n\n elif (sl and not sr) and not isRTL:\n # We have left column and we are in NOT RTL language\n columns['one'] = \"col-xs-12 col-md-2\"\n columns['content'] = \"col-xs-12 col-md-10\"\n\n # # append cell to each css-string\n # for key, value in columns.items():\n # columns[key] = \"cell \" + value\n\n return columns",
"def defaults(self):\n \"\"\"Default filter form data when no GET data is provided.\"\"\"\n # Set default date span to previous week.\n (start, end) = get_week_window(timezone.now() - relativedelta(days=7))\n return {\n 'from_date': start,\n 'to_date': end,\n 'billable': True,\n 'non_billable': False,\n 'paid_leave': False,\n 'trunc': 'day',\n 'projects': [],\n }",
"def columns(self):\n \"\"\"Return names of all the addressable columns (including foreign keys) referenced in user supplied model\"\"\"\n res = [col['name'] for col in self.column_definitions]\n res.extend([col['name'] for col in self.foreign_key_definitions])\n return res",
"def columns(p_alt_layout_path=None):\n \"\"\"\n Returns list with complete column configuration dicts.\n \"\"\"\n def _get_column_dict(p_cp, p_column):\n column_dict = dict()\n\n filterexpr = p_cp.get(p_column, 'filterexpr')\n\n try:\n title = p_cp.get(p_column, 'title')\n except NoOptionError:\n title = filterexpr\n\n column_dict['title'] = title or 'Yet another column'\n column_dict['filterexpr'] = filterexpr\n column_dict['sortexpr'] = p_cp.get(p_column, 'sortexpr')\n column_dict['groupexpr'] = p_cp.get(p_column, 'groupexpr')\n column_dict['show_all'] = p_cp.getboolean(p_column, 'show_all')\n\n return column_dict\n\n defaults = {\n 'filterexpr': '',\n 'sortexpr': config().sort_string(),\n 'groupexpr': config().group_string(),\n 'show_all': '0',\n }\n\n cp = RawConfigParser(defaults, strict=False)\n files = [\n \"topydo_columns.ini\",\n \"topydo_columns.conf\",\n \".topydo_columns\",\n home_config_path('.topydo_columns'),\n home_config_path('.config/topydo/columns'),\n \"/etc/topydo_columns.conf\",\n ]\n\n if p_alt_layout_path is not None:\n files.insert(0, expanduser(p_alt_layout_path))\n for filename in files:\n if cp.read(filename):\n break\n\n column_list = []\n\n for column in cp.sections():\n column_list.append(_get_column_dict(cp, column))\n\n return column_list",
"def get_default_classes(self):\n \"\"\"Returns a flattened string of the cell's CSS classes.\"\"\"\n if not self.url:\n self.column.classes = [cls for cls in self.column.classes\n if cls != \"anchor\"]\n column_class_string = self.column.get_final_attrs().get('class', \"\")\n classes = set(column_class_string.split(\" \"))\n if self.column.status:\n classes.add(self.get_status_class(self.status))\n\n if self.inline_edit_available:\n classes.add(\"inline_edit_available\")\n\n return list(classes)"
] |
[
0.7970972061157227,
0.766054093837738,
0.6778176426887512,
0.6707983016967773,
0.667448103427887,
0.6652927398681641,
0.6631214022636414,
0.6560982465744019,
0.6543580293655396,
0.6515933871269226,
0.6515030860900879,
0.6506086587905884
] |
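The quick-add URL assembled by get_default_blocks is just the block admin's add view plus a querystring naming the target column and position. A minimal sketch of that construction in isolation, using a hypothetical glitter_image.ImageBlock model and a made-up version id (the URL name follows the block_admin:<app_label>_<model_name>_add pattern from the snippet):

from django.urls import reverse  # django.core.urlresolvers.reverse on older Django
from django.utils.http import urlencode

base_url = reverse('block_admin:glitter_image_imageblock_add',
                   kwargs={'version_id': 42})
block_url = '{}?{}'.format(base_url, urlencode({'column': 'content', 'top': False}))
# block_url ends in '?column=content&top=False'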
Return a select widget for blocks which can be added to this column.
|
def add_block_widget(self, top=False):
"""
Return a select widget for blocks which can be added to this column.
"""
widget = AddBlockSelect(attrs={
'class': 'glitter-add-block-select',
}, choices=self.add_block_options(top=top))
return widget.render(name='', value=None)
|
[
"def add_block_options(self, top):\n \"\"\"\n Return a list of URLs and titles for blocks which can be added to this column.\n\n All available blocks are grouped by block category.\n \"\"\"\n from .blockadmin import blocks\n\n block_choices = []\n\n # Group all block by category\n for category in sorted(blocks.site.block_list):\n category_blocks = blocks.site.block_list[category]\n category_choices = []\n\n for block in category_blocks:\n base_url = reverse('block_admin:{}_{}_add'.format(\n block._meta.app_label, block._meta.model_name,\n ), kwargs={\n 'version_id': self.glitter_page.version.id,\n })\n block_qs = {\n 'column': self.name,\n 'top': top,\n }\n block_url = '{}?{}'.format(base_url, urlencode(block_qs))\n block_text = capfirst(force_text(block._meta.verbose_name))\n\n category_choices.append((block_url, block_text))\n\n category_choices = sorted(category_choices, key=lambda x: x[1])\n block_choices.append((category, category_choices))\n\n return block_choices",
"def get_widget(self, request):\n \"\"\"\n Field widget is replaced with \"RestrictedSelectWidget\" because we not want to use modified widgets for\n filtering.\n \"\"\"\n return self._update_widget_choices(self.field.formfield(widget=RestrictedSelectWidget).widget)",
"def get_widget(self, request):\n \"\"\"\n Table view is not able to get form field from reverse relation.\n Therefore this widget returns similar form field as direct relation (ModelChoiceField).\n Because there is used \"RestrictedSelectWidget\" it is returned textarea or selectox with choices according to\n count objects in the queryset.\n \"\"\"\n return self._update_widget_choices(\n forms.ModelChoiceField(\n widget=RestrictedSelectWidget, queryset=self.field.related_model._default_manager.all()\n ).widget\n )",
"def default_blocks(self):\n \"\"\"\n Return a list of default block tuples (appname.ModelName, verbose name).\n\n Next to the dropdown list of block types, a small number of common blocks which are\n frequently used can be added immediately to a column with one click. This method defines\n the list of default blocks.\n \"\"\"\n # Use the block list provided by settings if it's defined\n block_list = getattr(settings, 'GLITTER_DEFAULT_BLOCKS', None)\n\n if block_list is not None:\n return block_list\n\n # Try and auto fill in default blocks if the apps are installed\n block_list = []\n\n for block in GLITTER_FALLBACK_BLOCKS:\n app_name, model_name = block.split('.')\n\n try:\n model_class = apps.get_model(app_name, model_name)\n verbose_name = capfirst(model_class._meta.verbose_name)\n block_list.append((block, verbose_name))\n except LookupError:\n # Block isn't installed - don't add it as a quick add default\n pass\n\n return block_list",
"def get_default_blocks(self, top=False):\n \"\"\"\n Return a list of column default block tuples (URL, verbose name).\n\n Used for quick add block buttons.\n \"\"\"\n default_blocks = []\n\n for block_model, block_name in self.glitter_page.default_blocks:\n block = apps.get_model(block_model)\n base_url = reverse('block_admin:{}_{}_add'.format(\n block._meta.app_label, block._meta.model_name,\n ), kwargs={\n 'version_id': self.glitter_page.version.id,\n })\n block_qs = {\n 'column': self.name,\n 'top': top,\n }\n block_url = '{}?{}'.format(base_url, urlencode(block_qs))\n block_text = capfirst(force_text(block._meta.verbose_name))\n\n default_blocks.append((block_url, block_text))\n\n return default_blocks",
"def select\n return enum_for(:select) unless block_given?\n return reduce(self.class.empty) { |tuple, element| yield(element) ? tuple.add(element) : tuple }\n end",
"def area_field(key='area'):\n \"\"\"Provides a select box for country selection\"\"\"\n\n area_list = list(subdivisions)\n title_map = []\n for item in area_list:\n title_map.append({'value': item.code, 'name': item.name})\n\n widget = {\n 'key': key,\n 'type': 'uiselect',\n 'titleMap': title_map\n }\n\n return widget",
"def get_only_selected_choices(self, value):\n \"\"\"Return a list of optgroups for this widget.\"\"\"\n schoices = self.choices\n selected_choices = set([force_text(v) for v in value if v])\n if isinstance(schoices, ModelChoiceIterator):\n schoices.queryset = schoices.queryset.filter(pk__in=selected_choices)\n else:\n schoices = [e for e in schoices if force_text(e) in selected_choices]\n return schoices",
"def select(&block)\n return self.to_enum(:select) unless block_given?\n rows = []\n i = 0\n self.each_row do |row|\n if row.instance_eval(&block)\n rows.push(row)\n end\n end\n Mikon::DataFrame.new(rows)\n end",
"def _selectedBlocks(self):\n \"\"\"Return selected blocks and tuple (startBlock, endBlock)\n \"\"\"\n cursor = self.textCursor()\n return self.document().findBlock(cursor.selectionStart()), \\\n self.document().findBlock(cursor.selectionEnd())",
"def country_field(key='country'):\n \"\"\"Provides a select box for country selection\"\"\"\n\n country_list = list(countries)\n title_map = []\n for item in country_list:\n title_map.append({'value': item.alpha_3, 'name': item.name})\n\n widget = {\n 'key': key,\n 'type': 'uiselect',\n 'titleMap': title_map\n }\n\n return widget",
"def selections(self):\n \"\"\"Build list of extra selections for rectangular selection\"\"\"\n selections = []\n cursors = self.cursors()\n if cursors:\n background = self._qpart.palette().color(QPalette.Highlight)\n foreground = self._qpart.palette().color(QPalette.HighlightedText)\n for cursor in cursors:\n selection = QTextEdit.ExtraSelection()\n selection.format.setBackground(background)\n selection.format.setForeground(foreground)\n selection.cursor = cursor\n\n selections.append(selection)\n\n return selections"
] |
[
0.7587445974349976,
0.7542740106582642,
0.7345138192176819,
0.7179350256919861,
0.711238443851471,
0.7025933861732483,
0.6922687888145447,
0.6916624903678894,
0.6891977787017822,
0.6877712607383728,
0.6849820613861084,
0.6842983365058899
] |
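AddBlockSelect is a project-specific widget, but the rendering it relies on is Django's standard grouped-choices behaviour: a two-level choices list comes out as <optgroup> elements. A rough stand-in sketch with a plain forms.Select and hypothetical choices shaped the way add_block_options returns them:

from django import forms

choices = [
    ('Common', [
        ('/blockadmin/app/imageblock/add/1/?column=main&top=False', 'Image'),
        ('/blockadmin/app/textblock/add/1/?column=main&top=False', 'Text'),
    ]),
]
widget = forms.Select(attrs={'class': 'glitter-add-block-select'}, choices=choices)
html = widget.render(name='', value=None)
# html is a <select class="glitter-add-block-select"> containing one <optgroup>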
Return a list of URLs and titles for blocks which can be added to this column.
All available blocks are grouped by block category.
|
def add_block_options(self, top):
"""
Return a list of URLs and titles for blocks which can be added to this column.
All available blocks are grouped by block category.
"""
from .blockadmin import blocks
block_choices = []
# Group all block by category
for category in sorted(blocks.site.block_list):
category_blocks = blocks.site.block_list[category]
category_choices = []
for block in category_blocks:
base_url = reverse('block_admin:{}_{}_add'.format(
block._meta.app_label, block._meta.model_name,
), kwargs={
'version_id': self.glitter_page.version.id,
})
block_qs = {
'column': self.name,
'top': top,
}
block_url = '{}?{}'.format(base_url, urlencode(block_qs))
block_text = capfirst(force_text(block._meta.verbose_name))
category_choices.append((block_url, block_text))
category_choices = sorted(category_choices, key=lambda x: x[1])
block_choices.append((category, category_choices))
return block_choices
|
[
"def get_default_blocks(self, top=False):\n \"\"\"\n Return a list of column default block tuples (URL, verbose name).\n\n Used for quick add block buttons.\n \"\"\"\n default_blocks = []\n\n for block_model, block_name in self.glitter_page.default_blocks:\n block = apps.get_model(block_model)\n base_url = reverse('block_admin:{}_{}_add'.format(\n block._meta.app_label, block._meta.model_name,\n ), kwargs={\n 'version_id': self.glitter_page.version.id,\n })\n block_qs = {\n 'column': self.name,\n 'top': top,\n }\n block_url = '{}?{}'.format(base_url, urlencode(block_qs))\n block_text = capfirst(force_text(block._meta.verbose_name))\n\n default_blocks.append((block_url, block_text))\n\n return default_blocks",
"def listBlocks(self, dataset=\"\", block_name=\"\", data_tier_name=\"\", origin_site_name=\"\",\n logical_file_name=\"\", run_num=-1, min_cdate=0, max_cdate=0,\n min_ldate=0, max_ldate=0, cdate=0, ldate=0, open_for_writing=-1, detail=False):\n \"\"\"\n dataset, block_name, data_tier_name or logical_file_name must be passed.\n \"\"\"\n if (not dataset) or re.search(\"['%','*']\", dataset):\n if (not block_name) or re.search(\"['%','*']\", block_name):\n if (not logical_file_name) or re.search(\"['%','*']\", logical_file_name):\n if not data_tier_name or re.search(\"['%','*']\", data_tier_name):\n msg = \"DBSBlock/listBlock. You must specify at least one parameter(dataset, block_name,\\\n\t\t\t \tdata_tier_name, logical_file_name) with listBlocks api\"\n dbsExceptionHandler('dbsException-invalid-input2', msg, self.logger.exception, msg)\n\n if data_tier_name:\n if not (min_cdate and max_cdate) or (max_cdate-min_cdate)>32*24*3600:\n msg = \"min_cdate and max_cdate are mandatory parameters. If data_tier_name parameter is used \\\n the maximal time range allowed is 31 days\"\n dbsExceptionHandler('dbsException-invalid-input2', msg, self.logger.exception, msg)\n if detail:\n msg = \"DBSBlock/listBlock. Detail parameter not allowed togther with data_tier_name\"\n dbsExceptionHandler('dbsException-invalid-input2', msg, self.logger.exception, msg)\n\n with self.dbi.connection() as conn:\n dao = (self.blockbrieflist, self.blocklist)[detail]\n for item in dao.execute(conn, dataset, block_name, data_tier_name, origin_site_name, logical_file_name, run_num,\n min_cdate, max_cdate, min_ldate, max_ldate, cdate, ldate):\n yield item",
"def listBlocks(self, dataset=\"\", block_name=\"\", data_tier_name=\"\", origin_site_name=\"\",\n logical_file_name=\"\",run_num=-1, min_cdate='0', max_cdate='0',\n min_ldate='0', max_ldate='0', cdate='0', ldate='0', open_for_writing=-1, detail=False):\n\n \"\"\"\n API to list a block in DBS. At least one of the parameters block_name, dataset, data_tier_name or\n logical_file_name are required. If data_tier_name is provided, min_cdate and max_cdate have to be specified and\n the difference in time have to be less than 31 days.\n\n :param block_name: name of the block\n :type block_name: str\n :param dataset: dataset\n :type dataset: str\n :param data_tier_name: data tier\n :type data_tier_name: str\n :param logical_file_name: Logical File Name\n :type logical_file_name: str\n :param origin_site_name: Origin Site Name (Optional)\n :type origin_site_name: str\n :param open_for_writing: Open for Writting (Optional)\n :type open_for_writing: int (0 or 1)\n :param run_num: run_num numbers (Optional). Possible format are: run_num, 'run_min-run_max' or ['run_min-run_max', run1, run2, ...].\n :type run_num: int, list of runs or list of run ranges\n :param min_cdate: Lower limit for the creation date (unixtime) (Optional)\n :type min_cdate: int, str\n :param max_cdate: Upper limit for the creation date (unixtime) (Optional)\n :type max_cdate: int, str\n :param min_ldate: Lower limit for the last modification date (unixtime) (Optional)\n :type min_ldate: int, str\n :param max_ldate: Upper limit for the last modification date (unixtime) (Optional)\n :type max_ldate: int, str\n :param cdate: creation date (unixtime) (Optional)\n :type cdate: int, str\n :param ldate: last modification date (unixtime) (Optional)\n :type ldate: int, str\n :param detail: Get detailed information of a block (Optional)\n :type detail: bool\n :returns: List of dictionaries containing following keys (block_name). If option detail is used the dictionaries contain the following keys (block_id, create_by, creation_date, open_for_writing, last_modified_by, dataset, block_name, file_count, origin_site_name, last_modification_date, dataset_id and block_size)\n :rtype: list of dicts\n\n \"\"\"\n # run_num=1 caused full table scan and CERN DBS reported some of the queries ran more than 50 hours\n # We will disbale all the run_num=1 calls in DBS. Run_num=1 will be OK while logical_file_name is given.\n # YG Jan. 
15 2019\n # \n if (run_num != -1 and logical_file_name ==''):\n for r in parseRunRange(run_num):\n if isinstance(r, basestring) or isinstance(r, int) or isinstance(r, long): \n if r == 1 or r == '1':\n dbsExceptionHandler(\"dbsException-invalid-input\", \"Run_num=1 is not a valid input.\",\n self.logger.exception)\n elif isinstance(r, run_tuple):\n if r[0] == r[1]:\n dbsExceptionHandler(\"dbsException-invalid-input\", \"DBS run range must be apart at least by 1.\",\n self.logger.exception)\n elif r[0] <= 1 <= r[1]:\n dbsExceptionHandler(\"dbsException-invalid-input2\", \"Run_num=1 is not a valid input.\",\n self.logger.exception)\n \n dataset = dataset.replace(\"*\", \"%\")\n block_name = block_name.replace(\"*\", \"%\")\n logical_file_name = logical_file_name.replace(\"*\", \"%\")\n origin_site_name = origin_site_name.replace(\"*\", \"%\")\n #\n\tif isinstance(min_cdate, basestring) and ('*' in min_cdate or '%' in min_cdate):\n min_cdate = 0\n else:\n try:\n min_cdate = int(min_cdate)\n except:\n dbsExceptionHandler(\"dbsException-invalid-input\", \"invalid input for min_cdate\")\n #\n if isinstance(max_cdate, basestring) and ('*' in max_cdate or '%' in max_cdate):\n max_cdate = 0\n else:\n try:\n max_cdate = int(max_cdate)\n except:\n dbsExceptionHandler(\"dbsException-invalid-input\", \"invalid input for max_cdate\")\n #\n if isinstance(min_ldate, basestring) and ('*' in min_ldate or '%' in min_ldate):\n min_ldate = 0\n else:\n try:\n min_ldate = int(min_ldate)\n except:\n dbsExceptionHandler(\"dbsException-invalid-input\", \"invalid input for max_cdate\")\n #\n\tif isinstance(max_ldate, basestring) and ('*' in max_ldate or '%' in max_ldate):\n max_ldate = 0\n else:\n try:\n max_ldate = int(max_ldate)\n except:\n dbsExceptionHandler(\"dbsException-invalid-input\", \"invalid input for max_ldate\")\n #\n if isinstance(cdate, basestring) and ('*' in cdate or '%' in cdate):\n cdate = 0\n else:\n try:\n cdate = int(cdate)\n except:\n dbsExceptionHandler(\"dbsException-invalid-input\", \"invalid input for cdate\")\n #\n if isinstance(cdate, basestring) and ('*' in ldate or '%' in ldate):\n ldate = 0\n else:\n try:\n ldate = int(ldate)\n except:\n dbsExceptionHandler(\"dbsException-invalid-input\", \"invalid input for ldate\")\n #\n detail = detail in (True, 1, \"True\", \"1\", 'true')\n try:\n b= self.dbsBlock.listBlocks(dataset, block_name, data_tier_name, origin_site_name, logical_file_name,\n run_num, min_cdate, max_cdate, min_ldate, max_ldate, cdate, ldate, open_for_writing, detail)\n\t #for item in b:\n\t\t#yield item\n\t return b\t\n\texcept HTTPError:\n\t raise\t\n\texcept dbsException as de:\n dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)\n except Exception as ex:\n sError = \"DBSReaderModel/listBlocks. %s\\n. Exception trace: \\n %s\" \\\n % (ex, traceback.format_exc())\n dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)",
"async def list_blocks(self, request):\n \"\"\"Fetches list of blocks from validator, optionally filtered by id.\n\n Request:\n query:\n - head: The id of the block to use as the head of the chain\n - id: Comma separated list of block ids to include in results\n\n Response:\n data: JSON array of fully expanded Block objects\n head: The head used for this query (most recent if unspecified)\n link: The link to this exact query, including head block\n paging: Paging info and nav, like total resources and a next link\n \"\"\"\n paging_controls = self._get_paging_controls(request)\n validator_query = client_block_pb2.ClientBlockListRequest(\n head_id=self._get_head_id(request),\n block_ids=self._get_filter_ids(request),\n sorting=self._get_sorting_message(request, \"block_num\"),\n paging=self._make_paging_message(paging_controls))\n\n response = await self._query_validator(\n Message.CLIENT_BLOCK_LIST_REQUEST,\n client_block_pb2.ClientBlockListResponse,\n validator_query)\n\n return self._wrap_paginated_response(\n request=request,\n response=response,\n controls=paging_controls,\n data=[self._expand_block(b) for b in response['blocks']])",
"def titleize(&block)\n title_parts = []\n order = 0\n parts.each do |part|\n\n if block_given?\n response = yield(part.first,order)\n if response != false\n if response == nil\n title_parts << part.first\n else\n title_parts << response\n end\n end\n else\n title_parts << part.first\n end\n order += 1\n end\n title_parts.join(title_separator)\n end",
"def api_block_type_list\n arr = BlockType.where(\"parent_id is null and share = ?\", true).reorder(:name).all.collect do |bt|\n { 'name' => bt.name, 'description' => bt.description }\n end \n render :json => arr \n end",
"def default_blocks(self):\n \"\"\"\n Return a list of default block tuples (appname.ModelName, verbose name).\n\n Next to the dropdown list of block types, a small number of common blocks which are\n frequently used can be added immediately to a column with one click. This method defines\n the list of default blocks.\n \"\"\"\n # Use the block list provided by settings if it's defined\n block_list = getattr(settings, 'GLITTER_DEFAULT_BLOCKS', None)\n\n if block_list is not None:\n return block_list\n\n # Try and auto fill in default blocks if the apps are installed\n block_list = []\n\n for block in GLITTER_FALLBACK_BLOCKS:\n app_name, model_name = block.split('.')\n\n try:\n model_class = apps.get_model(app_name, model_name)\n verbose_name = capfirst(model_class._meta.verbose_name)\n block_list.append((block, verbose_name))\n except LookupError:\n # Block isn't installed - don't add it as a quick add default\n pass\n\n return block_list",
"def listSites(self, block_name=\"\", site_name=\"\"):\n \"\"\"\n Returns sites.\n \"\"\"\n try:\n conn = self.dbi.connection()\n if block_name:\n result = self.blksitelist.execute(conn, block_name)\n else:\n result = self.sitelist.execute(conn, site_name)\n return result\n finally:\n if conn:\n conn.close()",
"public function listAvailableBlockComponentAction()\n {\n $this->denyAccessUnlessGranted(ContributionActionInterface::READ, BlockInterface::ENTITY_TYPE);\n\n $siteId = $this->get('open_orchestra_backoffice.context_backoffice_manager')->getSiteId();\n $site = $this->get('open_orchestra_model.repository.site')->findOneBySiteId($siteId);\n\n $availableBlocks = $site->getBlocks();\n\n return $this->get('open_orchestra_api.transformer_manager')->transform('block_component_collection', $availableBlocks);\n }",
"def listBlocksOrigin(self, origin_site_name=\"\", dataset=\"\", block_name=\"\"):\n \"\"\"\n This is the API to list all the blocks/datasets first generated in the site called origin_site_name,\n if origin_site_name is provided w/ no wildcards allow. If a fully spelled dataset is provided, then it will\n only list the blocks first generated from origin_site_name under the given dataset.\n \"\"\"\n if not (dataset or block_name):\n dbsExceptionHandler(\"dbsException-invalid-input\",\n \"DBSBlock/listBlocksOrigin: dataset or block_name must be provided.\")\n if re.search(\"['%', '*']\", dataset) or re.search(\"['%', '*']\", block_name):\n dbsExceptionHandler(\"dbsException-invalid-input\",\n \"DBSBlock/listBlocksOrigin: dataset or block_name with wildcard is not supported.\")\n try:\n conn = self.dbi.connection()\n result = self.bkOriginlist.execute(conn, origin_site_name, dataset, block_name)\n return result\n finally:\n if conn:\n conn.close()",
"def getSrcBlocks(self, url, dataset=\"\", block=\"\"):\n \"\"\"\n Need to list all blocks of the dataset and its parents starting from the top\n For now just list the blocks from this dataset.\n Client type call...\n \"\"\"\n if block:\n params={'block_name':block, 'open_for_writing':0}\n elif dataset:\n params={'dataset':dataset, 'open_for_writing':0}\n else:\n m = 'DBSMigration: Invalid input. Either block or dataset name has to be provided'\n e = 'DBSMigrate/getSrcBlocks: Invalid input. Either block or dataset name has to be provided'\n dbsExceptionHandler('dbsException-invalid-input2', m, self.logger.exception, e )\n\n return cjson.decode(self.callDBSService(url, 'blocks', params, {}))",
"def blocks(username, options = {})\n options[:oauth_token] = @access_token\n query = build_query_string(options)\n path = \"/users/#{username}/blocks\"\n url = @base_url + path + query\n\n get(url)\n end"
] |
[
0.7032273411750793,
0.6791521310806274,
0.6765880584716797,
0.6721671223640442,
0.671200692653656,
0.6706481575965881,
0.6672818064689636,
0.6650665402412415,
0.6645358800888062,
0.6555565595626831,
0.6552610397338867,
0.6548682451248169
] |
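add_block_options returns a list of (category, [(url, title), ...]) pairs, with categories sorted alphabetically and titles sorted within each category; this is exactly the grouped-choices shape Django's select widgets expect. A sketch of the shape with hypothetical entries:

block_choices = [
    ('Common', [
        ('/blockadmin/glitter_image/imageblock/add/1/?column=main&top=True', 'Image'),
        ('/blockadmin/glitter_redactor/redactor/add/1/?column=main&top=True', 'Text'),
    ]),
    ('Forms', [
        ('/blockadmin/glitter_forms/contactform/add/1/?column=main&top=True', 'Contact form'),
    ]),
]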
Return a list of default block tuples (appname.ModelName, verbose name).
Next to the dropdown list of block types, a small number of common blocks which are
frequently used can be added immediately to a column with one click. This method defines
the list of default blocks.
|
def default_blocks(self):
"""
Return a list of default block tuples (appname.ModelName, verbose name).
Next to the dropdown list of block types, a small number of common blocks which are
frequently used can be added immediately to a column with one click. This method defines
the list of default blocks.
"""
# Use the block list provided by settings if it's defined
block_list = getattr(settings, 'GLITTER_DEFAULT_BLOCKS', None)
if block_list is not None:
return block_list
# Try and auto fill in default blocks if the apps are installed
block_list = []
for block in GLITTER_FALLBACK_BLOCKS:
app_name, model_name = block.split('.')
try:
model_class = apps.get_model(app_name, model_name)
verbose_name = capfirst(model_class._meta.verbose_name)
block_list.append((block, verbose_name))
except LookupError:
# Block isn't installed - don't add it as a quick add default
pass
return block_list
|
[
"def get_default_blocks(self, top=False):\n \"\"\"\n Return a list of column default block tuples (URL, verbose name).\n\n Used for quick add block buttons.\n \"\"\"\n default_blocks = []\n\n for block_model, block_name in self.glitter_page.default_blocks:\n block = apps.get_model(block_model)\n base_url = reverse('block_admin:{}_{}_add'.format(\n block._meta.app_label, block._meta.model_name,\n ), kwargs={\n 'version_id': self.glitter_page.version.id,\n })\n block_qs = {\n 'column': self.name,\n 'top': top,\n }\n block_url = '{}?{}'.format(base_url, urlencode(block_qs))\n block_text = capfirst(force_text(block._meta.verbose_name))\n\n default_blocks.append((block_url, block_text))\n\n return default_blocks",
"def defaults(self):\n \"\"\" component default component\n\n .. Note:: default components is just an indication for user and the\n views, except if the Block is required. If required then default is\n selected if nothing explisitely selected.\n \"\"\"\n default = self._defaults\n # if require and no default, the first component as default\n if not len(default) and self.required and len(self._components):\n default = [six.next(six.itervalues(self._components)).name]\n return default",
"def add_block_options(self, top):\n \"\"\"\n Return a list of URLs and titles for blocks which can be added to this column.\n\n All available blocks are grouped by block category.\n \"\"\"\n from .blockadmin import blocks\n\n block_choices = []\n\n # Group all block by category\n for category in sorted(blocks.site.block_list):\n category_blocks = blocks.site.block_list[category]\n category_choices = []\n\n for block in category_blocks:\n base_url = reverse('block_admin:{}_{}_add'.format(\n block._meta.app_label, block._meta.model_name,\n ), kwargs={\n 'version_id': self.glitter_page.version.id,\n })\n block_qs = {\n 'column': self.name,\n 'top': top,\n }\n block_url = '{}?{}'.format(base_url, urlencode(block_qs))\n block_text = capfirst(force_text(block._meta.verbose_name))\n\n category_choices.append((block_url, block_text))\n\n category_choices = sorted(category_choices, key=lambda x: x[1])\n block_choices.append((category, category_choices))\n\n return block_choices",
"def default(self, block, name):\n \"\"\"\n Ask the kvs for the default (default implementation which other classes may override).\n\n :param block: block containing field to default\n :type block: :class:`~xblock.core.XBlock`\n :param name: name of the field to default\n \"\"\"\n return self._kvs.default(self._key(block, name))",
"def selected(self):\n \"\"\" returns the list of selected component names.\n\n if no component selected return the one marked as default.\n If the block is required and no component where indicated as default,\n then the first component is selected.\n \"\"\"\n selected = self._selected\n if len(self._selected) == 0 and self.required:\n # nothing has been selected yet BUT the component is required\n selected = self.defaults\n return selected",
"def blocks_name_declaration(mixin_node)\n block_name_declaration(DEFAULT_BLOCK_NAME)\n\n mixin_node.params.select { |n| n.type == :mixin_block_param }.each do |param|\n block_name_declaration(param.value)\n end\n end",
"def all_blocks(self):\n\n status = OrderedDict.fromkeys(parameters.BLOCKS.keys())\n status['13AE'] = ['discovery complete', '50', '24.05']\n status['13AO'] = ['discovery complete', '36', '24.40']\n status['13BL'] = ['discovery complete', '79', '24.48']\n status['14BH'] = ['discovery running', '-', '-']\n status['15AP'] = ['discovery running', '-', '-']\n status['15AM'] = ['discovery running', '-', '-']\n\n '''Overview tal table is expecting:\n ID observations processing status discoveries m_r 40%\n '''\n bks = []\n for block in status.iterkeys():\n bk = [block, self.num_block_images(block)] # if set in the .fromkeys(), doesn't give a unique list\n if status[block] is not None:\n bk = bk + status[block]\n else:\n bk = bk + ['awaiting triplets', '-', '-']\n bks.append(bk)\n\n retval = {'blocks': bks, 'status': status}\n\n return retval",
"public function get_default_blocks() {\n global $CFG;\n if (isset($CFG->defaultblocks)) {\n return blocks_parse_default_blocks_list($CFG->defaultblocks);\n }\n $blocknames = array(\n BLOCK_POS_LEFT => array(),\n BLOCK_POS_RIGHT => array()\n );\n return $blocknames;\n }",
"def add_block_widget(self, top=False):\n \"\"\"\n Return a select widget for blocks which can be added to this column.\n \"\"\"\n widget = AddBlockSelect(attrs={\n 'class': 'glitter-add-block-select',\n }, choices=self.add_block_options(top=top))\n\n return widget.render(name='', value=None)",
"def block_type(self):\n \"\"\" This gets display on the block header. \"\"\"\n return capfirst(force_text(\n self.content_block.content_type.model_class()._meta.verbose_name\n ))",
"public function get_default_blocks() {\n global $CFG;\n $formatconfig = $CFG->dirroot.'/course/format/'.$this->format.'/config.php';\n $format = array(); // initialize array in external file\n if (is_readable($formatconfig)) {\n include($formatconfig);\n }\n if (!empty($format['defaultblocks'])) {\n return blocks_parse_default_blocks_list($format['defaultblocks']);\n }\n return parent::get_default_blocks();\n }",
"def get_default_field_names(self, declared_fields, model_info):\n \"\"\"\n Return the default list of field names that will be used if the\n `Meta.fields` option is not specified.\n \"\"\"\n return (\n [model_info.pk.name] +\n list(declared_fields.keys()) +\n list(model_info.fields.keys()) +\n list(model_info.forward_relations.keys())\n )"
] |
[
0.8696724772453308,
0.7356063723564148,
0.7220466732978821,
0.6898665428161621,
0.6882414817810059,
0.6722919940948486,
0.6687453985214233,
0.6686659455299377,
0.6667621731758118,
0.6619327068328857,
0.6586848497390747,
0.6583972573280334
] |
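The settings override that default_blocks checks first is a plain list of ('app_label.ModelName', verbose name) tuples. A minimal sketch of what it might look like in a project's settings.py; the app and model names here are hypothetical:

# settings.py
GLITTER_DEFAULT_BLOCKS = [
    ('glitter_redactor.Redactor', 'Text'),
    ('glitter_image.ImageBlock', 'Image'),
]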
Returns a boolean if the current user has permission to add another object of the same
type which is being viewed/edited.
|
def has_add_permission(self):
"""
Returns a boolean if the current user has permission to add another object of the same
type which is being viewed/edited.
"""
has_permission = False
if self.user is not None:
# We don't check for the object level permission - as the add permission doesn't make
# sense on a per object level here.
has_permission = self.user.has_perm(
'{}.add_{}'.format(self.opts.app_label, self.opts.model_name)
)
return has_permission
|
[
"def hasUserAddEditPermission(self):\n \"\"\"\n Checks if the current user has privileges to access to the editing view.\n From Jira LIMS-1549:\n - Creation/Edit: Lab manager, Client Contact, Lab Clerk, Client Contact (for Client-specific SRTs)\n :returns: True/False\n \"\"\"\n mtool = getToolByName(self, 'portal_membership')\n checkPermission = mtool.checkPermission\n # In bika_samplinground_workflow.csv there are defined the ModifyPortalContent statements. There is said that\n # client has ModifyPortalContent permission enabled, so here we have to check if the client satisfy the\n # condition wrote in the function's description\n if (checkPermission(ModifyPortalContent, self) or checkPermission(AddPortalContent, self)) \\\n and 'Client' in api.user.get_current().getRoles():\n # Checking if the current user is a current client's contact\n userID = api.user.get_current().id\n contact_objs = self.getContacts()\n contact_ids = [obj.getUsername() for obj in contact_objs]\n if userID in contact_ids:\n return True\n else:\n return False\n return checkPermission(ModifyPortalContent, self) or checkPermission(AddPortalContent, self)",
"def has_add_permission(self, request):\n \"\"\"\n Returns True if the requesting user is allowed to add an object, False otherwise.\n \"\"\"\n perm_string = '%s.add_%s' % (self.model._meta.app_label,\n self.model._meta.object_name.lower()\n )\n return request.user.has_perm(perm_string)",
"def has_change_permission(self):\n \"\"\"\n Returns a boolean if the current user has permission to change the current object being\n viewed/edited.\n \"\"\"\n has_permission = False\n\n if self.user is not None:\n # We check for the object level permission here, even though by default the Django\n # admin doesn't. If the Django ModelAdmin is extended to allow object level\n # permissions - then this will work as expected.\n permission_name = '{}.change_{}'.format(self.opts.app_label, self.opts.model_name)\n has_permission = (\n self.user.has_perm(permission_name) or\n self.user.has_perm(permission_name, obj=self.obj)\n )\n\n return has_permission",
"def has_edit_permission(self, request, obj=None, version=None):\n \"\"\"\n Returns a boolean if the user in the request has edit permission for the object.\n\n Can also be passed a version object to check if the user has permission to edit a version\n of the object (if they own it).\n \"\"\"\n # Has the edit permission for this object type\n permission_name = '{}.edit_{}'.format(self.opts.app_label, self.opts.model_name)\n has_permission = request.user.has_perm(permission_name)\n\n if obj is not None and has_permission is False:\n has_permission = request.user.has_perm(permission_name, obj=obj)\n\n if has_permission and version is not None:\n # Version must not be saved, and must belong to this user\n if version.version_number or version.owner != request.user:\n has_permission = False\n\n return has_permission",
"def get_can_edit(self, obj):\n \"\"\" returns true if user has permission to edit, false otherwise \"\"\"\n view = self.context.get('view')\n request = copy(self.context.get('request'))\n request._method = 'PUT'\n try:\n view.check_object_permissions(request, obj)\n except (PermissionDenied, NotAuthenticated):\n return False\n else:\n return True",
"def has_add_permission(self, request):\n \"\"\"\n Returns True if the given request has permission to add an object.\n Can be overriden by the user in subclasses.\n \"\"\"\n opts = self.opts\n return request.user.has_perm(opts.app_label + '.' + opts.get_add_permission())",
"def is_editable(self, request):\n \"\"\"\n Restrict in-line editing to the objects's owner and superusers.\n \"\"\"\n return request.user.is_superuser or request.user.id == self.user_id",
"def has_glitter_edit_permission(self, request, obj):\n \"\"\"\n Return a boolean if a user has edit access to the glitter object/page this object is on.\n \"\"\"\n\n # We're testing for the edit permission here with the glitter object - not the current\n # object, not the change permission. Once a user has edit access to an object they can edit\n # all content on it.\n permission_name = '{}.edit_{}'.format(\n obj._meta.app_label, obj._meta.model_name,\n )\n has_permission = (\n request.user.has_perm(permission_name) or\n request.user.has_perm(permission_name, obj=obj)\n )\n return has_permission",
"def get_allow_future(self):\n \"\"\"\n Only superusers and users with the permission can edit the post.\n \"\"\"\n qs = self.get_queryset()\n post_edit_permission = '{}.edit_{}'.format(\n qs.model._meta.app_label, qs.model._meta.model_name\n )\n if self.request.user.has_perm(post_edit_permission):\n return True\n return False",
"def user_can_edit_newsitem(user, NewsItem):\n \"\"\"\n Check if the user has permission to edit a particular NewsItem type.\n \"\"\"\n for perm in format_perms(NewsItem, ['add', 'change', 'delete']):\n if user.has_perm(perm):\n return True\n\n return False",
"def has_permission(self, request, view):\n \"\"\"\n Returns true if the current request is by the user themselves.\n\n Note: a 404 is returned for non-staff instead of a 403. This is to prevent\n users from being able to detect the existence of accounts.\n \"\"\"\n url_username = request.parser_context.get('kwargs', {}).get('username', '')\n if request.user.username.lower() != url_username.lower():\n if request.user.is_staff:\n return False # staff gets 403\n raise Http404()\n return True",
"def has_object_permission(self, request, view, obj):\n \"\"\"\n Allow staff or superusers, and the owner of the object itself.\n \"\"\"\n user = request.user\n if not user.is_authenticated:\n return False\n elif user.is_staff or user.is_superuser:\n return True\n return user == obj.user"
] |
[
0.8009886741638184,
0.8001337647438049,
0.78636234998703,
0.7730831503868103,
0.7683552503585815,
0.767172634601593,
0.7458767294883728,
0.735199511051178,
0.7289831638336182,
0.7264009118080139,
0.7216977477073669,
0.7203620076179504
] |
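The permission string in has_add_permission follows Django's '<app_label>.<codename>' convention for the default model permissions (add/change/delete, plus view on Django >= 2.1). A minimal sketch of the add check in isolation, where user is any django.contrib.auth user and Page is a hypothetical model in an app labelled 'glitter':

perm = '{}.add_{}'.format('glitter', 'page')  # -> 'glitter.add_page'
if user.has_perm(perm):
    # The add permission is inherently global: there is no object to
    # check against, because the object does not exist yet.
    ...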
Returns a boolean if the current user has permission to change the current object being
viewed/edited.
|
def has_change_permission(self):
"""
Returns a boolean if the current user has permission to change the current object being
viewed/edited.
"""
has_permission = False
if self.user is not None:
# We check for the object level permission here, even though by default the Django
# admin doesn't. If the Django ModelAdmin is extended to allow object level
# permissions - then this will work as expected.
permission_name = '{}.change_{}'.format(self.opts.app_label, self.opts.model_name)
has_permission = (
self.user.has_perm(permission_name) or
self.user.has_perm(permission_name, obj=self.obj)
)
return has_permission
|
[
"def has_edit_permission(self, request, obj=None, version=None):\n \"\"\"\n Returns a boolean if the user in the request has edit permission for the object.\n\n Can also be passed a version object to check if the user has permission to edit a version\n of the object (if they own it).\n \"\"\"\n # Has the edit permission for this object type\n permission_name = '{}.edit_{}'.format(self.opts.app_label, self.opts.model_name)\n has_permission = request.user.has_perm(permission_name)\n\n if obj is not None and has_permission is False:\n has_permission = request.user.has_perm(permission_name, obj=obj)\n\n if has_permission and version is not None:\n # Version must not be saved, and must belong to this user\n if version.version_number or version.owner != request.user:\n has_permission = False\n\n return has_permission",
"def get_can_edit(self, obj):\n \"\"\" returns true if user has permission to edit, false otherwise \"\"\"\n view = self.context.get('view')\n request = copy(self.context.get('request'))\n request._method = 'PUT'\n try:\n view.check_object_permissions(request, obj)\n except (PermissionDenied, NotAuthenticated):\n return False\n else:\n return True",
"def has_glitter_edit_permission(self, request, obj):\n \"\"\"\n Return a boolean if a user has edit access to the glitter object/page this object is on.\n \"\"\"\n\n # We're testing for the edit permission here with the glitter object - not the current\n # object, not the change permission. Once a user has edit access to an object they can edit\n # all content on it.\n permission_name = '{}.edit_{}'.format(\n obj._meta.app_label, obj._meta.model_name,\n )\n has_permission = (\n request.user.has_perm(permission_name) or\n request.user.has_perm(permission_name, obj=obj)\n )\n return has_permission",
"def has_add_permission(self):\n \"\"\"\n Returns a boolean if the current user has permission to add another object of the same\n type which is being viewed/edited.\n \"\"\"\n has_permission = False\n\n if self.user is not None:\n # We don't check for the object level permission - as the add permission doesn't make\n # sense on a per object level here.\n has_permission = self.user.has_perm(\n '{}.add_{}'.format(self.opts.app_label, self.opts.model_name)\n )\n\n return has_permission",
"def hasUserAddEditPermission(self):\n \"\"\"\n Checks if the current user has privileges to access to the editing view.\n From Jira LIMS-1549:\n - Creation/Edit: Lab manager, Client Contact, Lab Clerk, Client Contact (for Client-specific SRTs)\n :returns: True/False\n \"\"\"\n mtool = getToolByName(self, 'portal_membership')\n checkPermission = mtool.checkPermission\n # In bika_samplinground_workflow.csv there are defined the ModifyPortalContent statements. There is said that\n # client has ModifyPortalContent permission enabled, so here we have to check if the client satisfy the\n # condition wrote in the function's description\n if (checkPermission(ModifyPortalContent, self) or checkPermission(AddPortalContent, self)) \\\n and 'Client' in api.user.get_current().getRoles():\n # Checking if the current user is a current client's contact\n userID = api.user.get_current().id\n contact_objs = self.getContacts()\n contact_ids = [obj.getUsername() for obj in contact_objs]\n if userID in contact_ids:\n return True\n else:\n return False\n return checkPermission(ModifyPortalContent, self) or checkPermission(AddPortalContent, self)",
"def is_editable(self, request):\n \"\"\"\n Restrict in-line editing to the objects's owner and superusers.\n \"\"\"\n return request.user.is_superuser or request.user.id == self.user_id",
"def has_change_permission(self, request, obj=None):\n \"\"\"\n Returns True if the given request has permission to change the given\n Django model instance, the default implementation doesn't examine the\n `obj` parameter.\n\n Can be overriden by the user in subclasses. In such case it should\n return True if the given request has permission to change the `obj`\n model instance. If `obj` is None, this should return True if the given\n request has permission to change *any* object of the given type.\n \"\"\"\n opts = self.opts\n return request.user.has_perm(opts.app_label + '.' + opts.get_change_permission(), obj)",
"def has_update_permission(self, request, obj):\n \"\"\"\n Returns True if the requesting user is allowed to update the given object, False otherwise.\n \"\"\"\n perm_string = '%s.change_%s' % (self.model._meta.app_label,\n self.model._meta.object_name.lower()\n )\n return request.user.has_perm(perm_string)",
"def can_edit(self, user=None, request=None):\n \"\"\"\n Define if a user can edit or not the instance, according to his account\n or the request.\n \"\"\"\n can = False\n if request and not self.owner:\n if (getattr(settings, \"UMAP_ALLOW_ANONYMOUS\", False)\n and self.is_anonymous_owner(request)):\n can = True\n if self.edit_status == self.ANONYMOUS:\n can = True\n elif not user.is_authenticated:\n pass\n elif user == self.owner:\n can = True\n elif self.edit_status == self.EDITORS and user in self.editors.all():\n can = True\n return can",
"def has_object_permission(self, request, view, obj):\n \"\"\"Check object permissions.\"\"\"\n # admins can do anything\n if request.user.is_superuser:\n return True\n\n # `share` permission is required for editing permissions\n if 'permissions' in view.action:\n self.perms_map['POST'] = ['%(app_label)s.share_%(model_name)s']\n\n if view.action in ['add_data', 'remove_data']:\n self.perms_map['POST'] = ['%(app_label)s.add_%(model_name)s']\n\n if hasattr(view, 'get_queryset'):\n queryset = view.get_queryset()\n else:\n queryset = getattr(view, 'queryset', None)\n\n assert queryset is not None, (\n 'Cannot apply DjangoObjectPermissions on a view that '\n 'does not set `.queryset` or have a `.get_queryset()` method.'\n )\n\n model_cls = queryset.model\n user = request.user\n\n perms = self.get_required_object_permissions(request.method, model_cls)\n\n if not user.has_perms(perms, obj) and not AnonymousUser().has_perms(perms, obj):\n # If the user does not have permissions we need to determine if\n # they have read permissions to see 403, or not, and simply see\n # a 404 response.\n\n if request.method in permissions.SAFE_METHODS:\n # Read permissions already checked and failed, no need\n # to make another lookup.\n raise Http404\n\n read_perms = self.get_required_object_permissions('GET', model_cls)\n if not user.has_perms(read_perms, obj):\n raise Http404\n\n # Has read permissions.\n return False\n\n return True",
"def is_editable(obj, request):\n \"\"\"\n Returns ``True`` if the object is editable for the request. First\n check for a custom ``editable`` handler on the object, otherwise\n use the logged in user and check change permissions for the\n object's model.\n \"\"\"\n if hasattr(obj, \"is_editable\"):\n return obj.is_editable(request)\n else:\n codename = get_permission_codename(\"change\", obj._meta)\n perm = \"%s.%s\" % (obj._meta.app_label, codename)\n return (request.user.is_authenticated() and\n has_site_permission(request.user) and\n request.user.has_perm(perm))",
"def can_edit(self, user):\n \"\"\"Return whether or not `user` can make changes to the class.\"\"\"\n return user.is_admin or not self.is_locked and self in user.admin_for"
] |
[
0.8393487930297852,
0.8247575759887695,
0.8216869831085205,
0.8105201125144958,
0.8026542067527771,
0.8008126020431519,
0.8006837368011475,
0.7947006225585938,
0.784096360206604,
0.7820011973381042,
0.7794312238693237,
0.7788248062133789
] |
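One detail worth knowing about the change check above: Django's bundled ModelBackend deliberately returns no permissions when an obj argument is passed, so the object-level half of the OR can only succeed when an object-permission backend (django-guardian is the common choice) is installed. A sketch of the pattern under that assumption, with opts, user, obj and the 'change_page' grant all standing in as hypothetical names:

perm = '{}.change_{}'.format(opts.app_label, opts.model_name)

# First check: Django's global model permission.
# Second check: per-object permission; always False under the default
# ModelBackend, but honoured by backends such as django-guardian.
allowed = user.has_perm(perm) or user.has_perm(perm, obj=obj)

# With django-guardian, a per-object grant looks like:
#   from guardian.shortcuts import assign_perm
#   assign_perm('change_page', user, page)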
Get correct embed url for Youtube or Vimeo.
|
def get_embed_url(self):
""" Get correct embed url for Youtube or Vimeo. """
embed_url = None
youtube_embed_url = 'https://www.youtube.com/embed/{}'
vimeo_embed_url = 'https://player.vimeo.com/video/{}'
# Get video ID from url.
if re.match(YOUTUBE_URL_RE, self.url):
embed_url = youtube_embed_url.format(re.match(YOUTUBE_URL_RE, self.url).group(2))
if re.match(VIMEO_URL_RE, self.url):
embed_url = vimeo_embed_url.format(re.match(VIMEO_URL_RE, self.url).group(3))
return embed_url
|
[
"def get_embed_url(self, targeting=None, recirc=None):\n \"\"\"gets a canonical path to an embedded iframe of the video from the hub\n\n :return: the path to create an embedded iframe of the video\n :rtype: str\n \"\"\"\n url = getattr(settings, \"VIDEOHUB_EMBED_URL\", self.DEFAULT_VIDEOHUB_EMBED_URL)\n url = url.format(self.id)\n if targeting is not None:\n for k, v in sorted(targeting.items()):\n url += '&{0}={1}'.format(k, v)\n if recirc is not None:\n url += '&recirc={0}'.format(recirc)\n return url",
"def youtube_id(self):\n \"\"\"Extract and return Youtube video id\"\"\"\n if not self.video_embed:\n return ''\n m = re.search(r'/embed/([A-Za-z0-9\\-=_]*)', self.video_embed)\n if m:\n return m.group(1)\n return ''",
"def youtube_id(self):\n \"\"\"Extract and return Youtube video id\"\"\"\n m = re.search(r'/embed/([A-Za-z0-9\\-=_]*)', self.embed)\n if m:\n return m.group(1)\n return ''",
"protected function normalizeUrl()\n {\n if (preg_match('~\\.com/embed/~i', $this->url))\n $this->url = new \\Embera\\Url(str_ireplace('/embed/', '/videos/', $this->url));\n }",
"public static function get_video_embed_url( $url ) {\n\t\tif ( strpos( $url, 'youtube' ) !== false && strpos( $url, 'watch' ) !== false ) {\n\t\t\t$parts = wp_parse_url( $url );\n\n\t\t\tif ( is_array( $parts ) && isset( $parts['query'] ) ) {\n\t\t\t\tparse_str( $parts['query'] );\n\t\t\t\tif ( isset( $v ) ) {\n\t\t\t\t\treturn 'https://www.youtube.com/embed/' . $v;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif ( strpos( $url, 'vimeo' ) !== false && strpos( $url, 'player' ) === false ) {\n\t\t\t$parts = wp_parse_url( $url );\n\n\t\t\tif ( is_array( $parts ) && isset( $parts['path'] ) ) {\n\t\t\t\treturn 'https://player.vimeo.com/video' . $parts['path'];\n\t\t\t}\n\t\t}\n\n\t\treturn $url;\n\t}",
"public function getEmbedURL($encode = true)\n {\n $url = '';\n\n // PROTOCOL\n if ($this->getHTTPS()) {\n $url .= 'https://';\n } else {\n $url .= 'http://';\n }\n\n // DOMAIN\n if ($this->getCookies()) {\n $url .= 'www.youtube.com';\n } else {\n $url .= 'www.youtube-nocookie.com';\n }\n\n // PATH\n if ($this->isVideo()) {\n $url .= '/v/';\n } else {\n $url .= '/p/';\n }\n\n // ID\n if ($this->isVideo()) {\n $url .= $this->getID();\n } else {\n $url .= substr($this->getID(), 2);\n } // Playlists start with PL but YouTube doesn't want that\n\n // Build Query String\n $query = [];\n $query['version'] = 2;\n if ($this->getAutoplay()) {\n $query['autoplay'] = 1;\n }\n if ($this->getLoop()) {\n $query['loop'] = 1;\n }\n if ($this->getJSAPI()) {\n $query['enablejsapi'] = 1;\n }\n if ($this->isHD()) {\n $query['hd'] = 1;\n }\n $query['theme'] = $this->getTheme();\n\n $seperator = $encode ? '&' : '&';\n $url .= '?'.http_build_query($query, $seperator);\n\n return $url;\n }",
"public function getEmbedUrl($url, $params = [] )\n {\n // looks like there are, now let's only do this for YouTube and Vimeo\n if($this->getInfo($url)->type == 'video' && ($this->isYouTube($url) || $this->isVimeo($url)))\n {\n $parameters = '';\n\n // check if theree are any parameters passed along\n if (!empty($params)) {\n \n $parameters .= '?';\n $i = 0;\n foreach ($params as $k=>$v) {\n if (($parameters !== null) && ($i !== 0)) {\n $parameters .= '&';\n }\n $parameters .= \"{$k}={$v}\";\n $i++;\n }\n }\n \n if ($this->isYouTube($url)) {\n $id = $this->getYouTubeId($url);\n \n $embedUrl = '//www.youtube.com/embed/' . $id . $parameters;\n return $embedUrl;\n } else if ($this->isVimeo($url)) {\n $id = $this->getVimeoId($url);\n \n $embedUrl = '//player.vimeo.com/video/' . $id . $parameters;\n return $embedUrl;\n }\n }\n else\n {\n // return empty string\n return '';\n }\n }",
"def _get(self, version, method, url_or_urls, **kwargs):\n \"\"\"\n _get makes the actual call to api.embed.ly\n \"\"\"\n if not url_or_urls:\n raise ValueError('%s requires a url or a list of urls given: %s' %\n (method.title(), url_or_urls))\n\n # a flag we can use instead of calling isinstance() all the time\n multi = isinstance(url_or_urls, list)\n\n # throw an error early for too many URLs\n if multi and len(url_or_urls) > 20:\n raise ValueError('Embedly accepts only 20 urls at a time. Url '\n 'Count:%s' % len(url_or_urls))\n\n query = ''\n\n key = kwargs.get('key', self.key)\n\n # make sure that a key was set on the client or passed in\n if not key:\n raise ValueError('Requires a key. None given: %s' % key)\n\n kwargs['key'] = key\n\n query += urlencode(kwargs)\n\n if multi:\n query += '&urls=%s&' % ','.join([quote(url) for url in url_or_urls])\n else:\n query += '&url=%s' % quote(url_or_urls)\n\n url = 'http://api.embed.ly/%s/%s?%s' % (version, method, query)\n\n http = httplib2.Http(timeout=self.timeout)\n\n headers = {'User-Agent': self.user_agent,\n 'Connection': 'close'}\n\n resp, content = http.request(url, headers=headers)\n\n if resp['status'] == '200':\n data = json.loads(content.decode('utf-8'))\n\n if kwargs.get('raw', False):\n data['raw'] = content\n else:\n data = {'type': 'error',\n 'error': True,\n 'error_code': int(resp['status'])}\n\n if multi:\n return map(lambda url, data: Url(data, method, url),\n url_or_urls, data)\n\n return Url(data, method, url_or_urls)",
"def get_url(videoobj,liveconvert = false)\n raise NoMethodError, \"Please pass a VideoObject\" if not videoobj.is_a? VideoObject\n \n begin\n if liveconvert\n request(\"livePlaybackService\",\"initLivePlayback\",[conversion_settings(videoobj)])['result']['contentURL']\n else\n request(\"playbackService\",\"initPlayback\",[videoobj.location[1..-1]])['result']['contentURL']\n end\n rescue NoMethodError\n raise RuntimeError, \"This video does not exist\"\n end\n end",
"function videopress(str) {\n\tvar idRegex;\n\tif (str.indexOf('embed') > -1) {\n\t\tidRegex = /embed\\/(\\w{8})/;\n\t\treturn str.match(idRegex)[1];\n\t}\n\n\tidRegex = /\\/v\\/(\\w{8})/;\n\n\tvar match = str.match(idRegex);\n\n\tif (match && match.length > 0) {\n\t\treturn str.match(idRegex)[1];\n\t}\n\treturn undefined;\n}",
"function getYoutubeID(url) {\n let videoID = '';\n url = url.replace(/(>|<)/gi, '').split(/(vi\\/|v=|\\/v\\/|youtu\\.be\\/|\\/embed\\/)/);\n if (url[2] !== undefined) {\n videoID = url[2].split(/[^0-9a-z_\\-]/i);\n videoID = videoID[0];\n } else {\n videoID = url;\n }\n return videoID;\n}",
"public function getEmbedUrl($url, $params = [])\n {\n return Template::raw(VideoEmbedder::$plugin->service->getEmbedUrl($url, $params));\n }"
] |
[
0.7843751311302185,
0.7744758725166321,
0.7676146030426025,
0.7616967558860779,
0.7429187297821045,
0.7418445944786072,
0.7278897762298584,
0.7228388786315918,
0.7214562892913818,
0.7194799184799194,
0.7156552076339722,
0.7140334844589233
] |
Set html field with correct iframe.
|
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
""" Set html field with correct iframe. """
if self.url:
iframe_html = '<iframe src="{}" frameborder="0" title="{}" allowfullscreen></iframe>'
self.html = iframe_html.format(
self.get_embed_url(),
self.title
)
return super().save(force_insert, force_update, using, update_fields)
|
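For context, a minimal sketch of the Django model these two methods could live on; the model name and field definitions are assumptions inferred from the attribute accesses (self.url, self.title, self.html) above.

from django.db import models

class Video(models.Model):  # hypothetical model name
    url = models.URLField()
    title = models.CharField(max_length=255)
    # Populated by save() with the embed iframe markup.
    html = models.TextField(blank=True, editable=False)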
[
"function (type, args, obj) {\n\n var height = args[0],\n el = this.element;\n\n Dom.setStyle(el, \"height\", height);\n this.cfg.refireEvent(\"iframe\");\n }",
"function () {\n var self = this, c = self.io.config, uri = c.uri, hostname = uri.hostname, iframe, iframeUri, iframeDesc = iframeMap[hostname];\n var proxy = PROXY_PAGE;\n if (c.xdr && c.xdr.subDomain && c.xdr.subDomain.proxy) {\n proxy = c.xdr.subDomain.proxy;\n }\n if (iframeDesc && iframeDesc.ready) {\n self.nativeXhr = XhrTransportBase.nativeXhr(0, iframeDesc.iframe.contentWindow);\n if (self.nativeXhr) {\n self.sendInternal();\n } else {\n LoggerManager.error('document.domain not set correctly!');\n }\n return;\n }\n if (!iframeDesc) {\n iframeDesc = iframeMap[hostname] = {};\n iframe = iframeDesc.iframe = doc.createElement('iframe');\n Dom.css(iframe, {\n position: 'absolute',\n left: '-9999px',\n top: '-9999px'\n });\n Dom.prepend(iframe, doc.body || doc.documentElement);\n iframeUri = {};\n iframeUri.protocol = uri.protocol;\n iframeUri.host = uri.host;\n iframeUri.pathname = proxy;\n iframe.src = url.stringify(iframeUri);\n } else {\n iframe = iframeDesc.iframe;\n }\n Event.on(iframe, 'load', _onLoad, self);\n }",
"function(id){\n // We can't use following code as the name attribute\n // won't be properly registered in IE6, and new window\n // on form submit will open\n // var iframe = document.createElement('iframe');\n // iframe.setAttribute('name', id);\n\n var iframe = qq.toElement('<iframe src=\"javascript:false;\" name=\"' + id + '\" />');\n // src=\"javascript:false;\" removes ie6 prompt on https\n\n iframe.setAttribute('id', id);\n\n iframe.style.display = 'none';\n document.body.appendChild(iframe);\n\n return iframe;\n }",
"def _repr_html_(self, **kwargs):\n \"\"\"Displays the Figure in a Jupyter notebook.\n\n \"\"\"\n html = self.render(**kwargs)\n html = \"data:text/html;charset=utf-8;base64,\" + base64.b64encode(html.encode('utf8')).decode('utf8') # noqa\n\n if self.height is None:\n iframe = (\n '<div style=\"width:{width};\">'\n '<div style=\"position:relative;width:100%;height:0;padding-bottom:{ratio};\">' # noqa\n '<iframe src=\"{html}\" style=\"position:absolute;width:100%;height:100%;left:0;top:0;' # noqa\n 'border:none !important;\" '\n 'allowfullscreen webkitallowfullscreen mozallowfullscreen>'\n '</iframe>'\n '</div></div>').format\n iframe = iframe(html=html,\n width=self.width,\n ratio=self.ratio)\n else:\n iframe = ('<iframe src=\"{html}\" width=\"{width}\" height=\"{height}\"'\n 'style=\"border:none !important;\" '\n '\"allowfullscreen\" \"webkitallowfullscreen\" \"mozallowfullscreen\">' # noqa\n '</iframe>').format\n iframe = iframe(html=html, width=self.width, height=self.height)\n return iframe",
"def editable_field_html(klass, field_name, value, f, include_nil_selectors = false)\n # When editing a job the values are of the correct type.\n # When editing a dirmon entry values are strings.\n field = klass.fields[field_name.to_s]\n return unless field && field.type\n\n placeholder = field.default_val\n placeholder = nil if placeholder.is_a?(Proc)\n\n case field.type.name\n when 'Symbol', 'String', 'Integer'\n options = extract_inclusion_values(klass, field_name)\n str = \"[#{field.type.name}]\\n\".html_safe\n if options\n str + f.select(field_name, options, { include_blank: options.include?(nil) || include_nil_selectors, selected: value }, { class: 'selectize form-control' })\n else\n if field.type.name == 'Integer'\n str + f.number_field(field_name, value: value, class: 'form-control', placeholder: placeholder)\n else\n str + f.text_field(field_name, value: value, class: 'form-control', placeholder: placeholder)\n end\n end\n when 'Hash'\n \"[JSON Hash]\\n\".html_safe +\n f.text_field(field_name, value: value ? value.to_json : '', class: 'form-control', placeholder: '{\"key1\":\"value1\", \"key2\":\"value2\", \"key3\":\"value3\"}')\n when 'Array'\n options = Array(value)\n \"[Array]\\n\".html_safe +\n f.select(field_name, options_for_select(options, options), { include_hidden: false }, { class: 'selectize form-control', multiple: true })\n when 'Mongoid::Boolean'\n name = \"#{field_name}_true\".to_sym\n value = value.to_s\n str = '<div class=\"radio-buttons\">'.html_safe\n str << f.radio_button(field_name, 'true', checked: value == 'true')\n str << ' '.html_safe + f.label(name, 'true')\n str << ' '.html_safe + f.radio_button(field_name, 'false', checked: value == 'false')\n str << ' '.html_safe + f.label(name, 'false')\n # Allow this field to be unset (nil).\n if include_nil_selectors\n str << ' '.html_safe + f.radio_button(field_name, '', checked: value == '')\n str << ' '.html_safe + f.label(name, 'nil')\n end\n\n str << '</div>'.html_safe\n else\n \"[#{field.type.name}]\".html_safe +\n f.text_field(field_name, value: value, class: 'form-control', placeholder: placeholder)\n end\n end",
"function(type, args, obj) {\n var useIframe = args[0];\n \n if (!this.parent) {\n if (Dom.inDocument(this.oDomContainer)) {\n if (useIframe) {\n var pos = Dom.getStyle(this.oDomContainer, \"position\");\n \n if (pos == \"absolute\" || pos == \"relative\") {\n \n if (!Dom.inDocument(this.iframe)) {\n this.iframe = document.createElement(\"iframe\");\n this.iframe.src = \"javascript:false;\";\n \n Dom.setStyle(this.iframe, \"opacity\", \"0\");\n \n if (YAHOO.env.ua.ie && YAHOO.env.ua.ie <= 6) {\n Dom.addClass(this.iframe, this.Style.CSS_FIXED_SIZE);\n }\n \n this.oDomContainer.insertBefore(this.iframe, this.oDomContainer.firstChild);\n }\n }\n } else {\n if (this.iframe) {\n if (this.iframe.parentNode) {\n this.iframe.parentNode.removeChild(this.iframe);\n }\n this.iframe = null;\n }\n }\n }\n }\n }",
"function initIframeForUpload(name) {\n var iframe = qq.toElement(\"<iframe src='javascript:false;' name='\" + name + \"' />\");\n\n iframe.setAttribute(\"id\", name);\n\n iframe.style.display = \"none\";\n document.body.appendChild(iframe);\n\n return iframe;\n }",
"def as_iframe(self, html_data):\n \"\"\"Build the HTML representation for the mapviz.\"\"\"\n\n srcdoc = html_data.replace('\"', \"'\")\n return ('<iframe id=\"{div_id}\", srcdoc=\"{srcdoc}\" style=\"width: {width}; '\n 'height: {height};\"></iframe>'.format(\n div_id=self.div_id,\n srcdoc=srcdoc,\n width=self.width,\n height=self.height))",
"function frame (form) {\n var name = 'ff-' + new Date().valueOf();\n $(form)\n .attr('autocomplete', 'on')\n .attr('target', name);\n return $('<iframe>')\n .css('display', 'none')\n .attr('id', name)\n .attr('name', name)\n .attr('src', 'javascript:void 0')\n .afterOf(form);\n}",
"function( data )\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tif ( iframe )\r\n\t\t\t\t\t\t\tiframe.remove();\r\n\r\n\t\t\t\t\t\tvar src =\r\n\t\t\t\t\t\t\t'document.open();' +\r\n\r\n\t\t\t\t\t\t\t// The document domain must be set any time we\r\n\t\t\t\t\t\t\t// call document.open().\r\n\t\t\t\t\t\t\t( isCustomDomain ? ( 'document.domain=\"' + document.domain + '\";' ) : '' ) +\r\n\r\n\t\t\t\t\t\t\t'document.close();';\r\n\r\n\t\t\t\t\t\t// With IE, the custom domain has to be taken care at first,\r\n\t\t\t\t\t\t// for other browers, the 'src' attribute should be left empty to\r\n\t\t\t\t\t\t// trigger iframe's 'load' event.\r\n \t\t\t\t\t\tsrc =\r\n\t\t\t\t\t\t\tCKEDITOR.env.air ?\r\n\t\t\t\t\t\t\t\t'javascript:void(0)' :\r\n\t\t\t\t\t\t\tCKEDITOR.env.ie ?\r\n\t\t\t\t\t\t\t\t'javascript:void(function(){' + encodeURIComponent( src ) + '}())'\r\n\t\t\t\t\t\t\t:\r\n\t\t\t\t\t\t\t\t'';\r\n\r\n\t\t\t\t\t\tiframe = CKEDITOR.dom.element.createFromHtml( '<iframe' +\r\n \t\t\t\t\t\t\t' style=\"width:100%;height:100%\"' +\r\n \t\t\t\t\t\t\t' frameBorder=\"0\"' +\r\n \t\t\t\t\t\t\t' title=\"' + frameLabel + '\"' +\r\n \t\t\t\t\t\t\t' src=\"' + src + '\"' +\r\n\t\t\t\t\t\t\t' tabIndex=\"' + ( CKEDITOR.env.webkit? -1 : editor.tabIndex ) + '\"' +\r\n \t\t\t\t\t\t\t' allowTransparency=\"true\"' +\r\n \t\t\t\t\t\t\t'></iframe>' );\r\n\r\n\t\t\t\t\t\t// Running inside of Firefox chrome the load event doesn't bubble like in a normal page (#5689)\r\n\t\t\t\t\t\tif ( document.location.protocol == 'chrome:' )\r\n\t\t\t\t\t\t\tCKEDITOR.event.useCapture = true;\r\n\r\n\t\t\t\t\t\t// With FF, it's better to load the data on iframe.load. (#3894,#4058)\r\n\t\t\t\t\t\tiframe.on( 'load', function( ev )\r\n\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\tframeLoaded = 1;\r\n\t\t\t\t\t\t\t\tev.removeListener();\r\n\r\n\t\t\t\t\t\t\t\tvar doc = iframe.getFrameDocument();\r\n\t\t\t\t\t\t\t\tdoc.write( data );\r\n\r\n\t\t\t\t\t\t\t\tCKEDITOR.env.air && contentDomReady( doc.getWindow().$ );\r\n\t\t\t\t\t\t\t});\r\n\r\n\t\t\t\t\t\t// Reset adjustment back to default (#5689)\r\n\t\t\t\t\t\tif ( document.location.protocol == 'chrome:' )\r\n\t\t\t\t\t\t\tCKEDITOR.event.useCapture = false;\r\n\r\n\t\t\t\t\t\tmainElement.append( iframe );\r\n\t\t\t\t\t}",
"function () {\n\n var iframe, document = Aria.$window.document;\n\n iframe = document.createElement('iframe');\n\n iframe.setAttribute('id', this.IFRAME_ID);\n iframe.style.display = 'none';\n\n document.body.appendChild(iframe);\n\n this._iframe = iframe;\n this.polledIframeHashString = this._currentHashString;\n\n this._addIframeHistoryEntry(this._currentHashString);\n }",
"function (formId) {\n var me = this,\n form = formId ? document.getElementById(formId) :\n domUtils.findParent(me.iframe.parentNode, function (node) {\n return node.tagName == 'FORM'\n }, true);\n form && setValue(form, me);\n }"
] |
[
0.7014394402503967,
0.6944384574890137,
0.6943004131317139,
0.6940367817878723,
0.693276584148407,
0.6919499635696411,
0.6914646029472351,
0.6909341216087341,
0.6894080638885498,
0.68863844871521,
0.6862516403198242,
0.6855077743530273
] |
Get IP address for the docker host
|
def _get_ip():
    """Get IP address for the docker host
    """
    cmd_netstat = ['netstat', '-nr']
    p1 = subprocess.Popen(cmd_netstat, stdout=subprocess.PIPE)
    cmd_grep = ['grep', r'^0\.0\.0\.0']
    p2 = subprocess.Popen(cmd_grep, stdin=p1.stdout, stdout=subprocess.PIPE)
    cmd_awk = ['awk', '{ print $2 }']
    p3 = subprocess.Popen(cmd_awk, stdin=p2.stdout, stdout=subprocess.PIPE)
    # Decode the bytes so the address can be interpolated into URLs later.
    galaxy_ip = p3.stdout.read().decode('utf-8')
    log.debug('Host IP determined to be %s', galaxy_ip)
    return galaxy_ip
|
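The netstat | grep | awk pipeline can also be replaced by reading the routing table directly; a sketch assuming a Linux container with /proc/net/route available, not the method used above.

import socket
import struct

def _get_default_gateway():
    """Return the default route's gateway from /proc/net/route, or None."""
    with open('/proc/net/route') as fh:
        next(fh)  # skip the header line
        for line in fh:
            fields = line.split()
            dest, gateway, flags = fields[1], fields[2], int(fields[3], 16)
            # Destination 00000000 with the RTF_GATEWAY flag (0x2) set.
            if dest == '00000000' and flags & 0x2:
                # The gateway is stored as a little-endian hex IPv4 address.
                return socket.inet_ntoa(struct.pack('<L', int(gateway, 16)))
    return None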
[
"def ip(self):\r\n\r\n '''\r\n a method to retrieve the ip of system running docker\r\n\r\n :return: string with ip address of system\r\n '''\r\n\r\n if self.localhost.os.sysname == 'Windows' and float(self.localhost.os.release) < 10:\r\n sys_cmd = 'docker-machine ip %s' % self.vbox\r\n system_ip = self.command(sys_cmd).replace('\\n','')\r\n else:\r\n system_ip = self.localhost.ip\r\n\r\n return system_ip",
"def get_container_ip(container_id_or_name):\n \"\"\"Get a Docker container's IP address from its id or name.\"\"\"\n command = [\n 'docker',\n 'inspect',\n '-f',\n '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}',\n container_id_or_name,\n ]\n\n return run_command(command, capture='out', check=True).stdout.strip()",
"def docker_routable_ip\n case @docker_url.scheme\n when 'tcp', 'http', 'https'\n docker_dns = @docker_url.host\n docker_port = @docker_url.port || 2376\n else\n # Cheap trick: for unix, file or other protocols, assume docker ports\n # are proxied to localhost in addition to other interfaces\n docker_dns = 'localhost'\n docker_port = 2376\n end\n\n addr = Addrinfo.getaddrinfo(\n docker_dns, docker_port,\n Socket::AF_INET, Socket::SOCK_STREAM).first\n\n addr && addr.ip_address\n end",
"def dockermachine_ip() -> Optional[str]:\n \"\"\"\n Gets IP address of the default docker machine\n Returns None if no docker-machine executable\n in the PATH and if there no Docker machine\n with name default present\n \"\"\"\n if not check_dockermachine():\n return None\n\n # noinspection PyBroadException\n try:\n out = subprocess.check_output(['docker-machine', 'ip'])\n return out.decode(\"utf-8\").strip()\n except Exception:\n logger.debug(f\"docker machine not present\")\n return None",
"def get_internal_registry_ip():\n \"\"\"\n Search for `docker-registry` IP\n :return: str, ip address\n \"\"\"\n with conu.backend.origin.backend.OpenshiftBackend() as origin_backend:\n services = origin_backend.list_services()\n for service in services:\n if service.name == 'docker-registry':\n logger.debug(\"Internal docker-registry IP: %s\",\n \"{ip}:{port}\".format(ip=service.get_ip(), port=INTERNAL_REGISTRY_PORT))\n return \"{ip}:{port}\".format(ip=service.get_ip(), port=INTERNAL_REGISTRY_PORT)\n return None",
"def ip_address(self,\n container: Container,\n raise_error: bool = False\n ) -> Optional[Union[IPv4Address, IPv6Address]]:\n \"\"\"\n The IP address used by a given container, or None if no IP address has\n been assigned to that container.\n \"\"\"\n # TODO: refactor!\n api_client = docker.APIClient(base_url='unix://var/run/docker.sock')\n docker_info = api_client.inspect_container(container.id)\n address = docker_info['NetworkSettings']['IPAddress']\n try:\n return IPv4Address(address)\n except ipaddress.AddressValueError:\n try:\n return IPv6Address(address)\n except ipaddress.AddressValueError:\n if raise_error:\n raise\n return None",
"def get_container_ip(self, container):\n \"\"\"\n Returns the internal ip of the container if available\n \"\"\"\n info = self.inspect_container(container)\n if not info:\n return None\n\n netInfo = info['NetworkSettings']\n if not netInfo:\n return None\n\n ip = netInfo['IPAddress']\n if not ip:\n return None\n\n return ip",
"def get_host_ip(self, env_with_dig='ingi/inginious-c-default'):\n \"\"\"\n Get the external IP of the host of the docker daemon. Uses OpenDNS internally.\n :param env_with_dig: any container image that has dig\n \"\"\"\n try:\n container = self._docker.containers.create(env_with_dig, command=\"dig +short myip.opendns.com @resolver1.opendns.com\")\n container.start()\n response = container.wait()\n assert response[\"StatusCode\"] == 0 if isinstance(response, dict) else response == 0\n answer = container.logs(stdout=True, stderr=False).decode('utf8').strip()\n container.remove(v=True, link=False, force=True)\n return answer\n except:\n return None",
"def ip_address(self,\n container: Container\n ) -> Union[IPv4Address, IPv6Address]:\n \"\"\"\n The IP address used by a given container, or None if no IP address has\n been assigned to that container.\n \"\"\"\n r = self.__api.get('containers/{}/ip'.format(container.uid))\n if r.status_code == 200:\n return r.json()\n self.__api.handle_erroneous_response(r)",
"def get_ip(host):\n '''\n Return the ip associated with the named host\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' hosts.get_ip <hostname>\n '''\n hosts = _list_hosts()\n if not hosts:\n return ''\n # Look for the op\n for addr in hosts:\n if host in hosts[addr]:\n return addr\n # ip not found\n return ''",
"def get_host_ip(logHost):\r\n \"\"\" If it is not match your local ip, you should fill the PutLogsRequest\r\n parameter source by yourself.\r\n \"\"\"\r\n s = None\r\n try:\r\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n s.connect((logHost, 80))\r\n ip = s.getsockname()[0]\r\n return ip\r\n except Exception:\r\n return '127.0.0.1'\r\n finally:\r\n if s:\r\n s.close()",
"def get_ip_address():\n \"\"\"Simple utility to get host IP address.\"\"\"\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n ip_address = s.getsockname()[0]\n except socket_error as sockerr:\n if sockerr.errno != errno.ENETUNREACH:\n raise sockerr\n ip_address = socket.gethostbyname(socket.getfqdn())\n finally:\n s.close()\n\n return ip_address"
] |
[
0.7984344363212585,
0.7968917489051819,
0.7957664728164673,
0.771469235420227,
0.7571123838424683,
0.7551725506782532,
0.7551681399345398,
0.7548179030418396,
0.7476183772087097,
0.7425757050514221,
0.7335226535797119,
0.7327448129653931
] |
Given access to the configuration dict that galaxy passed us, we try to connect to galaxy's API.
First we try connecting to galaxy directly, using an IP address given
to us by docker (since the galaxy host is the default gateway for docker).
Using additional information collected by galaxy, like the port it is
running on and the application path, we build a galaxy URL and test our
connection by attempting to get a history listing. This is done to
avoid any nasty network configuration that a SysAdmin has placed
between galaxy and us inside docker, like disabling API queries.
If that fails, we fall back to the URL the user is accessing galaxy
through. This will succeed where the previous connection fails under
the conditions of REMOTE_USER and galaxy running under uWSGI.
|
def get_galaxy_connection(history_id=None, obj=True):
"""
    Given access to the configuration dict that galaxy passed us, we try to connect to galaxy's API.
    First we try connecting to galaxy directly, using an IP address given
    to us by docker (since the galaxy host is the default gateway for docker).
    Using additional information collected by galaxy, like the port it is
    running on and the application path, we build a galaxy URL and test our
    connection by attempting to get a history listing. This is done to
    avoid any nasty network configuration that a SysAdmin has placed
    between galaxy and us inside docker, like disabling API queries.
    If that fails, we fall back to the URL the user is accessing galaxy
    through. This will succeed where the previous connection fails under
    the conditions of REMOTE_USER and galaxy running under uWSGI.
"""
history_id = history_id or os.environ['HISTORY_ID']
key = os.environ['API_KEY']
### Customised/Raw galaxy_url ###
galaxy_ip = _get_ip()
# Substitute $DOCKER_HOST with real IP
url = Template(os.environ['GALAXY_URL']).safe_substitute({'DOCKER_HOST': galaxy_ip})
gi = _test_url(url, key, history_id, obj=obj)
if gi is not None:
return gi
### Failover, fully auto-detected URL ###
# Remove trailing slashes
app_path = os.environ['GALAXY_URL'].rstrip('/')
# Remove protocol+host:port if included
    app_path = '/'.join(app_path.split('/')[3:])
if 'GALAXY_WEB_PORT' not in os.environ:
# We've failed to detect a port in the config we were given by
# galaxy, so we won't be able to construct a valid URL
raise Exception("No port")
else:
# We should be able to find a port to connect to galaxy on via this
# conf var: galaxy_paster_port
galaxy_port = os.environ['GALAXY_WEB_PORT']
built_galaxy_url = 'http://%s:%s/%s' % (galaxy_ip.strip(), galaxy_port, app_path.strip())
url = built_galaxy_url.rstrip('/')
gi = _test_url(url, key, history_id, obj=obj)
if gi is not None:
return gi
### Fail ###
msg = "Could not connect to a galaxy instance. Please contact your SysAdmin for help with this error"
raise Exception(msg)
|
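The function above calls a _test_url() helper that is not shown. Below is a hypothetical reconstruction of its contract (return a usable connection on success, None on failure), sketched with bioblend; the real helper may differ.

import logging

from bioblend import galaxy
from bioblend.galaxy import objects

log = logging.getLogger(__name__)

def _test_url(url, key, history_id, obj=True):
    """Probe a candidate Galaxy URL; return a connection on success, else None."""
    try:
        if obj:
            gi = objects.GalaxyInstance(url, key)
            gi.histories.get(history_id)  # raises if unreachable or unauthorized
        else:
            gi = galaxy.GalaxyInstance(url=url, key=key)
            gi.histories.show_history(history_id)
        return gi
    except Exception:
        log.debug('Galaxy API not reachable at %s', url)
        return None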
[
"def search_local_galaxies(galaxy):\n '''\n It is also possible to query the local galaxies by label, here is an example of querying for the local galaxy labeled IC 10\n\n http://star-api.herokuapp.com/api/v1/local_groups/IC 10\n '''\n\n base_url = \"http://star-api.herokuapp.com/api/v1/local_groups/\"\n\n if not isinstance(galaxy, str):\n raise ValueError(\"The galaxy arg you provided is not the type of str\")\n else:\n base_url += galaxy\n\n return dispatch_http_get(base_url)",
"def connect_with_retry(self) -> None:\n \"\"\" Attempt to connect to the Slack API. Retry on failures. \"\"\"\n if self.is_connected():\n log.debug('Already connected to the Slack API')\n return\n\n for retry in range(1, self.retries + 1):\n self.connect()\n if self.is_connected():\n log.debug('Connected to the Slack API')\n return\n else:\n interval = self.backoff(retry)\n log.debug(\"Waiting %.3fs before retrying\", interval)\n time.sleep(interval)\n\n raise FailedConnection('Failed to connect to the Slack API')",
"def connect(self, url=c.LOCALHOST, port=None, timeout=c.INITIAL_TIMEOUT,\n debug=False):\n \"\"\"socket connect to an already running starcraft2 process\"\"\"\n if port != None: # force a selection to a new port\n if self._port!=None: # if previously allocated port, return it\n portpicker.return_port(self._port)\n self._port = port\n elif self._port==None: # no connection exists\n self._port = portpicker.pick_unused_port()\n self._url = url\n if \":\" in url and not url.startswith(\"[\"): # Support ipv6 addresses.\n url = \"[%s]\" % url\n for i in range(timeout):\n startTime = time.time()\n if debug:\n print(\"attempt #%d to websocket connect to %s:%s\"%(i, url, port))\n try:\n finalUrl = \"ws://%s:%s/sc2api\" %(url, self._port)\n ws = websocket.create_connection(finalUrl, timeout=timeout)\n #print(\"ws:\", ws)\n self._client = protocol.StarcraftProtocol(ws)\n #super(ClientController, self).__init__(client) # ensure RemoteController initializtion is performed\n #if self.ping(): print(\"init ping()\") # ensure the latest state is synced\n # ping returns:\n # game_version: \"4.1.2.60604\"\n # data_version: \"33D9FE28909573253B7FC352CE7AEA40\"\n # data_build: 60604\n # base_build: 60321\n return self\n except socket.error: pass # SC2 hasn't started listening yet.\n except websocket.WebSocketException as err:\n print(err, type(err))\n if \"Handshake Status 404\" in str(err):\n pass # SC2 is listening, but hasn't set up the /sc2api endpoint yet.\n else: raise\n except Exception as e:\n print(type(e), e)\n sleepTime = max(0, 1 - (time.time() - startTime)) # try to wait for up to 1 second total\n if sleepTime: time.sleep(sleepTime)\n raise websocket.WebSocketException(\"Could not connect to game at %s on port %s\"%(url, port))",
"def _connect(**kwargs):\n '''\n Initialise netscaler connection\n '''\n connargs = dict()\n\n # Shamelessy ripped from the mysql module\n def __connarg(name, key=None, default=None):\n '''\n Add key to connargs, only if name exists in our kwargs or as\n netscaler.<name> in __opts__ or __pillar__ Evaluate in said order - kwargs,\n opts then pillar. To avoid collision with other functions, kwargs-based\n connection arguments are prefixed with 'netscaler_' (i.e.\n 'netscaler_host', 'netscaler_user', etc.).\n '''\n if key is None:\n key = name\n if name in kwargs:\n connargs[key] = kwargs[name]\n else:\n prefix = 'netscaler_'\n if name.startswith(prefix):\n try:\n name = name[len(prefix):]\n except IndexError:\n return\n val = __salt__['config.option']('netscaler.{0}'.format(name), None)\n if val is not None:\n connargs[key] = val\n elif default is not None:\n connargs[key] = default\n\n __connarg('netscaler_host', 'host')\n __connarg('netscaler_user', 'user')\n __connarg('netscaler_pass', 'pass')\n __connarg('netscaler_useSSL', 'useSSL', True)\n\n nitro = NSNitro(connargs['host'], connargs['user'], connargs['pass'], connargs['useSSL'])\n try:\n nitro.login()\n except NSNitroError as error:\n log.debug('netscaler module error - NSNitro.login() failed: %s', error)\n return None\n return nitro",
"def connect(host, port=None, **kwargs):\n '''\n Test connectivity to a host using a particular\n port from the minion.\n\n .. versionadded:: 2016.3.0\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' network.connect archlinux.org 80\n\n salt '*' network.connect archlinux.org 80 timeout=3\n\n salt '*' network.connect archlinux.org 80 timeout=3 family=ipv4\n\n salt '*' network.connect google-public-dns-a.google.com port=53 proto=udp timeout=3\n '''\n\n ret = {'result': None,\n 'comment': ''}\n\n if not host:\n ret['result'] = False\n ret['comment'] = 'Required argument, host, is missing.'\n return ret\n\n if not port:\n ret['result'] = False\n ret['comment'] = 'Required argument, port, is missing.'\n return ret\n\n proto = kwargs.get('proto', 'tcp')\n timeout = kwargs.get('timeout', 5)\n family = kwargs.get('family', None)\n\n if salt.utils.validate.net.ipv4_addr(host) or salt.utils.validate.net.ipv6_addr(host):\n address = host\n else:\n address = '{0}'.format(salt.utils.network.sanitize_host(host))\n\n try:\n if proto == 'udp':\n __proto = socket.SOL_UDP\n else:\n __proto = socket.SOL_TCP\n proto = 'tcp'\n\n if family:\n if family == 'ipv4':\n __family = socket.AF_INET\n elif family == 'ipv6':\n __family = socket.AF_INET6\n else:\n __family = 0\n else:\n __family = 0\n\n (family,\n socktype,\n _proto,\n garbage,\n _address) = socket.getaddrinfo(address, port, __family, 0, __proto)[0]\n\n skt = socket.socket(family, socktype, _proto)\n skt.settimeout(timeout)\n\n if proto == 'udp':\n # Generate a random string of a\n # decent size to test UDP connection\n md5h = hashlib.md5()\n md5h.update(datetime.datetime.now().strftime('%s'))\n msg = md5h.hexdigest()\n skt.sendto(msg, _address)\n recv, svr = skt.recvfrom(255)\n skt.close()\n else:\n skt.connect(_address)\n skt.shutdown(2)\n except Exception as exc:\n ret['result'] = False\n ret['comment'] = 'Unable to connect to {0} ({1}) on {2} port {3}'\\\n .format(host, _address[0], proto, port)\n return ret\n\n ret['result'] = True\n ret['comment'] = 'Successfully connected to {0} ({1}) on {2} port {3}'\\\n .format(host, _address[0], proto, port)\n return ret",
"def _try_options(options, exceptions,\n jid, metadata, negotiation_timeout, loop, logger):\n \"\"\"\n Helper function for :func:`connect_xmlstream`.\n \"\"\"\n for host, port, conn in options:\n logger.debug(\n \"domain %s: trying to connect to %r:%s using %r\",\n jid.domain, host, port, conn\n )\n try:\n transport, xmlstream, features = yield from conn.connect(\n loop,\n metadata,\n jid.domain,\n host,\n port,\n negotiation_timeout,\n base_logger=logger,\n )\n except OSError as exc:\n logger.warning(\n \"connection failed: %s\", exc\n )\n exceptions.append(exc)\n continue\n\n logger.debug(\n \"domain %s: connection succeeded using %r\",\n jid.domain,\n conn,\n )\n\n if not metadata.sasl_providers:\n return transport, xmlstream, features\n\n try:\n features = yield from security_layer.negotiate_sasl(\n transport,\n xmlstream,\n metadata.sasl_providers,\n negotiation_timeout=None,\n jid=jid,\n features=features,\n )\n except errors.SASLUnavailable as exc:\n protocol.send_stream_error_and_close(\n xmlstream,\n condition=errors.StreamErrorCondition.POLICY_VIOLATION,\n text=str(exc),\n )\n exceptions.append(exc)\n continue\n except Exception as exc:\n protocol.send_stream_error_and_close(\n xmlstream,\n condition=errors.StreamErrorCondition.UNDEFINED_CONDITION,\n text=str(exc),\n )\n raise\n\n return transport, xmlstream, features\n\n return None",
"def __connect(hostname, timeout=20, username=None, password=None):\n '''\n Connect to the DRAC\n '''\n drac_cred = __opts__.get('drac')\n err_msg = 'No drac login credentials found. Please add the \\'username\\' and \\'password\\' ' \\\n 'fields beneath a \\'drac\\' key in the master configuration file. Or you can ' \\\n 'pass in a username and password as kwargs at the CLI.'\n\n if not username:\n if drac_cred is None:\n log.error(err_msg)\n return False\n username = drac_cred.get('username', None)\n if not password:\n if drac_cred is None:\n log.error(err_msg)\n return False\n password = drac_cred.get('password', None)\n\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n try:\n client.connect(hostname, username=username, password=password, timeout=timeout)\n except Exception as e:\n log.error('Unable to connect to %s: %s', hostname, e)\n return False\n\n return client",
"def _setup_conn_old(**kwargs):\n '''\n Setup kubernetes API connection singleton the old way\n '''\n host = __salt__['config.option']('kubernetes.api_url',\n 'http://localhost:8080')\n username = __salt__['config.option']('kubernetes.user')\n password = __salt__['config.option']('kubernetes.password')\n ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data')\n client_cert = __salt__['config.option']('kubernetes.client-certificate-data')\n client_key = __salt__['config.option']('kubernetes.client-key-data')\n ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file')\n client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file')\n client_key_file = __salt__['config.option']('kubernetes.client-key-file')\n\n # Override default API settings when settings are provided\n if 'api_url' in kwargs:\n host = kwargs.get('api_url')\n\n if 'api_user' in kwargs:\n username = kwargs.get('api_user')\n\n if 'api_password' in kwargs:\n password = kwargs.get('api_password')\n\n if 'api_certificate_authority_file' in kwargs:\n ca_cert_file = kwargs.get('api_certificate_authority_file')\n\n if 'api_client_certificate_file' in kwargs:\n client_cert_file = kwargs.get('api_client_certificate_file')\n\n if 'api_client_key_file' in kwargs:\n client_key_file = kwargs.get('api_client_key_file')\n\n if (\n kubernetes.client.configuration.host != host or\n kubernetes.client.configuration.user != username or\n kubernetes.client.configuration.password != password):\n # Recreates API connection if settings are changed\n kubernetes.client.configuration.__init__()\n\n kubernetes.client.configuration.host = host\n kubernetes.client.configuration.user = username\n kubernetes.client.configuration.passwd = password\n\n if ca_cert_file:\n kubernetes.client.configuration.ssl_ca_cert = ca_cert_file\n elif ca_cert:\n with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca:\n ca.write(base64.b64decode(ca_cert))\n kubernetes.client.configuration.ssl_ca_cert = ca.name\n else:\n kubernetes.client.configuration.ssl_ca_cert = None\n\n if client_cert_file:\n kubernetes.client.configuration.cert_file = client_cert_file\n elif client_cert:\n with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c:\n c.write(base64.b64decode(client_cert))\n kubernetes.client.configuration.cert_file = c.name\n else:\n kubernetes.client.configuration.cert_file = None\n\n if client_key_file:\n kubernetes.client.configuration.key_file = client_key_file\n elif client_key:\n with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k:\n k.write(base64.b64decode(client_key))\n kubernetes.client.configuration.key_file = k.name\n else:\n kubernetes.client.configuration.key_file = None\n return {}",
"def connectToDeviceOrExit(timeout=60, verbose=False, ignoresecuredevice=False, ignoreversioncheck=False, serialno=None):\n '''\n Connects to a device which serial number is obtained from the script arguments if available\n or using the default regex C{.*}.\n\n If the connection is not successful the script exits.\n\n History\n -------\n In MonkeyRunner times, this method was a way of overcoming one of its limitations.\n L{MonkeyRunner.waitForConnection()} returns a L{MonkeyDevice} even if the connection failed.\n Then, to detect this situation, C{device.wake()} is attempted and if it fails then it is\n assumed the previous connection failed.\n\n @type timeout: int\n @param timeout: timeout for the connection\n @type verbose: bool\n @param verbose: Verbose output\n @type ignoresecuredevice: bool\n @param ignoresecuredevice: Ignores the check for a secure device\n @type ignoreversioncheck: bool\n @param ignoreversioncheck: Ignores the check for a supported ADB version\n @type serialno: str\n @param serialno: The device or emulator serial number\n\n @return: the device and serialno used for the connection\n '''\n\n progname = os.path.basename(sys.argv[0])\n if serialno is None:\n # eat all the extra options the invoking script may have added\n args = sys.argv\n while len(args) > 1 and args[1][0] == '-':\n args.pop(1)\n serialno = args[1] if len(args) > 1 else \\\n os.environ['ANDROID_SERIAL'] if os.environ.has_key('ANDROID_SERIAL') \\\n else '.*'\n if IP_RE.match(serialno):\n # If matches an IP address format and port was not specified add the default\n serialno += ':%d' % ADB_DEFAULT_PORT\n if verbose:\n print >> sys.stderr, 'Connecting to a device with serialno=%s with a timeout of %d secs...' % \\\n (serialno, timeout)\n ViewClient.setAlarm(timeout+5)\n # NOTE: timeout is used for 2 different timeouts, the one to set the alarm to timeout the connection with\n # adb and the timeout used by adb (once connected) for the sockets\n device = adbclient.AdbClient(serialno, ignoreversioncheck=ignoreversioncheck, timeout=timeout)\n ViewClient.setAlarm(0)\n if verbose:\n print >> sys.stderr, 'Connected to device with serialno=%s' % serialno\n secure = device.getSystemProperty('ro.secure')\n debuggable = device.getSystemProperty('ro.debuggable')\n versionProperty = device.getProperty(VERSION_SDK_PROPERTY)\n if versionProperty:\n version = int(versionProperty)\n else:\n if verbose:\n print \"Couldn't obtain device SDK version\"\n version = -1\n\n # we are going to use UiAutomator for versions >= 16 that's why we ignore if the device\n # is secure if this is true\n if secure == '1' and debuggable == '0' and not ignoresecuredevice and version < 16:\n print >> sys.stderr, \"%s: ERROR: Device is secure, AndroidViewClient won't work.\" % progname\n if verbose:\n print >> sys.stderr, \" secure=%s debuggable=%s version=%d ignoresecuredevice=%s\" % \\\n (secure, debuggable, version, ignoresecuredevice)\n sys.exit(2)\n if device.serialno:\n # If we know the serialno because it was set by AdbClient, use it\n serialno = device.serialno\n\n ipPortRE = re.compile(IP_DOMAIN_NAME_PORT_REGEX, re.IGNORECASE)\n\n if re.search(\"[.*()+]\", serialno) and not ipPortRE.match(serialno):\n # if a regex was used we have to determine the serialno used\n serialno = ViewClient.__obtainDeviceSerialNumber(device)\n if verbose:\n print >> sys.stderr, 'Actual device serialno=%s' % serialno\n return device, serialno",
"def connect(creds, max_retries=100):\n \"\"\"Construct a connection value to Google Storage API\n\n The credentials are retrieved using get_credentials that checks\n the environment for the correct values.\n\n \"\"\"\n credentials, project = google.auth.default()\n return RetryClient(max_retries=max_retries, project=project,\n credentials=credentials)",
"def connect(self):\n '''connect\n\n High-level api: opens the NetConf connection and exchanges\n capabilities. Since topology YAML file is parsed by BaseConnection,\n the following parameters can be specified in your YAML file.\n\n Parameters\n ----------\n\n host : `string`\n Hostname or IP address to connect to.\n port : `int`, optional\n By default port is 830, but some devices use the default SSH port\n of 22 so this may need to be specified.\n timeout : `int`, optional\n An optional keyed argument to set timeout value in seconds. By\n default this value is 30 seconds.\n username : `string`\n The username to use for SSH authentication.\n password : `string`\n The password used if using password authentication, or the\n passphrase to use for unlocking keys that require it.\n key_filename : `string`\n a filename where a the private key to be used can be found.\n allow_agent : `boolean`\n Enables querying SSH agent (if found) for keys. The default value\n is True.\n hostkey_verify : `boolean`\n Enables hostkey verification from ~/.ssh/known_hosts. The default\n value is False.\n look_for_keys : `boolean`\n Enables looking in the usual locations for ssh keys\n (e.g. ~/.ssh/id_*). The default value is True.\n ssh_config : `string`\n Enables parsing of an OpenSSH configuration file, if set to its\n path, e.g. ~/.ssh/config or to True. If the value is True,\n ncclient uses ~/.ssh/config. The default value is None.\n\n Raises\n ------\n\n Exception\n If the YAML file does not have correct connections section, or\n establishing transport to ip:port is failed, ssh authentication is\n failed, or other transport failures.\n\n Note\n ----\n\n There is no return from this method. If something goes wrong, an\n exception will be raised.\n\n\n YAML Example::\n\n devices:\n asr22:\n type: 'ASR'\n tacacs:\n login_prompt: \"login:\"\n password_prompt: \"Password:\"\n username: \"admin\"\n passwords:\n tacacs: admin\n enable: admin\n line: admin\n connections:\n a:\n protocol: telnet\n ip: \"1.2.3.4\"\n port: 2004\n vty:\n protocol : telnet\n ip : \"2.3.4.5\"\n netconf:\n class: yang.connector.Netconf\n ip : \"2.3.4.5\"\n port: 830\n username: admin\n password: admin\n\n Code Example::\n\n >>> from pyats.topology import loader\n >>> testbed = loader.load('/users/xxx/xxx/asr22.yaml')\n >>> device = testbed.devices['asr22']\n >>> device.connect(alias='nc', via='netconf')\n >>>\n\n Expected Results::\n\n >>> device.nc.connected\n True\n >>> for iter in device.nc.server_capabilities:\n ... 
print(iter)\n ...\n urn:ietf:params:xml:ns:yang:smiv2:RFC-1215?module=RFC-1215\n urn:ietf:params:xml:ns:yang:smiv2:SNMPv2-TC?module=SNMPv2-TC\n ...\n >>>\n '''\n\n if self.connected:\n return\n\n logger.debug(self.session)\n if not self.session.is_alive():\n self._session = transport.SSHSession(self._device_handler)\n\n # default values\n defaults = {\n 'host': None,\n 'port': 830,\n 'timeout': 30,\n 'username': None,\n 'password': None,\n 'key_filename': None,\n 'allow_agent': False,\n 'hostkey_verify': False,\n 'look_for_keys': False,\n 'ssh_config': None,\n }\n defaults.update(self.connection_info)\n\n # remove items\n disregards = ['class', 'model', 'protocol', 'async_mode', 'raise_mode']\n defaults = {k: v for k, v in defaults.items() if k not in disregards}\n\n # rename ip -> host, cast to str type\n if 'ip' in defaults:\n defaults['host'] = str(defaults.pop('ip'))\n\n # rename user -> username\n if 'user' in defaults:\n defaults['username'] = str(defaults.pop('user'))\n\n defaults = {k: getattr(self, k, v) for k, v in defaults.items()}\n\n try:\n self.session.connect(**defaults)\n except Exception:\n if self.session.transport:\n self.session.close()\n raise\n\n @atexit.register\n def cleanup():\n if self.session.transport:\n self.session.close()",
"def api(server, command, *args, **kwargs):\n '''\n Call the Spacewalk xmlrpc api.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt-run spacewalk.api spacewalk01.domain.com systemgroup.create MyGroup Description\n salt-run spacewalk.api spacewalk01.domain.com systemgroup.create arguments='[\"MyGroup\", \"Description\"]'\n\n State Example:\n\n .. code-block:: yaml\n\n create_group:\n salt.runner:\n - name: spacewalk.api\n - server: spacewalk01.domain.com\n - command: systemgroup.create\n - arguments:\n - MyGroup\n - Description\n '''\n if 'arguments' in kwargs:\n arguments = kwargs['arguments']\n else:\n arguments = args\n\n call = '{0} {1}'.format(command, arguments)\n try:\n client, key = _get_session(server)\n except Exception as exc:\n err_msg = 'Exception raised when connecting to spacewalk server ({0}): {1}'.format(server, exc)\n log.error(err_msg)\n return {call: err_msg}\n\n namespace, method = command.split('.')\n endpoint = getattr(getattr(client, namespace), method)\n\n try:\n output = endpoint(key, *arguments)\n except Exception as e:\n output = 'API call failed: {0}'.format(e)\n\n return {call: output}"
] |
[
0.7073503732681274,
0.7046567797660828,
0.6999513506889343,
0.6967020630836487,
0.6959550976753235,
0.6951824426651001,
0.6902841925621033,
0.6831939220428467,
0.6800869703292847,
0.6785790324211121,
0.677520215511322,
0.677182674407959
] |
Given filename[s] of any file accessible to the docker instance, this
function will upload the file[s] to galaxy using the current history.
Does not return anything.
|
def put(filenames, file_type='auto', history_id=None):
"""
    Given filename[s] of any file accessible to the docker instance, this
    function will upload the file[s] to galaxy using the current history.
    Does not return anything.
"""
history_id = history_id or os.environ['HISTORY_ID']
gi = get_galaxy_connection(history_id=history_id)
for filename in filenames:
log.debug('Uploading gx=%s history=%s localpath=%s ft=%s', gi, history_id, filename, file_type)
history = gi.histories.get(history_id)
history.upload_dataset(filename, file_type=file_type)
|
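A short usage sketch for put(); the paths and file types are illustrative, and HISTORY_ID/API_KEY are assumed to be set in the environment as the code above expects.

# Upload a tabular result and let Galaxy sniff another file's type.
put(['/import/results.tabular'], file_type='tabular')
put(['/tmp/sequences.fasta'])  # file_type defaults to 'auto'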
[
"def _galaxy_library_upload(finfo, sample_info, config):\n \"\"\"Upload results to galaxy library.\n \"\"\"\n folder_name = \"%s_%s\" % (config[\"fc_date\"], config[\"fc_name\"])\n storage_dir = utils.safe_makedir(os.path.join(config[\"dir\"], folder_name))\n if finfo.get(\"type\") == \"directory\":\n storage_file = None\n if finfo.get(\"ext\") == \"qc\":\n pdf_file = qcsummary.prep_pdf(finfo[\"path\"], config)\n if pdf_file:\n finfo[\"path\"] = pdf_file\n finfo[\"type\"] = \"pdf\"\n storage_file = filesystem.copy_finfo(finfo, storage_dir, pass_uptodate=True)\n else:\n storage_file = filesystem.copy_finfo(finfo, storage_dir, pass_uptodate=True)\n if \"galaxy_url\" in config and \"galaxy_api_key\" in config:\n galaxy_url = config[\"galaxy_url\"]\n if not galaxy_url.endswith(\"/\"):\n galaxy_url += \"/\"\n gi = GalaxyInstance(galaxy_url, config[\"galaxy_api_key\"])\n else:\n raise ValueError(\"Galaxy upload requires `galaxy_url` and `galaxy_api_key` in config\")\n if storage_file and sample_info and not finfo.get(\"index\", False) and not finfo.get(\"plus\", False):\n _to_datalibrary_safe(storage_file, gi, folder_name, sample_info, config)",
"def _file_to_folder(gi, fname, sample_info, libitems, library, folder):\n \"\"\"Check if file exists on Galaxy, if not upload to specified folder.\n \"\"\"\n full_name = os.path.join(folder[\"name\"], os.path.basename(fname))\n\n # Handle VCF: Galaxy reports VCF files without the gzip extension\n file_type = \"vcf_bgzip\" if full_name.endswith(\".vcf.gz\") else \"auto\"\n if full_name.endswith(\".vcf.gz\"):\n full_name = full_name.replace(\".vcf.gz\", \".vcf\")\n\n for item in libitems:\n if item[\"name\"] == full_name:\n return item\n logger.info(\"Uploading to Galaxy library '%s': %s\" % (library.name, full_name))\n return gi.libraries.upload_from_galaxy_filesystem(str(library.id), fname, folder_id=str(folder[\"id\"]),\n link_data_only=\"link_to_files\",\n dbkey=sample_info[\"genome_build\"],\n file_type=file_type,\n roles=str(library.roles) if library.roles else None)",
"def update_file(finfo, sample_info, config):\n \"\"\"Update file in Galaxy data libraries.\n \"\"\"\n if GalaxyInstance is None:\n raise ImportError(\"Could not import bioblend.galaxy\")\n if \"dir\" not in config:\n raise ValueError(\"Galaxy upload requires `dir` parameter in config specifying the \"\n \"shared filesystem path to move files to.\")\n if \"outputs\" in config:\n _galaxy_tool_copy(finfo, config[\"outputs\"])\n else:\n _galaxy_library_upload(finfo, sample_info, config)",
"def upload(self, filename, number_of_hosts):\n \"\"\"Upload the given file to the specified number of hosts.\n\n :param filename: The filename of the file to upload.\n :type filename: str\n :param number_of_hosts: The number of hosts to connect to.\n :type number_of_hosts: int\n :returns: A list of dicts with 'host_name' and 'url' keys for all\n successful uploads or an empty list if all uploads failed.\n :rtype: list\n \"\"\"\n return self.multiupload(filename, self.random_hosts(number_of_hosts))",
"def upload(\n state, host, hostname, filename,\n remote_filename=None, use_remote_sudo=False,\n ssh_keyscan=False, ssh_user=None,\n):\n '''\n Upload files to other servers using ``scp``.\n\n + hostname: hostname to upload to\n + filename: file to upload\n + remote_filename: where to upload the file to (defaults to ``filename``)\n + use_remote_sudo: upload to a temporary location and move using sudo\n + ssh_keyscan: execute ``ssh.keyscan`` before uploading the file\n + ssh_user: connect with this user\n '''\n\n remote_filename = remote_filename or filename\n\n # Figure out where we're connecting (host or user@host)\n connection_target = hostname\n if ssh_user:\n connection_target = '@'.join((ssh_user, hostname))\n\n if ssh_keyscan:\n yield keyscan(state, host, hostname)\n\n # If we're not using sudo on the remote side, just scp the file over\n if not use_remote_sudo:\n yield 'scp {0} {1}:{2}'.format(filename, connection_target, remote_filename)\n\n else:\n # Otherwise - we need a temporary location for the file\n temp_remote_filename = state.get_temp_filename()\n\n # scp it to the temporary location\n upload_cmd = 'scp {0} {1}:{2}'.format(\n filename, connection_target, temp_remote_filename,\n )\n\n yield upload_cmd\n\n # And sudo sudo to move it\n yield command(state, host, connection_target, 'sudo mv {0} {1}'.format(\n temp_remote_filename, remote_filename,\n ))",
"def upload_file(\n self,\n filename,\n upload_as=None,\n blocksize=None,\n callback=None,\n information_callback=None,\n allow_timeout=False,\n ):\n \"\"\"Uploads a file with given filename to this room.\n You may specify upload_as to change the name it is uploaded as.\n You can also specify a blocksize and a callback if you wish.\n Returns the file's id on success and None on failure.\"\"\"\n\n with delayed_close(\n filename if hasattr(filename, \"read\") else open(filename, \"rb\")\n ) as file:\n filename = upload_as or os.path.split(filename)[1]\n try:\n file.seek(0, 2)\n if file.tell() > self.config.max_file:\n raise ValueError(\n f\"File must be at most {self.config.max_file >> 30} GB\"\n )\n finally:\n try:\n file.seek(0)\n except Exception:\n pass\n\n files = Data(\n {\"file\": {\"name\": filename, \"value\": file}},\n blocksize=blocksize,\n callback=callback,\n )\n\n headers = {\"Origin\": BASE_URL}\n headers.update(files.headers)\n\n while True:\n key, server, file_id = self._generate_upload_key(\n allow_timeout=allow_timeout\n )\n info = dict(\n key=key,\n server=server,\n file_id=file_id,\n room=self.room_id,\n filename=filename,\n len=files.len,\n resumecount=0,\n )\n if information_callback:\n if information_callback(info) is False:\n continue\n break\n\n params = {\"room\": self.room_id, \"key\": key, \"filename\": filename}\n\n if self.key:\n params[\"roomKey\"] = self.key\n if self.password:\n params[\"password\"] = self.password\n\n while True:\n try:\n post = self.conn.post(\n f\"https://{server}/upload\",\n params=params,\n data=files,\n headers=headers,\n )\n post.raise_for_status()\n break\n\n except requests.exceptions.ConnectionError as ex:\n if \"aborted\" not in repr(ex): # ye, that's nasty but \"compatible\"\n raise\n try:\n resume = self.conn.get(\n f\"https://{server}/rest/uploadStatus\",\n params={\"key\": key, \"c\": 1},\n ).text\n resume = from_json(resume)\n resume = resume[\"receivedBytes\"]\n if resume <= 0:\n raise ConnectionError(\"Cannot resume\")\n file.seek(resume)\n files = Data(\n {\"file\": {\"name\": filename, \"value\": file}},\n blocksize=blocksize,\n callback=callback,\n logical_offset=resume,\n )\n headers.update(files.headers)\n params[\"startAt\"] = resume\n info[\"resumecount\"] += 1\n if information_callback:\n information_callback(info)\n except requests.exceptions.ConnectionError as iex:\n # ye, that's nasty but \"compatible\"\n if \"aborted\" not in repr(iex):\n raise\n continue # another day, another try\n return file_id",
"async def _upload_to_module(hw, serialnum, fw_filename, loop):\n \"\"\"\n This method remains in the API currently because of its use of the robot\n singleton's copy of the api object & driver. This should move to the server\n lib project eventually and use its own driver object (preferably involving\n moving the drivers themselves to the serverlib)\n \"\"\"\n\n # ensure there is a reference to the port\n if not hw.is_connected():\n hw.connect()\n\n hw.discover_modules()\n hw_mods = hw.attached_modules.values()\n\n res = {}\n for module in hw_mods:\n if module.device_info.get('serial') == serialnum:\n log.info(\"Module with serial {} found\".format(serialnum))\n bootloader_port = await modules.enter_bootloader(module)\n if bootloader_port:\n module._port = bootloader_port\n # else assume old bootloader connection on existing module port\n log.info(\"Uploading file to port: {}\".format(\n module.port))\n log.info(\"Flashing firmware. This will take a few seconds\")\n try:\n res = await asyncio.wait_for(\n modules.update_firmware(\n module, fw_filename, loop),\n UPDATE_TIMEOUT)\n except asyncio.TimeoutError:\n return {'message': 'AVRDUDE not responding'}\n break\n if not res:\n res = {'message': 'Module {} not found'.format(serialnum)}\n return res",
"function(fileName) {\n let file = assets[fileName] || {};\n fileName = basePath + \"/\" + fileName.replace(/\\\\/g, '/');\n let key = path.posix.join(uploadPath, fileName);\n\n return new Promise((resolve, reject) => {\n let begin = Date.now();\n cos.putObject(\n {\n Bucket: bucket,\n Region: region,\n Key: key,\n Body: fs.createReadStream(file.existsAt),\n ContentLength: fs.statSync(file.existsAt).size\n },\n function(err, body) {\n uploadedFiles++;\n spinner.text = tip(uploadedFiles, totalFiles);\n\n if (err) return reject(err);\n body.duration = Date.now() - begin;\n resolve(body);\n }\n );\n });\n }",
"def upload_gallery_file(self, folder_name, file_name, data=None, input_file_path=None,\n prevent_share=False, content_type=\"image/png\", scope='content/write'):\n \"\"\"\n Upload a file to a folder in the Mxit user's gallery\n User authentication required with the following scope: 'content/write'\n \"\"\"\n if input_file_path:\n with open(input_file_path, 'rb') as f:\n data = f.read()\n\n if not data:\n raise ValueError('Either the data of a file or the path to a file must be provided')\n\n params = {\n 'fileName': file_name,\n 'preventShare': 'true' if prevent_share else 'false',\n }\n\n return _post(\n token=self.oauth.get_user_token(scope),\n uri='/user/media/file/' + urllib.quote(folder_name) + '?' + urllib.urlencode(params),\n data=data,\n content_type=content_type,\n )",
"def _to_datalibrary(fname, gi, folder_name, sample_info, config):\n \"\"\"Upload a file to a Galaxy data library in a project specific folder.\n \"\"\"\n library = _get_library(gi, sample_info, config)\n libitems = gi.libraries.show_library(library.id, contents=True)\n folder = _get_folder(gi, folder_name, library, libitems)\n _file_to_folder(gi, fname, sample_info, libitems, library, folder)",
"def upload_file(self, filepath, overwrite=True):\n \"\"\"Uploads a file to the temporary sauce storage.\"\"\"\n method = 'POST'\n filename = os.path.split(filepath)[1]\n endpoint = '/rest/v1/storage/{}/{}?overwrite={}'.format(\n self.client.sauce_username, filename, \"true\" if overwrite else \"false\")\n with open(filepath, 'rb') as filehandle:\n body = filehandle.read()\n return self.client.request(method, endpoint, body,\n content_type='application/octet-stream')",
"def uploadFile(self, filename, ispickle=False, athome=False):\n \"\"\"\n Uploads a single file to Redunda.\n\n :param str filename: The name of the file to upload\n :param bool ispickle: Optional variable to be set to True is the file is a pickle; default is False.\n :returns: returns nothing\n \"\"\"\n print(\"Uploading file {} to Redunda.\".format(filename))\n\n _, tail = os.path.split(filename)\n \n url = \"https://redunda.sobotics.org/bots/data/{}?key={}\".format(tail, self.key)\n \n #Set the content type to 'application/octet-stream'\n header = {\"Content-type\": \"application/octet-stream\"}\n \n filedata = \"\"\n\n if athome:\n filename = str(os.path.expanduser(\"~\")) + filename\n\n #Read the data from a file to a string.\n if filename.endswith(\".pickle\") or ispickle:\n try:\n with open(filename, \"rb\") as fileToRead:\n data = pickle.load(fileToRead)\n except pickle.PickleError as perr:\n print(\"Pickling error occurred: {}\".format(perr))\n return\n filedata = json.dumps(data)\n else:\n try:\n with open(filename, \"r\") as fileToRead:\n filedata = fileToRead.read()\n except IOError as ioerr:\n print(\"IOError occurred: {}\".format(ioerr))\n return\n\n requestToMake = request.Request(url, data=filedata.encode(\"utf-8\"), headers=header)\n\n #Make the request.\n response = request.urlopen(requestToMake)\n\n if response.code >= 400:\n print(\"Error occurred while uploading file '{}' with error code {}.\".format(filename,response.code))"
] |
[
0.752392053604126,
0.7501546740531921,
0.7467193007469177,
0.7044772505760193,
0.6977962851524353,
0.6887232065200806,
0.6879751086235046,
0.6821802854537964,
0.6746819615364075,
0.6736552715301514,
0.6727142333984375,
0.6706317067146301
] |
Given the history_id that is displayed to the user, this function will
download the file[s] from the history and store them under /import/
Return value[s] are the path[s] to the dataset[s] stored under /import/
|
def get(datasets_identifiers, identifier_type='hid', history_id=None):
"""
Given the history_id that is displayed to the user, this function will
    download the file[s] from the history and store them under /import/
Return value[s] are the path[s] to the dataset[s] stored under /import/
"""
history_id = history_id or os.environ['HISTORY_ID']
    # The object version of bioblend is too slow at retrieving all datasets from a history,
    # so fall back to the non-object path.
gi = get_galaxy_connection(history_id=history_id, obj=False)
for dataset_identifier in datasets_identifiers:
file_path = '/import/%s' % dataset_identifier
log.debug('Downloading gx=%s history=%s dataset=%s', gi, history_id, dataset_identifier)
        # Cache the file requests. E.g. if someone does something silly like
        # calling get() for a Galaxy file in a for-loop, we wouldn't want to
        # re-download it every time and add that overhead.
if not os.path.exists(file_path):
hc = HistoryClient(gi)
dc = DatasetClient(gi)
history = hc.show_history(history_id, contents=True)
datasets = {ds[identifier_type]: ds['id'] for ds in history}
if identifier_type == 'hid':
dataset_identifier = int(dataset_identifier)
dc.download_dataset(datasets[dataset_identifier], file_path=file_path, use_default_filename=False)
else:
log.debug('Cached, not re-downloading')
return file_path
|
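A minimal usage sketch for the `get` helper above. The history id and hid are hypothetical example values, and the surrounding Galaxy container is assumed to provide `get_galaxy_connection` and the `/import/` directory.

import os

# 'abc123' and hid '1' are illustrative values, not real identifiers.
os.environ.setdefault('HISTORY_ID', 'abc123')
path = get(['1'], identifier_type='hid')
# Note: as written, `get` returns inside its for-loop, so the path of the
# first requested dataset is what comes back.
print(path)  # '/import/1'
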
[
"def get_by_historics_id(historics_id, page = 1, per_page = 20, order_by = :created_at, order_dir = :desc, include_finished = 0, all = false)\n params = {\n :historics_id => historics_id,\n :page => page,\n :per_page => per_page,\n :order_by => order_by,\n :order_dir => order_dir,\n :include_finished => include_finished,\n :all => all\n }\n DataSift.request(:GET, 'push/get', @config, params)\n end",
"def dataset_download_file(self,\n dataset,\n file_name,\n path=None,\n force=False,\n quiet=True):\n \"\"\" download a single file for a dataset\n\n Parameters\n ==========\n dataset: the string identified of the dataset\n should be in format [owner]/[dataset-name]\n file_name: the dataset configuration file\n path: if defined, download to this location\n force: force the download if the file already exists (default False)\n quiet: suppress verbose output (default is True)\n \"\"\"\n if '/' in dataset:\n self.validate_dataset_string(dataset)\n dataset_urls = dataset.split('/')\n owner_slug = dataset_urls[0]\n dataset_slug = dataset_urls[1]\n else:\n owner_slug = self.get_config_value(self.CONFIG_NAME_USER)\n dataset_slug = dataset\n\n if path is None:\n effective_path = self.get_default_download_dir(\n 'datasets', owner_slug, dataset_slug)\n else:\n effective_path = path\n\n response = self.process_response(\n self.datasets_download_file_with_http_info(\n owner_slug=owner_slug,\n dataset_slug=dataset_slug,\n file_name=file_name,\n _preload_content=False))\n url = response.retries.history[0].redirect_location.split('?')[0]\n outfile = os.path.join(effective_path, url.split('/')[-1])\n if force or self.download_needed(response, outfile, quiet):\n self.download_file(response, outfile, quiet)\n return True\n else:\n return False",
"def get_full_history(self, force=None, last_update=None, flush=False):\n '''\n Fields change depending on when you run activity_import,\n such as \"last_updated\" type fields which don't have activity\n being tracked, which means we'll always end up with different\n hash values, so we need to always remove all existing object\n states and import fresh\n '''\n return self._run_object_import(force=force, last_update=last_update,\n flush=flush, full_history=True)",
"def _import(self, record_key, record_data, overwrite=True, last_modified=0.0, **kwargs):\n \n '''\n a helper method for other storage clients to import into appdata\n \n :param record_key: string with key for record\n :param record_data: byte data for body of record\n :param overwrite: [optional] boolean to overwrite existing records\n :param last_modified: [optional] float to record last modified date\n :param kwargs: [optional] keyword arguments from other import methods \n :return: boolean indicating whether record was imported\n '''\n \n title = '%s._import' % self.__class__.__name__\n \n # verify permissions\n if not self.permissions_write:\n raise Exception('%s requires an access_token with write permissions.' % title)\n \n # retrieve file id\n file_id, parent_id = self._get_id(record_key)\n \n # check overwrite condition\n if file_id:\n if overwrite:\n try:\n self.drive.delete(fileId=file_id).execute()\n except:\n raise DriveConnectionError(title)\n else:\n return False\n \n # # check size of file\n # import sys\n # record_optimal = self.fields.metadata['record_optimal_bytes']\n # record_size = sys.getsizeof(record_data)\n # error_prefix = '%s(record_key=\"%s\", record_data=b\"...\")' % (title, record_key)\n # if record_size > record_optimal:\n # print('[WARNING] %s exceeds optimal record data size of %s bytes.' % (error_prefix, record_optimal))\n \n # prepare file body\n from googleapiclient.http import MediaInMemoryUpload\n media_body = MediaInMemoryUpload(body=record_data, resumable=True)\n \n # determine path segments\n path_segments = record_key.split(os.sep)\n \n # construct upload kwargs\n create_kwargs = {\n 'body': {\n 'name': path_segments.pop()\n },\n 'media_body': media_body,\n 'fields': 'id'\n }\n \n # walk through parent directories\n parent_id = ''\n if path_segments:\n \n # construct query and creation arguments\n walk_folders = True\n folder_kwargs = {\n 'body': {\n 'name': '',\n 'mimeType' : 'application/vnd.google-apps.folder'\n },\n 'fields': 'id'\n }\n query_kwargs = {\n 'spaces': self.drive_space,\n 'fields': 'files(id, parents)'\n }\n while path_segments:\n folder_name = path_segments.pop(0)\n folder_kwargs['body']['name'] = folder_name\n \n # search for folder id in existing hierarchy\n if walk_folders:\n walk_query = \"name = '%s'\" % folder_name\n if parent_id:\n walk_query += \"and '%s' in parents\" % parent_id\n query_kwargs['q'] = walk_query\n try:\n response = self.drive.list(**query_kwargs).execute()\n except:\n raise DriveConnectionError(title)\n file_list = response.get('files', [])\n else:\n file_list = []\n if file_list:\n parent_id = file_list[0].get('id')\n \n # or create folder\n # https://developers.google.com/drive/v3/web/folder\n else:\n try:\n if not parent_id:\n if self.drive_space == 'appDataFolder':\n folder_kwargs['body']['parents'] = [ self.drive_space ]\n else:\n del folder_kwargs['body']['parents']\n else:\n folder_kwargs['body']['parents'] = [parent_id]\n response = self.drive.create(**folder_kwargs).execute()\n parent_id = response.get('id')\n walk_folders = False\n except:\n raise DriveConnectionError(title)\n \n # add parent id to file creation kwargs\n if parent_id:\n create_kwargs['body']['parents'] = [parent_id]\n elif self.drive_space == 'appDataFolder':\n create_kwargs['body']['parents'] = [self.drive_space] \n \n # modify file time\n import re\n if re.search('\\\\.drep$', create_kwargs['body']['name']):\n from labpack.records.time import labDT\n drep_time = labDT.fromEpoch(1).isoformat()\n 
create_kwargs['body']['modifiedTime'] = drep_time\n elif last_modified:\n from labpack.records.time import labDT\n mod_time = labDT.fromEpoch(last_modified).isoformat()\n create_kwargs['body']['modifiedTime'] = mod_time\n \n # send create request\n try:\n self.drive.create(**create_kwargs).execute()\n except:\n raise DriveConnectionError(title)\n \n return True",
"def load_history(self):\r\n \"\"\"Load history from a .py file in user home directory\"\"\"\r\n if osp.isfile(self.history_filename):\r\n rawhistory, _ = encoding.readlines(self.history_filename)\r\n rawhistory = [line.replace('\\n', '') for line in rawhistory]\r\n if rawhistory[1] != self.INITHISTORY[1]:\r\n rawhistory[1] = self.INITHISTORY[1]\r\n else:\r\n rawhistory = self.INITHISTORY\r\n history = [line for line in rawhistory \\\r\n if line and not line.startswith('#')]\r\n\r\n # Truncating history to X entries:\r\n while len(history) >= CONF.get('historylog', 'max_entries'):\r\n del history[0]\r\n while rawhistory[0].startswith('#'):\r\n del rawhistory[0]\r\n del rawhistory[0]\r\n\r\n # Saving truncated history:\r\n try:\r\n encoding.writelines(rawhistory, self.history_filename)\r\n except EnvironmentError:\r\n pass\r\n\r\n return history",
"def fast_sync_fetch(working_dir, import_url):\n \"\"\"\n Get the data for an import snapshot.\n Store it to a temporary path\n Return the path on success\n Return None on error\n \"\"\"\n try:\n fd, tmppath = tempfile.mkstemp(prefix='.blockstack-fast-sync-', dir=working_dir)\n except Exception, e:\n log.exception(e)\n return None\n \n log.debug(\"Fetch {} to {}...\".format(import_url, tmppath))\n\n try:\n path, headers = urllib.urlretrieve(import_url, tmppath)\n except Exception, e:\n os.close(fd)\n log.exception(e)\n return None\n \n os.close(fd)\n return tmppath",
"def get_clean_isd_history(dest=os.path.join(folder, 'isd-history-cleaned.tsv'),\n url=\"ftp://ftp.ncdc.noaa.gov/pub/data/noaa/isd-history.csv\"): # pragma: no cover\n '''Basic method to update the isd-history file from the NOAA. This is \n useful as new weather stations are updated all the time.\n \n This function requires pandas to run. If fluids is installed for the \n superuser, this method must be called in an instance of Python running\n as the superuser (administrator).\n \n Retrieving the file from ftp typically takes several seconds.\n Pandas reads the file in ~30 ms and writes it in ~220 ms. Reading it with \n the code below takes ~220 ms but is necessary to prevent a pandas \n dependency.\n \n Parameters\n ----------\n dest : str, optional\n The file to store the data retrieved; leave as the default argument\n for it to be accessible by fluids.\n url : str, optional\n The location of the data file; this can be anywhere that can be read\n by pandas, including a local file as would be useful in an offline\n situation.\n '''\n import pandas as pd\n df = pd.read_csv(url)\n df.to_csv(dest, sep='\\t', index=False, header=False)",
"def _grab_history(self):\n \"\"\"Calculate the needed history/changelog changes\n\n Every history heading looks like '1.0 b4 (1972-12-25)'. Extract them,\n check if the first one matches the version and whether it has a the\n current date.\n \"\"\"\n default_location = None\n config = self.setup_cfg.config\n if config and config.has_option('zest.releaser', 'history_file'):\n default_location = config.get('zest.releaser', 'history_file')\n history_file = self.vcs.history_file(location=default_location)\n if not history_file:\n logger.warn(\"No history file found\")\n self.data['history_lines'] = None\n self.data['history_file'] = None\n return\n logger.debug(\"Checking %s\", history_file)\n history_lines = open(history_file).read().split('\\n')\n # ^^^ TODO: .readlines()?\n headings = utils.extract_headings_from_history(history_lines)\n if not len(headings):\n logger.error(\"No detectable version heading in the history \"\n \"file %s\", history_file)\n sys.exit()\n good_heading = self.data['history_header'] % self.data\n # ^^^ history_header is a string with %(abc)s replacements.\n line = headings[0]['line']\n previous = history_lines[line]\n history_lines[line] = good_heading\n logger.debug(\"Set heading from %r to %r.\", previous, good_heading)\n history_lines[line + 1] = utils.fix_rst_heading(\n heading=good_heading,\n below=history_lines[line + 1])\n logger.debug(\"Set line below heading to %r\",\n history_lines[line + 1])\n self.data['history_lines'] = history_lines\n self.data['history_file'] = history_file",
"def load_data(path):\n \"\"\"\n loads the data that has been save with Script.save.\n Args:\n path: path to folder saved by Script.save or raw_data folder within\n Returns:\n a dictionary with the data of form\n data = {param_1_name: param_1_data, ...}\n \"\"\"\n\n\n # check that path exists\n if not os.path.exists(path):\n print(path)\n raise AttributeError('Path given does not exist!')\n\n # windows can't deal with long filenames (>260 chars) so we have to use the prefix '\\\\\\\\?\\\\'\n # if len(path.split('\\\\\\\\?\\\\')) == 1:\n # path = '\\\\\\\\?\\\\' + os.path.abspath(path)\n\n\n # if raw_data folder exists, get a list of directories from within it; otherwise, get names of all .csv files in\n # current directory\n data = {}\n # if self.RAW_DATA_DIR in os.listdir(path): #8/26/16 AK: self not defined in static context\n # data_files = os.listdir(os.path.join(path, self.RAW_DATA_DIR + '/'))\n # path = os.path.join(path, self.RAW_DATA_DIR + '/')\n #\n # else:\n if 'raw_data' in os.listdir(path): #temporarily hardcoded\n data_files = os.listdir(os.path.join(path, 'raw_data' + '/'))\n path = os.path.join(path, 'raw_data' + '/')\n\n else:\n data_files = glob.glob(os.path.join(path, '*.csv'))\n\n # If no data files were found, raise error\n if not data_files:\n raise AttributeError('Could not find data files in {:s}'.format(path))\n\n # import data from each csv\n for data_file in data_files:\n # get data name, read the data from the csv, and save it to dictionary\n data_name = data_file.split('-')[-1][0:-4] # JG: why do we strip of the date?\n imported_data_df = pd.read_csv(os.path.join(path, data_file))\n\n # check if there are real headers, if the headers are digits than we ignore them because then they are just indecies\n # real headers are strings (however, the digits are also of type str! that why we use the isdigit method)\n column_headers = list(imported_data_df.columns.values)\n if sum([int(x.isdigit()) for x in column_headers]) != len(column_headers):\n data[data_name] = {h: imported_data_df[h].as_matrix() for h in column_headers}\n else:\n # note, np.squeeze removes extraneous length-1 dimensions from the returned 'matrix' from the dataframe\n data[data_name] = np.squeeze(imported_data_df.as_matrix())\n\n return data",
"def load_wdhistory(self, workdir=None):\r\n \"\"\"Load history from a text file in user home directory\"\"\"\r\n if osp.isfile(self.LOG_PATH):\r\n wdhistory, _ = encoding.readlines(self.LOG_PATH)\r\n wdhistory = [name for name in wdhistory if os.path.isdir(name)]\r\n else:\r\n if workdir is None:\r\n workdir = get_home_dir()\r\n wdhistory = [ workdir ]\r\n return wdhistory",
"def get_user_history (history_id=None):\n \"\"\"\n Get all visible dataset infos of user history.\n Return a list of dict of each dataset.\n \"\"\" \n history_id = history_id or os.environ['HISTORY_ID']\n gi = get_galaxy_connection(history_id=history_id, obj=False)\n hc = HistoryClient(gi)\n history = hc.show_history(history_id, visible=True, contents=True)\n return history",
"def read_history\n return super if History.disabled?\n File.exist?(history_file_path) && history.empty? &&\n File.readlines(history_file_path).each{ |e| history << e.chomp }\n end"
] |
[
0.6840371489524841,
0.6734036803245544,
0.6674093008041382,
0.6614606380462646,
0.6590308547019958,
0.6583523750305176,
0.6583167910575867,
0.6522727608680725,
0.6482543349266052,
0.6470222473144531,
0.6463623642921448,
0.6455193758010864
] |
Get all visible dataset infos of the user's history.
Return a list of dicts, one per dataset.
|
def get_user_history(history_id=None):
    """
    Get all visible dataset infos of the user's history.
    Return a list of dicts, one per dataset.
    """
history_id = history_id or os.environ['HISTORY_ID']
gi = get_galaxy_connection(history_id=history_id, obj=False)
hc = HistoryClient(gi)
history = hc.show_history(history_id, visible=True, contents=True)
return history
|
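A short consumption sketch for `get_user_history`; the history id is an example value, and the `hid`/`name` keys are assumptions based on bioblend's `show_history(..., contents=True)` output.

# Hypothetical: print the visible datasets of an example history.
for ds in get_user_history(history_id='abc123'):
    print(ds['hid'], ds['name'])
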
[
"def list(self):\n \"\"\"Lists all datasets for a particular account.\n\n Returns\n -------\n request.Response\n The response contains a list of JSON objects describing datasets.\n \"\"\"\n \n uri = URITemplate(self.baseuri + '/{owner}').expand(\n owner=self.username)\n return self.session.get(uri)",
"def get_dataset_list(self):\n \"\"\" Returns the list of available datasets for the current user.\n\n :return: a pandas Dataframe\n \"\"\"\n url = self.address + \"/datasets\"\n header = self.__check_authentication()\n response = requests.get(url, headers=header)\n response = response.json()\n datasets = response.get(\"datasets\")\n res = pd.DataFrame.from_dict(datasets)\n return self.process_info_list(res, \"info\")",
"def datasets(self):\n \"\"\"\n Return all datasets\n\n :return:\n \"\"\"\n\n return self.session.query(Dataset).filter(Dataset.vid != ROOT_CONFIG_NAME_V).all()",
"def datasets(self, visible=True):\r\n \"\"\"\r\n Returns a list of the data sets that are assigned with this\r\n chart widget.\r\n \r\n :param visible | <bool>\r\n \r\n :return [<XChartDataSet>, ..]\r\n \"\"\"\r\n if visible is not None:\r\n return filter(lambda x: x.isVisible(), self._datasets)\r\n return self._datasets[:]",
"def get_datasets(dataset_ids,**kwargs):\n \"\"\"\n Get a single dataset, by ID\n \"\"\"\n\n user_id = int(kwargs.get('user_id'))\n datasets = []\n if len(dataset_ids) == 0:\n return []\n try:\n dataset_rs = db.DBSession.query(Dataset.id,\n Dataset.type,\n Dataset.unit_id,\n Dataset.name,\n Dataset.hidden,\n Dataset.cr_date,\n Dataset.created_by,\n DatasetOwner.user_id,\n null().label('metadata'),\n case([(and_(Dataset.hidden=='Y', DatasetOwner.user_id is not None), None)],\n else_=Dataset.value).label('value')).filter(\n Dataset.id.in_(dataset_ids)).outerjoin(DatasetOwner,\n and_(DatasetOwner.dataset_id==Dataset.id,\n DatasetOwner.user_id==user_id)).all()\n\n #convert the value row into a string as it is returned as a binary\n for dataset_row in dataset_rs:\n dataset_dict = dataset_row._asdict()\n\n if dataset_row.value is not None:\n dataset_dict['value'] = str(dataset_row.value)\n\n if dataset_row.hidden == 'N' or (dataset_row.hidden == 'Y' and dataset_row.user_id is not None):\n metadata = db.DBSession.query(Metadata).filter(Metadata.dataset_id == dataset_row.id).all()\n dataset_dict['metadata'] = metadata\n else:\n dataset_dict['metadata'] = []\n\n datasets.append(namedtuple('Dataset', dataset_dict.keys())(**dataset_dict))\n\n\n except NoResultFound:\n raise ResourceNotFoundError(\"Datasets not found.\")\n\n return datasets",
"def get_datasets(self):\n # type: () -> List[hdx.data.dataset.Dataset]\n \"\"\"Get any datasets in the showcase\n\n Returns:\n List[Dataset]: List of datasets\n \"\"\"\n assoc_result, datasets_dicts = self._read_from_hdx('showcase', self.data['id'], fieldname='showcase_id',\n action=self.actions()['list_datasets'])\n datasets = list()\n if assoc_result:\n for dataset_dict in datasets_dicts:\n dataset = hdx.data.dataset.Dataset(dataset_dict, configuration=self.configuration)\n datasets.append(dataset)\n return datasets",
"def dataset_list(self,\n sort_by=None,\n size=None,\n file_type=None,\n license_name=None,\n tag_ids=None,\n search=None,\n user=None,\n mine=False,\n page=1):\n \"\"\" return a list of datasets!\n\n Parameters\n ==========\n sort_by: how to sort the result, see valid_sort_bys for options\n size: the size of the dataset, see valid_sizes for string options\n file_type: the format, see valid_file_types for string options\n license_name: string descriptor for license, see valid_license_names\n tag_ids: tag identifiers to filter the search\n search: a search term to use (default is empty string)\n user: username to filter the search to\n mine: boolean if True, group is changed to \"my\" to return personal\n page: the page to return (default is 1)\n \"\"\"\n valid_sort_bys = ['hottest', 'votes', 'updated', 'active', 'published']\n if sort_by and sort_by not in valid_sort_bys:\n raise ValueError('Invalid sort by specified. Valid options are ' +\n str(valid_sort_bys))\n\n valid_sizes = ['all', 'small', 'medium', 'large']\n if size and size not in valid_sizes:\n raise ValueError('Invalid size specified. Valid options are ' +\n str(valid_sizes))\n\n valid_file_types = ['all', 'csv', 'sqlite', 'json', 'bigQuery']\n if file_type and file_type not in valid_file_types:\n raise ValueError('Invalid file type specified. Valid options are '\n + str(valid_file_types))\n\n valid_license_names = ['all', 'cc', 'gpl', 'odb', 'other']\n if license_name and license_name not in valid_license_names:\n raise ValueError('Invalid license specified. Valid options are ' +\n str(valid_license_names))\n\n if int(page) <= 0:\n raise ValueError('Page number must be >= 1')\n\n group = 'public'\n if mine:\n group = 'my'\n if user:\n raise ValueError('Cannot specify both mine and a user')\n if user:\n group = 'user'\n\n datasets_list_result = self.process_response(\n self.datasets_list_with_http_info(\n group=group,\n sort_by=sort_by or 'hottest',\n size=size or 'all',\n filetype=file_type or 'all',\n license=license_name or 'all',\n tagids=tag_ids or '',\n search=search or '',\n user=user or '',\n page=page))\n return [Dataset(d) for d in datasets_list_result]",
"def get_datasets_info(self, token=None, url=API_GET_DATASETS_INFO):\n \"\"\" Gets information on all datasets for this account\n returns: requests object\n \"\"\"\n auth = 'Bearer ' + self.check_for_token(token)\n h = {'Authorization': auth, 'Cache-Control':'no-cache'}\n the_url = url\n r = requests.get(the_url, headers=h)\n\n return r",
"def datasets(self, limit=0, offset=0, order=None, **kwargs):\n '''\n Returns the list of datasets associated with a particular domain.\n WARNING: Large limits (>1000) will return megabytes of data,\n which can be slow on low-bandwidth networks, and is also a lot of\n data to hold in memory.\n\n This method performs a get request on these type of URLs:\n https://data.edmonton.ca/api/catalog/v1\n\n limit: max number of results to return, default is all (0)\n offset: the offset of result set\n order: field to sort on, optionally with ' ASC' or ' DESC' suffix\n ids: list of dataset IDs to consider\n domains: list of domains to search\n categories: list of categories\n tags: list of tags\n only: list of logical types to return, among `api`, `calendar`,\n `chart`, `datalens`, `dataset`, `federated_href`, `file`,\n `filter`, `form`, `href`, `link`, `map`, `measure`, `story`,\n `visualization`\n shared_to: list of users IDs or team IDs that datasets have to be\n shared with, or the string `site` meaning anyone on the domain.\n Note that you may only specify yourself or a team that you are\n on.\n Also note that if you search for assets shared to you, assets\n owned by you might be not be returned.\n column_names: list of column names that must be present in the\n tabular datasets\n q: text query that will be used by Elasticsearch to match results\n min_should_match: string specifying the number of words from `q`\n that should match. Refer to Elasticsearch docs for the format,\n the default is '3<60%', meaning that 60% of the terms must\n match, or all of them if there are 3 or fewer.\n attribution: string specifying the organization datasets must come\n from\n license: string used to filter on results having a specific license\n derived_from: string containing the ID of a dataset that must be a\n parent of the result datasets (for example, charts are derived\n from a parent dataset)\n provenance: string 'official' or 'community'\n for_user: string containing a user ID that must own the returned\n datasets\n visibility: string 'open' or 'internal'\n public: boolean indicating that all returned datasets should be\n public (True) or private (False)\n published: boolean indicating that returned datasets should have\n been published (True) or not yet published (False)\n approval_status: string 'pending', 'rejected', 'approved',\n 'not_ready' filtering results by their current status in the\n approval pipeline\n explicitly_hidden: boolean filtering out datasets that have been\n explicitly hidden on a domain (False) or returning only those\n (True)\n derived: boolean allowing to search only for derived datasets\n (True) or only those from which other datasets were derived\n (False)\n '''\n # Those filters can be passed multiple times; this function expects\n # an iterable for them\n filter_multiple = set(['ids', 'domains', 'categories', 'tags', 'only',\n 'shared_to', 'column_names'])\n # Those filters only get a single value\n filter_single = set([\n 'q', 'min_should_match', 'attribution', 'license', 'derived_from',\n 'provenance', 'for_user', 'visibility', 'public', 'published',\n 'approval_status', 'explicitly_hidden', 'derived'\n ])\n all_filters = filter_multiple.union(filter_single)\n for key in kwargs:\n if key not in all_filters:\n raise TypeError(\"Unexpected keyword argument %s\" % key)\n params = []\n if limit:\n params.append(('limit', limit))\n for key, value in kwargs.items():\n if key in filter_multiple:\n for item in value:\n params.append((key, item))\n elif key in filter_single:\n 
params.append((key, value))\n # TODO: custom domain-specific metadata\n # https://socratadiscovery.docs.apiary.io/\n # #reference/0/find-by-domain-specific-metadata\n\n if order:\n params.append(('order', order))\n\n results = self._perform_request(\"get\", DATASETS_PATH,\n params=params + [('offset', offset)])\n numResults = results['resultSetSize']\n # no more results to fetch, or limit reached\n if (limit >= numResults or limit == len(results['results']) or\n numResults == len(results['results'])):\n return results['results']\n\n if limit != 0:\n raise Exception(\"Unexpected number of results returned from endpoint.\\\n Expected {0}, got {1}.\".format(limit, len(results['results'])))\n\n # get all remaining results\n all_results = results['results']\n while len(all_results) != numResults:\n offset += len(results[\"results\"])\n results = self._perform_request(\"get\", DATASETS_PATH,\n params=params + [('offset', offset)])\n all_results.extend(results['results'])\n\n return all_results",
"def fetch_datasets(self, **kwargs):\n \"\"\"\n List datasets as owner\n Fetch datasets that the currently authenticated user has access to because he or she is the owner of the dataset.\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please define a `callback` function\n to be invoked when receiving the response.\n >>> def callback_function(response):\n >>> pprint(response)\n >>>\n >>> thread = api.fetch_datasets(callback=callback_function)\n\n :param callback function: The callback function\n for asynchronous request. (optional)\n :param str limit: Maximum number of items to include in a page of results.\n :param str next: Token from previous result page to be used when requesting a subsequent page.\n :return: PaginatedDatasetResults\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.fetch_datasets_with_http_info(**kwargs)\n else:\n (data) = self.fetch_datasets_with_http_info(**kwargs)\n return data",
"def datasets(self):\n \"\"\"List of datasets in this mart.\"\"\"\n if self._datasets is None:\n self._datasets = self._fetch_datasets()\n return self._datasets",
"def _dataset_info(dataset):\n \"\"\"Return information about dataset as a dict.\"\"\"\n info = {}\n\n info[\"uri\"] = dataset.uri\n info[\"uuid\"] = dataset.uuid\n\n # Computer and human readable size of dataset.\n tot_size = sum([dataset.item_properties(i)[\"size_in_bytes\"]\n for i in dataset.identifiers])\n info[\"size_int\"] = tot_size\n info[\"size_str\"] = sizeof_fmt(tot_size)\n\n info[\"creator\"] = dataset._admin_metadata[\"creator_username\"]\n info[\"name\"] = dataset._admin_metadata[\"name\"]\n\n info[\"date\"] = date_fmt(dataset._admin_metadata[\"frozen_at\"])\n\n info[\"num_items\"] = len(dataset.identifiers)\n\n info[\"readme_content\"] = dataset.get_readme_content()\n\n return info"
] |
[
0.734828531742096,
0.7280716300010681,
0.7208768129348755,
0.7172368168830872,
0.7131471633911133,
0.7089791893959045,
0.707393229007721,
0.7036842107772827,
0.7008451819419861,
0.6986203789710999,
0.6985385417938232,
0.6975969672203064
] |
Act block is a single node - either the act node itself, or the node
that wraps the act node.
|
def build_act(cls: Type[_Block], node: ast.stmt, test_func_node: ast.FunctionDef) -> _Block:
"""
Act block is a single node - either the act node itself, or the node
that wraps the act node.
"""
add_node_parents(test_func_node)
    # Walk up the act node's parents until we reach the test's definition.
act_block_node = node
while act_block_node.parent != test_func_node: # type: ignore
act_block_node = act_block_node.parent # type: ignore
return cls([act_block_node], LineType.act)
|
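A self-contained sketch of the parent-walking idea `build_act` relies on; `add_node_parents` here is a minimal stand-in (an assumption) for the helper the real module imports.

import ast

def add_node_parents(root):
    # Minimal stand-in: tag every child node with a `.parent` attribute.
    for parent in ast.walk(root):
        for child in ast.iter_child_nodes(parent):
            child.parent = parent

func = ast.parse(
    "def test_thing():\n"
    "    with my_raises(ValueError):\n"
    "        do_thing()\n"
).body[0]
add_node_parents(func)
act_node = func.body[0].body[0]  # the do_thing() call inside `with`
wrapper = act_node
while wrapper.parent is not func:
    wrapper = wrapper.parent
assert isinstance(wrapper, ast.With)  # the `with` statement wraps the act node
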
[
"def load_act_node(self) -> ActNode:\n \"\"\"\n Raises:\n ValidationError: AAA01 when no act block is found and AAA02 when\n multiple act blocks are found.\n \"\"\"\n act_nodes = ActNode.build_body(self.node.body)\n\n if not act_nodes:\n raise ValidationError(self.first_line_no, self.node.col_offset, 'AAA01 no Act block found in test')\n\n # Allow `pytest.raises` and `self.assertRaises()` in assert nodes - if\n # any of the additional nodes are `pytest.raises`, then raise\n for a_n in act_nodes[1:]:\n if a_n.block_type in [ActNodeType.marked_act, ActNodeType.result_assignment]:\n raise ValidationError(\n self.first_line_no,\n self.node.col_offset,\n 'AAA02 multiple Act blocks found in test',\n )\n\n return act_nodes[0]",
"def build_arrange(cls: Type[_Block], nodes: List[ast.stmt], max_line_number: int) -> _Block:\n \"\"\"\n Arrange block is all non-pass and non-docstring nodes before the Act\n block start.\n \"\"\"\n return cls(filter_arrange_nodes(nodes, max_line_number), LineType.arrange)",
"def on_block(node)\n builder = DefinitionBuilder::RubyBlock.new(node, self)\n definition = builder.build\n\n associate_node(node, definition)\n\n push_scope(definition)\n end",
"def build_assert(cls: Type[_Block], nodes: List[ast.stmt], min_line_number: int) -> _Block:\n \"\"\"\n Assert block is all nodes that are after the Act node.\n\n Note:\n The filtering is *still* running off the line number of the Act\n node, when instead it should be using the last line of the Act\n block.\n \"\"\"\n return cls(filter_assert_nodes(nodes, min_line_number), LineType._assert)",
"def acts_as_node(params: nil, fields: nil)\n configuration = {params: params, fields: fields}\n\n ActsAsNode.register_class(self.name)\n\n # Store acts_as_node configuration\n cattr_accessor :acts_as_node_configuration\n self.acts_as_node_configuration = configuration\n end",
"function(node){\n if (!node) this.expect('atblock');\n node = new nodes.Atblock;\n this.state.push('atblock');\n node.block = this.block(node, false);\n this.state.pop();\n return node;\n }",
"def define_action(action, &block)\n action = action.to_sym\n defined_actions << action unless defined_actions.include?(action)\n if action.in?(Evvnt::ClassTemplateMethods.instance_methods)\n define_class_action(action, &block)\n end\n if action.in?(Evvnt::InstanceTemplateMethods.instance_methods)\n define_instance_action(action, &block)\n end\n action\n end",
"def build(cls: Type[AN], node: ast.stmt) -> List[AN]:\n \"\"\"\n Starting at this ``node``, check if it's an act node. If it's a context\n manager, recurse into child nodes.\n\n Returns:\n List of all act nodes found.\n \"\"\"\n if node_is_result_assignment(node):\n return [cls(node, ActNodeType.result_assignment)]\n if node_is_pytest_raises(node):\n return [cls(node, ActNodeType.pytest_raises)]\n if node_is_unittest_raises(node):\n return [cls(node, ActNodeType.unittest_raises)]\n\n token = node.first_token # type: ignore\n # Check if line marked with '# act'\n if token.line.strip().endswith('# act'):\n return [cls(node, ActNodeType.marked_act)]\n\n # Recurse (downwards) if it's a context manager\n if isinstance(node, ast.With):\n return cls.build_body(node.body)\n\n return []",
"def define_instance_action(action, &block)\n body = block_given? ? block : InstanceTemplateMethods.instance_method(action)\n define_method(action, body)\n end",
"def build_body(cls: Type[AN], body: List[ast.stmt]) -> List:\n \"\"\"\n Note:\n Return type is probably ``-> List[AN]``, but can't get it to pass.\n \"\"\"\n act_nodes = [] # type: List[ActNode]\n for child_node in body:\n act_nodes += ActNode.build(child_node)\n return act_nodes",
"def do_block(parser, token):\n \"\"\"\n Process several nodes inside a single block\n Block functions take ``context``, ``nodelist`` as first arguments\n If the second to last argument is ``as``, the rendered result is stored in the context and is named whatever the last argument is.\n\n Syntax::\n\n {% [block] [var args...] [name=value kwargs...] [as varname] %}\n ... nodelist ...\n {% end[block] %}\n\n Examples::\n\n {% render_block as rendered_output %}\n {{ request.path }}/blog/{{ blog.slug }}\n {% endrender_block %}\n\n {% highlight_block python %}\n import this\n {% endhighlight_block %}\n\n \"\"\"\n name, args, kwargs = get_signature(token, contextable=True)\n kwargs['nodelist'] = parser.parse(('end%s' % name,))\n parser.delete_first_token()\n return BlockNode(parser, name, *args, **kwargs)",
"def _get_activation(self, act):\n \"\"\"Get activation block based on the name. \"\"\"\n if isinstance(act, str):\n if act.lower() == 'gelu':\n return GELU()\n else:\n return gluon.nn.Activation(act)\n assert isinstance(act, gluon.Block)\n return act"
] |
[
0.7772277593612671,
0.7153825759887695,
0.7083226442337036,
0.7023711204528809,
0.7007628679275513,
0.6940969228744507,
0.6882621049880981,
0.6849969029426575,
0.6822970509529114,
0.6797627210617065,
0.6778395175933838,
0.6772918105125427
] |
Arrange block is all non-pass and non-docstring nodes before the Act
block start.
|
def build_arrange(cls: Type[_Block], nodes: List[ast.stmt], max_line_number: int) -> _Block:
"""
Arrange block is all non-pass and non-docstring nodes before the Act
block start.
"""
return cls(filter_arrange_nodes(nodes, max_line_number), LineType.arrange)
|
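A worked sketch of the filtering that `build_arrange` delegates to `filter_arrange_nodes`: keep statements before the Act line, dropping docstrings and `pass`. The test source and the hard-coded Act line are illustrative, and `ast.Constant` is used here where older code would check `ast.Str`.

import ast

src = (
    "def test_sum():\n"           # line 1
    '    """Not Arrange."""\n'    # line 2: docstring
    "    x = [1, 2]\n"            # line 3: Arrange
    "    result = sum(x)\n"       # line 4: Act
    "    assert result == 3\n"    # line 5: Assert
)
func = ast.parse(src).body[0]
act_line = 4  # assumed to come from the Act block
arrange = [
    n for n in func.body
    if n.lineno < act_line
    and not isinstance(n, ast.Pass)
    and not (isinstance(n, ast.Expr) and isinstance(n.value, ast.Constant))
]
print([n.lineno for n in arrange])  # [3]
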
[
"def load_act_node(self) -> ActNode:\n \"\"\"\n Raises:\n ValidationError: AAA01 when no act block is found and AAA02 when\n multiple act blocks are found.\n \"\"\"\n act_nodes = ActNode.build_body(self.node.body)\n\n if not act_nodes:\n raise ValidationError(self.first_line_no, self.node.col_offset, 'AAA01 no Act block found in test')\n\n # Allow `pytest.raises` and `self.assertRaises()` in assert nodes - if\n # any of the additional nodes are `pytest.raises`, then raise\n for a_n in act_nodes[1:]:\n if a_n.block_type in [ActNodeType.marked_act, ActNodeType.result_assignment]:\n raise ValidationError(\n self.first_line_no,\n self.node.col_offset,\n 'AAA02 multiple Act blocks found in test',\n )\n\n return act_nodes[0]",
"def check_arrange_act_spacing(self) -> typing.Generator[AAAError, None, None]:\n \"\"\"\n * When no spaces found, point error at line above act block\n * When too many spaces found, point error at 2nd blank line\n \"\"\"\n yield from self.check_block_spacing(\n LineType.arrange,\n LineType.act,\n 'AAA03 expected 1 blank line before Act block, found {}',\n )",
"protected function arrangeBlocks()\n {\n $this->blocks = array();\n foreach ($this->alBlocks as $alBlock) {\n $this->blocks[$alBlock->getSlotName()][] = $alBlock;\n }\n }",
"def build_act(cls: Type[_Block], node: ast.stmt, test_func_node: ast.FunctionDef) -> _Block:\n \"\"\"\n Act block is a single node - either the act node itself, or the node\n that wraps the act node.\n \"\"\"\n add_node_parents(test_func_node)\n # Walk up the parent nodes of the parent node to find test's definition.\n act_block_node = node\n while act_block_node.parent != test_func_node: # type: ignore\n act_block_node = act_block_node.parent # type: ignore\n return cls([act_block_node], LineType.act)",
"def _nest_at_rules(self, rule, scope, block):\n \"\"\"\n Implements @-blocks\n \"\"\"\n # TODO handle @charset, probably?\n # Interpolate the current block\n # TODO this seems like it should be done in the block header. and more\n # generally?\n calculator = self._make_calculator(rule.namespace)\n if block.header.argument:\n # TODO is this correct? do ALL at-rules ALWAYS allow both vars and\n # interpolation?\n node = calculator.parse_vars_and_interpolations(\n block.header.argument)\n block.header.argument = node.evaluate(calculator).render()\n\n # TODO merge into RuleAncestry\n new_ancestry = list(rule.ancestry.headers)\n if block.directive == '@media' and new_ancestry:\n for i, header in reversed(list(enumerate(new_ancestry))):\n if header.is_selector:\n continue\n elif header.directive == '@media':\n new_ancestry[i] = BlockAtRuleHeader(\n '@media',\n \"%s and %s\" % (header.argument, block.argument))\n break\n else:\n new_ancestry.insert(i, block.header)\n else:\n new_ancestry.insert(0, block.header)\n else:\n new_ancestry.append(block.header)\n\n rule.descendants += 1\n new_rule = SassRule(\n source_file=rule.source_file,\n import_key=rule.import_key,\n lineno=block.lineno,\n num_header_lines=block.header.num_lines,\n unparsed_contents=block.unparsed_contents,\n\n legacy_compiler_options=rule.legacy_compiler_options,\n options=rule.options.copy(),\n #properties\n #extends_selectors\n ancestry=RuleAncestry(new_ancestry),\n\n namespace=rule.namespace.derive(),\n nested=rule.nested + 1,\n )\n self.rules.append(new_rule)\n rule.namespace.use_import(rule.source_file)\n self.manage_children(new_rule, scope)\n\n self._warn_unused_imports(new_rule)",
"def build_assert(cls: Type[_Block], nodes: List[ast.stmt], min_line_number: int) -> _Block:\n \"\"\"\n Assert block is all nodes that are after the Act node.\n\n Note:\n The filtering is *still* running off the line number of the Act\n node, when instead it should be using the last line of the Act\n block.\n \"\"\"\n return cls(filter_assert_nodes(nodes, min_line_number), LineType._assert)",
"def filter_arrange_nodes(nodes: List[ast.stmt], max_line_number: int) -> List[ast.stmt]:\n \"\"\"\n Finds all nodes that are before the ``max_line_number`` and are not\n docstrings or ``pass``.\n \"\"\"\n return [\n node for node in nodes if node.lineno < max_line_number and not isinstance(node, ast.Pass)\n and not (isinstance(node, ast.Expr) and isinstance(node.value, ast.Str))\n ]",
"JCBlock block(int pos, long flags) {\n accept(LBRACE);\n List<JCStatement> stats = blockStatements();\n JCBlock t = F.at(pos).Block(flags, stats);\n while (token.kind == CASE || token.kind == DEFAULT) {\n syntaxError(\"orphaned\", token.kind);\n switchBlockStatementGroups();\n }\n // the Block node has a field \"endpos\" for first char of last token, which is\n // usually but not necessarily the last char of the last token.\n t.endpos = token.pos;\n accept(RBRACE);\n return toP(t);\n }",
"def normalize_block\n statements = value.to_vool\n index = statements.length - 1\n snd = statements.statements[index]\n raise \"Expecting Send #{snd.class}:#{snd}\" unless snd.is_a?( Vool::SendStatement)\n statements.statements[index] = assignment( snd )\n statements\n end",
"def nestSections(block, level=1):\n \"\"\"\n Sections aren't handled by CommonMark at the moment.\n This function adds sections to a block of nodes.\n 'title' nodes with an assigned level below 'level' will be put in a child section.\n If there are no child nodes with titles of level 'level' then nothing is done\n \"\"\"\n cur = block.first_child\n if cur is not None:\n children = []\n # Do we need to do anything?\n nest = False\n while cur is not None:\n if cur.t == 'heading' and cur.level == level:\n nest = True\n break\n cur = cur.nxt\n if not nest:\n return\n\n section = Node('MDsection', 0)\n section.parent = block\n cur = block.first_child\n while cur is not None:\n if cur.t == 'heading' and cur.level == level:\n # Found a split point, flush the last section if needed\n if section.first_child is not None:\n finalizeSection(section)\n children.append(section)\n section = Node('MDsection', 0)\n nxt = cur.nxt\n # Avoid adding sections without titles at the start\n if section.first_child is None:\n if cur.t == 'heading' and cur.level == level:\n section.append_child(cur)\n else:\n children.append(cur)\n else:\n section.append_child(cur)\n cur = nxt\n\n # If there's only 1 child then don't bother\n if section.first_child is not None:\n finalizeSection(section)\n children.append(section)\n\n block.first_child = None\n block.last_child = None\n nextLevel = level + 1\n for child in children:\n # Handle nesting\n if child.t == 'MDsection':\n nestSections(child, level=nextLevel)\n\n # Append\n if block.first_child is None:\n block.first_child = child\n else:\n block.last_child.nxt = child\n child.parent = block\n child.nxt = None\n child.prev = block.last_child\n block.last_child = child",
"private void normalizeBlocks(Node n) {\n if (NodeUtil.isControlStructure(n)\n && !n.isLabel()\n && !n.isSwitch()) {\n for (Node c = n.getFirstChild(); c != null; c = c.getNext()) {\n if (NodeUtil.isControlStructureCodeBlock(n, c) && !c.isBlock()) {\n Node newBlock = IR.block().srcref(n);\n n.replaceChild(c, newBlock);\n newBlock.setIsAddedBlock(true);\n if (!c.isEmpty()) {\n newBlock.addChildrenToFront(c);\n }\n c = newBlock;\n reportChange();\n }\n }\n }\n }",
"protected function _prepareBlocks()\n {\n $cacheKey = \"{$this->_View->theme}_{$this->_machineName}\";\n $blocks = TableRegistry::get('Block.Blocks')\n ->find('all')\n ->cache($cacheKey, 'blocks')\n ->contain(['Roles', 'BlockRegions'])\n ->matching('BlockRegions', function ($q) {\n return $q->where([\n 'BlockRegions.theme' => $this->_View->theme(),\n 'BlockRegions.region' => $this->_machineName,\n ]);\n })\n ->where(['Blocks.status' => 1])\n ->order(['BlockRegions.ordering' => 'ASC']);\n\n $blocks->sortBy(function ($block) {\n return $block->region->ordering;\n }, SORT_ASC);\n\n // remove blocks that cannot be rendered based on current request.\n $blocks = $blocks->filter(function ($block) {\n return $this->_filterBlock($block) && $block->renderable();\n });\n\n $this->blocks($blocks);\n }"
] |
[
0.7250023484230042,
0.7102052569389343,
0.7045885920524597,
0.6968176364898682,
0.6899915337562561,
0.6815516948699951,
0.675894558429718,
0.6735600829124451,
0.6681910157203674,
0.6652069091796875,
0.664015531539917,
0.6602808237075806
] |
Assert block is all nodes that are after the Act node.
Note:
The filtering is *still* running off the line number of the Act
node, when instead it should be using the last line of the Act
block.
|
def build_assert(cls: Type[_Block], nodes: List[ast.stmt], min_line_number: int) -> _Block:
"""
Assert block is all nodes that are after the Act node.
Note:
The filtering is *still* running off the line number of the Act
node, when instead it should be using the last line of the Act
block.
"""
return cls(filter_assert_nodes(nodes, min_line_number), LineType._assert)
|
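The Assert-side counterpart, sketched in the same spirit: `filter_assert_nodes` keeps statements past the Act node's line number, which is why the Note above matters for multi-line Act blocks.

import ast

func = ast.parse(
    "def test_thing():\n"
    "    result = act()\n"   # line 2: Act
    "    assert result\n"    # line 3: Assert
).body[0]
min_line = 2  # line number of the Act node
assert_nodes = [n for n in func.body if n.lineno > min_line]
print([type(n).__name__ for n in assert_nodes])  # ['Assert']
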
[
"def build_arrange(cls: Type[_Block], nodes: List[ast.stmt], max_line_number: int) -> _Block:\n \"\"\"\n Arrange block is all non-pass and non-docstring nodes before the Act\n block start.\n \"\"\"\n return cls(filter_arrange_nodes(nodes, max_line_number), LineType.arrange)",
"def filter_assert_nodes(nodes: List[ast.stmt], min_line_number: int) -> List[ast.stmt]:\n \"\"\"\n Finds all nodes that are after the ``min_line_number``\n \"\"\"\n return [node for node in nodes if node.lineno > min_line_number]",
"def load_act_node(self) -> ActNode:\n \"\"\"\n Raises:\n ValidationError: AAA01 when no act block is found and AAA02 when\n multiple act blocks are found.\n \"\"\"\n act_nodes = ActNode.build_body(self.node.body)\n\n if not act_nodes:\n raise ValidationError(self.first_line_no, self.node.col_offset, 'AAA01 no Act block found in test')\n\n # Allow `pytest.raises` and `self.assertRaises()` in assert nodes - if\n # any of the additional nodes are `pytest.raises`, then raise\n for a_n in act_nodes[1:]:\n if a_n.block_type in [ActNodeType.marked_act, ActNodeType.result_assignment]:\n raise ValidationError(\n self.first_line_no,\n self.node.col_offset,\n 'AAA02 multiple Act blocks found in test',\n )\n\n return act_nodes[0]",
"def check_act_assert_spacing(self) -> typing.Generator[AAAError, None, None]:\n \"\"\"\n * When no spaces found, point error at line above assert block\n * When too many spaces found, point error at 2nd blank line\n \"\"\"\n yield from self.check_block_spacing(\n LineType.act,\n LineType._assert,\n 'AAA04 expected 1 blank line before Assert block, found {}',\n )",
"def _sanity_check_query_root_block(ir_blocks):\n \"\"\"Assert that QueryRoot is always the first block, and only the first block.\"\"\"\n if not isinstance(ir_blocks[0], QueryRoot):\n raise AssertionError(u'The first block was not QueryRoot: {}'.format(ir_blocks))\n for block in ir_blocks[1:]:\n if isinstance(block, QueryRoot):\n raise AssertionError(u'Found QueryRoot after the first block: {}'.format(ir_blocks))",
"function requiresBraceOfConsequent(node) {\n if (node.alternate && node.consequent.type === \"BlockStatement\") {\n if (node.consequent.body.length >= 2) {\n return true;\n }\n\n for (\n let currentNode = node.consequent.body[0];\n currentNode;\n currentNode = astUtils.getTrailingStatement(currentNode)\n ) {\n if (currentNode.type === \"IfStatement\" && !currentNode.alternate) {\n return true;\n }\n }\n }\n\n return false;\n }",
"def build_act(cls: Type[_Block], node: ast.stmt, test_func_node: ast.FunctionDef) -> _Block:\n \"\"\"\n Act block is a single node - either the act node itself, or the node\n that wraps the act node.\n \"\"\"\n add_node_parents(test_func_node)\n # Walk up the parent nodes of the parent node to find test's definition.\n act_block_node = node\n while act_block_node.parent != test_func_node: # type: ignore\n act_block_node = act_block_node.parent # type: ignore\n return cls([act_block_node], LineType.act)",
"def check_arrange_act_spacing(self) -> typing.Generator[AAAError, None, None]:\n \"\"\"\n * When no spaces found, point error at line above act block\n * When too many spaces found, point error at 2nd blank line\n \"\"\"\n yield from self.check_block_spacing(\n LineType.arrange,\n LineType.act,\n 'AAA03 expected 1 blank line before Act block, found {}',\n )",
"function maybeAsiHazardAfter(node) {\n const t = node.type;\n\n if (t === \"DoWhileStatement\" ||\n t === \"BreakStatement\" ||\n t === \"ContinueStatement\" ||\n t === \"DebuggerStatement\" ||\n t === \"ImportDeclaration\" ||\n t === \"ExportAllDeclaration\"\n ) {\n return false;\n }\n if (t === \"ReturnStatement\") {\n return Boolean(node.argument);\n }\n if (t === \"ExportNamedDeclaration\") {\n return Boolean(node.declaration);\n }\n if (isEndOfArrowBlock(sourceCode.getLastToken(node, 1))) {\n return false;\n }\n\n return true;\n }",
"def _sanity_check_block_pairwise_constraints(ir_blocks):\n \"\"\"Assert that adjacent blocks obey all invariants.\"\"\"\n for first_block, second_block in pairwise(ir_blocks):\n # Always Filter before MarkLocation, never after.\n if isinstance(first_block, MarkLocation) and isinstance(second_block, Filter):\n raise AssertionError(u'Found Filter after MarkLocation block: {}'.format(ir_blocks))\n\n # There's no point in marking the same location twice in a row.\n if isinstance(first_block, MarkLocation) and isinstance(second_block, MarkLocation):\n raise AssertionError(u'Found consecutive MarkLocation blocks: {}'.format(ir_blocks))\n\n # Traverse blocks with optional=True are immediately followed\n # by a MarkLocation, CoerceType or Filter block.\n if isinstance(first_block, Traverse) and first_block.optional:\n if not isinstance(second_block, (MarkLocation, CoerceType, Filter)):\n raise AssertionError(u'Expected MarkLocation, CoerceType or Filter after Traverse '\n u'with optional=True. Found: {}'.format(ir_blocks))\n\n # Backtrack blocks with optional=True are immediately followed by a MarkLocation block.\n if isinstance(first_block, Backtrack) and first_block.optional:\n if not isinstance(second_block, MarkLocation):\n raise AssertionError(u'Expected MarkLocation after Backtrack with optional=True, '\n u'but none was found: {}'.format(ir_blocks))\n\n # Recurse blocks are immediately preceded by a MarkLocation or Backtrack block.\n if isinstance(second_block, Recurse):\n if not (isinstance(first_block, MarkLocation) or isinstance(first_block, Backtrack)):\n raise AssertionError(u'Expected MarkLocation or Backtrack before Recurse, but none '\n u'was found: {}'.format(ir_blocks))",
"def _sanity_check_output_source_follower_blocks(ir_blocks):\n \"\"\"Ensure there are no Traverse / Backtrack / Recurse blocks after an OutputSource block.\"\"\"\n seen_output_source = False\n for block in ir_blocks:\n if isinstance(block, OutputSource):\n seen_output_source = True\n elif seen_output_source:\n if isinstance(block, (Backtrack, Traverse, Recurse)):\n raise AssertionError(u'Found Backtrack / Traverse / Recurse '\n u'after OutputSource block: '\n u'{}'.format(ir_blocks))",
"function isOneLinerBlock(node) {\n const parent = node.parent;\n const nextToken = sourceCode.getTokenAfter(node);\n\n if (!nextToken || nextToken.value !== \"}\") {\n return false;\n }\n return (\n !!parent &&\n parent.type === \"BlockStatement\" &&\n parent.loc.start.line === parent.loc.end.line\n );\n }"
] |
[
0.7359182834625244,
0.729572057723999,
0.7285538911819458,
0.7074596285820007,
0.7068079113960266,
0.7063937187194824,
0.7021381258964539,
0.6958063244819641,
0.6923790574073792,
0.69187992811203,
0.6896698474884033,
0.6874670386314392
] |
Raises:
EmptyBlock: when block has no nodes
|
def get_span(self, first_line_no: int) -> Tuple[int, int]:
"""
Raises:
EmptyBlock: when block has no nodes
"""
if not self.nodes:
raise EmptyBlock('span requested from {} block with no nodes'.format(self.line_type))
return (
get_first_token(self.nodes[0]).start[0] - first_line_no,
get_last_token(self.nodes[-1]).start[0] - first_line_no,
)
|
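A simplified sketch of the span arithmetic in `get_span`. The real implementation reads first/last tokens (asttokens-style), so multi-line statements resolve to their true boundaries; this stand-in uses plain `lineno` for brevity, which is an assumption.

import ast

def get_span_simple(nodes, first_line_no):
    # Offsets of the block's first and last statements, relative to the
    # test's first line; empty blocks raise, mirroring EmptyBlock.
    if not nodes:
        raise ValueError('span requested from block with no nodes')
    return (nodes[0].lineno - first_line_no, nodes[-1].lineno - first_line_no)

func = ast.parse("def test_():\n    x = 1\n    y = 2\n").body[0]
print(get_span_simple(func.body, func.lineno))  # (1, 2)
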
[
"def p_block_empty(self, p):\n 'block : BEGIN END'\n p[0] = Block((), lineno=p.lineno(1))\n p.set_lineno(0, p.lineno(1))",
"public void removeEmptyBlocks() {\n Block curr = blocklistHead;\n Block prev = null;\n int effId = 0;\n while (curr != null) {\n if (!curr.isEmpty()) {\n curr.id = effId++;\n if (prev != null) {\n prev.nextBlock = curr;\n } else {\n blocklistHead = curr;\n }\n prev = curr;\n }\n curr = curr.nextBlock;\n }\n if (prev != null) {\n prev.nextBlock = null;\n } else {\n blocklistHead = null;\n }\n numBlocks = effId;\n }",
"def visit_emptynode(self, node, parent):\n \"\"\"visit an EmptyNode node by returning a fresh instance of it\"\"\"\n return nodes.EmptyNode(\n getattr(node, \"lineno\", None), getattr(node, \"col_offset\", None), parent\n )",
"def make_empty(self, axes=None):\n \"\"\" return an empty BlockManager with the items axis of len 0 \"\"\"\n if axes is None:\n axes = [ensure_index([])] + [ensure_index(a)\n for a in self.axes[1:]]\n\n # preserve dtype if possible\n if self.ndim == 1:\n blocks = np.array([], dtype=self.array_dtype)\n else:\n blocks = []\n return self.__class__(blocks, axes)",
"static boolean isEmptyBlock(Node block) {\n if (!block.isBlock()) {\n return false;\n }\n\n for (Node n = block.getFirstChild(); n != null; n = n.getNext()) {\n if (!n.isEmpty()) {\n return false;\n }\n }\n return true;\n }",
"function createEmptyContent(opts: Options): Block {\n return Block.create({\n type: opts.typeContent,\n nodes: [Text.create()]\n });\n}",
"def empty( self, node ):\n \"\"\"Calculate empty space as a fraction of total space\"\"\"\n overall = self.overall( node )\n if overall:\n return (overall - self.children_sum( self.children(node), node))/float(overall)\n return 0",
"def p_namedblock_empty(self, p):\n 'namedblock : BEGIN COLON ID END'\n p[0] = Block((), p[3], lineno=p.lineno(1))\n p.set_lineno(0, p.lineno(1))",
"private void addEmptyElements() {\n for (Node node : nodes) {\r\n boolean empty = true;\r\n if(node instanceof NestableNode) {\r\n\t List<Node> nl = ((NestableNode) node).getChildren();\r\n\t for (Node n : nl) {\r\n\t if (n instanceof Element) {\r\n\t empty = false;\r\n\t break;\r\n\t } else if (n instanceof Text) {\r\n\t // TODO: Should we trim the text and see if it's length 0?\r\n\t String value = ((Text) n).getContent();\r\n\t if (value.length() > 0) {\r\n\t empty = false;\r\n\t break;\r\n\t }\r\n\t }\r\n\t }\r\n } \r\n if (empty) {\r\n result.add(node);\r\n }\r\n }\r\n }",
"def on_chunk(&block)\n raise \"Body already read\" if @on_chunk\n raise ArgumentError, \"block must be given\" unless block_given?\n @on_chunk = block\n unless @body.empty?\n @body.each(&block)\n @body.clear\n end\n self\n end",
"private static Node getFirstNonEmptyChild(Node n) {\n for (Node c = n.getFirstChild(); c != null; c = c.getNext()) {\n if (c.isBlock()) {\n Node result = getFirstNonEmptyChild(c);\n if (result != null) {\n return result;\n }\n } else if (!c.isEmpty()) {\n return c;\n }\n }\n return null;\n }",
"function NoEmpty(type) {\n type = typeof type == 'string' ?\n Block.create({\n type,\n nodes: [\n Text.create()\n ]\n }) : type;\n\n const onBeforeChange = (state) => {\n const { document } = state;\n\n // If document is not empty, it continues\n if (!document.nodes.isEmpty()) {\n return;\n }\n\n // Reset the state\n return State.create({\n document: Document.create({\n nodes: [type]\n })\n });\n };\n\n return {\n onBeforeChange\n };\n}"
] |
[
0.7330018877983093,
0.7327352166175842,
0.7067156434059143,
0.7066289186477661,
0.7002111673355103,
0.6924017667770386,
0.6880962252616882,
0.6878398060798645,
0.6829017400741577,
0.6765238642692566,
0.674486517906189,
0.6738160848617554
] |
r"""Developer script aliases.
|
def cli_aliases(self):
r"""Developer script aliases.
"""
scripting_groups = []
aliases = {}
for cli_class in self.cli_classes:
instance = cli_class()
if getattr(instance, "alias", None):
scripting_group = getattr(instance, "scripting_group", None)
if scripting_group:
scripting_groups.append(scripting_group)
entry = (scripting_group, instance.alias)
if (scripting_group,) in aliases:
message = "alias conflict between scripting group"
message += " {!r} and {}"
message = message.format(
scripting_group, aliases[(scripting_group,)].__name__
)
raise Exception(message)
if entry in aliases:
message = "alias conflict between {} and {}"
message = message.format(
aliases[entry].__name__, cli_class.__name__
)
raise Exception(message)
aliases[entry] = cli_class
else:
entry = (instance.alias,)
if entry in scripting_groups:
message = "alias conflict between {}"
message += " and scripting group {!r}"
message = message.format(cli_class.__name__, instance.alias)
raise Exception(message)
if entry in aliases:
message = "alias conflict be {} and {}"
message = message.format(cli_class.__name__, aliases[entry])
raise Exception(message)
aliases[(instance.alias,)] = cli_class
else:
if instance.program_name in scripting_groups:
message = "Alias conflict between {}"
message += " and scripting group {!r}"
message = message.format(cli_class.__name__, instance.program_name)
raise Exception(message)
aliases[(instance.program_name,)] = cli_class
alias_map = {}
for key, value in aliases.items():
if len(key) == 1:
alias_map[key[0]] = value
else:
if key[0] not in alias_map:
alias_map[key[0]] = {}
alias_map[key[0]][key[1]] = value
return alias_map
|
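A tiny sketch of the alias map shape `cli_aliases` returns, using hypothetical script classes: a one-part key maps an alias straight to its class, while a scripting group maps to a nested {alias: class} dict.

# Hypothetical stand-ins for real CLI script classes.
class NewScript:
    alias = 'new'

class RenderProjectScript:
    alias = 'render'
    scripting_group = 'project'

alias_map = {
    'new': NewScript,                           # (alias,) entry
    'project': {'render': RenderProjectScript}  # (group, alias) entry
}
assert alias_map['project']['render'] is RenderProjectScript
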
[
"def alias(*aliases):\n \"\"\"Decorator to add aliases for Cmdln.do_* command handlers.\n\n Example:\n class MyShell(cmdln.Cmdln):\n @cmdln.alias(\"!\", \"sh\")\n def do_shell(self, argv):\n #...implement 'shell' command\n \"\"\"\n\n def decorate(f):\n if not hasattr(f, \"aliases\"):\n f.aliases = []\n f.aliases += aliases\n return f\n\n return decorate",
"def do_alias(self, arg):\n \"\"\"alias [name [command [parameter parameter ...] ]]\n Create an alias called 'name' that executes 'command'. The\n command must *not* be enclosed in quotes. Replaceable\n parameters can be indicated by %1, %2, and so on, while %* is\n replaced by all the parameters. If no command is given, the\n current alias for name is shown. If no name is given, all\n aliases are listed.\n\n Aliases may be nested and can contain anything that can be\n legally typed at the pdb prompt. Note! You *can* override\n internal pdb commands with aliases! Those internal commands\n are then hidden until the alias is removed. Aliasing is\n recursively applied to the first word of the command line; all\n other words in the line are left alone.\n\n As an example, here are two useful aliases (especially when\n placed in the .pdbrc file):\n\n # Print instance variables (usage \"pi classInst\")\n alias pi for k in %1.__dict__.keys(): print(\"%1.\",k,\"=\",%1.__dict__[k])\n # Print instance variables in self\n alias ps pi self\n \"\"\"\n args = arg.split()\n if len(args) == 0:\n keys = sorted(self.aliases.keys())\n for alias in keys:\n self.message(\"%s = %s\" % (alias, self.aliases[alias]))\n return\n if args[0] in self.aliases and len(args) == 1:\n self.message(\"%s = %s\" % (args[0], self.aliases[args[0]]))\n else:\n self.aliases[args[0]] = ' '.join(args[1:])",
"def compose(scripts, name='main', description=None, prog=None,\n version=None):\n \"\"\"\n Collects together different scripts and builds a single\n script dispatching to the subparsers depending on\n the first argument, i.e. the name of the subparser to invoke.\n\n :param scripts: a list of script instances\n :param name: the name of the composed parser\n :param description: description of the composed parser\n :param prog: name of the script printed in the usage message\n :param version: version of the script printed with --version\n \"\"\"\n assert len(scripts) >= 1, scripts\n parentparser = argparse.ArgumentParser(\n description=description, add_help=False)\n parentparser.add_argument(\n '--version', '-v', action='version', version=version)\n subparsers = parentparser.add_subparsers(\n help='available subcommands; use %s help <subcmd>' % prog,\n prog=prog)\n\n def gethelp(cmd=None):\n if cmd is None:\n print(parentparser.format_help())\n return\n subp = subparsers._name_parser_map.get(cmd)\n if subp is None:\n print('No help for unknown command %r' % cmd)\n else:\n print(subp.format_help())\n help_script = Script(gethelp, 'help', help=False)\n progname = '%s ' % prog if prog else ''\n help_script.arg('cmd', progname + 'subcommand')\n for s in list(scripts) + [help_script]:\n subp = subparsers.add_parser(s.name, description=s.description)\n for args, kw in s.all_arguments:\n subp.add_argument(*args, **kw)\n subp.set_defaults(_func=s.func)\n\n def main(**kw):\n try:\n func = kw.pop('_func')\n except KeyError:\n parentparser.print_usage()\n else:\n return func(**kw)\n main.__name__ = name\n return Script(main, name, parentparser)",
"def alias_name():\n \"\"\"\n Returns list of alias name by query paramaters\n ---\n\n tags:\n\n - Query functions\n\n parameters:\n\n - name: alias_name\n in: query\n type: string\n required: false\n description: 'Other names used to refer to a gene'\n default: 'peptidase nexin-II'\n\n - name: is_previous_name\n in: query\n type: boolean\n required: false\n description: 'Other names used to refer to a gene'\n default: false\n\n - name: hgnc_symbol\n in: query\n type: string\n required: false\n description: 'HGNC symbol'\n default: APP\n\n - name: hgnc_identifier\n in: query\n type: integer\n required: false\n description: 'HGNC identifier'\n default: 620\n\n - name: limit\n in: query\n type: integer\n required: false\n default: 1\n \"\"\"\n allowed_str_args = ['alias_name', 'hgnc_symbol', 'hgnc_identifier']\n\n allowed_int_args = ['limit', ]\n\n allowed_bool_args = ['is_previous_name', ]\n\n args = get_args(\n request_args=request.args,\n allowed_int_args=allowed_int_args,\n allowed_str_args=allowed_str_args,\n allowed_bool_args=allowed_bool_args,\n )\n\n return jsonify(query.alias_name(**args))",
"def get_argv_tail(scriptname, prefer_main=None, argv=None):\n r\"\"\"\n gets the rest of the arguments after a script has been invoked hack.\n accounts for python -m scripts.\n\n Args:\n scriptname (str):\n\n CommandLine:\n python -m utool.util_arg --test-get_argv_tail\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_arg import * # NOQA\n >>> import utool as ut\n >>> from os.path import relpath, dirname\n >>> scriptname = 'utool.util_arg'\n >>> prefer_main = False\n >>> argv=['python', '-m', 'utool.util_arg', '--test-get_argv_tail']\n >>> tail = get_argv_tail(scriptname, prefer_main, argv)\n >>> # hack\n >>> tail[0] = ut.ensure_unixslash(relpath(tail[0], dirname(dirname(ut.__file__))))\n >>> result = ut.repr2(tail)\n >>> print(result)\n ['utool/util_arg.py', '--test-get_argv_tail']\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_arg import * # NOQA\n >>> import utool as ut\n >>> from os.path import relpath, dirname\n >>> scriptname = 'utprof.py'\n >>> prefer_main = True\n >>> argv=['utprof.py', '-m', 'utool', '--tf', 'get_argv_tail']\n >>> tail = get_argv_tail(scriptname, prefer_main, argv)\n >>> # hack\n >>> tail[0] = ut.ensure_unixslash(relpath(tail[0], dirname(dirname(ut.__file__))))\n >>> result = ut.repr2(tail)\n >>> print(result)\n ['utool/__main__.py', '--tf', 'get_argv_tail']\n \"\"\"\n if argv is None:\n argv = sys.argv\n import utool as ut\n modname = ut.get_argval('-m', help_='specify module name to profile', argv=argv)\n if modname is not None:\n # hack to account for -m scripts\n modpath = ut.get_modpath(modname, prefer_main=prefer_main)\n argvx = argv.index(modname) + 1\n argv_tail = [modpath] + argv[argvx:]\n else:\n try:\n argvx = argv.index(scriptname)\n except ValueError:\n for argvx, arg in enumerate(argv):\n # HACK\n if scriptname in arg:\n break\n argv_tail = argv[(argvx + 1):]\n return argv_tail",
"def names(args):\n \"\"\"\n %prog names namelist templatefile\n\n Generate name blocks from the `namelist` file. The `namelist` file is\n tab-delimited that contains >=4 columns of data. Three columns are mandatory.\n First name, middle initial and last name. First row is table header. For the\n extra columns, the first column will go in the `$N0` field in the template\n file, second to the `$N1` field, etc.\n\n In the alternative mode, the namelist just contains several sections. First\n row will go in the `$N0` in the template file, second to the `$N1` field.\n\n The namelist may look like:\n [Sequence]\n Bruce A. Roe, Frederic Debelle, Giles Oldroyd, Rene Geurts\n [Manuscript]\n Haibao Tang1, Vivek Krishnakumar1, Shelby Bidwell1, Benjamin Rosen1\n\n Then in this example Sequence section goes into N0, Manuscript goes into N1.\n\n Useful hints for constructing the template file can be found in:\n <http://www.ncbi.nlm.nih.gov/IEB/ToolBox/CPP_DOC/asn_spec/seq.asn.html>\n\n Often the template file can be retrieved from web form:\n <http://www.ncbi.nlm.nih.gov/WebSub/template.cgi>\n \"\"\"\n p = OptionParser(names.__doc__)\n opts, args = p.parse_args(args)\n\n if len(args) != 2:\n sys.exit(p.print_help())\n\n namelist, templatefile = args\n\n # First check the alternative format\n if open(namelist).read()[0] == '[':\n out = parse_names(namelist)\n make_template(templatefile, out)\n return\n\n reader = csv.reader(open(namelist), delimiter=\"\\t\")\n header = next(reader)\n ncols = len(header)\n assert ncols > 3\n nextras = ncols - 3\n\n blocks = []\n bools = []\n for row in reader:\n first, middle, last = row[:3]\n extras = row[3:]\n bools.append([(x.upper() == 'Y') for x in extras])\n middle = middle.strip()\n if middle != \"\":\n middle = middle.rstrip('.') + '.'\n initials = \"{0}.{1}\".format(first[0], middle)\n suffix = \"\"\n nameblock = NameTemplate.format(last=last, first=first,\n initials=initials, suffix=suffix)\n blocks.append(nameblock)\n\n selected_idx = zip(*bools)\n out = [] * nextras\n for i, sbools in enumerate(selected_idx):\n selected = []\n for b, ss in zip(blocks, sbools):\n if ss:\n selected.append(b)\n bigblock = \",\\n\".join(selected)\n out.append(bigblock)\n logging.debug(\"List N{0} contains a total of {1} names.\".format(i,\n len(selected)))\n\n make_template(templatefile, out)",
"def keywords_with_side_effects(argv):\n \"\"\"\n Get a dictionary with setup keywords that (can) have side effects.\n\n :param argv: A list of strings with command line arguments.\n :returns: A dictionary with keyword arguments for the ``setup()`` function.\n\n This setup.py script uses the setuptools 'setup_requires' feature because\n this is required by the cffi package to compile extension modules. The\n purpose of ``keywords_with_side_effects()`` is to avoid triggering the cffi\n build process as a result of setup.py invocations that don't need the cffi\n module to be built (setup.py serves the dual purpose of exposing package\n metadata).\n\n All of the options listed by ``python setup.py --help`` that print\n information should be recognized here. The commands ``clean``,\n ``egg_info``, ``register``, ``sdist`` and ``upload`` are also recognized.\n Any combination of these options and commands is also supported.\n\n This function was originally based on the `setup.py script`_ of SciPy (see\n also the discussion in `pip issue #25`_).\n\n .. _pip issue #25: https://github.com/pypa/pip/issues/25\n .. _setup.py script: https://github.com/scipy/scipy/blob/master/setup.py\n \"\"\"\n no_setup_requires_arguments = (\n '-h', '--help',\n '-n', '--dry-run',\n '-q', '--quiet',\n '-v', '--verbose',\n '-V', '--version',\n '--author',\n '--author-email',\n '--classifiers',\n '--contact',\n '--contact-email',\n '--description',\n '--egg-base',\n '--fullname',\n '--help-commands',\n '--keywords',\n '--licence',\n '--license',\n '--long-description',\n '--maintainer',\n '--maintainer-email',\n '--name',\n '--no-user-cfg',\n '--obsoletes',\n '--platforms',\n '--provides',\n '--requires',\n '--url',\n 'clean',\n 'egg_info',\n 'register',\n 'sdist',\n 'upload',\n )\n\n def is_short_option(argument):\n \"\"\"Check whether a command line argument is a short option.\"\"\"\n return len(argument) >= 2 and argument[0] == '-' and argument[1] != '-'\n\n def expand_short_options(argument):\n \"\"\"Expand combined short options into canonical short options.\"\"\"\n return ('-' + char for char in argument[1:])\n\n def argument_without_setup_requirements(argv, i):\n \"\"\"Check whether a command line argument needs setup requirements.\"\"\"\n if argv[i] in no_setup_requires_arguments:\n # Simple case: An argument which is either an option or a command\n # which doesn't need setup requirements.\n return True\n elif (is_short_option(argv[i]) and\n all(option in no_setup_requires_arguments\n for option in expand_short_options(argv[i]))):\n # Not so simple case: Combined short options none of which need\n # setup requirements.\n return True\n elif argv[i - 1:i] == ['--egg-base']:\n # Tricky case: --egg-info takes an argument which should not make\n # us use setup_requires (defeating the purpose of this code).\n return True\n else:\n return False\n\n if all(argument_without_setup_requirements(argv, i)\n for i in range(1, len(argv))):\n return {\n \"cmdclass\": {\n \"build\": DummyCFFIBuild,\n \"install\": DummyCFFIInstall,\n \"test\": DummyPyTest,\n }\n }\n else:\n return {\n \"setup_requires\": [CFFI_DEPENDENCY],\n \"cmdclass\": {\n \"test\": PyTest,\n },\n \"cffi_modules\": CFFI_MODULES,\n }",
"def evil(expr, lookup, operators, cast, reducer, tokenizer):\n \"\"\"evil evaluates an expression according to the eval description given.\n\n :param expr: An expression to evaluate.\n :param lookup: A callable which takes a single pattern argument and returns\n a set of results. The pattern can be anything that is not an\n operator token or round brackets.\n :param operators: A precedence-ordered dictionary of (function, side)\n tuples keyed on the operator token.\n :param reducer: A callable which takes a sequential list of values (from\n operations or lookups) and combines them into a result.\n Typical behaviour is that of the + operator. The return\n type should be the same as cast.\n :param cast: A callable which transforms the results of the lookup into\n the type expected by the operators and the type of the result.\n :param tokenizer: A callable which will break the query into tokens for\n evaluation per the lookup and operators. Defaults to\n setquery.query_tokenizer.\n :raises: SyntaxError\n :returns:\n\n \"\"\"\n operators = OrderedDict((op[0], op[1:]) for op in operators)\n if \"(\" in operators or \")\" in operators:\n raise ValueError(\"( and ) are reserved operators\")\n\n operator_tokens = [\"(\", \")\"] + operators.keys()\n tokens = iter(tokenizer(expr, operator_tokens))\n levels = [[]]\n\n while True:\n # Token evaluation and pattern lookups\n\n expr = levels.pop() # The currently-constructed expression\n new_level = False # We should step into a subexpression\n first_token = len(expr) == 0 # The first (sub)exp. token\n\n prev_op_side = None # The side of the last-seen operator\n try:\n # Try to get the side of the last operator from an expression\n # which we are going to continue constructing.\n prev_op_side = operators[expr[-1]][1]\n except:\n pass\n\n for token in tokens:\n\n if token == \"(\":\n new_level = True\n break\n elif token == \")\":\n break\n elif token in operators:\n op_side = operators[token][1]\n if first_token and op_side & OP_LEFT:\n raise SyntaxError(\"Operators which act on expressions to \"\n \"their left or both sides cannot be at \"\n \"the beginning of an expression.\")\n if prev_op_side is not None:\n if prev_op_side & OP_RIGHT and op_side & OP_LEFT:\n raise SyntaxError(\"Operators cannot be beside one \"\n \"another if they act on expressions \"\n \"facing one-another.\")\n expr.append(token)\n prev_op_side = op_side\n continue\n else:\n expr.append(cast(lookup(token)))\n prev_op_side = None\n\n first_token = False\n\n if new_level:\n levels.append(expr)\n levels.append([])\n continue\n elif prev_op_side is not None and prev_op_side & OP_RIGHT:\n raise SyntaxError(\"Operators which act on expressions to their \"\n \"right or both sides cannot be at the end of \"\n \"an expression.\")\n\n # Operator evaluation\n\n explen = len(expr)\n for op, (op_eval, op_side) in operators.iteritems():\n if op_side is OP_RIGHT:\n\n # Apply right-sided operators. We loop from the end backward so\n # that multiple such operators next to noe another are resolved\n # in the correct order\n t = explen - 1\n while t >= 0:\n if expr[t] == op:\n expr[t] = op_eval(expr[t + 1])\n del expr[t + 1]\n explen -= 1\n t -= 1\n\n else:\n\n # Apply left- and both-sided operators. 
We loop forward so that\n # that multiple such operators next to one another are resolved\n # in the correct order.\n t = 0\n while t < explen:\n if expr[t] == op:\n # Apply left- or both-sided operators\n if op_side is OP_LEFT:\n expr[t] = op_eval(expr[t - 1])\n del expr[t - 1]\n t -= 1\n explen -= 1\n elif op_side is OP_BOTH:\n expr[t] = op_eval(expr[t - 1], expr[t + 1])\n del expr[t + 1], expr[t - 1]\n t -= 1\n explen -= 2\n t += 1\n\n if len(levels) > 0:\n levels[-1].append(reducer(expr))\n else:\n break\n\n return reducer(expr)",
"def attrs(\n maybe_cls=None,\n these=None,\n repr_ns=None,\n repr=True,\n cmp=True,\n hash=None,\n init=True,\n slots=False,\n frozen=False,\n weakref_slot=True,\n str=False,\n auto_attribs=False,\n kw_only=False,\n cache_hash=False,\n auto_exc=False,\n):\n r\"\"\"\n A class decorator that adds `dunder\n <https://wiki.python.org/moin/DunderAlias>`_\\ -methods according to the\n specified attributes using :func:`attr.ib` or the *these* argument.\n\n :param these: A dictionary of name to :func:`attr.ib` mappings. This is\n useful to avoid the definition of your attributes within the class body\n because you can't (e.g. if you want to add ``__repr__`` methods to\n Django models) or don't want to.\n\n If *these* is not ``None``, ``attrs`` will *not* search the class body\n for attributes and will *not* remove any attributes from it.\n\n If *these* is an ordered dict (:class:`dict` on Python 3.6+,\n :class:`collections.OrderedDict` otherwise), the order is deduced from\n the order of the attributes inside *these*. Otherwise the order\n of the definition of the attributes is used.\n\n :type these: :class:`dict` of :class:`str` to :func:`attr.ib`\n\n :param str repr_ns: When using nested classes, there's no way in Python 2\n to automatically detect that. Therefore it's possible to set the\n namespace explicitly for a more meaningful ``repr`` output.\n :param bool repr: Create a ``__repr__`` method with a human readable\n representation of ``attrs`` attributes..\n :param bool str: Create a ``__str__`` method that is identical to\n ``__repr__``. This is usually not necessary except for\n :class:`Exception`\\ s.\n :param bool cmp: Create ``__eq__``, ``__ne__``, ``__lt__``, ``__le__``,\n ``__gt__``, and ``__ge__`` methods that compare the class as if it were\n a tuple of its ``attrs`` attributes. But the attributes are *only*\n compared, if the types of both classes are *identical*!\n :param hash: If ``None`` (default), the ``__hash__`` method is generated\n according how *cmp* and *frozen* are set.\n\n 1. If *both* are True, ``attrs`` will generate a ``__hash__`` for you.\n 2. If *cmp* is True and *frozen* is False, ``__hash__`` will be set to\n None, marking it unhashable (which it is).\n 3. If *cmp* is False, ``__hash__`` will be left untouched meaning the\n ``__hash__`` method of the base class will be used (if base class is\n ``object``, this means it will fall back to id-based hashing.).\n\n Although not recommended, you can decide for yourself and force\n ``attrs`` to create one (e.g. if the class is immutable even though you\n didn't freeze it programmatically) by passing ``True`` or not. Both of\n these cases are rather special and should be used carefully.\n\n See the `Python documentation \\\n <https://docs.python.org/3/reference/datamodel.html#object.__hash__>`_\n and the `GitHub issue that led to the default behavior \\\n <https://github.com/python-attrs/attrs/issues/136>`_ for more details.\n :type hash: ``bool`` or ``None``\n :param bool init: Create a ``__init__`` method that initializes the\n ``attrs`` attributes. Leading underscores are stripped for the\n argument name. If a ``__attrs_post_init__`` method exists on the\n class, it will be called after the class is fully initialized.\n :param bool slots: Create a slots_-style class that's more\n memory-efficient. See :ref:`slots` for further ramifications.\n :param bool frozen: Make instances immutable after initialization. 
If\n someone attempts to modify a frozen instance,\n :exc:`attr.exceptions.FrozenInstanceError` is raised.\n\n Please note:\n\n 1. This is achieved by installing a custom ``__setattr__`` method\n on your class so you can't implement an own one.\n\n 2. True immutability is impossible in Python.\n\n 3. This *does* have a minor a runtime performance :ref:`impact\n <how-frozen>` when initializing new instances. In other words:\n ``__init__`` is slightly slower with ``frozen=True``.\n\n 4. If a class is frozen, you cannot modify ``self`` in\n ``__attrs_post_init__`` or a self-written ``__init__``. You can\n circumvent that limitation by using\n ``object.__setattr__(self, \"attribute_name\", value)``.\n\n .. _slots: https://docs.python.org/3/reference/datamodel.html#slots\n :param bool weakref_slot: Make instances weak-referenceable. This has no\n effect unless ``slots`` is also enabled.\n :param bool auto_attribs: If True, collect `PEP 526`_-annotated attributes\n (Python 3.6 and later only) from the class body.\n\n In this case, you **must** annotate every field. If ``attrs``\n encounters a field that is set to an :func:`attr.ib` but lacks a type\n annotation, an :exc:`attr.exceptions.UnannotatedAttributeError` is\n raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't\n want to set a type.\n\n If you assign a value to those attributes (e.g. ``x: int = 42``), that\n value becomes the default value like if it were passed using\n ``attr.ib(default=42)``. Passing an instance of :class:`Factory` also\n works as expected.\n\n Attributes annotated as :data:`typing.ClassVar` are **ignored**.\n\n .. _`PEP 526`: https://www.python.org/dev/peps/pep-0526/\n :param bool kw_only: Make all attributes keyword-only (Python 3+)\n in the generated ``__init__`` (if ``init`` is ``False``, this\n parameter is ignored).\n :param bool cache_hash: Ensure that the object's hash code is computed\n only once and stored on the object. If this is set to ``True``,\n hashing must be either explicitly or implicitly enabled for this\n class. If the hash code is cached, avoid any reassignments of\n fields involved in hash code computation or mutations of the objects\n those fields point to after object creation. If such changes occur,\n the behavior of the object's hash code is undefined.\n :param bool auto_exc: If the class subclasses :class:`BaseException`\n (which implicitly includes any subclass of any exception), the\n following happens to behave like a well-behaved Python exceptions\n class:\n\n - the values for *cmp* and *hash* are ignored and the instances compare\n and hash by the instance's ids (N.B. ``attrs`` will *not* remove\n existing implementations of ``__hash__`` or the equality methods. It\n just won't add own ones.),\n - all attributes that are either passed into ``__init__`` or have a\n default value are additionally available as a tuple in the ``args``\n attribute,\n - the value of *str* is ignored leaving ``__str__`` to base classes.\n\n .. versionadded:: 16.0.0 *slots*\n .. versionadded:: 16.1.0 *frozen*\n .. versionadded:: 16.3.0 *str*\n .. versionadded:: 16.3.0 Support for ``__attrs_post_init__``.\n .. versionchanged:: 17.1.0\n *hash* supports ``None`` as value which is also the default now.\n .. versionadded:: 17.3.0 *auto_attribs*\n .. versionchanged:: 18.1.0\n If *these* is passed, no attributes are deleted from the class body.\n .. versionchanged:: 18.1.0 If *these* is ordered, the order is retained.\n .. versionadded:: 18.2.0 *weakref_slot*\n .. 
deprecated:: 18.2.0\n ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a\n :class:`DeprecationWarning` if the classes compared are subclasses of\n each other. ``__eq`` and ``__ne__`` never tried to compared subclasses\n to each other.\n .. versionadded:: 18.2.0 *kw_only*\n .. versionadded:: 18.2.0 *cache_hash*\n .. versionadded:: 19.1.0 *auto_exc*\n \"\"\"\n\n def wrap(cls):\n\n if getattr(cls, \"__class__\", None) is None:\n raise TypeError(\"attrs only works with new-style classes.\")\n\n is_exc = auto_exc is True and issubclass(cls, BaseException)\n\n builder = _ClassBuilder(\n cls,\n these,\n slots,\n frozen,\n weakref_slot,\n auto_attribs,\n kw_only,\n cache_hash,\n is_exc,\n )\n\n if repr is True:\n builder.add_repr(repr_ns)\n if str is True:\n builder.add_str()\n if cmp is True and not is_exc:\n builder.add_cmp()\n\n if hash is not True and hash is not False and hash is not None:\n # Can't use `hash in` because 1 == True for example.\n raise TypeError(\n \"Invalid value for hash. Must be True, False, or None.\"\n )\n elif hash is False or (hash is None and cmp is False):\n if cache_hash:\n raise TypeError(\n \"Invalid value for cache_hash. To use hash caching,\"\n \" hashing must be either explicitly or implicitly \"\n \"enabled.\"\n )\n elif (\n hash is True\n or (hash is None and cmp is True and frozen is True)\n and is_exc is False\n ):\n builder.add_hash()\n else:\n if cache_hash:\n raise TypeError(\n \"Invalid value for cache_hash. To use hash caching,\"\n \" hashing must be either explicitly or implicitly \"\n \"enabled.\"\n )\n builder.make_unhashable()\n\n if init is True:\n builder.add_init()\n else:\n if cache_hash:\n raise TypeError(\n \"Invalid value for cache_hash. To use hash caching,\"\n \" init must be True.\"\n )\n\n return builder.build_class()\n\n # maybe_cls's type depends on the usage of the decorator. It's a class\n # if it's used as `@attrs` but ``None`` if used as `@attrs()`.\n if maybe_cls is None:\n return wrap\n else:\n return wrap(maybe_cls)",
"def mkRepr(instance, *argls, **kwargs):\n r\"\"\"Convinience function to implement ``__repr__``. `kwargs` values are\n ``repr`` ed. Special behavior for ``instance=None``: just the\n arguments are formatted.\n\n Example:\n\n >>> class Thing:\n ... def __init__(self, color, shape, taste=None):\n ... self.color, self.shape, self.taste = color, shape, taste\n ... def __repr__(self):\n ... return mkRepr(self, self.color, self.shape, taste=self.taste)\n ...\n >>> maggot = Thing('white', 'cylindrical', 'chicken')\n >>> maggot\n Thing('white', 'cylindrical', taste='chicken')\n >>> Thing('Color # 132942430-214809804-412430988081-241234', 'unkown', taste=maggot)\n Thing('Color # 132942430-214809804-412430988081-241234',\n 'unkown',\n taste=Thing('white', 'cylindrical', taste='chicken'))\n \"\"\"\n width=79\n maxIndent=15\n minIndent=2\n args = (map(repr, argls) + [\"%s=%r\" % (k, v)\n for (k,v) in sorted(kwargs.items())]) or [\"\"]\n if instance is not None:\n start = \"%s(\" % instance.__class__.__name__\n args[-1] += \")\"\n else:\n start = \"\"\n if len(start) <= maxIndent and len(start) + len(args[0]) <= width and \\\n max(map(len,args)) <= width: # XXX mag of last condition bit arbitrary\n indent = len(start)\n args[0] = start + args[0]\n if sum(map(len, args)) + 2*(len(args) - 1) <= width:\n return \", \".join(args)\n else:\n indent = minIndent\n args[0] = start + \"\\n\" + \" \" * indent + args[0]\n return (\",\\n\" + \" \" * indent).join(args)",
"def hook_symbol(self, symbol_name, simproc, kwargs=None, replace=None):\n \"\"\"\n Resolve a dependency in a binary. Looks up the address of the given symbol, and then hooks that\n address. If the symbol was not available in the loaded libraries, this address may be provided\n by the CLE externs object.\n\n Additionally, if instead of a symbol name you provide an address, some secret functionality will\n kick in and you will probably just hook that address, UNLESS you're on powerpc64 ABIv1 or some\n yet-unknown scary ABI that has its function pointers point to something other than the actual\n functions, in which case it'll do the right thing.\n\n :param symbol_name: The name of the dependency to resolve.\n :param simproc: The SimProcedure instance (or function) with which to hook the symbol\n :param kwargs: If you provide a SimProcedure for the hook, these are the keyword\n arguments that will be passed to the procedure's `run` method\n eventually.\n :param replace: Control the behavior on finding that the address is already hooked. If\n true, silently replace the hook. If false, warn and do not replace the\n hook. If none (default), warn and replace the hook.\n :returns: The address of the new symbol.\n :rtype: int\n \"\"\"\n if type(symbol_name) is not int:\n sym = self.loader.find_symbol(symbol_name)\n if sym is None:\n # it could be a previously unresolved weak symbol..?\n new_sym = None\n for reloc in self.loader.find_relevant_relocations(symbol_name):\n if not reloc.symbol.is_weak:\n raise Exception(\"Symbol is strong but we couldn't find its resolution? Report to @rhelmot.\")\n if new_sym is None:\n new_sym = self.loader.extern_object.make_extern(symbol_name)\n reloc.resolve(new_sym)\n reloc.relocate([])\n\n if new_sym is None:\n l.error(\"Could not find symbol %s\", symbol_name)\n return None\n sym = new_sym\n\n basic_addr = sym.rebased_addr\n else:\n basic_addr = symbol_name\n symbol_name = None\n\n hook_addr, _ = self.simos.prepare_function_symbol(symbol_name, basic_addr=basic_addr)\n\n self.hook(hook_addr, simproc, kwargs=kwargs, replace=replace)\n return hook_addr",
"def compile_sequence(cycles, program_or_profile='program',\n unit_converter=None):\n \"\"\" Makes the command list for a move sequence.\n\n Constructs the list of commands to execute the given sequence of\n motion. Program/command line commands or profile commands can be\n generated depending on the value of `program_or_profile` so that the\n commands can be used to construct a program or profile later. Types\n of motion supported (see Notes for how to specify) are moves from\n one position to another (the motion will always come to a stop\n before doing the next motion), waiting a given interval of time till\n starting the next move, and looping over a sequence of moves.\n\n Parameters\n ----------\n cycles : iterable of dicts\n The iterable of cycles of motion to do one after another. See\n Notes for format.\n program_or_profile : {'program', 'profile'}, optional\n Whether program or profile motion commands should be used.\n Anything other than these two values implies the default.\n unit_converter : UnitConverter, optional\n ``GeminiMotorDrive.utilities.UnitConverter`` to use to convert\n the units in `cycles` to motor units. ``None`` indicates that\n they are already in motor units.\n\n Returns\n -------\n commands : list of str\n ``list`` of ``str`` commands making up the move sequence.\n\n Notes\n -----\n `cycles` is an iterable of individual cycles of motion. Each cycle\n is a ``dict`` that represents a sequence of moves that could\n possibly be looped over. The field ``'iterations'`` gives how many\n times the sequence of moves should be done (a value > 1 implies a\n loop). Then the field ``'moves'`` is an iterable of the individual\n moves. Each individual move is a ``dict`` with the acceleration\n (``'A'``), deceleration (``'AD'`` with 0 meaning the value of the\n acceleration is used), velocity (``'V'``), and the distance/position\n (``'D'``). Back in the cycle, the field ``'wait_times'`` is an\n iterable of numbers giving the time in seconds to wait after each\n move before going onto the next.\n\n See Also\n --------\n get_sequence_time\n convert_sequence_to_motor_units\n GeminiMotorDrive.utilities.UnitConverter\n\n Examples\n --------\n\n Simple program style two motions with a pause in between.\n\n >>> from GeminiMotorDrive.compilers.move_sequence import *\n >>> cycles = [{'iterations':1, 'wait_times':[1, 0],\n ... 'moves':[{'A':100, 'AD':0, 'D':-1000, 'V':100},\n ... {'A':90, 'AD':0, 'D':-1000, 'V':100}]}]\n >>> compile_sequence(cycles)\n ['A100',\n 'AD0',\n 'V100',\n 'D-1000',\n 'GO1',\n 'WAIT(AS.1=b0)',\n 'T1',\n 'A90',\n 'GO1',\n 'WAIT(AS.1=b0)']\n\n The same motion but in profile style commands\n\n >>> from GeminiMotorDrive.compilers.move_sequence import *\n >>> cycles = [{'iterations':1, 'wait_times':[1, 0],\n ... 'moves':[{'A':100, 'AD':0, 'D':-1000, 'V':100},\n ... {'A':90, 'AD':0, 'D':-1000, 'V':100}]}]\n >>> compile_sequence(cycles, program_or_profile='profile')\n ['A100',\n 'AD100',\n 'V100',\n 'D-1000',\n 'VF0',\n 'GOBUF1',\n 'GOWHEN(T=1000)',\n 'A90',\n 'AD90',\n 'VF0',\n 'GOBUF1']\n\n Another motion with a back and forth loop (100 iterations) in the\n middle, done in program style commands.\n\n >>> from GeminiMotorDrive.compilers.move_sequence import *\n >>> cycles = [{'iterations':1, 'wait_times':[1],\n ... 'moves':[{'A':100, 'AD':0, 'D':-1000, 'V':100}]},\n ... {'iterations':100, 'wait_times':[0, 0],\n ... 'moves':[{'A':50, 'AD':40, 'D':-1000, 'V':30},\n ... {'A':50, 'AD':40, 'D':1000, 'V':30}]},\n ... {'iterations':1, 'wait_times':[0],\n ... 
'moves':[{'A':100, 'AD':0, 'D':1000, 'V':100}]}]\n >>> compile_sequence(cycles)\n ['A100',\n 'AD0',\n 'V100',\n 'D-1000',\n 'GO1',\n 'WAIT(AS.1=b0)',\n 'T1',\n 'L100',\n 'A50',\n 'AD40',\n 'V30',\n 'D-1000',\n 'GO1',\n 'WAIT(AS.1=b0)',\n 'D~',\n 'GO1',\n 'WAIT(AS.1=b0)',\n 'LN',\n 'A100',\n 'AD0',\n 'V100',\n 'GO1',\n 'WAIT(AS.1=b0)']\n\n \"\"\"\n # If needed, cycles needs to be converted to motor units.\n if unit_converter is None:\n cv_cycles = cycles\n else:\n cv_cycles = convert_sequence_to_motor_units(cycles, \\\n unit_converter=unit_converter)\n\n # Initially, we have no commands in our command list.\n commands = []\n\n # The A, AD, D, and V parameters of the previous motion should be\n # kept track of because if they don't change from one motion to the\n # next, the commands to set them don't need to be included. They\n # will be started blank since there are no previous motions yet.\n previous_motion = {'A': [], 'AD': [], 'D': [], 'V': []}\n\n # Construct each cycle one by one.\n for cycle in cv_cycles:\n # If more than one iteration is being done, a loop needs to be\n # setup. It will be either 'L' or 'PLOOP' with the number of\n # iterations attached if it is a program or a profile\n # respectively. Since it will be tough to keep track of what\n # motion changed from the end of a loop to the beginning of it,\n # it is easier to just forget all previous motion values and set\n # them all at the beginning of the loop (clear previous_motion).\n iterations = int(cycle['iterations'])\n if iterations > 1:\n previous_motion = {'A': [], 'AD': [], 'D': [], 'V': []}\n if program_or_profile != 'profile':\n commands.append('L' + str(iterations))\n else:\n commands.append('PLOOP' + str(iterations))\n\n # Construct each individual move in the cycle.\n for i in range(0, len(cycle['moves'])):\n # Grab the motion indicated by the current move.\n new_motion = cycle['moves'][i]\n\n # If we are doing a profile, AD must be set explicitly\n # to A if it is 0.\n if program_or_profile == 'profile' \\\n and new_motion['AD'] == 0.0:\n new_motion['AD'] = new_motion['A']\n\n # Set A, AD, and V if they have changed.\n for k in ('A', 'AD', 'V'):\n if previous_motion[k] != new_motion[k]:\n # Grab it and round it to 4 places after the decimal\n # point because that is the most that is\n # supported. Then, if it is an integer value,\n # convert it to an integer because that is what the\n # drive will send back if requested (makes\n # comparisons easier). Then add the command.\n val = round(float(new_motion[k]), 4)\n if val == int(val):\n val = int(val)\n commands.append(k + str(val))\n\n # If the sign of D has flipped, we just need to issue a 'D~'\n # command. If the value has changed in another way, it needs\n # to be reset.\n if previous_motion['D'] != new_motion['D']:\n if previous_motion['D'] == -new_motion['D']:\n commands.append('D~')\n else:\n commands.append('D'\n + str(int(new_motion['D'])))\n\n # Grab the amount of time that should be waited after the\n # move is done.\n wait_time = cycle['wait_times'][i]\n\n # Give the motion command (GO or GOBUF), tell the drive to\n # wait till the motor has stopped (a WAIT command if it is a\n # program and a VF0 command if it is a profile), and make it\n # wait the period of time wait_time (T and GOWHEN commands).\n if program_or_profile != 'profile':\n commands.append('GO1')\n commands.append('WAIT(AS.1=b0)')\n if wait_time != 0:\n # The wait time needs to be rounded to 3 places\n # after the decimal. 
If it is an integer, it should\n # be converted to an int so that the drive will send\n # back what we send (makes compairisons easier).\n wait_time = round(float(wait_time), 3)\n if wait_time == int(wait_time):\n wait_time = int(wait_time)\n commands.append('T' + str(wait_time))\n else:\n commands.append('VF0')\n commands.append('GOBUF1')\n if wait_time != 0:\n commands.append('GOWHEN(T='\n + str(int(1000*wait_time))\n + ')')\n\n # Before going onto the next move, previous_motion needs to\n # be set to the one just done.\n previous_motion = new_motion\n\n # Done with all the moves of the cycle. If we are looping, the\n # loop end needs to be put in.\n if iterations > 1:\n if program_or_profile != 'profile':\n commands.append('LN')\n else:\n commands.append('PLN')\n\n # Done constructing the command list.\n return commands"
] |
[
0.7415752410888672,
0.733905017375946,
0.7323334217071533,
0.7297677397727966,
0.7236826419830322,
0.7190187573432922,
0.712995171546936,
0.7084210515022278,
0.7059233784675598,
0.7049492001533508,
0.7047342658042908,
0.7040401101112366
] |
r"""Developer script program names.
|
def cli_program_names(self):
r"""Developer script program names.
"""
program_names = {}
for cli_class in self.cli_classes:
instance = cli_class()
program_names[instance.program_name] = cli_class
return program_names
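
# A minimal sketch (hypothetical class; only the `program_name` attribute
# read above is assumed):

class _Stats:
    program_name = "dev-stats"

# With cli_classes = [_Stats], cli_program_names() would return
#     {"dev-stats": _Stats}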
|
[
"def p_program(p):\n \"\"\" program : line\n \"\"\"\n if p[1] is not None:\n [MEMORY.add_instruction(x) for x in p[1] if isinstance(x, Asm)]",
"def _get_programs_dict():\n \"\"\"\n Builds and returns programs dictionary\n\n This will have to import the packages in COLLABORATORS_S in order to get their absolute path.\n\n Returns:\n dictionary: {\"packagename\": [ExeInfo0, ...], ...}\n\n \"packagename\" examples: \"f311.explorer\", \"numpy\"\n \"\"\"\n global __programs_dict\n\n if __programs_dict is not None:\n return __programs_dict\n\n d = __programs_dict = OrderedDict()\n\n for pkgname in COLLABORATORS_S:\n try:\n package = importlib.import_module(pkgname)\n except ImportError:\n # I think it is better to be silent when a collaborator package is not installed\n continue\n\n path_ = os.path.join(os.path.split(package.__file__)[0], \"scripts\")\n bulk = a99.get_exe_info(path_, flag_protected=True)\n d[pkgname] = {\"description\": a99.get_obj_doc0(package), \"exeinfo\": bulk}\n\n return __programs_dict",
"def get_module_name(package):\n \"\"\"\n package must have these attributes:\n e.g.:\n package.DISTRIBUTION_NAME = \"DragonPyEmulator\"\n package.DIST_GROUP = \"console_scripts\"\n package.ENTRY_POINT = \"DragonPy\"\n\n :return: a string like: \"dragonpy.core.cli\"\n \"\"\"\n distribution = get_distribution(package.DISTRIBUTION_NAME)\n entry_info = distribution.get_entry_info(package.DIST_GROUP, package.ENTRY_POINT)\n if not entry_info:\n raise RuntimeError(\n \"Can't find entry info for distribution: %r (group: %r, entry point: %r)\" % (\n package.DISTRIBUTION_NAME, package.DIST_GROUP, package.ENTRY_POINT\n )\n )\n return entry_info.module_name",
"def program(self):\n \"\"\"\n program: (statement)*\n \"\"\"\n root = Program()\n\n while self.token.nature != Nature.EOF:\n root.children.append(self.statement())\n\n return root",
"def compose(scripts, name='main', description=None, prog=None,\n version=None):\n \"\"\"\n Collects together different scripts and builds a single\n script dispatching to the subparsers depending on\n the first argument, i.e. the name of the subparser to invoke.\n\n :param scripts: a list of script instances\n :param name: the name of the composed parser\n :param description: description of the composed parser\n :param prog: name of the script printed in the usage message\n :param version: version of the script printed with --version\n \"\"\"\n assert len(scripts) >= 1, scripts\n parentparser = argparse.ArgumentParser(\n description=description, add_help=False)\n parentparser.add_argument(\n '--version', '-v', action='version', version=version)\n subparsers = parentparser.add_subparsers(\n help='available subcommands; use %s help <subcmd>' % prog,\n prog=prog)\n\n def gethelp(cmd=None):\n if cmd is None:\n print(parentparser.format_help())\n return\n subp = subparsers._name_parser_map.get(cmd)\n if subp is None:\n print('No help for unknown command %r' % cmd)\n else:\n print(subp.format_help())\n help_script = Script(gethelp, 'help', help=False)\n progname = '%s ' % prog if prog else ''\n help_script.arg('cmd', progname + 'subcommand')\n for s in list(scripts) + [help_script]:\n subp = subparsers.add_parser(s.name, description=s.description)\n for args, kw in s.all_arguments:\n subp.add_argument(*args, **kw)\n subp.set_defaults(_func=s.func)\n\n def main(**kw):\n try:\n func = kw.pop('_func')\n except KeyError:\n parentparser.print_usage()\n else:\n return func(**kw)\n main.__name__ = name\n return Script(main, name, parentparser)",
"def compile_sequence(cycles, program_or_profile='program',\n unit_converter=None):\n \"\"\" Makes the command list for a move sequence.\n\n Constructs the list of commands to execute the given sequence of\n motion. Program/command line commands or profile commands can be\n generated depending on the value of `program_or_profile` so that the\n commands can be used to construct a program or profile later. Types\n of motion supported (see Notes for how to specify) are moves from\n one position to another (the motion will always come to a stop\n before doing the next motion), waiting a given interval of time till\n starting the next move, and looping over a sequence of moves.\n\n Parameters\n ----------\n cycles : iterable of dicts\n The iterable of cycles of motion to do one after another. See\n Notes for format.\n program_or_profile : {'program', 'profile'}, optional\n Whether program or profile motion commands should be used.\n Anything other than these two values implies the default.\n unit_converter : UnitConverter, optional\n ``GeminiMotorDrive.utilities.UnitConverter`` to use to convert\n the units in `cycles` to motor units. ``None`` indicates that\n they are already in motor units.\n\n Returns\n -------\n commands : list of str\n ``list`` of ``str`` commands making up the move sequence.\n\n Notes\n -----\n `cycles` is an iterable of individual cycles of motion. Each cycle\n is a ``dict`` that represents a sequence of moves that could\n possibly be looped over. The field ``'iterations'`` gives how many\n times the sequence of moves should be done (a value > 1 implies a\n loop). Then the field ``'moves'`` is an iterable of the individual\n moves. Each individual move is a ``dict`` with the acceleration\n (``'A'``), deceleration (``'AD'`` with 0 meaning the value of the\n acceleration is used), velocity (``'V'``), and the distance/position\n (``'D'``). Back in the cycle, the field ``'wait_times'`` is an\n iterable of numbers giving the time in seconds to wait after each\n move before going onto the next.\n\n See Also\n --------\n get_sequence_time\n convert_sequence_to_motor_units\n GeminiMotorDrive.utilities.UnitConverter\n\n Examples\n --------\n\n Simple program style two motions with a pause in between.\n\n >>> from GeminiMotorDrive.compilers.move_sequence import *\n >>> cycles = [{'iterations':1, 'wait_times':[1, 0],\n ... 'moves':[{'A':100, 'AD':0, 'D':-1000, 'V':100},\n ... {'A':90, 'AD':0, 'D':-1000, 'V':100}]}]\n >>> compile_sequence(cycles)\n ['A100',\n 'AD0',\n 'V100',\n 'D-1000',\n 'GO1',\n 'WAIT(AS.1=b0)',\n 'T1',\n 'A90',\n 'GO1',\n 'WAIT(AS.1=b0)']\n\n The same motion but in profile style commands\n\n >>> from GeminiMotorDrive.compilers.move_sequence import *\n >>> cycles = [{'iterations':1, 'wait_times':[1, 0],\n ... 'moves':[{'A':100, 'AD':0, 'D':-1000, 'V':100},\n ... {'A':90, 'AD':0, 'D':-1000, 'V':100}]}]\n >>> compile_sequence(cycles, program_or_profile='profile')\n ['A100',\n 'AD100',\n 'V100',\n 'D-1000',\n 'VF0',\n 'GOBUF1',\n 'GOWHEN(T=1000)',\n 'A90',\n 'AD90',\n 'VF0',\n 'GOBUF1']\n\n Another motion with a back and forth loop (100 iterations) in the\n middle, done in program style commands.\n\n >>> from GeminiMotorDrive.compilers.move_sequence import *\n >>> cycles = [{'iterations':1, 'wait_times':[1],\n ... 'moves':[{'A':100, 'AD':0, 'D':-1000, 'V':100}]},\n ... {'iterations':100, 'wait_times':[0, 0],\n ... 'moves':[{'A':50, 'AD':40, 'D':-1000, 'V':30},\n ... {'A':50, 'AD':40, 'D':1000, 'V':30}]},\n ... {'iterations':1, 'wait_times':[0],\n ... 
'moves':[{'A':100, 'AD':0, 'D':1000, 'V':100}]}]\n >>> compile_sequence(cycles)\n ['A100',\n 'AD0',\n 'V100',\n 'D-1000',\n 'GO1',\n 'WAIT(AS.1=b0)',\n 'T1',\n 'L100',\n 'A50',\n 'AD40',\n 'V30',\n 'D-1000',\n 'GO1',\n 'WAIT(AS.1=b0)',\n 'D~',\n 'GO1',\n 'WAIT(AS.1=b0)',\n 'LN',\n 'A100',\n 'AD0',\n 'V100',\n 'GO1',\n 'WAIT(AS.1=b0)']\n\n \"\"\"\n # If needed, cycles needs to be converted to motor units.\n if unit_converter is None:\n cv_cycles = cycles\n else:\n cv_cycles = convert_sequence_to_motor_units(cycles, \\\n unit_converter=unit_converter)\n\n # Initially, we have no commands in our command list.\n commands = []\n\n # The A, AD, D, and V parameters of the previous motion should be\n # kept track of because if they don't change from one motion to the\n # next, the commands to set them don't need to be included. They\n # will be started blank since there are no previous motions yet.\n previous_motion = {'A': [], 'AD': [], 'D': [], 'V': []}\n\n # Construct each cycle one by one.\n for cycle in cv_cycles:\n # If more than one iteration is being done, a loop needs to be\n # setup. It will be either 'L' or 'PLOOP' with the number of\n # iterations attached if it is a program or a profile\n # respectively. Since it will be tough to keep track of what\n # motion changed from the end of a loop to the beginning of it,\n # it is easier to just forget all previous motion values and set\n # them all at the beginning of the loop (clear previous_motion).\n iterations = int(cycle['iterations'])\n if iterations > 1:\n previous_motion = {'A': [], 'AD': [], 'D': [], 'V': []}\n if program_or_profile != 'profile':\n commands.append('L' + str(iterations))\n else:\n commands.append('PLOOP' + str(iterations))\n\n # Construct each individual move in the cycle.\n for i in range(0, len(cycle['moves'])):\n # Grab the motion indicated by the current move.\n new_motion = cycle['moves'][i]\n\n # If we are doing a profile, AD must be set explicitly\n # to A if it is 0.\n if program_or_profile == 'profile' \\\n and new_motion['AD'] == 0.0:\n new_motion['AD'] = new_motion['A']\n\n # Set A, AD, and V if they have changed.\n for k in ('A', 'AD', 'V'):\n if previous_motion[k] != new_motion[k]:\n # Grab it and round it to 4 places after the decimal\n # point because that is the most that is\n # supported. Then, if it is an integer value,\n # convert it to an integer because that is what the\n # drive will send back if requested (makes\n # comparisons easier). Then add the command.\n val = round(float(new_motion[k]), 4)\n if val == int(val):\n val = int(val)\n commands.append(k + str(val))\n\n # If the sign of D has flipped, we just need to issue a 'D~'\n # command. If the value has changed in another way, it needs\n # to be reset.\n if previous_motion['D'] != new_motion['D']:\n if previous_motion['D'] == -new_motion['D']:\n commands.append('D~')\n else:\n commands.append('D'\n + str(int(new_motion['D'])))\n\n # Grab the amount of time that should be waited after the\n # move is done.\n wait_time = cycle['wait_times'][i]\n\n # Give the motion command (GO or GOBUF), tell the drive to\n # wait till the motor has stopped (a WAIT command if it is a\n # program and a VF0 command if it is a profile), and make it\n # wait the period of time wait_time (T and GOWHEN commands).\n if program_or_profile != 'profile':\n commands.append('GO1')\n commands.append('WAIT(AS.1=b0)')\n if wait_time != 0:\n # The wait time needs to be rounded to 3 places\n # after the decimal. 
If it is an integer, it should\n # be converted to an int so that the drive will send\n # back what we send (makes compairisons easier).\n wait_time = round(float(wait_time), 3)\n if wait_time == int(wait_time):\n wait_time = int(wait_time)\n commands.append('T' + str(wait_time))\n else:\n commands.append('VF0')\n commands.append('GOBUF1')\n if wait_time != 0:\n commands.append('GOWHEN(T='\n + str(int(1000*wait_time))\n + ')')\n\n # Before going onto the next move, previous_motion needs to\n # be set to the one just done.\n previous_motion = new_motion\n\n # Done with all the moves of the cycle. If we are looping, the\n # loop end needs to be put in.\n if iterations > 1:\n if program_or_profile != 'profile':\n commands.append('LN')\n else:\n commands.append('PLN')\n\n # Done constructing the command list.\n return commands",
"def names(args):\n \"\"\"\n %prog names namelist templatefile\n\n Generate name blocks from the `namelist` file. The `namelist` file is\n tab-delimited that contains >=4 columns of data. Three columns are mandatory.\n First name, middle initial and last name. First row is table header. For the\n extra columns, the first column will go in the `$N0` field in the template\n file, second to the `$N1` field, etc.\n\n In the alternative mode, the namelist just contains several sections. First\n row will go in the `$N0` in the template file, second to the `$N1` field.\n\n The namelist may look like:\n [Sequence]\n Bruce A. Roe, Frederic Debelle, Giles Oldroyd, Rene Geurts\n [Manuscript]\n Haibao Tang1, Vivek Krishnakumar1, Shelby Bidwell1, Benjamin Rosen1\n\n Then in this example Sequence section goes into N0, Manuscript goes into N1.\n\n Useful hints for constructing the template file can be found in:\n <http://www.ncbi.nlm.nih.gov/IEB/ToolBox/CPP_DOC/asn_spec/seq.asn.html>\n\n Often the template file can be retrieved from web form:\n <http://www.ncbi.nlm.nih.gov/WebSub/template.cgi>\n \"\"\"\n p = OptionParser(names.__doc__)\n opts, args = p.parse_args(args)\n\n if len(args) != 2:\n sys.exit(p.print_help())\n\n namelist, templatefile = args\n\n # First check the alternative format\n if open(namelist).read()[0] == '[':\n out = parse_names(namelist)\n make_template(templatefile, out)\n return\n\n reader = csv.reader(open(namelist), delimiter=\"\\t\")\n header = next(reader)\n ncols = len(header)\n assert ncols > 3\n nextras = ncols - 3\n\n blocks = []\n bools = []\n for row in reader:\n first, middle, last = row[:3]\n extras = row[3:]\n bools.append([(x.upper() == 'Y') for x in extras])\n middle = middle.strip()\n if middle != \"\":\n middle = middle.rstrip('.') + '.'\n initials = \"{0}.{1}\".format(first[0], middle)\n suffix = \"\"\n nameblock = NameTemplate.format(last=last, first=first,\n initials=initials, suffix=suffix)\n blocks.append(nameblock)\n\n selected_idx = zip(*bools)\n out = [] * nextras\n for i, sbools in enumerate(selected_idx):\n selected = []\n for b, ss in zip(blocks, sbools):\n if ss:\n selected.append(b)\n bigblock = \",\\n\".join(selected)\n out.append(bigblock)\n logging.debug(\"List N{0} contains a total of {1} names.\".format(i,\n len(selected)))\n\n make_template(templatefile, out)",
"def get_argv_tail(scriptname, prefer_main=None, argv=None):\n r\"\"\"\n gets the rest of the arguments after a script has been invoked hack.\n accounts for python -m scripts.\n\n Args:\n scriptname (str):\n\n CommandLine:\n python -m utool.util_arg --test-get_argv_tail\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_arg import * # NOQA\n >>> import utool as ut\n >>> from os.path import relpath, dirname\n >>> scriptname = 'utool.util_arg'\n >>> prefer_main = False\n >>> argv=['python', '-m', 'utool.util_arg', '--test-get_argv_tail']\n >>> tail = get_argv_tail(scriptname, prefer_main, argv)\n >>> # hack\n >>> tail[0] = ut.ensure_unixslash(relpath(tail[0], dirname(dirname(ut.__file__))))\n >>> result = ut.repr2(tail)\n >>> print(result)\n ['utool/util_arg.py', '--test-get_argv_tail']\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_arg import * # NOQA\n >>> import utool as ut\n >>> from os.path import relpath, dirname\n >>> scriptname = 'utprof.py'\n >>> prefer_main = True\n >>> argv=['utprof.py', '-m', 'utool', '--tf', 'get_argv_tail']\n >>> tail = get_argv_tail(scriptname, prefer_main, argv)\n >>> # hack\n >>> tail[0] = ut.ensure_unixslash(relpath(tail[0], dirname(dirname(ut.__file__))))\n >>> result = ut.repr2(tail)\n >>> print(result)\n ['utool/__main__.py', '--tf', 'get_argv_tail']\n \"\"\"\n if argv is None:\n argv = sys.argv\n import utool as ut\n modname = ut.get_argval('-m', help_='specify module name to profile', argv=argv)\n if modname is not None:\n # hack to account for -m scripts\n modpath = ut.get_modpath(modname, prefer_main=prefer_main)\n argvx = argv.index(modname) + 1\n argv_tail = [modpath] + argv[argvx:]\n else:\n try:\n argvx = argv.index(scriptname)\n except ValueError:\n for argvx, arg in enumerate(argv):\n # HACK\n if scriptname in arg:\n break\n argv_tail = argv[(argvx + 1):]\n return argv_tail",
"def prt(show=False):\n \"\"\"\n 通过 ``装饰器`` 打印函数 ``参数, 运行时间, 返回值`` 等信息\n\n - 如果 ``prt(show==True)`` 打印函数信息,\n - 否则, 不做任何处理\n\n .. code:: python\n\n @prt(True)\n def say():\n print 'say'\n\n '''\n hello, world\n ----------------------------------------------------------------\n function(say) :\n arguments = (),{}\n return = None\n cost = 0.000015 sec\n ----------------------------------------------------------------\n '''\n\n\n @prt(False)\n def say():\n print 'say'\n\n # 输出 hello, world\n\n - 定义一个全局变量 ``bl`` (调试模式设置为 ``True``, 在生产模式为\n ``False``)\n\n .. note: 调试模式, 设置为 True, 在生产模式时, 设置为False\n\n :param show: ``True/False``\n :type show: bool\n \"\"\"\n\n def dec(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n result = None\n ts = time.time()\n if show:\n try:\n result = fn(*args, **kwargs)\n except Exception as e:\n print(e)\n finally:\n te = time.time()\n print(\"-\" * 64)\n print(\"function({}) :\".format(fn.__name__))\n print(\"\\t{:<16} = {},{}\".format('arguments', args, kwargs))\n print(\"\\t{:<16} = {}\".format('return', result))\n print(\"\\t{:<16} = {:.6f} sec\".format('cost', te - ts))\n print(\"-\" * 64)\n return result\n else:\n return fn(*args, **kwargs)\n\n return wrapper\n\n return dec",
"def get_script_module(script_information, package='pylabcontrol', verbose=False):\n \"\"\"\n wrapper to get the module for a script\n\n Args:\n script_information: information of the script. This can be\n - a dictionary\n - a Script instance\n - name of Script class\n package (optional): name of the package to which the script belongs, i.e. pylabcontrol or b26toolkit only used when script_information is a string\n Returns:\n module\n\n \"\"\"\n\n module, _, _, _, _, _, _ = Script.get_script_information(script_information=script_information, package=package, verbose=verbose)\n\n return module",
"def tree_libs(start_path, filt_func=None):\n \"\"\" Return analysis of library dependencies within `start_path`\n\n Parameters\n ----------\n start_path : str\n root path of tree to search for libraries depending on other libraries.\n filt_func : None or callable, optional\n If None, inspect all files for library dependencies. If callable,\n accepts filename as argument, returns True if we should inspect the\n file, False otherwise.\n\n Returns\n -------\n lib_dict : dict\n dictionary with (key, value) pairs of (``libpath``,\n ``dependings_dict``).\n\n ``libpath`` is canonical (``os.path.realpath``) filename of library, or\n library name starting with {'@rpath', '@loader_path',\n '@executable_path'}.\n\n ``dependings_dict`` is a dict with (key, value) pairs of\n (``depending_libpath``, ``install_name``), where ``dependings_libpath``\n is the canonical (``os.path.realpath``) filename of the library\n depending on ``libpath``, and ``install_name`` is the \"install_name\" by\n which ``depending_libpath`` refers to ``libpath``.\n\n Notes\n -----\n\n See:\n\n * https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man1/dyld.1.html\n * http://matthew-brett.github.io/pydagogue/mac_runtime_link.html\n \"\"\"\n lib_dict = {}\n for dirpath, dirnames, basenames in os.walk(start_path):\n for base in basenames:\n depending_libpath = realpath(pjoin(dirpath, base))\n if not filt_func is None and not filt_func(depending_libpath):\n continue\n rpaths = get_rpaths(depending_libpath)\n for install_name in get_install_names(depending_libpath):\n lib_path = (install_name if install_name.startswith('@')\n else realpath(install_name))\n lib_path = resolve_rpath(lib_path, rpaths)\n if lib_path in lib_dict:\n lib_dict[lib_path][depending_libpath] = install_name\n else:\n lib_dict[lib_path] = {depending_libpath: install_name}\n return lib_dict",
"def jsmin_for_posers(script, keep_bang_comments=False):\n r\"\"\"\n Minify javascript based on `jsmin.c by Douglas Crockford`_\\.\n\n Instead of parsing the stream char by char, it uses a regular\n expression approach which minifies the whole script with one big\n substitution regex.\n\n .. _jsmin.c by Douglas Crockford:\n http://www.crockford.com/javascript/jsmin.c\n\n :Warning: This function is the digest of a _make_jsmin() call. It just\n utilizes the resulting regexes. It's here for fun and may\n vanish any time. Use the `jsmin` function instead.\n\n :Parameters:\n `script` : ``str``\n Script to minify\n\n `keep_bang_comments` : ``bool``\n Keep comments starting with an exclamation mark? (``/*!...*/``)\n\n :Return: Minified script\n :Rtype: ``str``\n \"\"\"\n if not keep_bang_comments:\n rex = (\n r'([^\\047\"/\\000-\\040]+)|((?:(?:\\047[^\\047\\\\\\r\\n]*(?:\\\\(?:[^\\r\\n]'\n r'|\\r?\\n|\\r)[^\\047\\\\\\r\\n]*)*\\047)|(?:\"[^\"\\\\\\r\\n]*(?:\\\\(?:[^\\r\\n]'\n r'|\\r?\\n|\\r)[^\"\\\\\\r\\n]*)*\"))[^\\047\"/\\000-\\040]*)|(?<=[(,=:\\[!&|?'\n r'{};\\r\\n])(?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*[^*]*\\*+(?:[^/*'\n r'][^*]*\\*+)*/))*(?:(?:(?://[^\\r\\n]*)?[\\r\\n])(?:[\\000-\\011\\013\\0'\n r'14\\016-\\040]|(?:/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/))*)*((?:/(?![\\r'\n r'\\n/*])[^/\\\\\\[\\r\\n]*(?:(?:\\\\[^\\r\\n]|(?:\\[[^\\\\\\]\\r\\n]*(?:\\\\[^\\r'\n r'\\n][^\\\\\\]\\r\\n]*)*\\]))[^/\\\\\\[\\r\\n]*)*/)[^\\047\"/\\000-\\040]*)|(?<'\n r'=[\\000-#%-,./:-@\\[-^`{-~-]return)(?:[\\000-\\011\\013\\014\\016-\\04'\n r'0]|(?:/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/))*(?:(?:(?://[^\\r\\n]*)?['\n r'\\r\\n])(?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*[^*]*\\*+(?:[^/*][^'\n r'*]*\\*+)*/)))*((?:/(?![\\r\\n/*])[^/\\\\\\[\\r\\n]*(?:(?:\\\\[^\\r\\n]|(?:'\n r'\\[[^\\\\\\]\\r\\n]*(?:\\\\[^\\r\\n][^\\\\\\]\\r\\n]*)*\\]))[^/\\\\\\[\\r\\n]*)*/)['\n r'^\\047\"/\\000-\\040]*)|(?<=[^\\000-!#%&(*,./:-@\\[\\\\^`{|~])(?:[\\000'\n r'-\\011\\013\\014\\016-\\040]|(?:/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/))*(?'\n r':((?:(?://[^\\r\\n]*)?[\\r\\n]))(?:[\\000-\\011\\013\\014\\016-\\040]|(?'\n r':/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/))*)+(?=[^\\000-\\040\"#%-\\047)*,.'\n r'/:-@\\\\-^`|-~])|(?<=[^\\000-#%-,./:-@\\[-^`{-~-])((?:[\\000-\\011\\0'\n r'13\\014\\016-\\040]|(?:/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/)))+(?=[^\\00'\n r'0-#%-,./:-@\\[-^`{-~-])|(?<=\\+)((?:[\\000-\\011\\013\\014\\016-\\040]'\n r'|(?:/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/)))+(?=\\+)|(?<=-)((?:[\\000-'\n r'\\011\\013\\014\\016-\\040]|(?:/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/)))+(?'\n r'=-)|(?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*[^*]*\\*+(?:[^/*][^*]'\n r'*\\*+)*/))+|(?:(?:(?://[^\\r\\n]*)?[\\r\\n])(?:[\\000-\\011\\013\\014\\0'\n r'16-\\040]|(?:/\\*[^*]*\\*+(?:[^/*][^*]*\\*+)*/))*)+'\n )\n\n def subber(match):\n \"\"\" Substitution callback \"\"\"\n groups = match.groups()\n return (\n groups[0] or\n groups[1] or\n groups[2] or\n groups[3] or\n (groups[4] and '\\n') or\n (groups[5] and ' ') or\n (groups[6] and ' ') or\n (groups[7] and ' ') or\n ''\n )\n else:\n rex = (\n r'([^\\047\"/\\000-\\040]+)|((?:(?:\\047[^\\047\\\\\\r\\n]*(?:\\\\(?:[^\\r\\n]'\n r'|\\r?\\n|\\r)[^\\047\\\\\\r\\n]*)*\\047)|(?:\"[^\"\\\\\\r\\n]*(?:\\\\(?:[^\\r\\n]'\n r'|\\r?\\n|\\r)[^\"\\\\\\r\\n]*)*\"))[^\\047\"/\\000-\\040]*)|((?:/\\*![^*]*\\*'\n r'+(?:[^/*][^*]*\\*+)*/)[^\\047\"/\\000-\\040]*)|(?<=[(,=:\\[!&|?{};\\r'\n r'\\n])(?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*(?!!)[^*]*\\*+(?:[^/*'\n r'][^*]*\\*+)*/))*(?:(?:(?://[^\\r\\n]*)?[\\r\\n])(?:[\\000-\\011\\013\\0'\n 
r'14\\016-\\040]|(?:/\\*(?!!)[^*]*\\*+(?:[^/*][^*]*\\*+)*/))*)*((?:/('\n r'?![\\r\\n/*])[^/\\\\\\[\\r\\n]*(?:(?:\\\\[^\\r\\n]|(?:\\[[^\\\\\\]\\r\\n]*(?:'\n r'\\\\[^\\r\\n][^\\\\\\]\\r\\n]*)*\\]))[^/\\\\\\[\\r\\n]*)*/)[^\\047\"/\\000-\\040]'\n r'*)|(?<=[\\000-#%-,./:-@\\[-^`{-~-]return)(?:[\\000-\\011\\013\\014\\0'\n r'16-\\040]|(?:/\\*(?!!)[^*]*\\*+(?:[^/*][^*]*\\*+)*/))*(?:(?:(?://['\n r'^\\r\\n]*)?[\\r\\n])(?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*(?!!)[^*'\n r']*\\*+(?:[^/*][^*]*\\*+)*/)))*((?:/(?![\\r\\n/*])[^/\\\\\\[\\r\\n]*(?:('\n r'?:\\\\[^\\r\\n]|(?:\\[[^\\\\\\]\\r\\n]*(?:\\\\[^\\r\\n][^\\\\\\]\\r\\n]*)*\\]))[^/'\n r'\\\\\\[\\r\\n]*)*/)[^\\047\"/\\000-\\040]*)|(?<=[^\\000-!#%&(*,./:-@\\[\\\\'\n r'^`{|~])(?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*(?!!)[^*]*\\*+(?:['\n r'^/*][^*]*\\*+)*/))*(?:((?:(?://[^\\r\\n]*)?[\\r\\n]))(?:[\\000-\\011'\n r'\\013\\014\\016-\\040]|(?:/\\*(?!!)[^*]*\\*+(?:[^/*][^*]*\\*+)*/))*)+'\n r'(?=[^\\000-\\040\"#%-\\047)*,./:-@\\\\-^`|-~])|(?<=[^\\000-#%-,./:-@'\n r'\\[-^`{-~-])((?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*(?!!)[^*]*\\*'\n r'+(?:[^/*][^*]*\\*+)*/)))+(?=[^\\000-#%-,./:-@\\[-^`{-~-])|(?<=\\+)'\n r'((?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*(?!!)[^*]*\\*+(?:[^/*][^'\n r'*]*\\*+)*/)))+(?=\\+)|(?<=-)((?:[\\000-\\011\\013\\014\\016-\\040]|(?:'\n r'/\\*(?!!)[^*]*\\*+(?:[^/*][^*]*\\*+)*/)))+(?=-)|(?:[\\000-\\011\\013'\n r'\\014\\016-\\040]|(?:/\\*(?!!)[^*]*\\*+(?:[^/*][^*]*\\*+)*/))+|(?:(?'\n r':(?://[^\\r\\n]*)?[\\r\\n])(?:[\\000-\\011\\013\\014\\016-\\040]|(?:/\\*('\n r'?!!)[^*]*\\*+(?:[^/*][^*]*\\*+)*/))*)+'\n )\n\n def subber(match):\n \"\"\" Substitution callback \"\"\"\n groups = match.groups()\n return (\n groups[0] or\n groups[1] or\n groups[2] or\n groups[3] or\n groups[4] or\n (groups[5] and '\\n') or\n (groups[6] and ' ') or\n (groups[7] and ' ') or\n (groups[8] and ' ') or\n ''\n )\n\n return _re.sub(rex, subber, '\\n%s\\n' % script).strip()"
] |
[
0.762545108795166,
0.7605868577957153,
0.7520274519920349,
0.7458399534225464,
0.7413025498390198,
0.7278997898101807,
0.7268261313438416,
0.7258724570274353,
0.7242015600204468,
0.7233635783195496,
0.7226311564445496,
0.7222805023193359
] |
:return: statistics as flat table {port/stream/tpld name {group_stat name: value}}
|
def get_flat_stats(self):
    """
    :return: statistics as flat table {port/stream/tpld name {group_stat name: value}}
    """
    flat_stats = OrderedDict()
    for obj, port_stats in self.statistics.items():
        flat_obj_stats = OrderedDict()
        for group_name, group_values in port_stats.items():
            for stat_name, stat_value in group_values.items():
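                # Join group and stat names so each flat key reads "<group>_<stat>".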
                full_stat_name = group_name + '_' + stat_name
                flat_obj_stats[full_stat_name] = stat_value
        flat_stats[obj.name] = flat_obj_stats
    return flat_stats
|
[
"def stats(self):\n \"\"\"Basic group statistics.\n\n Returned dict has the following keys:\n\n 'online' - users online count\n 'ingame' - users currently in game count\n 'chatting' - users chatting count\n\n :return: dict\n \"\"\"\n stats_online = CRef.cint()\n stats_ingame = CRef.cint()\n stats_chatting = CRef.cint()\n\n self._iface.get_clan_stats(\n self.group_id,\n stats_online,\n stats_ingame,\n stats_chatting,\n )\n\n return {\n 'online': int(stats_online),\n 'ingame': int(stats_ingame),\n 'chatting': int(stats_chatting),\n }",
"def group_statistics(self, group, selected_meta, stat_code='mean'):\n \"\"\"\n Provides statistics of a group based on the meta data selected.\n\n :param group:The result of a classification or clustering.rst or biclustering algorithm\n :param selected_meta: The metadata that we are interested in\n :param stat_code: 'mean' for mean or 'variance' for variance or 'std' for standard deviation\n :return: returns the statistics properties of the selected metadata\n \"\"\"\n values = self.get_values(group, selected_meta)\n if stat_code == 'mean':\n res = statistics.mean(values)\n elif stat_code == 'variance':\n res = statistics.variance(values)\n elif stat_code == 'std':\n res = statistics.stdev(values)\n return res",
"def get_groups(self, gs=None, processed=[], initial=True):\n '''\n <--------------------------------------- 12 columns ------------------------------------>\n <--- 6 columns ---> <--- 6 columns --->\n ------------------------------------------ ------------------------------------------\n | Info | | Personal |\n |==========================================| |==========================================|\n | ----------------- ------------------ | | |\n | | Passport | | Name | | | Phone Zipcode |\n | |=================| | [.....] [.....] | | | [...........................] [.......] |\n | | CID Country | | <- 6 -> <- 6 -> | | | <--- 8 columns ---> <-4 col-> |\n | | [.....] [.....] | | | | | |\n | | <- 6 -> <- 6 -> | ----------------- | | Address |\n | ----------------- | | [.....................................] |\n ------------------------------------------ | <--- 12 columns ---> |\n | [..] number |\n | <--- 12 columns ---> |\n | |\n ------------------------------------------\n group = [\n (_('Info'),(6,'#8a6d3b','#fcf8e3','center'),\n (_('Identification'),6,\n [\"cid\",6],\n [\"country\",6],\n ),\n (None,6,\n [\"name\",None,6],\n [\"surname\",None,6,False],\n ),\n ),\n (_('Personal'),6,\n [\"phone\",None,8],\n [\"zipcode\",None,4],\n [\"address\",None,12],\n [\"number\",None,12, True],\n ),\n ]\n\n Group: it is defined as tuple with 3 or more elements:\n Grammar: (<Name>, <Attributes>, <Element1>, <Element2>, ..., <ElementN>)\n If <Name> is None: no name will be given to the group and no panel decoration will be shown\n If <Size in columns> is None: default of 6 will be used\n\n <Attributes>:\n it can be an integer that represent the size in columns\n it can be a tuple with several attributes where each element represents:\n (<Size in columns>,'#<Font color>','#<Background color>','<Alignment>')\n\n <Element>:\n it can be a Group\n it can be a Field\n\n Examples:\n ('Info', 6, [\"name\",6], [\"surname\",6]) -> Info panel using 6 columns with 2 boxes 6 columns for each with name and surname inputs\n ('Info', (6,None,'#fcf8e3','center'), [\"name\",6], [\"surname\",6]) -> Info panel using 6 columns with a yellow brackground in centered title, 2 boxes, 6 columns for each with name and surname inputs\n ('Info', 12, ('Name', 6, [\"name\",12]), ('Surname',6, [\"surname\",12])) -> Info panel using 12 columns with 2 panels inside\n of 6 columns each named \"Name\" and \"Surname\" and inside each of them an input \"name\" and \"surname\" where it belongs.\n\n Field: must be a list with at least 1 element in it:\n Grammar: [<Name of field>, <Size in columns>, <Label>]\n\n <Name of field>:\n This must be filled always\n It is the input's name inside the form\n Must exists as a form element or as a grouped form element\n\n <Size in columns>:\n Size of the input in columns\n If it is not defined or if it is defined as None: default of 6 will be used\n\n <Label>:\n It it is defined as False: the label for this field will not be shown\n If it is not defined or if it is defined as None: default of True will be used (default input's label will be shown)\n If it is a string: this string will be shown as a label\n\n Examples:\n ['age'] Input 'age' will be shown with 6 columns and its default label\n ['age',8] Input 'age' will be shown with 8 columns and its default label\n ['age', None, False] Input 'age' will be shown with 6 columns and NO LABEL\n ['age',8,False] Input 'age' will be shown with 8 columns and NO LABEL\n ['age',8,_(\"Age in days\")] Input 'age' will be shown with 8 columns and translated label 
text \"Age in days\" to user's language\n ['age',8,_(\"Age in days\"), True] Input 'age' will be shown with 8 columns and translated label text \"Age in days\" to user's language, and input inline with label\n ['age',6, None, None, None, None, None, [\"ng-click=functionjs('param1')\", \"ng-change=functionjs2()\"]] Input 'age' with extras functions\n ['age',None,None,None,None, 'filter'] Input 'age' with extras filter ONLY DETAILS\n ['age',6, {'color': 'red'} Input 'age' will be shown with red title\n '''\n\n # Check if language is set\n if not self.__language:\n raise IOError(\"ERROR: No language suplied!\")\n\n # Initialize the list\n if initial:\n processed = []\n # Where to look for fields\n if 'list_fields' in dir(self):\n list_fields = self.list_fields\n check_system = \"html_name\"\n else:\n list_fields = self\n check_system = \"name\"\n\n # Default attributes for fields\n attributes = [\n ('columns', 6),\n ('color', None),\n ('bgcolor', None),\n ('textalign', None),\n ('inline', False), # input in line with label\n ('label', True),\n ('extra', None),\n ('extra_div', None),\n ('foreign_info', {}),\n ]\n labels = [x[0] for x in attributes]\n\n # Get groups if none was given\n if gs is None:\n gs = self.__groups__()\n\n # Prepare the answer\n groups = []\n\n # Prepare focus control\n focus_first = None\n focus_must = None\n\n # html helper for groups and fields\n html_helper = self.html_helper()\n\n # Start processing\n for g in gs:\n token = {}\n token['name'] = g[0]\n\n if token['name'] in html_helper:\n if 'pre' in html_helper[token['name']]:\n token[\"html_helper_pre\"] = html_helper[token['name']]['pre']\n if 'post' in html_helper[token['name']]:\n token[\"html_helper_post\"] = html_helper[token['name']]['post']\n\n styles = g[1]\n if type(styles) is tuple:\n if len(styles) >= 1:\n token['columns'] = g[1][0]\n if len(styles) >= 2:\n token['color'] = g[1][1]\n if len(styles) >= 3:\n token['bgcolor'] = g[1][2]\n if len(styles) >= 4:\n token['textalign'] = g[1][3]\n if len(styles) >= 5:\n token['inline'] = g[1][4]\n if len(styles) >= 7:\n token['extra'] = g[1][5]\n if len(styles) >= 8:\n token['extra_div'] = g[1][6]\n else:\n token['columns'] = g[1]\n fs = g[2:]\n fields = []\n for f in fs:\n # Field\n atr = {}\n # Decide weather this is a Group or not\n if type(f) == tuple:\n # Recursive\n fields += self.get_groups([list(f)], processed, False)\n else:\n try:\n list_type = [str, unicode, ]\n except NameError:\n list_type = [str, ]\n # Check if it is a list\n if type(f) == list:\n # This is a field with attributes, get the name\n field = f[0]\n\n if html_helper and token['name'] in html_helper and 'items' in html_helper[token['name']] and field in html_helper[token['name']]['items']:\n if 'pre' in html_helper[token['name']]['items'][field]:\n atr[\"html_helper_pre\"] = html_helper[token['name']]['items'][field]['pre']\n if 'post' in html_helper[token['name']]['items'][field]:\n atr[\"html_helper_post\"] = html_helper[token['name']]['items'][field]['post']\n\n # Process each attribute (if any)\n dictionary = False\n for idx, element in enumerate(f[1:]):\n if type(element) == dict:\n dictionary = True\n for key in element.keys():\n if key in labels:\n atr[key] = element[key]\n else:\n raise IOError(\"Unknown attribute '{0}' as field '{1}' in list of fields\".format(key, field))\n else:\n if not dictionary:\n if element is not None:\n atr[attributes[idx][0]] = element\n else:\n raise IOError(\"We already processed a dicionary element in this list of fields, you can not add anoother type 
of elements to it, you must keep going with dictionaries\")\n elif type(f) in list_type:\n field = f\n else:\n raise IOError(\"Uknown element type '{0}' inside group '{1}'\".format(type(f), token['name']))\n\n # Get the Django Field object\n found = None\n for infield in list_fields:\n if infield.__dict__[check_system] == field:\n found = infield\n break\n\n if found:\n # Get attributes (required and original attributes)\n wrequired = found.field.widget.is_required\n wattrs = found.field.widget.attrs\n # Fill base attributes\n atr['name'] = found.html_name\n atr['input'] = found\n atr['focus'] = False\n # Set focus\n if focus_must is None:\n if focus_first is None:\n focus_first = atr\n if wrequired:\n focus_must = atr\n # Autocomplete\n if 'autofill' in dir(self.Meta):\n autofill = self.Meta.autofill.get(found.html_name, None)\n atr['autofill'] = autofill\n if autofill:\n # Check format of the request\n autokind = autofill[0]\n if type(autokind) == str:\n # Using new format\n if autokind == 'select':\n # If autofill is True for this field set the DynamicSelect widget\n found.field.widget = DynamicSelect(wattrs)\n elif autokind == 'multiselect':\n # If autofill is True for this field set the DynamicSelect widget\n found.field.widget = MultiDynamicSelect(wattrs)\n elif autokind == 'input':\n # If autofill is True for this field set the DynamicSelect widget\n found.field.widget = DynamicInput(wattrs)\n else:\n raise IOError(\"Autofill filled using new format but autokind is '{}' and I only know 'input' or 'select'\".format(autokind))\n\n # Configure the field\n found.field.widget.is_required = wrequired\n found.field.widget.form_name = self.form_name\n found.field.widget.field_name = infield.html_name\n found.field.widget.autofill_deepness = autofill[1]\n found.field.widget.autofill_url = autofill[2]\n found.field.widget.autofill = autofill[3:]\n else:\n # Get old information [COMPATIBILITY WITH OLD VERSION]\n # If autofill is True for this field set the DynamicSelect widget\n found.field.widget = DynamicSelect(wattrs)\n found.field.widget.is_required = wrequired\n found.field.widget.form_name = self.form_name\n found.field.widget.field_name = infield.html_name\n found.field.widget.autofill_deepness = autofill[0]\n found.field.widget.autofill_url = autofill[1]\n found.field.widget.autofill = autofill[2:]\n else:\n\n # Set we don't have autofill for this field\n atr['autofill'] = None\n\n # Check if we have to replace the widget with a newer one\n if isinstance(found.field.widget, Select) and not isinstance(found.field.widget, DynamicSelect):\n if not isinstance(found.field.widget, MultiStaticSelect):\n found.field.widget = StaticSelect(wattrs)\n found.field.widget.choices = found.field.choices\n found.field.widget.is_required = wrequired\n found.field.widget.form_name = self.form_name\n found.field.widget.field_name = infield.html_name\n\n # Fill all attributes\n for (attribute, default) in attributes:\n if attribute not in atr.keys():\n atr[attribute] = default\n # Fill label\n if atr['label'] is True:\n atr['label'] = found.label\n # Set language\n flang = getattr(found.field, \"set_language\", None)\n if flang:\n flang(self.__language)\n flang = getattr(found.field.widget, \"set_language\", None)\n if flang:\n flang(self.__language)\n # Attach the element\n fields.append(atr)\n # Remember we have processed it\n processed.append(found.__dict__[check_system])\n else:\n raise IOError(\"Unknown field '{0}' specified in group '{1}'\".format(f, token['name']))\n\n token['fields'] = fields\n 
groups.append(token)\n\n # Add the rest of attributes we didn't use yet\n if initial:\n fields = []\n for infield in list_fields:\n if infield.__dict__[check_system] not in processed:\n # Get attributes (required and original attributes)\n wattrs = infield.field.widget.attrs\n wrequired = infield.field.widget.is_required\n # Prepare attr\n atr = {}\n # Fill base attributes\n atr['name'] = infield.html_name\n atr['input'] = infield\n atr['focus'] = False\n # Set focus\n if focus_must is None:\n if focus_first is None:\n focus_first = atr\n if wrequired:\n focus_must = atr\n # Autocomplete\n if 'autofill' in dir(self.Meta):\n autofill = self.Meta.autofill.get(infield.html_name, None)\n atr['autofill'] = autofill\n if autofill:\n # Check format of the request\n autokind = autofill[0]\n if type(autokind) == str:\n # Get old information\n\n # Using new format\n if autokind == 'select':\n # If autofill is True for this field set the DynamicSelect widget\n infield.field.widget = DynamicSelect(wattrs)\n elif autokind == 'multiselect':\n # If autofill is True for this field set the DynamicSelect widget\n infield.field.widget = MultiDynamicSelect(wattrs)\n elif autokind == 'input':\n # If autofill is True for this field set the DynamicSelect widget\n infield.field.widget = DynamicInput(wattrs)\n else:\n raise IOError(\"Autofill filled using new format but autokind is '{}' and I only know 'input' or 'select'\".format(autokind))\n\n # Configure the field\n infield.field.widget.is_required = wrequired\n infield.field.widget.form_name = self.form_name\n infield.field.widget.field_name = infield.html_name\n infield.field.widget.autofill_deepness = autofill[1]\n infield.field.widget.autofill_url = autofill[2]\n infield.field.widget.autofill = autofill[3:]\n else:\n # Get old information [COMPATIBILITY WITH OLD VERSION]\n # If autofill is True for this field set the DynamicSelect widget\n infield.field.widget = DynamicSelect(wattrs)\n infield.field.widget.is_required = wrequired\n infield.field.widget.form_name = self.form_name\n infield.field.widget.field_name = infield.html_name\n infield.field.widget.autofill_deepness = autofill[0]\n infield.field.widget.autofill_url = autofill[1]\n infield.field.widget.autofill = autofill[2:]\n else:\n\n # Set we don't have autofill for this field\n atr['autofill'] = None\n\n # Check if we have to replace the widget with a newer one\n if isinstance(infield.field.widget, Select) and not isinstance(infield.field.widget, DynamicSelect):\n if isinstance(infield.field, NullBooleanField):\n infield.field.widget = CheckboxInput(wattrs)\n elif not isinstance(infield.field.widget, MultiStaticSelect):\n infield.field.widget = StaticSelect(wattrs)\n if hasattr(infield.field.widget, 'choices') and hasattr(infield.field, 'choices'):\n infield.field.widget.choices = infield.field.choices\n infield.field.widget.is_required = wrequired\n infield.field.widget.form_name = self.form_name\n infield.field.widget.field_name = infield.html_name\n\n # Fill all attributes\n for (attribute, default) in attributes:\n if attribute not in atr.keys():\n atr[attribute] = default\n # Fill label\n if atr['label'] is True:\n atr['label'] = infield.label\n # Set language\n flang = getattr(infield.field, \"set_language\", None)\n if flang:\n flang(self.__language)\n flang = getattr(infield.field.widget, \"set_language\", None)\n if flang:\n flang(self.__language)\n # Attach the attribute\n fields.append(atr)\n\n # Save the new elements\n if fields:\n groups.append({'name': None, 'columns': 12, 'fields': 
fields})\n\n # Set focus\n if focus_must:\n focus_must['focus'] = True\n elif focus_first is not None:\n focus_first['focus'] = True\n\n # Return the resulting groups\n return groups",
"def read_tpld_stats(self):\n \"\"\"\n :return: dictionary {tpld index {group name {stat name: value}}}.\n Sea XenaTpld.stats_captions.\n \"\"\"\n payloads_stats = OrderedDict()\n for tpld in self.tplds.values():\n payloads_stats[tpld] = tpld.read_stats()\n return payloads_stats",
"def read_stats(self):\n \"\"\" Read current statistics from chassis.\n\n :return: dictionary {tpld full index {group name {stat name: stat value}}}\n \"\"\"\n\n self.statistics = TgnObjectsDict()\n for port in self.session.ports.values():\n for tpld in port.tplds.values():\n self.statistics[tpld] = tpld.read_stats()\n return self.statistics",
"def __get_stat_display(self, stats, layer):\n \"\"\"Return a dict of dict with all the stats display.\n stats: Global stats dict\n layer: ~ cs_status\n \"None\": standalone or server mode\n \"Connected\": Client is connected to a Glances server\n \"SNMP\": Client is connected to a SNMP server\n \"Disconnected\": Client is disconnected from the server\n\n :returns: dict of dict\n * key: plugin name\n * value: dict returned by the get_stats_display Plugin method\n \"\"\"\n ret = {}\n\n for p in stats.getPluginsList(enable=False):\n if p == 'quicklook' or p == 'processlist':\n # processlist is done later\n # because we need to know how many processes could be displayed\n continue\n\n # Compute the plugin max size\n plugin_max_width = None\n if p in self._left_sidebar:\n plugin_max_width = max(self._left_sidebar_min_width,\n self.screen.getmaxyx()[1] - 105)\n plugin_max_width = min(self._left_sidebar_max_width,\n plugin_max_width)\n\n # Get the view\n ret[p] = stats.get_plugin(p).get_stats_display(args=self.args,\n max_width=plugin_max_width)\n\n return ret",
"def _dump_stats_group(self, title, items, normal_formatter=None,\n verbose_formatter=None):\n \"\"\"Dump a statistics group.\n \n In verbose mode, do so as a config file so\n that other processors can load the information if they want to.\n :param normal_formatter: the callable to apply to the value\n before displaying it in normal mode\n :param verbose_formatter: the callable to apply to the value\n before displaying it in verbose mode\n \"\"\"\n if self.verbose:\n self.outf.write(\"[%s]\\n\" % (title,))\n for name, value in items:\n if verbose_formatter is not None:\n value = verbose_formatter(value)\n if type(name) == str:\n name = name.replace(' ', '-')\n self.outf.write(\"%s = %s\\n\" % (name, value))\n self.outf.write(\"\\n\")\n else:\n self.outf.write(\"%s:\\n\" % (title,))\n for name, value in items:\n if normal_formatter is not None:\n value = normal_formatter(value)\n self.outf.write(\"\\t%s\\t%s\\n\" % (value, name))",
"def stats(request, date_offset=0, fields=None, title_prefix=None, model='WikiItem'):\n \"\"\"\n In addition to chart data in data['chart'], send statistics data to view in data['stats']\n \"\"\"\n data = {}\n\n modified_chart_data = data['chart']['chartdata']\n if 'y2' in data['chart']['chartdata']:\n matrix = db.Columns([modified_chart_data['y1'], modified_chart_data['y2']], ddof=0, tall=True)\n else:\n fields = ['date/time'] + fields\n matrix = db.Columns([modified_chart_data['x'], modified_chart_data['y']], ddof=0, tall=True)\n if fields and len(fields) > 1:\n fields = fields[:2]\n else:\n fields = [\n data['chart']['chartdata'].get('name1') or 'time',\n data['chart']['chartdata'].get('name2') or data['chart']['chartdata'].get('name') or 'value',\n ]\n fields = util.pluralize_field_names(fields)\n data.update({\n 'stats': {\n 'fields': fields,\n 'heading': 'Statistics',\n 'cov': zip(fields, matrix.cov()),\n 'R': zip(fields, matrix.rho),\n },\n })\n data['chart']['chartdata'] = modified_chart_data\n data['chart']['chart_title'] = 'Time Series'\n return render_to_response('miner/stats.html', data)",
"def format_stats(stats):\n \"\"\"Given a dictionary following this layout:\n\n {\n 'encoded:label': 'Encoded',\n 'encoded:value': 'Yes',\n 'encoded:description': 'Indicates if the column is encoded',\n 'encoded:include': True,\n\n 'size:label': 'Size',\n 'size:value': 128,\n 'size:description': 'Size of the table in MB',\n 'size:include': True,\n }\n\n format_stats will convert the dict into this structure:\n\n {\n 'encoded': {\n 'id': 'encoded',\n 'label': 'Encoded',\n 'value': 'Yes',\n 'description': 'Indicates if the column is encoded',\n 'include': True\n },\n 'size': {\n 'id': 'size',\n 'label': 'Size',\n 'value': 128,\n 'description': 'Size of the table in MB',\n 'include': True\n }\n }\n \"\"\"\n stats_collector = {}\n for stat_key, stat_value in stats.items():\n stat_id, stat_field = stat_key.split(\":\")\n\n stats_collector.setdefault(stat_id, {\"id\": stat_id})\n stats_collector[stat_id][stat_field] = stat_value\n\n # strip out all the stats we don't want\n stats_collector = {\n stat_id: stats\n for stat_id, stats in stats_collector.items()\n if stats.get('include', False)\n }\n\n # we always have a 'has_stats' field, it's never included\n has_stats = {\n 'id': 'has_stats',\n 'label': 'Has Stats?',\n 'value': len(stats_collector) > 0,\n 'description': 'Indicates whether there are statistics for this table',\n 'include': False,\n }\n stats_collector['has_stats'] = has_stats\n return stats_collector",
"def make_statistics_information(info):\n \"\"\"Make statistics information table.\"\"\"\n if not info.splits.total_num_examples:\n # That means that we have yet to calculate the statistics for this.\n return \"None computed\"\n\n stats = [(info.splits.total_num_examples, \"ALL\")]\n for split_name, split_info in info.splits.items():\n stats.append((split_info.num_examples, split_name.upper()))\n # Sort reverse on number of examples.\n stats.sort(reverse=True)\n stats = \"\\n\".join([\n \"{0:10} | {1:>10,}\".format(name, num_exs) for (num_exs, name) in stats\n ])\n return STATISTICS_TABLE.format(split_statistics=stats)",
"def build_stats(counts):\n \"\"\"Return stats information from counts structure.\"\"\"\n stats = {\n 'status': 0,\n 'reportnum': counts['reportnum'],\n 'title': counts['title'],\n 'author': counts['auth_group'],\n 'url': counts['url'],\n 'doi': counts['doi'],\n 'misc': counts['misc'],\n }\n stats_str = \"%(status)s-%(reportnum)s-%(title)s-%(author)s-%(url)s-%(doi)s-%(misc)s\" % stats\n stats[\"old_stats_str\"] = stats_str\n stats[\"date\"] = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n stats[\"version\"] = version\n return stats",
"def stats(self) -> pd.DataFrame:\n \"\"\"Statistics about flights contained in the structure.\n Useful for a meaningful representation.\n \"\"\"\n key = [\"icao24\", \"callsign\"] if self.flight_ids is None else \"flight_id\"\n return (\n self.data.groupby(key)[[\"timestamp\"]]\n .count()\n .sort_values(\"timestamp\", ascending=False)\n .rename(columns={\"timestamp\": \"count\"})\n )"
] |
[
0.6900011897087097,
0.6807190179824829,
0.6782200932502747,
0.6761449575424194,
0.6706290245056152,
0.6647806167602539,
0.6644486784934998,
0.6615530252456665,
0.6586448550224304,
0.6582878828048706,
0.6572874784469604,
0.6539151668548584
] |
Read current ports statistics from chassis.
:return: dictionary {port name {group name {stat name: stat value}}}
|
def read_stats(self):
    """ Read current ports statistics from chassis.

    :return: dictionary {port name {group name {stat name: stat value}}}
    """
    self.statistics = TgnObjectsDict()
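    # TgnObjectsDict is keyed by port objects; each value is that port's per-group stats.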
    for port in self.session.ports.values():
        self.statistics[port] = port.read_port_stats()
    return self.statistics
|
[
"def read_stats(self):\n \"\"\" Read current statistics from chassis.\n\n :return: dictionary {tpld full index {group name {stat name: stat value}}}\n \"\"\"\n\n self.statistics = TgnObjectsDict()\n for port in self.session.ports.values():\n for tpld in port.tplds.values():\n self.statistics[tpld] = tpld.read_stats()\n return self.statistics",
"def read_stats(self, *stats):\n \"\"\" Read port statistics from chassis.\n\n :param stats: list of requested statistics to read, if empty - read all statistics.\n \"\"\"\n\n self.statistics = OrderedDict()\n for port in self.ports:\n port_stats = IxeStatTotal(port).get_attributes(FLAG_RDONLY, *stats)\n port_stats.update({c + '_rate': v for c, v in\n IxeStatRate(port).get_attributes(FLAG_RDONLY, *stats).items()})\n self.statistics[str(port)] = port_stats\n return self.statistics",
"def read_stats(self):\n \"\"\" Read current statistics from chassis.\n\n :return: dictionary {stream: {tx: {stat name: stat value}} rx: {tpld: {stat group {stat name: value}}}}\n \"\"\"\n\n self.tx_statistics = TgnObjectsDict()\n for port in self.session.ports.values():\n for stream in port.streams.values():\n self.tx_statistics[stream] = stream.read_stats()\n\n tpld_statistics = XenaTpldsStats(self.session).read_stats()\n\n self.statistics = TgnObjectsDict()\n for stream, stream_stats in self.tx_statistics.items():\n self.statistics[stream] = OrderedDict()\n self.statistics[stream]['tx'] = stream_stats\n self.statistics[stream]['rx'] = OrderedDict()\n stream_tpld = stream.get_attribute('ps_tpldid')\n for tpld, tpld_stats in tpld_statistics.items():\n if tpld.id == stream_tpld:\n self.statistics[stream]['rx'][tpld] = tpld_stats\n return self.statistics",
"def read_port_stats(self):\n \"\"\"\n :return: dictionary {group name {stat name: value}}.\n Sea XenaPort.stats_captions.\n \"\"\"\n\n stats_with_captions = OrderedDict()\n for stat_name in self.stats_captions.keys():\n stats_with_captions[stat_name] = self.read_stat(self.stats_captions[stat_name], stat_name)\n return stats_with_captions",
"def read_stats(self, *stats):\n \"\"\" Read stream statistics from chassis.\n\n :param stats: list of requested statistics to read, if empty - read all statistics.\n \"\"\"\n from ixexplorer.ixe_stream import IxePacketGroupStream\n sleep_time = 0.1 # in cases we only want few counters but very fast we need a smaller sleep time\n if not stats:\n stats = [m.attrname for m in IxePgStats.__tcl_members__ if m.flags & FLAG_RDONLY]\n sleep_time = 1\n\n # Read twice to refresh rate statistics.\n for port in self.tx_ports_streams:\n port.api.call_rc('streamTransmitStats get {} 1 4096'.format(port.uri))\n for rx_port in self.rx_ports:\n rx_port.api.call_rc('packetGroupStats get {} 0 65536'.format(rx_port.uri))\n time.sleep(sleep_time)\n\n self.statistics = OrderedDict()\n for tx_port, streams in self.tx_ports_streams.items():\n for stream in streams:\n stream_stats = OrderedDict()\n tx_port.api.call_rc('streamTransmitStats get {} 1 4096'.format(tx_port.uri))\n stream_tx_stats = IxeStreamTxStats(tx_port, stream.index)\n stream_stats_tx = {c: v for c, v in stream_tx_stats.get_attributes(FLAG_RDONLY).items()}\n stream_stats['tx'] = stream_stats_tx\n stream_stat_pgid = IxePacketGroupStream(stream).groupId\n stream_stats_pg = pg_stats_dict()\n for port in self.session.ports.values():\n stream_stats_pg[str(port)] = OrderedDict(zip(stats, [-1] * len(stats)))\n for rx_port in self.rx_ports:\n if not stream.rx_ports or rx_port in stream.rx_ports:\n rx_port.api.call_rc('packetGroupStats get {} 0 65536'.format(rx_port.uri))\n pg_stats = IxePgStats(rx_port, stream_stat_pgid)\n stream_stats_pg[str(rx_port)] = pg_stats.read_stats(*stats)\n stream_stats['rx'] = stream_stats_pg\n self.statistics[str(stream)] = stream_stats\n return self.statistics",
"def get_ports(self):\n 'Reads logical values at pins.'\n return (self.bus.read_byte_data(self.address, self.STATUS_PORT0), self.bus.read_byte_data(self.address, self.STATUS_PORT1));",
"def ports(self):\n \"\"\"\n :return: dictionary {name: object} of all ports.\n \"\"\"\n\n ports = {}\n for chassis in self.chassis_list.values():\n ports.update({str(p): p for p in chassis.get_objects_by_type('port')})\n return ports",
"def read_stats(self):\n \"\"\"\n :return: dictionary {group name {stat name: value}}.\n Sea XenaTpld.stats_captions.\n \"\"\"\n\n stats_with_captions = OrderedDict()\n for stat_name in self.stats_captions.keys():\n stats_with_captions[stat_name] = self.read_stat(self.stats_captions[stat_name], stat_name)\n return stats_with_captions",
"def get_port_stats(port):\n \"\"\"\n Iterate over connections and count states for specified port\n :param port: port for which stats are collected\n :return: Counter with port states\n \"\"\"\n cnts = defaultdict(int)\n for c in psutil.net_connections():\n c_port = c.laddr[1]\n if c_port != port:\n continue\n status = c.status.lower()\n cnts[status] += 1\n return cnts",
"def get_active_ports(self):\n \"\"\"\n :return: dictionary {index: object} of all ports.\n \"\"\"\n\n if not self.resource_groups:\n return self.ports\n else:\n active_ports = OrderedDict()\n for resource_group in self.resource_groups.values():\n for active_port in resource_group.active_ports:\n active_ports[active_port] = self.ports[active_port]\n return active_ports",
"def get_stats(self):\n \"\"\"Retrieves the bus statistics.\n\n Use like so:\n\n >>> stats = bus.get_stats()\n >>> print(stats)\n std_data: 0, std_remote: 0, ext_data: 0, ext_remote: 0, err_frame: 0, bus_load: 0.0%, overruns: 0\n\n :returns: bus statistics.\n :rtype: can.interfaces.kvaser.structures.BusStatistics\n \"\"\"\n canRequestBusStatistics(self._write_handle)\n stats = structures.BusStatistics()\n canGetBusStatistics(self._write_handle,\n ctypes.pointer(stats),\n ctypes.sizeof(stats))\n return stats",
"def read_stat():\n \"\"\"\n Mocks read_stat as this is a Linux-specific operation.\n \"\"\"\n return [\n {\n \"times\": {\n \"user\": random.randint(0, 999999999),\n \"nice\": random.randint(0, 999999999),\n \"sys\": random.randint(0, 999999999),\n \"idle\": random.randint(0, 999999999),\n \"irq\": random.randint(0, 999999999),\n }\n }\n ]"
] |
[
0.8554100394248962,
0.8375643491744995,
0.7915400862693787,
0.7750445008277893,
0.7738721966743469,
0.7225189208984375,
0.7164445519447327,
0.7023173570632935,
0.6989346146583557,
0.6913663744926453,
0.689207136631012,
0.6844931840896606
] |