Dataset columns:

query: string, length 1 to 46.9k
pos: string, length 75 to 104k
neg: list, 12 items per row
scores: list, 12 items per row
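Each row pairs a natural-language query with one matching code snippet (pos), twelve non-matching snippets (neg), and twelve scores that appear to align one-to-one with the negatives. Below is a minimal sketch of reading such rows; the JSON-lines file name and loading approach are assumptions, and only the four field names come from the schema above.

import json

# Assumed file name and JSON-lines layout; only the field names
# query/pos/neg/scores are taken from the schema above.
with open("train.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)
        query = row["query"]    # natural-language description
        positive = row["pos"]   # code snippet that answers the query
        # The 12 negatives line up with the 12 scores.
        for snippet, score in zip(row["neg"], row["scores"]):
            print("%.4f  %s" % (score, snippet.splitlines()[0]))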
Writes a block of bytes to the bus using I2C format to the specified command register
def write_block_data(self, cmd, block):
    """
    Writes a block of bytes to the bus using I2C format to the specified
    command register
    """
    self.bus.write_i2c_block_data(self.address, cmd, block)
    self.log.debug(
        "write_block_data: Wrote [%s] to command register 0x%02X" % (
            ', '.join(['0x%02X' % x for x in block]),
            cmd
        )
    )
[ "def write_byte(self, cmd, value):\n \"\"\"\n Writes an 8-bit byte to the specified command register\n \"\"\"\n self.bus.write_byte_data(self.address, cmd, value)\n self.log.debug(\n \"write_byte: Wrote 0x%02X to command register 0x%02X\" % (\n value, cmd\n )\n )", "def write_i2c_block_data(self, address, register, value):\n \"\"\"\n I2C block transactions do not limit the number of bytes transferred\n but the SMBus layer places a limit of 32 bytes.\n\n I2C Block Write: i2c_smbus_write_i2c_block_data()\n ==================================================\n\n The opposite of the Block Read command, this writes bytes to\n a device, to a designated register that is specified through the\n Comm byte. Note that command lengths of 0, 2, or more bytes are\n seupported as they are indistinguishable from data.\n\n S Addr Wr [A] Comm [A] Data [A] Data [A] ... [A] Data [A] P\n\n Functionality flag: I2C_FUNC_SMBUS_WRITE_I2C_BLOCK\n \"\"\"\n return self.smbus.write_i2c_block_data(address, register, value)", "def command(self, *cmd):\n \"\"\"\n Sends a command or sequence of commands through to the I²C address\n - maximum allowed is 32 bytes in one go.\n\n :param cmd: A spread of commands.\n :type cmd: int\n :raises luma.core.error.DeviceNotFoundError: I2C device could not be found.\n \"\"\"\n assert(len(cmd) <= 32)\n\n try:\n self._bus.write_i2c_block_data(self._addr, self._cmd_mode,\n list(cmd))\n except (IOError, OSError) as e:\n if e.errno in [errno.EREMOTEIO, errno.EIO]:\n # I/O error\n raise luma.core.error.DeviceNotFoundError(\n 'I2C device not found on address: 0x{0:02X}'.format(self._addr))\n else: # pragma: no cover\n raise", "def write_block_data(self, address, register, value):\n \"\"\"\n SMBus Block Write: i2c_smbus_write_block_data()\n ================================================\n\n The opposite of the Block Read command, this writes up to 32 bytes to\n a device, to a designated register that is specified through the\n Comm byte. The amount of data is specified in the Count byte.\n\n S Addr Wr [A] Comm [A] Count [A] Data [A] Data [A] ... 
[A] Data [A] P\n\n Functionality flag: I2C_FUNC_SMBUS_WRITE_BLOCK_DATA\n \"\"\"\n return self.smbus.write_block_data(address, register, value)", "def _write_register(self, reg, value):\n \"\"\"Write 16 bit value to register.\"\"\"\n self.buf[0] = reg\n self.buf[1] = (value >> 8) & 0xFF\n self.buf[2] = value & 0xFF\n with self.i2c_device as i2c:\n i2c.write(self.buf)", "def read_block_data(self, cmd, length):\n \"\"\"\n Read a block of bytes from the bus from the specified command register\n Amount of bytes read in is defined by length\n \"\"\"\n results = self.bus.read_i2c_block_data(self.address, cmd, length)\n self.log.debug(\n \"read_block_data: Read [%s] from command register 0x%02X\" % (\n ', '.join(['0x%02X' % x for x in results]),\n cmd\n )\n )\n return results", "protected void writeRegister(int register, int value) throws IOException {\n\n // create packet in data buffer\n byte packet[] = new byte[3];\n packet[0] = (byte)(register); // register byte\n packet[1] = (byte)(value>>8); // value MSB\n packet[2] = (byte)(value & 0xFF); // value LSB\n\n // write data to I2C device\n device.write(packet, 0, 3);\n }", "def write(self, reg, value):\n \"\"\"Write raw byte value to the specified register\n\n :param reg: the register number (0-69, 250-255)\n :param value: byte value\n \"\"\"\n # TODO: check reg: 0-69, 250-255\n self.__check_range('register_value', value)\n logger.debug(\"Write '%s' to register '%s'\" % (value, reg))\n self.__bus.write_byte_data(self.__address, reg, value)", "def _write8(self, reg, value):\n \"\"\"Write a 8-bit value to a register.\"\"\"\n self.i2c.write8(TCS34725_COMMAND_BIT | reg, value)", "async def i2c_write_request(self, command):\n \"\"\"\n This method performs an I2C write at a given I2C address,\n :param command: {\"method\": \"i2c_write_request\", \"params\": [I2C_DEVICE_ADDRESS, [DATA_TO_WRITE]]}\n :returns:No return message.\n \"\"\"\n device_address = int(command[0])\n params = command[1]\n params = [int(i) for i in params]\n await self.core.i2c_write_request(device_address, params)", "def read_i2c_block_data(self, address, register, length):\n \"\"\"\n I2C block transactions do not limit the number of bytes transferred\n but the SMBus layer places a limit of 32 bytes.\n\n I2C Block Read: i2c_smbus_read_i2c_block_data()\n ================================================\n\n This command reads a block of bytes from a device, from a\n designated register that is specified through the Comm byte.\n\n S Addr Wr [A] Comm [A]\n S Addr Rd [A] [Data] A [Data] A ... A [Data] NA P\n\n Functionality flag: I2C_FUNC_SMBUS_READ_I2C_BLOCK\n \"\"\"\n return self.smbus.read_i2c_block_data(address, register, length)", "def write_word(self, cmd, value):\n \"\"\"\n Writes a 16-bit word to the specified command register\n \"\"\"\n self.bus.write_word_data(self.address, cmd, value)\n self.log.debug(\n \"write_word: Wrote 0x%04X to command register 0x%02X\" % (\n value, cmd\n )\n )" ]
[ 0.8196828961372375, 0.8017083406448364, 0.7951396107673645, 0.7795867919921875, 0.7707298398017883, 0.7705806493759155, 0.7704622745513916, 0.7655630707740784, 0.7603850960731506, 0.7573021650314331, 0.7540051937103271, 0.7528988718986511 ]
Read an 8-bit byte directly from the bus
def read_raw_byte(self):
    """
    Read an 8-bit byte directly from the bus
    """
    result = self.bus.read_byte(self.address)
    self.log.debug("read_raw_byte: Read 0x%02X from the bus" % result)
    return result
[ "def swd_read8(self, offset):\n \"\"\"Gets a unit of ``8`` bits from the input buffer.\n\n Args:\n self (JLink): the ``JLink`` instance\n offset (int): the offset (in bits) from which to start reading\n\n Returns:\n The integer read from the input buffer.\n \"\"\"\n value = self._dll.JLINK_SWD_GetU8(offset)\n return ctypes.c_uint8(value).value", "def write_raw_byte(self, value):\n \"\"\"\n Writes an 8-bit byte directly to the bus\n \"\"\"\n self.bus.write_byte(self.address, value)\n self.log.debug(\"write_raw_byte: Wrote 0x%02X\" % value)", "def read_byte(self, addr):\n \"\"\"Read a single byte from static memory area (blocks 0-14).\n \"\"\"\n if addr < 0 or addr > 127:\n raise ValueError(\"invalid byte address\")\n log.debug(\"read byte at address {0} ({0:02X}h)\".format(addr))\n cmd = \"\\x01\" + chr(addr) + \"\\x00\" + self.uid\n return self.transceive(cmd)[-1]", "private static byte readByte(boolean[] rawbits, int startIndex) {\n int n = rawbits.length - startIndex;\n if (n >= 8) {\n return (byte) readCode(rawbits, startIndex, 8);\n }\n return (byte) (readCode(rawbits, startIndex, n) << (8 - n));\n }", "def read_block(self, block):\n \"\"\"Read an 8-byte data block at address (block * 8).\n \"\"\"\n if block < 0 or block > 255:\n raise ValueError(\"invalid block number\")\n log.debug(\"read block {0}\".format(block))\n cmd = \"\\x02\" + chr(block) + 8 * chr(0) + self.uid\n return self.transceive(cmd)[1:9]", "def memory_read8(self, addr, num_bytes, zone=None):\n \"\"\"Reads memory from the target system in units of bytes.\n\n Args:\n self (JLink): the ``JLink`` instance\n addr (int): start address to read from\n num_bytes (int): number of bytes to read\n zone (str): memory zone to read from\n\n Returns:\n List of bytes read from the target system.\n\n Raises:\n JLinkException: if memory could not be read.\n \"\"\"\n return self.memory_read(addr, num_bytes, zone=zone, nbits=8)", "def ReadBytes(self, address, num_bytes):\n \"\"\"Reads at most num_bytes starting from offset <address>.\"\"\"\n pdata = ctypes.c_void_p(0)\n data_cnt = ctypes.c_uint32(0)\n\n ret = libc.mach_vm_read(self.task, ctypes.c_ulonglong(address),\n ctypes.c_longlong(num_bytes), ctypes.pointer(pdata),\n ctypes.pointer(data_cnt))\n if ret:\n raise process_error.ProcessError(\"Error in mach_vm_read, ret=%s\" % ret)\n buf = ctypes.string_at(pdata.value, data_cnt.value)\n libc.vm_deallocate(self.mytask, pdata, data_cnt)\n return buf", "def write_byte(self, cmd, value):\n \"\"\"\n Writes an 8-bit byte to the specified command register\n \"\"\"\n self.bus.write_byte_data(self.address, cmd, value)\n self.log.debug(\n \"write_byte: Wrote 0x%02X to command register 0x%02X\" % (\n value, cmd\n )\n )", "def read_byte(self, address):\n \"\"\"Reads unadressed byte from a device. 
\"\"\"\n LOGGER.debug(\"Reading byte from device %s!\", hex(address))\n return self.driver.read_byte(address)", "def peek_8(library, session, address):\n \"\"\"Read an 8-bit value from the specified address.\n\n Corresponds to viPeek8 function of the VISA library.\n\n :param library: the visa library wrapped by ctypes.\n :param session: Unique logical identifier to a session.\n :param address: Source address to read the value.\n :return: Data read from bus, return value of the library call.\n :rtype: bytes, :class:`pyvisa.constants.StatusCode`\n \"\"\"\n value_8 = ViUInt8()\n ret = library.viPeek8(session, address, byref(value_8))\n return value_8.value, ret", "def read_one_bit\n unless self.length * 8 - bit_pointer > 0\n raise BinaryException.new(\"Readable buffer doesn't exist\" +\n \"(#{self.length * 8 - bit_pointer}bit exists).\")\n end\n response = to_i(bit_pointer/8)[7 - bit_pointer%8]\n bit_pointer_inc(1)\n return response\n end", "def read(self, num_bytes):\n \"\"\"Reads data from the pyboard over the serial port.\"\"\"\n self.check_pyb()\n try:\n return self.pyb.serial.read(num_bytes)\n except (serial.serialutil.SerialException, TypeError):\n # Write failed - assume that we got disconnected\n self.close()\n raise DeviceError('serial port %s closed' % self.dev_name_short)" ]
[ 0.7532457709312439, 0.753187894821167, 0.7348690032958984, 0.7280040979385376, 0.7246701717376709, 0.7218279242515564, 0.7157515287399292, 0.71510249376297, 0.714870035648346, 0.7133709192276001, 0.7075726389884949, 0.707072913646698 ]
Read a block of bytes from the bus from the specified command register Amount of bytes read in is defined by length
def read_block_data(self, cmd, length):
    """
    Read a block of bytes from the bus from the specified command register
    Amount of bytes read in is defined by length
    """
    results = self.bus.read_i2c_block_data(self.address, cmd, length)
    self.log.debug(
        "read_block_data: Read [%s] from command register 0x%02X" % (
            ', '.join(['0x%02X' % x for x in results]),
            cmd
        )
    )
    return results
[ "def read_i2c_block_data(self, address, register, length):\n \"\"\"\n I2C block transactions do not limit the number of bytes transferred\n but the SMBus layer places a limit of 32 bytes.\n\n I2C Block Read: i2c_smbus_read_i2c_block_data()\n ================================================\n\n This command reads a block of bytes from a device, from a\n designated register that is specified through the Comm byte.\n\n S Addr Wr [A] Comm [A]\n S Addr Rd [A] [Data] A [Data] A ... A [Data] NA P\n\n Functionality flag: I2C_FUNC_SMBUS_READ_I2C_BLOCK\n \"\"\"\n return self.smbus.read_i2c_block_data(address, register, length)", "def read_unsigned_byte(self, cmd):\n \"\"\"\n Read an unsigned byte from the specified command register\n \"\"\"\n result = self.bus.read_byte_data(self.address, cmd)\n self.log.debug(\n \"read_unsigned_byte: Read 0x%02X from command register 0x%02X\" % (\n result, cmd\n )\n )\n return result", "def read_reg(self, addr):\n \"\"\" Read memory address in target \"\"\"\n # we don't call check_command here because read_reg() function is called\n # when detecting chip type, and the way we check for success (STATUS_BYTES_LENGTH) is different\n # for different chip types (!)\n val, data = self.command(self.ESP_READ_REG, struct.pack('<I', addr))\n if byte(data, 0) != 0:\n raise FatalError.WithResult(\"Failed to read register address %08x\" % addr, data)\n return val", "def write_block_data(self, cmd, block):\n \"\"\"\n Writes a block of bytes to the bus using I2C format to the specified\n command register\n \"\"\"\n self.bus.write_i2c_block_data(self.address, cmd, block)\n self.log.debug(\n \"write_block_data: Wrote [%s] to command register 0x%02X\" % (\n ', '.join(['0x%02X' % x for x in block]),\n cmd\n )\n )", "def read_i2c_block_data(self, i2c_addr, register, length, force=None):\n \"\"\"\n Read a block of byte data from a given register.\n\n :param i2c_addr: i2c address\n :type i2c_addr: int\n :param register: Start register\n :type register: int\n :param length: Desired block length\n :type length: int\n :param force:\n :type force: Boolean\n :return: List of bytes\n :rtype: list\n \"\"\"\n if length > I2C_SMBUS_BLOCK_MAX:\n raise ValueError(\"Desired block length over %d bytes\" % I2C_SMBUS_BLOCK_MAX)\n self._set_address(i2c_addr, force=force)\n msg = i2c_smbus_ioctl_data.create(\n read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_I2C_BLOCK_DATA\n )\n msg.data.contents.byte = length\n ioctl(self.fd, I2C_SMBUS, msg)\n return msg.data.contents.block[1:length + 1]", "def read_register(self, addr, numBytes):\n \"\"\"Reads @numBytes bytes from the grizzly starting at @addr. Due\n to packet format, cannot read more than 127 packets at a time.\n Returns a byte array of the requested data in little endian.\n @addr should be from the Addr class e.g. 
Addr.Speed\"\"\"\n assert numBytes <= 0x7f, \"Cannot read more than 127 bytes at a time\"\n cmd = chr(addr) + chr(numBytes)\n cmd += (16 - len(cmd)) * chr(0)\n return self._dev.exchange_bytes(cmd)", "def write_byte(self, cmd, value):\n \"\"\"\n Writes an 8-bit byte to the specified command register\n \"\"\"\n self.bus.write_byte_data(self.address, cmd, value)\n self.log.debug(\n \"write_byte: Wrote 0x%02X to command register 0x%02X\" % (\n value, cmd\n )\n )", "def _read_register(self, reg):\n \"\"\"Read 16 bit register value.\"\"\"\n self.buf[0] = reg\n with self.i2c_device as i2c:\n i2c.write(self.buf, end=1, stop=False)\n i2c.readinto(self.buf, end=2)\n return self.buf[0] << 8 | self.buf[1]", "def read_unsigned_word(self, cmd, little_endian=True):\n \"\"\"\n Read an unsigned word from the specified command register\n We assume the data is in little endian mode, if it is in big endian\n mode then set little_endian to False\n \"\"\"\n result = self.bus.read_word_data(self.address, cmd)\n\n if not little_endian:\n result = ((result << 8) & 0xFF00) + (result >> 8)\n\n self.log.debug(\n \"read_unsigned_word: Read 0x%04X from command register 0x%02X\" % (\n result, cmd\n )\n )\n return result", "def _recv(self):\n '''read some bytes into self.buf'''\n from . import mavutil\n start_time = time.time()\n while time.time() < start_time + self.timeout:\n m = self.mav.recv_match(condition='SERIAL_CONTROL.count!=0',\n type='SERIAL_CONTROL', blocking=False, timeout=0)\n if m is not None and m.count != 0:\n break\n self.mav.mav.serial_control_send(self.port,\n mavutil.mavlink.SERIAL_CONTROL_FLAG_EXCLUSIVE |\n mavutil.mavlink.SERIAL_CONTROL_FLAG_RESPOND,\n 0,\n 0,\n 0, [0]*70)\n m = self.mav.recv_match(condition='SERIAL_CONTROL.count!=0',\n type='SERIAL_CONTROL', blocking=True, timeout=0.01)\n if m is not None and m.count != 0:\n break\n if m is not None:\n if self._debug > 2:\n print(m)\n data = m.data[:m.count]\n self.buf += ''.join(str(chr(x)) for x in data)", "def get_bytes(self, addr, size, **kwargs):\r\n '''Reading bytes of any arbitrary size\r\n\r\n Parameters\r\n ----------.\r\n addr : int\r\n The register address.\r\n size : int\r\n Byte length of the value.\r\n\r\n Returns\r\n -------\r\n data : iterable\r\n Byte array.\r\n '''\r\n return self._intf.read(self._conf['base_addr'] + addr, size)", "def devop_read(self, args, bustype):\n '''read from device'''\n if len(args) < 5:\n print(\"Usage: devop read <spi|i2c> name bus address regstart count\")\n return\n name = args[0]\n bus = int(args[1],base=0)\n address = int(args[2],base=0)\n reg = int(args[3],base=0)\n count = int(args[4],base=0)\n self.master.mav.device_op_read_send(self.target_system,\n self.target_component,\n self.request_id,\n bustype,\n bus,\n address,\n name,\n reg,\n count)\n self.request_id += 1" ]
[ 0.7847602367401123, 0.7669231295585632, 0.7336359024047852, 0.7215681672096252, 0.7152820825576782, 0.7151080965995789, 0.7145650386810303, 0.7091506123542786, 0.7051747441291809, 0.7051296234130859, 0.7014334201812744, 0.7014119625091553 ]
Read an unsigned byte from the specified command register
def read_unsigned_byte(self, cmd):
    """
    Read an unsigned byte from the specified command register
    """
    result = self.bus.read_byte_data(self.address, cmd)
    self.log.debug(
        "read_unsigned_byte: Read 0x%02X from command register 0x%02X" % (
            result, cmd
        )
    )
    return result
[ "def read_unsigned_word(self, cmd, little_endian=True):\n \"\"\"\n Read an unsigned word from the specified command register\n We assume the data is in little endian mode, if it is in big endian\n mode then set little_endian to False\n \"\"\"\n result = self.bus.read_word_data(self.address, cmd)\n\n if not little_endian:\n result = ((result << 8) & 0xFF00) + (result >> 8)\n\n self.log.debug(\n \"read_unsigned_word: Read 0x%04X from command register 0x%02X\" % (\n result, cmd\n )\n )\n return result", "def _read_register(self, reg):\n \"\"\"Read 16 bit register value.\"\"\"\n self.buf[0] = reg\n with self.i2c_device as i2c:\n i2c.write(self.buf, end=1, stop=False)\n i2c.readinto(self.buf, end=2)\n return self.buf[0] << 8 | self.buf[1]", "def read_reg(self, addr):\n \"\"\" Read memory address in target \"\"\"\n # we don't call check_command here because read_reg() function is called\n # when detecting chip type, and the way we check for success (STATUS_BYTES_LENGTH) is different\n # for different chip types (!)\n val, data = self.command(self.ESP_READ_REG, struct.pack('<I', addr))\n if byte(data, 0) != 0:\n raise FatalError.WithResult(\"Failed to read register address %08x\" % addr, data)\n return val", "def read_byte(self, addr):\n \"\"\"Read a single byte from static memory area (blocks 0-14).\n \"\"\"\n if addr < 0 or addr > 127:\n raise ValueError(\"invalid byte address\")\n log.debug(\"read byte at address {0} ({0:02X}h)\".format(addr))\n cmd = \"\\x01\" + chr(addr) + \"\\x00\" + self.uid\n return self.transceive(cmd)[-1]", "def read_block_data(self, cmd, length):\n \"\"\"\n Read a block of bytes from the bus from the specified command register\n Amount of bytes read in is defined by length\n \"\"\"\n results = self.bus.read_i2c_block_data(self.address, cmd, length)\n self.log.debug(\n \"read_block_data: Read [%s] from command register 0x%02X\" % (\n ', '.join(['0x%02X' % x for x in results]),\n cmd\n )\n )\n return results", "def read_bit(self, registeraddress, functioncode=2):\n \"\"\"Read one bit from the slave.\n\n Args:\n * registeraddress (int): The slave register address (use decimal numbers, not hex).\n * functioncode (int): Modbus function code. 
Can be 1 or 2.\n\n Returns:\n The bit value 0 or 1 (int).\n\n Raises:\n ValueError, TypeError, IOError\n\n \"\"\"\n _checkFunctioncode(functioncode, [1, 2])\n return self._genericCommand(functioncode, registeraddress)", "protected int readRegister(int register) throws IOException {\n\n device.write((byte)register);\n\n // create data buffer for receive data\n byte buffer[] = new byte[2]; // receive 16 bits (2 bytes)\n int byteCount = 0;\n try\n {\n byteCount = device.read(buffer, 0, 2);\n }\n catch (IOException e)\n {\n e.printStackTrace();\n }\n\n if(byteCount == 2){\n\n //System.out.println(\"-----------------------------------------------\");\n //System.out.println(\"[RX] \" + bytesToHex(buffer));\n //System.out.println(\"-----------------------------------------------\");\n short value = getShort(buffer, 0);\n\n // Shift 12-bit results right 4 bits for the ADS1015\n // No-shift required for the ADS1115\n if(bitShift > 0){\n value = (short) (value >> bitShift);\n }\n\n return value;\n }\n else{\n return 0;\n }\n }", "def read_register(self, registeraddress, numberOfDecimals=0, functioncode=3, signed=False):\n \"\"\"Read an integer from one 16-bit register in the slave, possibly scaling it.\n\n The slave register can hold integer values in the range 0 to 65535 (\"Unsigned INT16\").\n\n Args:\n * registeraddress (int): The slave register address (use decimal numbers, not hex).\n * numberOfDecimals (int): The number of decimals for content conversion.\n * functioncode (int): Modbus function code. Can be 3 or 4.\n * signed (bool): Whether the data should be interpreted as unsigned or signed.\n\n If a value of 77.0 is stored internally in the slave register as 770, then use ``numberOfDecimals=1``\n which will divide the received data by 10 before returning the value.\n\n Similarly ``numberOfDecimals=2`` will divide the received data by 100 before returning the value.\n\n Some manufacturers allow negative values for some registers. Instead of\n an allowed integer range 0 to 65535, a range -32768 to 32767 is allowed. This is\n implemented as any received value in the upper range (32768 to 65535) is\n interpreted as negative value (in the range -32768 to -1).\n\n Use the parameter ``signed=True`` if reading from a register that can hold\n negative values. Then upper range data will be automatically converted into\n negative return values (two's complement).\n\n ============== ================== ================ ===============\n ``signed`` Data type in slave Alternative name Range\n ============== ================== ================ ===============\n :const:`False` Unsigned INT16 Unsigned short 0 to 65535\n :const:`True` INT16 Short -32768 to 32767\n ============== ================== ================ ===============\n\n Returns:\n The register data in numerical value (int or float).\n\n Raises:\n ValueError, TypeError, IOError\n\n \"\"\"\n _checkFunctioncode(functioncode, [3, 4])\n _checkInt(numberOfDecimals, minvalue=0, maxvalue=10, description='number of decimals')\n _checkBool(signed, description='signed')\n return self._genericCommand(functioncode, registeraddress, numberOfDecimals=numberOfDecimals, signed=signed)", "def read_register(self, addr, numBytes):\n \"\"\"Reads @numBytes bytes from the grizzly starting at @addr. Due\n to packet format, cannot read more than 127 packets at a time.\n Returns a byte array of the requested data in little endian.\n @addr should be from the Addr class e.g. 
Addr.Speed\"\"\"\n assert numBytes <= 0x7f, \"Cannot read more than 127 bytes at a time\"\n cmd = chr(addr) + chr(numBytes)\n cmd += (16 - len(cmd)) * chr(0)\n return self._dev.exchange_bytes(cmd)", "def get_value(self, addr, size, offset, **kwargs):\r\n '''Reading a value of any arbitrary size (max. unsigned int 64) and offset from a register\r\n\r\n Parameters\r\n ----------\r\n addr : int\r\n The register address.\r\n size : int\r\n Bit size/length of the value.\r\n offset : int\r\n Offset of the value to be written to the register (in number of bits).\r\n\r\n Returns\r\n -------\r\n reg : int\r\n Register value.\r\n '''\r\n div_offset, mod_offset = divmod(offset, 8)\r\n div_size, mod_size = divmod(size + mod_offset, 8)\r\n if mod_size:\r\n div_size += 1\r\n ret = self._intf.read(self._base_addr + addr + div_offset, size=div_size)\r\n reg = BitLogic()\r\n reg.frombytes(tobytes(ret))\r\n return reg[size + mod_offset - 1:mod_offset].tovalue()", "def write_byte(self, cmd, value):\n \"\"\"\n Writes an 8-bit byte to the specified command register\n \"\"\"\n self.bus.write_byte_data(self.address, cmd, value)\n self.log.debug(\n \"write_byte: Wrote 0x%02X to command register 0x%02X\" % (\n value, cmd\n )\n )", "def get_bytes(self, addr, size, **kwargs):\r\n '''Reading bytes of any arbitrary size\r\n\r\n Parameters\r\n ----------.\r\n addr : int\r\n The register address.\r\n size : int\r\n Byte length of the value.\r\n\r\n Returns\r\n -------\r\n data : iterable\r\n Byte array.\r\n '''\r\n return self._intf.read(self._conf['base_addr'] + addr, size)" ]
[ 0.797266960144043, 0.772947371006012, 0.7609961032867432, 0.7418611645698547, 0.7390509247779846, 0.7367530465126038, 0.7359464168548584, 0.7352967262268066, 0.7338888049125671, 0.7202451229095459, 0.7109748721122742, 0.7084289789199829 ]
Read an unsigned word from the specified command register We assume the data is in little endian mode, if it is in big endian mode then set little_endian to False
def read_unsigned_word(self, cmd, little_endian=True):
    """
    Read an unsigned word from the specified command register
    We assume the data is in little endian mode, if it is in big endian
    mode then set little_endian to False
    """
    result = self.bus.read_word_data(self.address, cmd)

    if not little_endian:
        result = ((result << 8) & 0xFF00) + (result >> 8)

    self.log.debug(
        "read_unsigned_word: Read 0x%04X from command register 0x%02X" % (
            result, cmd
        )
    )
    return result
[ "def read_unsigned_byte(self, cmd):\n \"\"\"\n Read an unsigned byte from the specified command register\n \"\"\"\n result = self.bus.read_byte_data(self.address, cmd)\n self.log.debug(\n \"read_unsigned_byte: Read 0x%02X from command register 0x%02X\" % (\n result, cmd\n )\n )\n return result", "def write_word(self, cmd, value):\n \"\"\"\n Writes a 16-bit word to the specified command register\n \"\"\"\n self.bus.write_word_data(self.address, cmd, value)\n self.log.debug(\n \"write_word: Wrote 0x%04X to command register 0x%02X\" % (\n value, cmd\n )\n )", "def readWord(self):\n \"\"\"\n Reads a word value from the L{ReadData} stream object.\n \n @rtype: int\n @return: The word value read from the L{ReadData} stream.\n \"\"\"\n word = unpack(self.endianness + ('H' if not self.signed else 'h'), self.readAt(self.offset, 2))[0]\n self.offset += 2\n return word", "def readDword(self):\n \"\"\"\n Reads a dword value from the L{ReadData} stream object.\n \n @rtype: int\n @return: The dword value read from the L{ReadData} stream.\n \"\"\"\n dword = unpack(self.endianness + ('L' if not self.signed else 'l'), self.readAt(self.offset, 4))[0]\n self.offset += 4\n return dword", "def read_word(self, offset):\n\t\t\"\"\"\n\t\t.. _read_word:\n\n\t\tRead one word from a device.\n\t\tThe offset is ``device_addr + device_offset``, e.g.::\n\n\t\t\toffset = 3 # third word of the device\n\t\t\toffset += addr2\n\t\t\tb.read_word(offset)\n\t\t\t# reads third word of d2.\n\n\t\tTruncates the value according to ``width``.\n\n\t\tMay raise BUSError_, if the offset exceeds the address space.\n\n\t\t\"\"\"\t\n\t\tself._lock = True\n\t\tif(offset > self.current_max_offset):\n\t\t\traise BUSError(\"Offset({}) exceeds address space of BUS({})\".format(offset, self.current_max_offset)) \n\t\tself.reads += 1\n\t\tfor addresspace, device in self.index.items():\n\t\t\tif(offset in addresspace):\n\t\t\t\tif(self.debug > 5):\n\t\t\t\t\tprint(\"BUS::read({}) | startaddress({})> {}\".format(offset, self.start_addresses[device],\n\t\t\t\t\t\t\t\tdevice.read(offset - self.start_addresses[device])))\n\t\t\t\tself.truncate.setvalue( device.read(offset - self.start_addresses[device]))\n\t\t\t\treturn self.truncate.getvalue()", "def read_uint16(self):\n \"\"\"Read 2 bytes.\"\"\"\n if self.pos + 2 > self.remaining_length:\n return NC.ERR_PROTOCOL\n msb = self.payload[self.pos]\n self.pos += 1\n lsb = self.payload[self.pos]\n self.pos += 1\n \n word = (msb << 8) + lsb\n \n return NC.ERR_SUCCESS, word", "protected int readWord(byte[] data, int offset) {\n int low = data[offset] & 0xff;\n int high = data[offset + 1] & 0xff;\n return high << 8 | low;\n }", "def LDRSH(self, params):\n \"\"\"\n LDRSH Ra, [Rb, Rc]\n\n Load a half word from memory, sign extend, and put into Ra\n Ra, Rb, and Rc must be low registers\n \"\"\"\n # TODO LDRSH cant use immediates\n Ra, Rb, Rc = self.get_three_parameters(self.THREE_PARAMETER_WITH_BRACKETS, params)\n\n self.check_arguments(low_registers=(Ra, Rb, Rc))\n\n def LDRSH_func():\n # TODO does memory read up?\n if (self.register[Rb] + self.register[Rc]) % 2 != 0:\n raise iarm.exceptions.HardFault(\n \"Memory access not half word aligned\\nR{}: {}\\nR{}: {}\".format(Rb, self.register[Rb],\n Rc, self.register[Rc]))\n self.register[Ra] = 0\n for i in range(2):\n self.register[Ra] |= (self.memory[self.register[Rb] + self.register[Rc] + i] << (8 * i))\n if self.register[Ra] & (1 << 15):\n self.register[Ra] |= (0xFFFF << 16)\n\n return LDRSH_func", "def read_reg(self, addr):\n \"\"\" Read memory address in 
target \"\"\"\n # we don't call check_command here because read_reg() function is called\n # when detecting chip type, and the way we check for success (STATUS_BYTES_LENGTH) is different\n # for different chip types (!)\n val, data = self.command(self.ESP_READ_REG, struct.pack('<I', addr))\n if byte(data, 0) != 0:\n raise FatalError.WithResult(\"Failed to read register address %08x\" % addr, data)\n return val", "def read_word_data(self, i2c_addr, register, force=None):\n \"\"\"\n Read a single word (2 bytes) from a given register.\n\n :param i2c_addr: i2c address\n :type i2c_addr: int\n :param register: Register to read\n :type register: int\n :param force:\n :type force: Boolean\n :return: 2-byte word\n :rtype: int\n \"\"\"\n self._set_address(i2c_addr, force=force)\n msg = i2c_smbus_ioctl_data.create(\n read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_WORD_DATA\n )\n ioctl(self.fd, I2C_SMBUS, msg)\n return msg.data.contents.word", "def write_word_data(self, address, register, value):\n \"\"\"\n SMBus Write Word: i2c_smbus_write_word_data()\n ==============================================\n\n This is the opposite of the Read Word operation. 16 bits\n of data is written to a device, to the designated register that is\n specified through the Comm byte.\n\n S Addr Wr [A] Comm [A] DataLow [A] DataHigh [A] P\n\n Functionality flag: I2C_FUNC_SMBUS_WRITE_WORD_DATA\n\n Note the convenience function i2c_smbus_write_word_swapped is\n available for writes where the two data bytes are the other way\n around (not SMBus compliant, but very popular.)\n \"\"\"\n return self.smbus.write_word_data(address, register, value)", "def readQword(self):\n \"\"\"\n Reads a qword value from the L{ReadData} stream object.\n \n @rtype: int\n @return: The qword value read from the L{ReadData} stream.\n \"\"\"\n qword = unpack(self.endianness + ('Q' if not self.signed else 'b'), self.readAt(self.offset, 8))[0]\n self.offset += 8\n return qword" ]
[ 0.7329193949699402, 0.7321426868438721, 0.7232850790023804, 0.6866912245750427, 0.6802865266799927, 0.67705899477005, 0.6747961640357971, 0.668343722820282, 0.6674012541770935, 0.6657078266143799, 0.6654201745986938, 0.6635306477546692 ]
Attempt to connect to an I2C bus
def __connect_to_bus(self, bus):
    """
    Attempt to connect to an I2C bus
    """
    def connect(bus_num):
        try:
            self.log.debug("Attempting to connect to bus %s..." % bus_num)
            self.bus = smbus.SMBus(bus_num)
            self.log.debug("Success")
        except IOError:
            self.log.debug("Failed")
            raise

    # If the bus is not explicitly stated, try 0 and then try 1 if that
    # fails
    if bus is None:
        try:
            connect(0)
            return
        except IOError:
            pass

        try:
            connect(1)
            return
        except IOError:
            raise
    else:
        try:
            connect(bus)
            return
        except IOError:
            raise
[ "def open(self, bus):\n \"\"\"\n Open a given i2c bus.\n\n :param bus: i2c bus number (e.g. 0 or 1)\n :type bus: int\n \"\"\"\n self.fd = os.open(\"/dev/i2c-{}\".format(bus), os.O_RDWR)\n self.funcs = self._get_funcs()", "function(options) {\n options = options || {};\n options.device = options.device || '/dev/i2c-1';\n options.debug = options.debug || false;\n \n var address = BME280.I2C_ADDRESS_A;\n if ('address' in options)\n address = options.address;\n\n this.wire = new i2c(address, options);\n}", "function (i2copts_arg) {\n var i2copts;\n var raspi_check;\n if (typeof i2copts_arg === 'undefined') {\n i2copts = {device: '/dev/i2c-1'};\n raspi_check = raspi_i2c_devname();\n if (raspi_check !== '') {\n //console.log('Raspberry Pi I2C device name is: ', raspi_check);\n i2copts.device = raspi_check;\n }\n }\n else {\n i2copts = i2copts_arg;\n if ((typeof i2copts.device === 'undefined') || (i2copts.device === '')) {\n raspi_check = raspi_i2c_devname();\n if (raspi_check !== '') {\n //console.log('Raspberry Pi I2C device name is: ', raspi_check);\n i2copts.device = raspi_check;\n }\n }\n }\n //console.log('i2c options: ', i2copts);\n this.i2c = new i2c(HTU21D_I2CADDR, i2copts);\n}", "def connect(self, mode='default_reset'):\n \"\"\" Try connecting repeatedly until successful, or giving up \"\"\"\n print('Connecting...', end='')\n sys.stdout.flush()\n last_error = None\n\n try:\n for _ in range(7):\n last_error = self._connect_attempt(mode=mode, esp32r0_delay=False)\n if last_error is None:\n return\n last_error = self._connect_attempt(mode=mode, esp32r0_delay=True)\n if last_error is None:\n return\n finally:\n print('') # end 'Connecting...' line\n raise FatalError('Failed to connect to %s: %s' % (self.CHIP_NAME, last_error))", "async def i2c_config(self, command):\n \"\"\"\n This method initializes the I2c and sets the optional read delay (in microseconds).\n\n It must be called before doing any other i2c operations for a given device.\n :param command: {\"method\": \"i2c_config\", \"params\": [DELAY]}\n :returns: No Return message.\n \"\"\"\n delay = int(command[0])\n await self.core.i2c_config(delay)", "public static void main(String[] args) throws InterruptedException, PlatformAlreadyAssignedException, IOException, UnsupportedBusNumberException {\n\n // create Pi4J console wrapper/helper\n // (This is a utility class to abstract some of the boilerplate code)\n final Console console = new Console();\n\n // print program title/header\n console.title(\"<-- The Pi4J Project -->\", \"I2C Example\");\n\n // allow for user to exit program using CTRL-C\n console.promptForExit();\n\n // fetch all available busses\n try {\n int[] ids = I2CFactory.getBusIds();\n console.println(\"Found follow I2C busses: \" + Arrays.toString(ids));\n } catch (IOException exception) {\n console.println(\"I/O error during fetch of I2C busses occurred\");\n }\n\n // find available busses\n for (int number = I2CBus.BUS_0; number <= I2CBus.BUS_17; ++number) {\n try {\n @SuppressWarnings(\"unused\")\n\t\t\t\tI2CBus bus = I2CFactory.getInstance(number);\n console.println(\"Supported I2C bus \" + number + \" found\");\n } catch (IOException exception) {\n console.println(\"I/O error on I2C bus \" + number + \" occurred\");\n } catch (UnsupportedBusNumberException exception) {\n console.println(\"Unsupported I2C bus \" + number + \" required\");\n }\n }\n\n // get the I2C bus to communicate on\n I2CBus i2c = I2CFactory.getInstance(I2CBus.BUS_1);\n\n // create an I2C device for an individual device on the bus that you 
want to communicate with\n // in this example we will use the default address for the TSL2561 chip which is 0x39.\n I2CDevice device = i2c.getDevice(TSL2561_ADDR);\n\n // next, lets perform am I2C READ operation to the TSL2561 chip\n // we will read the 'ID' register from the chip to get its part number and silicon revision number\n console.println(\"... reading ID register from TSL2561\");\n int response = device.read(TSL2561_REG_ID);\n console.println(\"TSL2561 ID = \" + String.format(\"0x%02x\", response) + \" (should be 0x50)\");\n\n // next we want to start taking light measurements, so we need to power up the sensor\n console.println(\"... powering up TSL2561\");\n device.write(TSL2561_REG_CONTROL, TSL2561_POWER_UP);\n\n // wait while the chip collects data\n Thread.sleep(500);\n\n // now we will perform our first I2C READ operation to retrieve raw integration\n // results from DATA_0 and DATA_1 registers\n console.println(\"... reading DATA registers from TSL2561\");\n int data0 = device.read(TSL2561_REG_DATA_0);\n int data1 = device.read(TSL2561_REG_DATA_1);\n\n // print raw integration results from DATA_0 and DATA_1 registers\n console.println(\"TSL2561 DATA 0 = \" + String.format(\"0x%02x\", data0));\n console.println(\"TSL2561 DATA 1 = \" + String.format(\"0x%02x\", data1));\n\n // before we exit, lets not forget to power down light sensor\n console.println(\"... powering down TSL2561\");\n device.write(TSL2561_REG_CONTROL, TSL2561_POWER_DOWN);\n }", "def _connect_attempt(self, mode='default_reset', esp32r0_delay=False):\n \"\"\" A single connection attempt, with esp32r0 workaround options \"\"\"\n # esp32r0_delay is a workaround for bugs with the most common auto reset\n # circuit and Windows, if the EN pin on the dev board does not have\n # enough capacitance.\n #\n # Newer dev boards shouldn't have this problem (higher value capacitor\n # on the EN pin), and ESP32 revision 1 can't use this workaround as it\n # relies on a silicon bug.\n #\n # Details: https://github.com/espressif/esptool/issues/136\n last_error = None\n\n # If we're doing no_sync, we're likely communicating as a pass through\n # with an intermediate device to the ESP32\n if mode == \"no_reset_no_sync\":\n return last_error\n\n # issue reset-to-bootloader:\n # RTS = either CH_PD/EN or nRESET (both active low = chip in reset\n # DTR = GPIO0 (active low = boot to flasher)\n #\n # DTR & RTS are active low signals,\n # ie True = pin @ 0V, False = pin @ VCC.\n if mode != 'no_reset':\n self._setDTR(False) # IO0=HIGH\n self._setRTS(True) # EN=LOW, chip in reset\n time.sleep(0.1)\n if esp32r0_delay:\n # Some chips are more likely to trigger the esp32r0\n # watchdog reset silicon bug if they're held with EN=LOW\n # for a longer period\n time.sleep(1.2)\n self._setDTR(True) # IO0=LOW\n self._setRTS(False) # EN=HIGH, chip out of reset\n if esp32r0_delay:\n # Sleep longer after reset.\n # This workaround only works on revision 0 ESP32 chips,\n # it exploits a silicon bug spurious watchdog reset.\n time.sleep(0.4) # allow watchdog reset to occur\n time.sleep(0.05)\n self._setDTR(False) # IO0=HIGH, done\n\n for _ in range(5):\n try:\n self.flush_input()\n self._port.flushOutput()\n self.sync()\n return None\n except FatalError as e:\n if esp32r0_delay:\n print('_', end='')\n else:\n print('.', end='')\n sys.stdout.flush()\n time.sleep(0.05)\n last_error = e\n return last_error", "def connect(self, port=None, baud_rate=115200):\n '''\n Parameters\n ----------\n port : str or list-like, optional\n Port (or list of ports) to 
try to connect to as a DMF Control\n Board.\n baud_rate : int, optional\n\n Returns\n -------\n str\n Port DMF control board was connected on.\n\n Raises\n ------\n RuntimeError\n If connection could not be established.\n IOError\n If no ports were specified and Arduino Mega2560 not found on any\n port.\n '''\n if isinstance(port, types.StringTypes):\n ports = [port]\n else:\n ports = port\n\n if not ports:\n # No port was specified.\n #\n # Try ports matching Mega2560 USB vendor/product ID.\n ports = serial_ports().index.tolist()\n if not ports:\n raise IOError(\"Arduino Mega2560 not found on any port.\")\n\n for comport_i in ports:\n if self.connected():\n self.disconnect()\n self.port = None\n self._i2c_devices = {}\n\n # Try to connect to control board on available ports.\n try:\n logger.debug('Try to connect to: %s', comport_i)\n # Explicitly cast `comport_i` to string since `Base.connect`\n # Boost Python binding does not support unicode strings.\n #\n # Fixes [issue 8][issue-8].\n #\n # [issue-8]: https://github.com/wheeler-microfluidics/dmf-control-board-firmware/issues/8\n Base.connect(self, str(comport_i), baud_rate)\n self.port = comport_i\n break\n except BadVGND, exception:\n logger.warning(exception)\n break\n except RuntimeError, exception:\n continue\n else:\n raise RuntimeError('Could not connect to control board on any of '\n 'the following ports: %s' % ports)\n\n name = self.name()\n version = self.hardware_version()\n firmware = self.software_version()\n serial_number_string = \"\"\n try:\n serial_number_string = \", S/N %03d\" % self.serial_number\n except:\n # Firmware does not support `serial_number` attribute.\n pass\n logger.info(\"Connected to %s v%s (Firmware: %s%s)\" %\n (name, version, firmware, serial_number_string))\n\n logger.info(\"Poll control board for series resistors and \"\n \"capacitance values.\")\n\n self._read_calibration_data()\n\n try:\n self.__aref__ = self._aref()\n logger.info(\"Analog reference = %.2f V\" % self.__aref__)\n except:\n # Firmware does not support `__aref__` attribute.\n pass\n\n # Check VGND for both analog channels\n expected = 2 ** 10/2\n v = {}\n channels = [0, 1]\n damaged = []\n for channel in channels:\n try:\n v[channel] = np.mean(self.analog_reads(channel, 10))\n logger.info(\"A%d VGND = %.2f V (%.2f%% of Aref)\", channel,\n self.__aref__ * v[channel] / (2 ** 10), 100.0 *\n v[channel] / (2 ** 10))\n # Make sure that the VGND is close to the expected value;\n # otherwise, the op-amp may be damaged (expected error\n # is <= 10%).\n if np.abs(v[channel] - expected) / expected > .1:\n damaged.append(channel)\n except:\n # Firmware does not support `__aref__` attribute.\n break\n\n # Scan I2C bus to generate list of connected devices.\n self._i2c_scan()\n\n if damaged:\n # At least one of the analog input channels appears to be damaged.\n if len(damaged) == 1:\n msg = \"Analog channel %d appears\" % damaged[0]\n else:\n msg = \"Analog channels %s appear\" % damaged\n raise BadVGND(msg + \" to be damaged. You may need to replace the \"\n \"op-amp on the control board.\")\n\n return self.RETURN_OK", "def i2c_bus_timeout(self):\n \"\"\"I2C bus lock timeout in ms.\n\n Minimum value is 10 ms and the maximum value is 450 ms. Not every value\n can be set and will be rounded to the next possible number. 
You can\n read back the property to get the actual value.\n\n The power-on default value is 200 ms.\n \"\"\"\n ret = api.py_aa_i2c_bus_timeout(self.handle, 0)\n _raise_error_if_negative(ret)\n return ret", "def import_i2c_addr(bus, opt=\"sensors\"):\r\n \"\"\" import_i2c_addresses will return a list of the\r\n currently connected I2C devices.\r\n\r\n This can be used a means to automatically detect\r\n the number of connected sensor modules.\r\n Modules are between int(112) and int(119)\r\n\r\n By default, the method will return a list\r\n of sensor addresses.\r\n \"\"\"\r\n\r\n i2c_list = []\r\n for device in range(128):\r\n try:\r\n bus.read_byte(device)\r\n i2c_list.append((device))\r\n except IOError:\r\n pass\r\n\r\n if opt == \"sensors\":\r\n sensor_list = []\r\n for module in range(112,120):\r\n try:\r\n indx = i2c_list.index(module)\r\n sensor_list.append(module)\r\n except ValueError:\r\n pass\r\n return sensor_list\r\n\r\n else:\r\n return i2c_list", "async def i2c_config(self, read_delay_time=0):\n \"\"\"\n NOTE: THIS METHOD MUST BE CALLED BEFORE ANY I2C REQUEST IS MADE\n This method initializes Firmata for I2c operations.\n\n :param read_delay_time (in microseconds): an optional parameter,\n default is 0\n\n :returns: No Return Value\n \"\"\"\n data = [read_delay_time & 0x7f, (read_delay_time >> 7) & 0x7f]\n await self._send_sysex(PrivateConstants.I2C_CONFIG, data)", "def connect(self):\n \"\"\"Enumerate and connect to the first available interface.\"\"\"\n transport = self._defs.find_device()\n if not transport:\n raise interface.NotFoundError('{} not connected'.format(self))\n\n log.debug('using transport: %s', transport)\n for _ in range(5): # Retry a few times in case of PIN failures\n connection = self._defs.Client(transport=transport,\n ui=self.ui,\n state=self.__class__.cached_state)\n self._verify_version(connection)\n\n try:\n connection.ping(msg='', pin_protection=True) # unlock PIN\n return connection\n except (self._defs.PinException, ValueError) as e:\n log.error('Invalid PIN: %s, retrying...', e)\n continue\n except Exception as e:\n log.exception('ping failed: %s', e)\n connection.close() # so the next HID open() will succeed\n raise" ]
[ 0.7433919310569763, 0.7207122445106506, 0.7194353938102722, 0.713069498538971, 0.7109002470970154, 0.7105693221092224, 0.7105294466018677, 0.7097484469413757, 0.7061082720756531, 0.7052105069160461, 0.6963741779327393, 0.695769727230072 ]
Default user to the current version owner.
def get_formset(self, request, obj=None, **kwargs):
    """
    Default user to the current version owner.
    """
    data = super().get_formset(request, obj, **kwargs)
    if obj:
        data.form.base_fields['user'].initial = request.user.id
    return data
[ "def default_owner\n unless defined? @default_owner\n username = config[:username] ? config[:username].to_s : jdbc_connection.meta_data.user_name\n @default_owner = username.nil? ? nil : username.upcase\n end\n @default_owner\n end", "def get_owner(self, default=True):\n \"\"\"Return (User ID, Group ID) tuple\n\n :param bool default: Whether to return default if not set.\n :rtype: tuple[int, int]\n \"\"\"\n uid, gid = self.owner\n\n if not uid and default:\n uid = os.getuid()\n\n if not gid and default:\n gid = os.getgid()\n\n return uid, gid", "def user(self):\n \"\"\" Return a (deferred) cached Koji user name for this change. \"\"\"\n # Note, do any tasks really have an \"owner_id\", or are they all\n # \"owner\"?\n owner_id = getattr(self.task, 'owner_id', self.task.owner)\n return self.task.connection.cache.user_name(owner_id)", "def user(self, value):\n \"\"\"\n Sets the user on the current request. This is necessary to maintain\n compatibility with django.contrib.auth where the user property is\n set in the login and logout functions.\n\n Note that we also set the user on Django's underlying `HttpRequest`\n instance, ensuring that it is available to any middleware in the stack.\n \"\"\"\n self._user = value\n self._request.user = value", "def owners(self, value):\n \"\"\"Update owners.\n\n DEPRECATED: use ``policy[\"roles/owners\"] = value`` instead.\"\"\"\n warnings.warn(\n _ASSIGNMENT_DEPRECATED_MSG.format(\"owners\", OWNER_ROLE), DeprecationWarning\n )\n self[OWNER_ROLE] = value", "def save_form(self, request, form, change):\n \"\"\"\n Set the object's owner as the logged in user.\n \"\"\"\n obj = form.save(commit=False)\n if obj.user_id is None:\n obj.user = request.user\n return super(OwnableAdmin, self).save_form(request, form, change)", "def set_uid(self):\n \"\"\"Change the user of the running process\"\"\"\n if self.user:\n uid = getpwnam(self.user).pw_uid\n try:\n os.setuid(uid)\n except Exception:\n message = ('Unable to switch ownership to {0}:{1}. 
' +\n 'Did you start the daemon as root?')\n print(message.format(self.user, self.group))\n sys.exit(1)", "def save(self, commit=True):\n \"\"\"\n Overloaded so we can save any new password that is included.\n \"\"\"\n is_new_user = self.instance.pk is None\n\n user = super(UserForm, self).save(commit)\n\n # new users should be made active by default\n if is_new_user:\n user.is_active = True\n\n # if we had a new password set, use it\n new_pass = self.cleaned_data['new_password']\n if new_pass:\n user.set_password(new_pass)\n if commit:\n user.save()\n\n return user", "def get_user(request):\n \"\"\"\n Returns the users from `request` if authentication is enabled, otherwise\n returns the default user (from settings, or as reported by the OS).\n \"\"\"\n if settings.LOCKDOWN and hasattr(request, 'user'):\n if request.user.is_authenticated:\n user = request.user.username\n else:\n # This may happen with crafted requests\n user = ''\n else:\n user = getattr(settings, 'DEFAULT_USER', getpass.getuser())\n\n return user", "def user_default_loader(self, pk):\n \"\"\"Load a User from the database.\"\"\"\n try:\n obj = User.objects.get(pk=pk)\n except User.DoesNotExist:\n return None\n else:\n self.user_default_add_related_pks(obj)\n return obj", "def get_user(\n self, identified_with, identifier, req, resp, resource, uri_kwargs\n ):\n \"\"\"Return default user object.\"\"\"\n return self.user", "private void setDefaultUserAgent() {\n setUserAgent(\n MwsUtl.escapeAppName(applicationName),\n MwsUtl.escapeAppVersion(applicationVersion),\n MwsUtl.escapeAttributeValue(\"Java/\"\n + System.getProperty(\"java.version\") + \"/\"\n + System.getProperty(\"java.class.version\") + \"/\"\n + System.getProperty(\"java.vendor\")),\n\n MwsUtl.escapeAttributeName(\"Platform\"),\n MwsUtl.escapeAttributeValue(\"\" + System.getProperty(\"os.name\")\n + \"/\" + System.getProperty(\"os.arch\") + \"/\"\n + System.getProperty(\"os.version\")),\n\n MwsUtl.escapeAttributeName(\"MWSClientVersion\"),\n MwsUtl.escapeAttributeValue(libraryVersion));\n }" ]
[ 0.7370908856391907, 0.7131034731864929, 0.7005427479743958, 0.6957241892814636, 0.6943697333335876, 0.6932656764984131, 0.6921624541282654, 0.691453218460083, 0.6903442144393921, 0.6879087090492249, 0.6876251101493835, 0.6872333288192749 ]
Function reload Reload the full object to ensure sync
def reload(self):
    """ Function reload
    Reload the full object to ensure sync
    """
    realData = self.load()
    self.clear()
    self.update(realData)
[ "def reload(self):\n \"\"\" Function reload\n Sync the full object\n \"\"\"\n self.load(self.api.get(self.objName, self.key))", "def refresh(self):\n # type: () -> None\n \"\"\"Refresh the object in place.\"\"\"\n src = self._client.reload(self)\n self.__dict__.update(src.__dict__)", "final public function reload() {\n\n\t\t$app = Application::getInstance();\n\t\t$class = get_called_class();\n\t\t\n\t\t// properties to not reset\n\t\t$propertiesToSave = array('keyProperties', 'db', 'loadedFromDb', 'typeList', 'cache', 'errors');\n\t\t\n\t\t// save key from being unset\n\t\t$propertiesToSave = array_merge($propertiesToSave, $this->keyProperties);\n\n\t\t// unset all the other properties\n\t\tforeach ($this as $key => $value) {\n\t\t\tif (!in_array($key, $propertiesToSave)) {\n\t\t\t\tunset($this->$key);\n\t\t\t}\n\t\t}\n\t\t\n\t\t$this->cache = array();\n\t\t$this->errors = array();\n\t\t\n\t\t$this->loadFromDb($this->getSqlKeyValues());\n\t\t\n\t\t// log the reload \n\t\t$app->logEvent('Reloaded ' . $class . ' object with ' . $this->getKeyForEventlog());\n\t\t\n\t}", "function reload(args) {\n if (args !== undefined) {\n if (args.l !== undefined) {\n fs.closeSync(1);\n fs.openSync(args.l, 'a+');\n }\n\n if (args.e !== undefined) {\n fs.closeSync(2);\n fs.openSync(args.e, 'a+');\n }\n }\n}", "function reload() {\n\t\t\tvar params = {}\n\n\t\t\tparams[model.primary] = me[model.primary]\n\t\t\n\t\t\tvar act = new NobleMachine(model.find(params));\n\n\t\t\tact.next(function(newInst) {\n\t\t\t\tif (newInst) {\n\t\t\t\t\tfor (var col in model.columns) {\n\t\t\t\t\t\tme[col] = newInst[col];\n\t\t\t\t\t}\n\n\t\t\t\t\tact.toNext(me);\n\t\t\t\t} else {\n\t\t\t\t\tfor (var key in model.columns) {\n\t\t\t\t\t\tme[key] = undefined\n\t\t\t\t\t}\n\n\t\t\t\t\tact.toNext(null);\n\t\t\t\t}\n\t\t\t});\n\n\t\t\treturn act;\n\t\t}", "function reload() {\n var defer = q.defer();\n\n if (browserSync.active) {\n browserSync.reload();\n defer.resolve();\n } else\n startServer().then(defer.resolve);\n\n return defer.promise;\n}", "def reload(self):\n \"\"\"\n Re-fetches the object from the API, discarding any local changes.\n Returns without doing anything if the object is new.\n \"\"\"\n\n if not self.id:\n return\n reloaded_object = self.__class__.find(self.id)\n self.set_raw(\n reloaded_object.raw,\n reloaded_object.etag\n )", "def reload(self):\n ''' reloads this object so if it was updated in the database it now\n contains the new values'''\n key = self.key()\n redis = type(self).get_redis()\n\n if not redis.exists(key):\n raise ModelNotFoundError('This object has been deleted')\n\n data = debyte_hash(redis.hgetall(key))\n\n for fieldname, field in self.proxy:\n value = field.recover(data, redis)\n\n setattr(\n self,\n fieldname,\n value\n )\n\n return self", "def refresh(self):\n # type: () -> None\n \"\"\"Refresh the object in place.\"\"\"\n from pykechain.client import API_EXTRA_PARAMS\n src = self._client.reload(self, extra_params=API_EXTRA_PARAMS['activity'])\n self.__dict__.update(src.__dict__)", "def reload(self):\n \"\"\"\n Suspend a node\n \"\"\"\n try:\n yield from self.post(\"/reload\", timeout=240)\n except asyncio.TimeoutError:\n raise aiohttp.web.HTTPRequestTimeout(text=\"Timeout when reloading {}\".format(self._name))", "def updateBeforeDecorator(function):\n \"\"\" Function updateAfterDecorator\n Decorator to ensure local dict is sync with remote foreman\n \"\"\"\n def _updateBeforeDecorator(self, *args, **kwargs):\n if self.forceFullSync:\n self.reload()\n return function(self, *args, 
**kwargs)\n return _updateBeforeDecorator", "public function reload()\n {\n if ($this['object'] === 'event') {\n parent::g_reload(self::getUrl($this['id']));\n } else {\n parent::g_reload(self::getUrl());\n }\n }" ]
[ 0.875714898109436, 0.7415770888328552, 0.7407538890838623, 0.7348027229309082, 0.7307147979736328, 0.7304747700691223, 0.7252180576324463, 0.7199515104293823, 0.7190576195716858, 0.7147949934005737, 0.7137467265129089, 0.7131270170211792 ]
Function updateAfterDecorator Decorator to ensure local dict is sync with remote foreman
def updateAfterDecorator(function):
    """ Function updateAfterDecorator
    Decorator to ensure local dict is sync with remote foreman
    """
    def _updateAfterDecorator(self, *args, **kwargs):
        ret = function(self, *args, **kwargs)
        self.reload()
        return ret
    return _updateAfterDecorator
[ "def updateBeforeDecorator(function):\n \"\"\" Function updateAfterDecorator\n Decorator to ensure local dict is sync with remote foreman\n \"\"\"\n def _updateBeforeDecorator(self, *args, **kwargs):\n if self.forceFullSync:\n self.reload()\n return function(self, *args, **kwargs)\n return _updateBeforeDecorator", "def decorate_postinject(func, classkey=None, skipmain=False):\n \"\"\"\n Will perform func with argument self after inject_instance is called on classkey\n\n classkey is some identifying string, tuple, or object\n \"\"\"\n #import utool as ut\n global __CLASSTYPE_POSTINJECT_FUNCS__\n assert classkey is not None, 'must specify classkey'\n #if not (skipmain and ut.get_caller_modname() == '__main__'):\n __CLASSTYPE_POSTINJECT_FUNCS__[classkey].append(func)\n return func", "def after_websocket(self, func: Callable, name: AppOrBlueprintKey=None) -> Callable:\n \"\"\"Add an after websocket function.\n\n This is designed to be used as a decorator. An example usage,\n\n .. code-block:: python\n\n @app.after_websocket\n def func(response):\n return response\n\n Arguments:\n func: The after websocket function itself.\n name: Optional blueprint key name.\n \"\"\"\n handler = ensure_coroutine(func)\n self.after_websocket_funcs[name].append(handler)\n return func", "def update_cache(func):\n \"\"\"Decorate functions that modify the internally stored usernotes JSON.\n\n Ensures that updates are mirrored onto reddit.\n\n Arguments:\n func: the function being decorated\n \"\"\"\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n \"\"\"The wrapper function.\"\"\"\n lazy = kwargs.get('lazy', False)\n kwargs.pop('lazy', None)\n\n if not lazy:\n self.get_json()\n\n ret = func(self, *args, **kwargs)\n\n # If returning a string assume it is an update message\n if isinstance(ret, str) and not lazy:\n self.set_json(ret)\n else:\n return ret\n\n return wrapper", "def after_request(self, func: Callable, name: AppOrBlueprintKey=None) -> Callable:\n \"\"\"Add an after request function.\n\n This is designed to be used as a decorator. An example usage,\n\n .. code-block:: python\n\n @app.after_request\n def func(response):\n return response\n\n Arguments:\n func: The after request function itself.\n name: Optional blueprint key name.\n \"\"\"\n handler = ensure_coroutine(func)\n self.after_request_funcs[name].append(handler)\n return func", "def after_app_request(self, func: Callable) -> Callable:\n \"\"\"Add a after request function to the app.\n\n This is designed to be used as a decorator, and has the same arguments\n as :meth:`~quart.Quart.after_request`. It applies to all requests to the\n app this blueprint is registered on. An example usage,\n\n .. code-block:: python\n\n blueprint = Blueprint(__name__)\n @blueprint.after_app_request\n def after():\n ...\n \"\"\"\n self.record_once(lambda state: state.app.after_request(func))\n return func", "def post_clear_cache(func):\n \"\"\"Decorator for functions that alter the index using the git command. 
This would\n invalidate our possibly existing entries dictionary which is why it must be\n deleted to allow it to be lazily reread later.\n\n :note:\n This decorator will not be required once all functions are implemented\n natively which in fact is possible, but probably not feasible performance wise.\n \"\"\"\n\n @wraps(func)\n def post_clear_cache_if_not_raised(self, *args, **kwargs):\n rval = func(self, *args, **kwargs)\n self._delete_entries_cache()\n return rval\n # END wrapper method\n\n return post_clear_cache_if_not_raised", "def sync(func):\n \"\"\"Decorator to make a task synchronous.\"\"\"\n sync_timeout = 3600 # Match standard synchronous timeout.\n\n def wraps(*args, **kwargs):\n task = func(*args, **kwargs)\n task.wait_for_result(timeout=sync_timeout)\n result = json.loads(task.result)\n return result\n\n return wraps", "def after_websocket(self, func: Callable) -> Callable:\n \"\"\"Add an after websocket function to the Blueprint.\n\n This is designed to be used as a decorator, and has the same arguments\n as :meth:`~quart.Quart.after_websocket`. It applies only to requests that\n are routed to an endpoint in this blueprint. An example usage,\n\n .. code-block:: python\n\n blueprint = Blueprint(__name__)\n @blueprint.after_websocket\n def after():\n ...\n \"\"\"\n self.record_once(lambda state: state.app.after_websocket(func, self.name))\n return func", "def after_request(self, func: Callable) -> Callable:\n \"\"\"Add an after request function to the Blueprint.\n\n This is designed to be used as a decorator, and has the same arguments\n as :meth:`~quart.Quart.after_request`. It applies only to requests that\n are routed to an endpoint in this blueprint. An example usage,\n\n .. code-block:: python\n\n blueprint = Blueprint(__name__)\n @blueprint.after_request\n def after():\n ...\n \"\"\"\n self.record_once(lambda state: state.app.after_request(func, self.name))\n return func", "def auto_update(cls, function):\n \"\"\"\n This class method could be used as decorator on subclasses, it ensures\n update method is called after function execution.\n \"\"\"\n\n def wrapper(self, *args, **kwargs):\n f = function(self, *args, **kwargs)\n self.update()\n return f\n return wrapper", "def good_decorator(decorator):\n \"\"\"This decorator makes decorators behave well wrt to decorated\n functions names, doc, etc.\"\"\" \n def new_decorator(f):\n g = decorator(f)\n g.__name__ = f.__name__\n g.__doc__ = f.__doc__\n g.__dict__.update(f.__dict__)\n return g\n \n new_decorator.__name__ = decorator.__name__\n new_decorator.__doc__ = decorator.__doc__\n new_decorator.__dict__.update(decorator.__dict__)\n\n return new_decorator" ]
[ 0.9027689099311829, 0.68341463804245, 0.677200973033905, 0.6769857406616211, 0.6768900752067566, 0.6674069166183472, 0.6669563055038452, 0.6637515425682068, 0.660396158695221, 0.6586720943450928, 0.657244861125946, 0.6561042070388794 ]
Function updateAfterDecorator Decorator to ensure local dict is sync with remote foreman
def updateBeforeDecorator(function): """ Function updateAfterDecorator Decorator to ensure local dict is sync with remote foreman """ def _updateBeforeDecorator(self, *args, **kwargs): if self.forceFullSync: self.reload() return function(self, *args, **kwargs) return _updateBeforeDecorator
[ "def updateAfterDecorator(function):\n \"\"\" Function updateAfterDecorator\n Decorator to ensure local dict is sync with remote foreman\n \"\"\"\n def _updateAfterDecorator(self, *args, **kwargs):\n ret = function(self, *args, **kwargs)\n self.reload()\n return ret\n return _updateAfterDecorator", "def decorate_postinject(func, classkey=None, skipmain=False):\n \"\"\"\n Will perform func with argument self after inject_instance is called on classkey\n\n classkey is some identifying string, tuple, or object\n \"\"\"\n #import utool as ut\n global __CLASSTYPE_POSTINJECT_FUNCS__\n assert classkey is not None, 'must specify classkey'\n #if not (skipmain and ut.get_caller_modname() == '__main__'):\n __CLASSTYPE_POSTINJECT_FUNCS__[classkey].append(func)\n return func", "def after_websocket(self, func: Callable, name: AppOrBlueprintKey=None) -> Callable:\n \"\"\"Add an after websocket function.\n\n This is designed to be used as a decorator. An example usage,\n\n .. code-block:: python\n\n @app.after_websocket\n def func(response):\n return response\n\n Arguments:\n func: The after websocket function itself.\n name: Optional blueprint key name.\n \"\"\"\n handler = ensure_coroutine(func)\n self.after_websocket_funcs[name].append(handler)\n return func", "def update_cache(func):\n \"\"\"Decorate functions that modify the internally stored usernotes JSON.\n\n Ensures that updates are mirrored onto reddit.\n\n Arguments:\n func: the function being decorated\n \"\"\"\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n \"\"\"The wrapper function.\"\"\"\n lazy = kwargs.get('lazy', False)\n kwargs.pop('lazy', None)\n\n if not lazy:\n self.get_json()\n\n ret = func(self, *args, **kwargs)\n\n # If returning a string assume it is an update message\n if isinstance(ret, str) and not lazy:\n self.set_json(ret)\n else:\n return ret\n\n return wrapper", "def after_request(self, func: Callable, name: AppOrBlueprintKey=None) -> Callable:\n \"\"\"Add an after request function.\n\n This is designed to be used as a decorator. An example usage,\n\n .. code-block:: python\n\n @app.after_request\n def func(response):\n return response\n\n Arguments:\n func: The after request function itself.\n name: Optional blueprint key name.\n \"\"\"\n handler = ensure_coroutine(func)\n self.after_request_funcs[name].append(handler)\n return func", "def after_app_request(self, func: Callable) -> Callable:\n \"\"\"Add a after request function to the app.\n\n This is designed to be used as a decorator, and has the same arguments\n as :meth:`~quart.Quart.after_request`. It applies to all requests to the\n app this blueprint is registered on. An example usage,\n\n .. code-block:: python\n\n blueprint = Blueprint(__name__)\n @blueprint.after_app_request\n def after():\n ...\n \"\"\"\n self.record_once(lambda state: state.app.after_request(func))\n return func", "def post_clear_cache(func):\n \"\"\"Decorator for functions that alter the index using the git command. 
This would\n invalidate our possibly existing entries dictionary which is why it must be\n deleted to allow it to be lazily reread later.\n\n :note:\n This decorator will not be required once all functions are implemented\n natively which in fact is possible, but probably not feasible performance wise.\n \"\"\"\n\n @wraps(func)\n def post_clear_cache_if_not_raised(self, *args, **kwargs):\n rval = func(self, *args, **kwargs)\n self._delete_entries_cache()\n return rval\n # END wrapper method\n\n return post_clear_cache_if_not_raised", "def sync(func):\n \"\"\"Decorator to make a task synchronous.\"\"\"\n sync_timeout = 3600 # Match standard synchronous timeout.\n\n def wraps(*args, **kwargs):\n task = func(*args, **kwargs)\n task.wait_for_result(timeout=sync_timeout)\n result = json.loads(task.result)\n return result\n\n return wraps", "def after_websocket(self, func: Callable) -> Callable:\n \"\"\"Add an after websocket function to the Blueprint.\n\n This is designed to be used as a decorator, and has the same arguments\n as :meth:`~quart.Quart.after_websocket`. It applies only to requests that\n are routed to an endpoint in this blueprint. An example usage,\n\n .. code-block:: python\n\n blueprint = Blueprint(__name__)\n @blueprint.after_websocket\n def after():\n ...\n \"\"\"\n self.record_once(lambda state: state.app.after_websocket(func, self.name))\n return func", "def after_request(self, func: Callable) -> Callable:\n \"\"\"Add an after request function to the Blueprint.\n\n This is designed to be used as a decorator, and has the same arguments\n as :meth:`~quart.Quart.after_request`. It applies only to requests that\n are routed to an endpoint in this blueprint. An example usage,\n\n .. code-block:: python\n\n blueprint = Blueprint(__name__)\n @blueprint.after_request\n def after():\n ...\n \"\"\"\n self.record_once(lambda state: state.app.after_request(func, self.name))\n return func", "def auto_update(cls, function):\n \"\"\"\n This class method could be used as decorator on subclasses, it ensures\n update method is called after function execution.\n \"\"\"\n\n def wrapper(self, *args, **kwargs):\n f = function(self, *args, **kwargs)\n self.update()\n return f\n return wrapper", "def good_decorator(decorator):\n \"\"\"This decorator makes decorators behave well wrt to decorated\n functions names, doc, etc.\"\"\" \n def new_decorator(f):\n g = decorator(f)\n g.__name__ = f.__name__\n g.__doc__ = f.__doc__\n g.__dict__.update(f.__dict__)\n return g\n \n new_decorator.__name__ = decorator.__name__\n new_decorator.__doc__ = decorator.__doc__\n new_decorator.__dict__.update(decorator.__dict__)\n\n return new_decorator" ]
[ 0.940290629863739, 0.68341463804245, 0.677200973033905, 0.6769857406616211, 0.6768900752067566, 0.6674069166183472, 0.6669563055038452, 0.6637515425682068, 0.660396158695221, 0.6586720943450928, 0.657244861125946, 0.6561042070388794 ]
Function load Get the list of all objects @return RETURN: A ForemanItem list
def load(self): """ Function load Get the list of all objects @return RETURN: A ForemanItem list """ return {x[self.index]: self.itemType(self.api, x['id'], self.objName, self.payloadObj, x) for x in self.api.list(self.objName, limit=self.searchLimit)}
[ "def load(self):\n \"\"\" Function load\n Get the list of all objects\n\n @return RETURN: A ForemanItem list\n \"\"\"\n cl_tmp = self.api.list(self.objName, limit=self.searchLimit).values()\n cl = []\n for i in cl_tmp:\n cl.extend(i)\n return {x[self.index]: ItemPuppetClass(self.api, x['id'],\n self.objName, self.payloadObj,\n x)\n for x in cl}", "def load(self, limit=9999):\n \"\"\" Function list\n Get the list of all interfaces\n\n @param key: The targeted object\n @param limit: The limit of items to return\n @return RETURN: A ForemanItem list\n \"\"\"\n subItemList = self.api.list('{}/{}/{}'.format(self.parentObjName,\n self.parentKey,\n self.objName,\n ),\n limit=limit)\n if self.objName == 'puppetclass_ids':\n subItemList = list(map(lambda x: {'id': x}, subItemList))\n if self.objName == 'puppetclasses':\n sil_tmp = subItemList.values()\n subItemList = []\n for i in sil_tmp:\n subItemList.extend(i)\n return {x[self.index]: self.objType(self.api, x['id'],\n self.parentObjName,\n self.parentPayloadObj,\n self.parentKey,\n x)\n for x in subItemList}", "def get_items(self):\n \"\"\"This is out of spec, but required for adaptive assessment parts?\"\"\"\n ils = get_item_lookup_session(runtime=self._runtime, proxy=self._proxy)\n ils.use_federated_bank_view()\n items = []\n if self.has_items():\n for idstr in self._my_map['itemIds']:\n items.append(ils.get_item(Id(idstr)))\n return ItemList(items, runtime=self._runtime, proxy=self._proxy)", "def get_all(self) -> List[Commodity]:\n \"\"\" Loads all non-currency commodities, assuming they are stocks. \"\"\"\n query = (\n self.query\n .order_by(Commodity.namespace, Commodity.mnemonic)\n )\n return query.all()", "def items(self):\n '''gets the property value for items'''\n self.__init()\n items = []\n for item in self._items:\n items.append(\n UserItem(url=\"%s/items/%s\" % (self.location, item['id']),\n securityHandler=self._securityHandler,\n proxy_url=self._proxy_url,\n proxy_port=self._proxy_port,\n initalize=True)\n )\n return items", "def load_all(self):\n \"\"\"\n Force all reports to be loaded and parsed instead of lazy loading on demand.\n \n :returns: ``self`` or ``None`` if load fails\n \"\"\"\n try:\n self.toi.load_all()\n self.rosters.load_all()\n #self.summary.load_all()\n self.play_by_play.load_all()\n self.face_off_comp.load_all()\n return self\n except Exception as e:\n print(e)\n return None", "def items(self):\n \"\"\"\n Loads the items this Installation refers to.\n \"\"\"\n for id in self._items:\n yield self.store.getItemByID(int(id))", "def load_item_for_objective(self):\n \"\"\"if this is the first time for this magic part, find an LO linked item\"\"\"\n mgr = self.my_osid_object._get_provider_manager('ASSESSMENT', local=True)\n if self.my_osid_object._my_map['itemBankId']:\n item_query_session = mgr.get_item_query_session_for_bank(Id(self.my_osid_object._my_map['itemBankId']),\n proxy=self.my_osid_object._proxy)\n else:\n item_query_session = mgr.get_item_query_session(proxy=self.my_osid_object._proxy)\n item_query_session.use_federated_bank_view()\n item_query = item_query_session.get_item_query()\n for objective_id_str in self.my_osid_object._my_map['learningObjectiveIds']:\n item_query.match_learning_objective_id(Id(objective_id_str), True)\n item_list = list(item_query_session.get_items_by_query(item_query))\n # Let's query all takens and their children sections for questions, to\n # remove seen ones\n taking_agent_id = self._assessment_section._assessment_taken.taking_agent_id\n atqs = 
mgr.get_assessment_taken_query_session(proxy=self.my_osid_object._proxy)\n atqs.use_federated_bank_view()\n querier = atqs.get_assessment_taken_query()\n querier.match_taking_agent_id(taking_agent_id, match=True)\n # let's seed this with the current section's questions\n seen_items = [item_id for item_id in self._assessment_section._item_id_list]\n taken_ids = [str(t.ident)\n for t in atqs.get_assessments_taken_by_query(querier)]\n # Try to find the questions directly via Mongo query -- don't do\n # for section in taken._get_assessment_sections():\n # seen_items += [question['itemId'] for question in section._my_map['questions']]\n # because standing up all the sections is wasteful\n collection = JSONClientValidated('assessment',\n collection='AssessmentSection',\n runtime=self.my_osid_object._runtime)\n results = collection.find({\"assessmentTakenId\": {\"$in\": taken_ids}})\n for section in results:\n if 'questions' in section:\n seen_items += [question['itemId'] for question in section['questions']]\n unseen_item_id = None\n # need to randomly shuffle this item_list\n shuffle(item_list)\n for item in item_list:\n if str(item.ident) not in seen_items:\n unseen_item_id = item.get_id()\n break\n if unseen_item_id is not None:\n self.my_osid_object._my_map['itemIds'] = [str(unseen_item_id)]\n elif self.my_osid_object._my_map['allowRepeatItems']:\n if len(item_list) > 0:\n self.my_osid_object._my_map['itemIds'] = [str(item_list[0].ident)]\n else:\n self.my_osid_object._my_map['itemIds'] = [] # don't put '' here, it will break when it tries to find an item with id ''\n else:\n self.my_osid_object._my_map['itemIds'] = []", "def get_items(self):\n \"\"\"\n Return the item models associated with this Publish group.\n \"\"\"\n from .layers import Layer\n\n # no expansion support, just URLs\n results = []\n for url in self.items:\n if '/layers/' in url:\n r = self._client.request('GET', url)\n results.append(self._client.get_manager(Layer).create_from_result(r.json()))\n else:\n raise NotImplementedError(\"No support for %s\" % url)\n return results", "def GetItems(self):\n \"\"\"Updates self.name and self.items and returns (self.name, self.items)\"\"\"\n url = 'http://min.us/api/GetItems/' + 'm' + self.reader_id\n response = _doget(url)\n\n self.name = response[\"GALLERY_TITLE\"]\n\n # To get the item id, we have to take the file name from the URL\n # We also need to get rid of any file extension if there is any\n self.items = [a[16:].split('.')[0] for a in response[\"ITEMS_GALLERY\"]]\n\n return (self.name, self.items)", "def get_all(self):\n \"\"\"Gets all items in file.\"\"\"\n logger.debug('Fetching items. Path: {data_file}'.format(\n data_file=self.data_file\n ))\n\n return load_file(self.data_file)", "def get_all(self, force_download=False):\n \"\"\" Retrieve the metadata for all items in this list from the server,\n as Item objects\n\n :rtype: List\n :returns: a List of the corresponding Item objects\n :type force_download: Boolean\n :param force_download: True to download from the server\n regardless of the cache's contents\n\n :raises: APIError if the API request is not successful\n\n\n \"\"\"\n cl = self.client\n return [cl.get_item(item, force_download) for item in self.item_urls]" ]
[ 0.8650457859039307, 0.8018741607666016, 0.7024894952774048, 0.6753305196762085, 0.6598423719406128, 0.6597557663917542, 0.655421257019043, 0.653819739818573, 0.6532294154167175, 0.6501277089118958, 0.648154616355896, 0.6473885774612427 ]
Function checkAndCreate Check if an object exists and create it if not @param key: The targeted object @param payload: The targeted object description @return RETURN: The id of the object
def checkAndCreate(self, key, payload): """ Function checkAndCreate Check if an object exists and create it if not @param key: The targeted object @param payload: The targeted object description @return RETURN: The id of the object """ if key not in self: self[key] = payload return self[key]['id']
[ "def checkAndCreate(self, key, payload):\n \"\"\" Function checkAndCreate\n Check if an object exists and create it if not\n\n @param key: The targeted object\n @param payload: The targeted object description\n @return RETURN: The id of the object\n \"\"\"\n if key not in self:\n if 'templates' in payload:\n templates = payload.pop('templates')\n self[key] = payload\n self.reload()\n return self[key]['id']", "def checkAndCreate(self, key, payload, domainId):\n \"\"\" Function checkAndCreate\n Check if a subnet exists and create it if not\n\n @param key: The targeted subnet\n @param payload: The targeted subnet description\n @param domainId: The domainId to be attached wiuth the subnet\n @return RETURN: The id of the subnet\n \"\"\"\n if key not in self:\n self[key] = payload\n oid = self[key]['id']\n if not oid:\n return False\n #~ Ensure subnet contains the domain\n subnetDomainIds = []\n for domain in self[key]['domains']:\n subnetDomainIds.append(domain['id'])\n if domainId not in subnetDomainIds:\n subnetDomainIds.append(domainId)\n self[key][\"domain_ids\"] = subnetDomainIds\n if len(self[key][\"domains\"]) is not len(subnetDomainIds):\n return False\n return oid", "def checkAndCreate(self, key, payload, osIds):\n \"\"\" Function checkAndCreate\n Check if an architectures exists and create it if not\n\n @param key: The targeted architectures\n @param payload: The targeted architectures description\n @param osIds: The list of os ids liked with this architecture\n @return RETURN: The id of the object\n \"\"\"\n if key not in self:\n self[key] = payload\n oid = self[key]['id']\n if not oid:\n return False\n #~ To be sure the OS list is good, we ensure our os are in the list\n for os in self[key]['operatingsystems']:\n osIds.add(os['id'])\n self[key][\"operatingsystem_ids\"] = list(osIds)\n if (len(self[key]['operatingsystems']) is not len(osIds)):\n return False\n return oid", "def checkAndCreate(self, key, payload,\n hostgroupConf,\n hostgroupParent,\n puppetClassesId):\n \"\"\" Function checkAndCreate\n check And Create procedure for an hostgroup\n - check the hostgroup is not existing\n - create the hostgroup\n - Add puppet classes from puppetClassesId\n - Add params from hostgroupConf\n\n @param key: The hostgroup name or ID\n @param payload: The description of the hostgroup\n @param hostgroupConf: The configuration of the host group from the\n foreman.conf\n @param hostgroupParent: The id of the parent hostgroup\n @param puppetClassesId: The dict of puppet classes ids in foreman\n @return RETURN: The ItemHostsGroup object of an host\n \"\"\"\n if key not in self:\n self[key] = payload\n oid = self[key]['id']\n if not oid:\n return False\n\n # Create Hostgroup classes\n if 'classes' in hostgroupConf.keys():\n classList = list()\n for c in hostgroupConf['classes']:\n classList.append(puppetClassesId[c])\n if not self[key].checkAndCreateClasses(classList):\n print(\"Failed in classes\")\n return False\n\n # Set params\n if 'params' in hostgroupConf.keys():\n if not self[key].checkAndCreateParams(hostgroupConf['params']):\n print(\"Failed in params\")\n return False\n\n return oid", "def make_create_payload(**kwargs):\n \"\"\"Create payload for upload/check-upload operations.\"\"\"\n payload = {}\n # Add non-empty arguments\n for k, v in six.iteritems(kwargs):\n if v is not None:\n payload[k] = v\n\n return payload", "def create(self, obj, payload, async=False):\n \"\"\" Function create\n Create an new object\n\n @param obj: object name ('hosts', 'puppetclasses'...)\n @param payload: 
the dict of the payload\n @param async: should this request be async, if true use\n return.result() to get the response\n @return RETURN: the server response\n \"\"\"\n self.url = self.base_url + obj\n self.method = 'POST'\n self.payload = json.dumps(payload)\n if async:\n self.method = 'POST(Async)'\n session = FuturesSession()\n self.resp = session.post(url=self.url, auth=self.auth,\n headers=self.headers, data=self.payload,\n cert=self.ca_cert)\n return self.resp\n else:\n self.resp = requests.post(url=self.url, auth=self.auth,\n headers=self.headers,\n data=self.payload, cert=self.ca_cert)\n return self.__process_resp__(obj)", "def insert(self, payload):\n \"\"\"Inserts a new record with the payload passed as an argument\n\n :param payload: The record to create (dict)\n :return:\n - Created record\n \"\"\"\n response = self.session.post(self._get_table_url(), data=json.dumps(payload))\n return self._get_content(response)", "function stateAndPayloadAreValid(config, state, payload) {\n\n // ensure that the instances array exists\n if (!Array.isArray(state[config.stateKey])) {\n console.error('State does not contain an \"' + config.stateKey + '\" array.');\n return false;\n }\n\n // ensure that the payload contains an id\n if ((typeof payload === 'undefined' ? 'undefined' : _typeof(payload)) !== 'object' || typeof payload[config.instanceKey] === 'undefined') {\n console.error('Mutation payloads must be an object with an \"' + config.instanceKey + '\" property.');\n return false;\n }\n\n return true;\n }", "def validate(self, payload, required=None, strict=None):\n '''\n Validates a given JSON payload according to the rules defiined for all\n the fields/keys in the sub-class.\n\n :param dict payload: deserialized JSON object.\n :param bool required: if every field/key is required and must be\n present in the payload.\n :param bool strict: if :py:meth:`validate` should detect and report any\n fields/keys that are present in the payload but not\n defined in the sub-class.\n\n :returns: a tuple of two items. First item is a :class:`bool`\n indicating if the payload was successfully validated and the\n second item is ``None``. 
If the payload was not valid, then\n then the second item is a :py:class:`dict` of errors.\n '''\n\n # replace datatypes.Function.func if not already replaced\n self._replace_string_args()\n\n required = required if required is not None else self.required\n strict = strict if strict is not None else self.strict\n\n errors = PayloadErrors()\n fields = copy.deepcopy(list(self._fields))\n\n for key, value in iteritems(payload):\n if key not in self._fields:\n if strict:\n errors[key].append(self.strict_error)\n else:\n getattr(self, key).test(key, value, payload=payload,\n errors=errors[key])\n\n # Remove the key that has been checked\n fields.remove(key)\n\n for field in fields:\n rule = getattr(self, field)\n\n if rule.required is None:\n required = required\n else:\n required = rule.required\n\n if required:\n errors[field].append(self.required_error)\n elif isinstance(rule, Function):\n rule.test(field, payload.get(field, None),\n payload=payload, errors=errors[field])\n\n return (False, errors.to_dict()) if errors.has_errors() else (True,\n None)", "def handle_create_payload(\n entity: BaseEntity,\n author_user: UserType,\n protocol_name: str,\n to_user_key: RsaKey = None,\n parent_user: UserType = None,\n) -> str:\n \"\"\"Create a payload with the given protocol.\n\n Any given user arguments must have ``private_key`` and ``handle`` attributes.\n\n :arg entity: Entity object to send. Can be a base entity or a protocol specific one.\n :arg author_user: User authoring the object.\n :arg protocol_name: Protocol to create payload for.\n :arg to_user_key: Public key of user private payload is being sent to, required for private payloads.\n :arg parent_user: (Optional) User object of the parent object, if there is one. This must be given for the\n Diaspora protocol if a parent object exists, so that a proper ``parent_author_signature`` can\n be generated. If given, the payload will be sent as this user.\n :returns: Built payload message (str)\n \"\"\"\n mappers = importlib.import_module(f\"federation.entities.{protocol_name}.mappers\")\n protocol = importlib.import_module(f\"federation.protocols.{protocol_name}.protocol\")\n protocol = protocol.Protocol()\n outbound_entity = mappers.get_outbound_entity(entity, author_user.private_key)\n if parent_user:\n outbound_entity.sign_with_parent(parent_user.private_key)\n send_as_user = parent_user if parent_user else author_user\n data = protocol.build_send(entity=outbound_entity, from_user=send_as_user, to_user_key=to_user_key)\n return data", "def get_or_create(cls, key, defaults={}):\n '''\n A port of functionality from the Django ORM. Defaults can be passed in\n if creating a new document is necessary. Keyword args are used to\n lookup the document. 
Returns a tuple of (object, created), where object\n is the retrieved or created object and created is a boolean specifying\n whether a new object was created.\n '''\n instance = cls.get(key)\n if not instance:\n created = True\n data = dict(key=key)\n data.update(defaults)\n # Do an upsert here instead of a straight create to avoid a race\n # condition with another instance creating the same record at\n # nearly the same time.\n instance = cls.update(data, data, upsert=True)\n else:\n created = False\n return instance, created", "def _add(app, endpoint, payload):\n '''\n POST a payload\n '''\n nb = _nb_obj(auth_required=True)\n try:\n return getattr(getattr(nb, app), endpoint).create(**payload)\n except RequestError as e:\n log.error('%s, %s, %s', e.req.request.headers, e.request_body, e.error)\n return False" ]
[ 0.9320217370986938, 0.7752333283424377, 0.7522791028022766, 0.7026407718658447, 0.674565315246582, 0.6636437773704529, 0.6603406667709351, 0.6586929559707642, 0.653120756149292, 0.653052031993866, 0.6461232304573059, 0.6450947523117065 ]
Class decorator stores all calls into list. Can be used until .invalidate() is called. :return: decorated class
def operations(): """ Class decorator stores all calls into list. Can be used until .invalidate() is called. :return: decorated class """ def decorator(func): @wraps(func) def wrapped_func(*args, **kwargs): self = args[0] assert self.__can_use, "User operation queue only in 'with' block" def defaults_dict(): f_args, varargs, keywords, defaults = inspect.getargspec(func) defaults = defaults or [] return dict(zip(f_args[-len(defaults)+len(args[1:]):], defaults[len(args[1:]):])) route_args = dict(defaults_dict().items() + kwargs.items()) func(*args, **kwargs) self.operations.append((func.__name__, args[1:], route_args, )) return wrapped_func def decorate(clazz): for attr in clazz.__dict__: if callable(getattr(clazz, attr)): setattr(clazz, attr, decorator(getattr(clazz, attr))) def __init__(self): # simple parameter-less constructor self.operations = [] self.__can_use = True def invalidate(self): self.__can_use = False clazz.__init__ = __init__ clazz.invalidate = invalidate return clazz return decorate
[ "def class_register(cls):\n \"\"\"Class decorator that allows to map LSP method names to class methods.\"\"\"\n cls.handler_registry = {}\n cls.sender_registry = {}\n for method_name in dir(cls):\n method = getattr(cls, method_name)\n if hasattr(method, '_handle'):\n cls.handler_registry.update({method._handle: method_name})\n if hasattr(method, '_sends'):\n cls.sender_registry.update({method._sends: method_name})\n return cls", "def memoize(Class, *args, **kwargs):\n '''\n Memoize/record a function inside this vlermv. ::\n\n @Vlermv.cache('~/.http')\n def get(url):\n return requests.get(url, auth = ('username', 'password'))\n\n The args and kwargs get passed to the Vlermv with some slight changes.\n Here are the changes.\n\n First, the default ``key_transformer`` is the tuple key_transformer\n rather than the simple key_transformer.\n\n Second, it is valid for cache to be called without arguments.\n Vlermv would ordinarily fail if no arguments were passed to it.\n If you pass no arguments to cache, the Vlermv directory argument\n (the one required argument) will be set to the name of the function.\n\n Third, you are more likely to use the ``cache_exceptions`` keyword\n argument; see :py:class:`~vlermv.Vlermv` for documentation on that.\n '''\n def decorator(func):\n if len(args) == 0:\n if hasattr(func, '__name__'): \n _args = (func.__name__,)\n else:\n raise ValueError('You must specify the location to store the vlermv.')\n else:\n _args = args\n v = Class(*_args, **kwargs)\n v.func = func\n return v\n return decorator", "def immutable(cls):\n '''\n The @immutable decorator makes an abstract type out of the decorated class that overloads\n __new__ to create interesting behavior consistent with immutable data types. The following\n decorators may be used inside of the decorated class to define immutable behavior:\n * @value indicates that the following function is really a value that should be calculated\n and stored as a value of its arguments. The arguments should not start with self and \n should instead be named after other values from which it is calculated. If there are no\n arguments, then the returned value is a constant. Note that self is not an argument to this\n function.\n * @param indicates that the following function is really a variable that should be checked\n by the decorated function. Params are settable as long as the immutable object is transient.\n The check function decorated by @param() is actually a transformation function that is\n called every time the parameter gets changed; the actual value given to which the param is\n set is the value returned by this function. The function may raise exceptions to flag\n errors. Note that self is not an argument to this function. All parameters are required for\n an instantiated object; this means that all parameters must either be provided as values or\n options of implementing classes or must be assigned in the constructor.\n * @option(x) indicates that the following function is really an optional value; the syntax and\n behavior of @option is identical to @param except that @option(x) indicates that, if not\n provided, the parameter should take the value x, while @param indicates that an exception\n should be raised.\n * @require indicates that the following function is a requirement that should be run on the\n given arguments (which should name params/options/values of the class). Note that self is\n an argument to the function. 
If the function yields a truthy value, then the requirement is\n considered to be met; if it raises an exception or yields a non-trithy value (like None or\n []), then the requirement is not met and the object is considered invalid.\n In immutable objects, the functions defined by @require decorators are not instantiated; they\n may, however, be overloaded and called back to the parent class.\n '''\n # Annotate the class!\n cls = _annotate_imm(cls)\n # The attributes we want to make sure get set:\n auto_members = (('__getattribute__', _imm_getattribute),\n ('__setattr__', _imm_setattr),\n ('__delattr__', _imm_delattr),\n ('__copy__', _imm__copy__),\n ('__deepcopy__', _imm__copy__))\n for (name, fn) in auto_members: setattr(cls, name, _method_type(fn, cls))\n # __new__ is special...\n @staticmethod\n def _custom_new(c, *args, **kwargs): return _imm_new(c)\n setattr(cls, '__new__', _custom_new)\n # and the attributes we set only if they haven't been specified\n optl_members = (('is_persistent', _imm_is_persist),\n ('is_transient', _imm_is_trans),\n ('persist', imm_persist),\n ('transient', imm_transient),\n ('copy', imm_copy),\n ('params', imm_params),\n ('values', imm_values),\n ('todict', imm_dict))\n for (name, fn) in optl_members:\n if not hasattr(cls, name):\n setattr(cls, name, _method_type(fn, cls))\n # and the attributes we set if they're not overloaded from object\n initfn = _imm_default_init if cls.__init__ is object.__init__ else cls.__init__\n # we set this up so that it can monitor entry and exit from this specific class's __init__()\n def _init_wrapper(imm, *args, **kwargs):\n # call the init normally...\n initfn(imm, *args, **kwargs)\n # If we're still initializing after running the constructor, we need to switch to\n # transient\n if type(imm) is cls and _imm_is_init(imm): _imm_init_to_trans(imm)\n # Okay, all checks passed!\n setattr(cls, '__init__', _method_type(_init_wrapper, cls))\n dflt_members = (('__dir__', _imm_dir),\n ('__repr__', _imm_repr),\n ('__hash__', _imm_hash))\n for (name, fn) in dflt_members:\n if not hasattr(cls, name) or not hasattr(object, name) or \\\n getattr(cls, name) is getattr(object, name):\n setattr(cls, name, _method_type(fn, cls))\n # Done!\n return cls", "def call_list(self):\n \"\"\"For a call object that represents multiple calls, `call_list`\n returns a list of all the intermediate calls as well as the\n final call.\"\"\"\n vals = []\n thing = self\n while thing is not None:\n if thing.from_kall:\n vals.append(thing)\n thing = thing.parent\n return _CallList(reversed(vals))", "def _decorator(store_name, store_values):\n \"\"\"\n Return a class decorator that:\n\n 1) Defines a new class method, `wait_for_js`\n 2) Defines a new class list variable, `store_name` and adds\n `store_values` to the list.\n \"\"\"\n def decorator(clz): # pylint: disable=missing-docstring\n\n # Add a `wait_for_js` method to the class\n if not hasattr(clz, 'wait_for_js'):\n setattr(clz, 'wait_for_js', _wait_for_js) # pylint: disable= literal-used-as-attribute\n\n # Store the RequireJS module names in the class\n if not hasattr(clz, store_name):\n setattr(clz, store_name, set())\n\n getattr(clz, store_name).update(store_values)\n return clz\n\n return decorator", "def class_register(cls):\n \"\"\"Class decorator that allows to map LSP method names to class methods.\"\"\"\n cls.handler_registry = {}\n for method_name in dir(cls):\n method = getattr(cls, method_name)\n if hasattr(method, '_handle'):\n cls.handler_registry.update({method._handle: method_name})\n return 
cls", "def attr_item_call_auto_cache(func):\n \"\"\"\n Decorator for a a single positional argument function to cache\n its results and to make ``f(\"a\") == f[\"a\"] == f.a``.\n \"\"\"\n def __missing__(self, key):\n result = self[key] = func(key)\n return result\n wrapper = type(snake2ucamel(func.__name__), (dict,), {\n \"__missing__\": __missing__,\n \"__call__\": dict.__getitem__,\n \"__getattr__\": dict.__getitem__,\n \"__doc__\": func.__doc__, # Class docstring can't be updated afterwards\n \"__module__\": func.__module__,\n })()\n for k, v in vars(func).items():\n setattr(wrapper, k, v)\n return wrapper", "def mutable(function):\n '''Combined decorator of guarded.mutable and journal.recorded.\n\n When called from outside a recording context, it returns a Deferred.\n When called from inside a recording context, it returns a L{fiber.Fiber}\n or any synchronous value.\n\n Same as using::\n\n @journal.recorded()\n @guard.mutable\n def spam(self, state, some, args):\n pass\n '''\n\n guard_wrapper = guard.mutable(function)\n\n # Register the function\n canonical = reflect.class_canonical_name(3)\n annotate.injectClassCallback(\"recorded\", 4,\n \"_register_recorded_call\", guard_wrapper,\n class_canonical_name=canonical)\n\n def wrapper(self, *args, **kwargs):\n recorder = IRecorder(self)\n return recorder.call(guard_wrapper, args, kwargs)\n\n return wrapper", "def all_functions_called(self):\n '''\n list(Function): List of functions reachable from the contract (include super)\n '''\n all_calls = [f.all_internal_calls() for f in self.functions + self.modifiers] + [self.functions + self.modifiers]\n all_calls = [item for sublist in all_calls for item in sublist] + self.functions\n all_calls = list(set(all_calls))\n\n all_constructors = [c.constructor for c in self.inheritance]\n all_constructors = list(set([c for c in all_constructors if c]))\n\n all_calls = set(all_calls+all_constructors)\n\n return [c for c in all_calls if isinstance(c, Function)]", "def auto_memoize(func):\n \"\"\"\n Based on django.util.functional.memoize. Automatically memoizes instace methods for the lifespan of an object.\n Only works with methods taking non-keword arguments. Note that the args to the function must be usable as\n dictionary keys. Also, the first argument MUST be self. 
This decorator will not work for functions or class\n methods, only object methods.\n \"\"\"\n\n @wraps(func)\n def wrapper(*args):\n inst = args[0]\n inst._memoized_values = getattr(inst, '_memoized_values', {})\n key = (func, args[1:])\n if key not in inst._memoized_values:\n inst._memoized_values[key] = func(*args)\n return inst._memoized_values[key]\n return wrapper", "def froze_it(cls):\r\n \"\"\"\r\n Decorator to prevent from creating attributes in the object ouside __init__().\r\n\r\n This decorator must be applied to the final class (doesn't work if a\r\n decorated class is inherited).\r\n\r\n Yoann's answer at http://stackoverflow.com/questions/3603502\r\n \"\"\"\r\n cls._frozen = False\r\n\r\n def frozensetattr(self, key, value):\r\n if self._frozen and not hasattr(self, key):\r\n raise AttributeError(\"Attribute '{}' of class '{}' does not exist!\"\r\n .format(key, cls.__name__))\r\n else:\r\n object.__setattr__(self, key, value)\r\n\r\n def init_decorator(func):\r\n @wraps(func)\r\n def wrapper(self, *args, **kwargs):\r\n func(self, *args, **kwargs)\r\n self._frozen = True\r\n return wrapper\r\n\r\n cls.__setattr__ = frozensetattr\r\n cls.__init__ = init_decorator(cls.__init__)\r\n\r\n return cls", "def add_properties_callbacks(cls):\n \"\"\"Class decorator to add change notifications to builtin containers\"\"\"\n for name in cls._mutators: #pylint: disable=protected-access\n if not hasattr(cls, name):\n continue\n setattr(cls, name, properties_mutator(cls, name))\n for name in cls._operators: #pylint: disable=protected-access\n if not hasattr(cls, name):\n continue\n setattr(cls, name, properties_operator(cls, name))\n for name in cls._ioperators: #pylint: disable=protected-access\n if not hasattr(cls, name):\n continue\n setattr(cls, name, properties_mutator(cls, name, True))\n return cls" ]
[ 0.6748773455619812, 0.672943651676178, 0.663416862487793, 0.6629220843315125, 0.6556463837623596, 0.6541969180107117, 0.6535748243331909, 0.6490762829780579, 0.6490561962127686, 0.6469916701316833, 0.6456177830696106, 0.6455515027046204 ]
Process actions in the publishing schedule. Returns the number of actions processed.
def process_actions(action_ids=None): """ Process actions in the publishing schedule. Returns the number of actions processed. """ actions_taken = 0 action_list = PublishAction.objects.prefetch_related( 'content_object', ).filter( scheduled_time__lte=timezone.now(), ) if action_ids is not None: action_list = action_list.filter(id__in=action_ids) for action in action_list: action.process_action() action.delete() actions_taken += 1 return actions_taken
[ "def process_action(self):\n \"\"\"\n Process the action and update the related object, returns a boolean if a change is made.\n \"\"\"\n if self.publish_version == self.UNPUBLISH_CHOICE:\n actioned = self._unpublish()\n else:\n actioned = self._publish()\n\n # Only log if an action was actually taken\n if actioned:\n self._log_action()\n\n return actioned", "def process_action(self, request, queryset):\n \"\"\"\n Publishes the selected objects by passing the value of \\\n 'when' to the object's publish method. The object's \\\n `purge_archives` method is also called to limit the number \\\n of old items that we keep around. The action is logged as \\\n either 'published' or 'scheduled' depending on the value of \\\n 'when', and the user is notified with a message.\n\n Returns a 'render redirect' to the result of the \\\n `get_done_url` method.\n \"\"\"\n form = self.form(request.POST)\n if form.is_valid():\n when = form.cleaned_data.get('when')\n count = 0\n for obj in queryset:\n count += 1\n obj.publish(user=request.user, when=when)\n obj.purge_archives()\n object_url = self.get_object_url(obj)\n if obj.state == obj.PUBLISHED:\n self.log_action(\n obj, CMSLog.PUBLISH, url=object_url)\n else:\n self.log_action(\n obj, CMSLog.SCHEDULE, url=object_url)\n message = \"%s objects published.\" % count\n self.write_message(message=message)\n\n return self.render(request, redirect_url= self.get_done_url(),\n message=message,\n collect_render_data=False)\n return self.render(request, queryset=queryset, publish_form=form, action='Publish')", "def _serviceActions(self) -> int:\n \"\"\"\n Run all pending actions in the action queue.\n\n :return: number of actions executed.\n \"\"\"\n if self.aqStash:\n tm = time.perf_counter()\n if tm > self.aqNextCheck:\n earliest = float('inf')\n for d in list(self.aqStash):\n nxt, action = d\n if tm > nxt:\n self.actionQueue.appendleft(action)\n self.aqStash.remove(d)\n if nxt < earliest:\n earliest = nxt\n self.aqNextCheck = earliest\n count = len(self.actionQueue)\n while self.actionQueue:\n action, aid = self.actionQueue.popleft()\n assert action in self.scheduled\n if aid in self.scheduled[action]:\n self.scheduled[action].remove(aid)\n logger.trace(\"{} running action {} with id {}\".\n format(self, get_func_name(action), aid))\n action()\n else:\n logger.trace(\"{} not running cancelled action {} with id {}\".\n format(self, get_func_name(action), aid))\n return count", "def proceedings(self, key, value):\n \"\"\"Populate the ``proceedings`` key.\n\n Also populates the ``refereed`` key through side effects.\n \"\"\"\n proceedings = self.get('proceedings')\n refereed = self.get('refereed')\n\n if not proceedings:\n normalized_a_values = [el.upper() for el in force_list(value.get('a'))]\n if 'PROCEEDINGS' in normalized_a_values:\n proceedings = True\n\n if not refereed:\n normalized_a_values = [el.upper() for el in force_list(value.get('a'))]\n if 'PEER REVIEW' in normalized_a_values:\n refereed = True\n elif 'NON-PUBLISHED' in normalized_a_values:\n refereed = False\n\n self['refereed'] = refereed\n return proceedings", "def process_actions(actions):\n \"\"\"Process queue actions.\"\"\"\n queue = current_app.config['INDEXER_MQ_QUEUE']\n with establish_connection() as c:\n q = queue(c)\n for action in actions:\n q = action(q)", "def publish(self):\n \"\"\"\n Iterate over the scheduler collections and apply any actions found\n \"\"\"\n\n try:\n for collection in self.settings.get(\"scheduler\").get(\"collections\"):\n yield 
self.publish_for_collection(collection)\n except Exception as ex:\n self.logger.error(ex)", "def process_actions(self, actions):\n \"\"\"Process the actions we want to take\n\n Args:\n actions (`list`): List of actions we want to take\n\n Returns:\n `list` of notifications\n \"\"\"\n notices = {}\n notification_contacts = {}\n for action in actions:\n resource = action['resource']\n action_status = ActionStatus.SUCCEED\n\n try:\n if action['action'] == AuditActions.REMOVE:\n action_status = self.process_action(\n resource,\n AuditActions.REMOVE\n )\n if action_status == ActionStatus.SUCCEED:\n db.session.delete(action['issue'].issue)\n\n elif action['action'] == AuditActions.STOP:\n action_status = self.process_action(\n resource,\n AuditActions.STOP\n )\n if action_status == ActionStatus.SUCCEED:\n action['issue'].update({\n 'missing_tags': action['missing_tags'],\n 'notes': action['notes'],\n 'last_alert': action['last_alert'],\n 'state': action['action']\n })\n\n elif action['action'] == AuditActions.FIXED:\n db.session.delete(action['issue'].issue)\n\n elif action['action'] == AuditActions.ALERT:\n action['issue'].update({\n 'missing_tags': action['missing_tags'],\n 'notes': action['notes'],\n 'last_alert': action['last_alert'],\n 'state': action['action']\n })\n\n db.session.commit()\n\n if action_status == ActionStatus.SUCCEED:\n for owner in [\n dict(t) for t in {tuple(d.items()) for d in (action['owners'] + self.permanent_emails)}\n ]:\n if owner['value'] not in notification_contacts:\n contact = NotificationContact(type=owner['type'], value=owner['value'])\n notification_contacts[owner['value']] = contact\n notices[contact] = {\n 'fixed': [],\n 'not_fixed': []\n }\n else:\n contact = notification_contacts[owner['value']]\n\n if action['action'] == AuditActions.FIXED:\n notices[contact]['fixed'].append(action)\n else:\n notices[contact]['not_fixed'].append(action)\n except Exception as ex:\n self.log.exception('Unexpected error while processing resource {}/{}/{}/{}'.format(\n action['resource'].account.account_name,\n action['resource'].id,\n action['resource'],\n ex\n ))\n\n return notices", "def get_new_actions(self):\n \"\"\" Wrapper function for do_get_new_actions\n For stats purpose\n\n :return: None\n TODO: Use a decorator for timing this function\n \"\"\"\n try:\n _t0 = time.time()\n self.do_get_new_actions()\n statsmgr.timer('actions.got.time', time.time() - _t0)\n except RuntimeError:\n logger.error(\"Exception like issue #1007\")", "def _RunActions(self, rule, client_id):\n \"\"\"Run all the actions specified in the rule.\n\n Args:\n rule: Rule which actions are to be executed.\n client_id: Id of a client where rule's actions are to be executed.\n\n Returns:\n Number of actions started.\n \"\"\"\n actions_count = 0\n\n for action in rule.actions:\n try:\n # Say this flow came from the foreman.\n token = self.token.Copy()\n token.username = \"Foreman\"\n\n if action.HasField(\"hunt_id\"):\n if self._CheckIfHuntTaskWasAssigned(client_id, action.hunt_id):\n logging.info(\n \"Foreman: ignoring hunt %s on client %s: was started \"\n \"here before\", client_id, action.hunt_id)\n else:\n logging.info(\"Foreman: Starting hunt %s on client %s.\",\n action.hunt_id, client_id)\n\n flow_cls = registry.AFF4FlowRegistry.FlowClassByName(\n action.hunt_name)\n flow_cls.StartClients(action.hunt_id, [client_id])\n actions_count += 1\n else:\n flow.StartAFF4Flow(\n client_id=client_id,\n flow_name=action.flow_name,\n token=token,\n **action.argv.ToDict())\n actions_count += 1\n # There 
could be all kinds of errors we don't know about when starting the\n # flow/hunt so we catch everything here.\n except Exception as e: # pylint: disable=broad-except\n logging.exception(\"Failure running foreman action on client %s: %s\",\n action.hunt_id, e)\n\n return actions_count", "def rollout(env, acts):\n \"\"\"\n Perform a rollout using a preset collection of actions\n \"\"\"\n total_rew = 0\n env.reset()\n steps = 0\n for act in acts:\n _obs, rew, done, _info = env.step(act)\n steps += 1\n total_rew += rew\n if done:\n break\n\n return steps, total_rew", "def process_action(self, request, queryset):\n \"\"\"\n Unpublishes the selected objects by calling the object's \\\n unpublish method. The action is logged and the user is \\\n notified with a message.\n\n Returns a 'render redirect' to the result of the \\\n `get_done_url` method.\n \"\"\"\n count = 0\n for obj in queryset:\n count += 1\n obj.unpublish()\n object_url = self.get_object_url(obj)\n self.log_action(obj, CMSLog.UNPUBLISH, url=object_url)\n url = self.get_done_url()\n msg = self.write_message(message=\"%s objects unpublished.\" % count)\n return self.render(request, redirect_url=url,\n message=msg,\n collect_render_data=False)", "def _process_actions(self, actions, inport, packet):\n '''\n Process actions in order, in two stages. Each action implements a __call__, which\n applies any packet-level changes or other non-output changes. The functors\n can optionally return another function to be applied at the second stage.\n '''\n second_stage = []\n for a in actions:\n fn = a(packet=packet, net=self._switchyard_net, controllers=self._controller_connections, inport=inport)\n if (fn):\n second_stage.append(fn)\n for fn in second_stage:\n fn()" ]
[ 0.7414658069610596, 0.7352325916290283, 0.7167717218399048, 0.7096720933914185, 0.7074881196022034, 0.7056097388267517, 0.705003559589386, 0.6979048252105713, 0.696949303150177, 0.6945710778236389, 0.6918920278549194, 0.685328483581543 ]
Return a boolean if Celery tasks are enabled for this app. If the ``GLITTER_PUBLISHER_CELERY`` setting is ``True`` or ``False`` - then that value will be used. However if the setting isn't defined, then this will be enabled automatically if Celery is installed.
def celery_enabled(): """ Return a boolean if Celery tasks are enabled for this app. If the ``GLITTER_PUBLISHER_CELERY`` setting is ``True`` or ``False`` - then that value will be used. However if the setting isn't defined, then this will be enabled automatically if Celery is installed. """ enabled = getattr(settings, 'GLITTER_PUBLISHER_CELERY', None) if enabled is None: try: import celery # noqa enabled = True except ImportError: enabled = False return enabled
[ "def get_celery_info():\n \"\"\"\n Check celery availability\n \"\"\"\n import celery\n if not getattr(settings, 'USE_CELERY', False):\n log.error(\"No celery config found. Set USE_CELERY in settings to enable.\")\n return {\"status\": NO_CONFIG}\n start = datetime.now()\n try:\n # pylint: disable=no-member\n app = celery.Celery('tasks')\n app.config_from_object('django.conf:settings', namespace='CELERY')\n # Make sure celery is connected with max_retries=1\n # and not the default of max_retries=None if the connection\n # is made lazily\n app.connection().ensure_connection(max_retries=1)\n\n celery_stats = celery.task.control.inspect().stats()\n if not celery_stats:\n log.error(\"No running Celery workers were found.\")\n return {\"status\": DOWN, \"message\": \"No running Celery workers\"}\n except Exception as exp: # pylint: disable=broad-except\n log.error(\"Error connecting to the backend: %s\", exp)\n return {\"status\": DOWN, \"message\": \"Error connecting to the backend\"}\n return {\"status\": UP, \"response_microseconds\": (datetime.now() - start).microseconds}", "def is_visible(self):\n \"\"\"\n Return a boolean if the page is visible in navigation.\n\n Pages must have show in navigation set. Regular pages must be published (published and\n have a current version - checked with `is_published`), pages with a glitter app associated\n don't need any page versions.\n \"\"\"\n if self.glitter_app_name:\n visible = self.show_in_navigation\n else:\n visible = self.show_in_navigation and self.is_published\n\n return visible", "def celery_enable_all():\n \"\"\"Enable johnny-cache in all celery tasks, clearing the local-store\n after each task.\"\"\"\n from celery.signals import task_prerun, task_postrun, task_failure\n task_prerun.connect(prerun_handler)\n task_postrun.connect(postrun_handler)\n # Also have to cleanup on failure.\n task_failure.connect(postrun_handler)", "def is_enabled():\n '''\n See if jail service is actually enabled on boot\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' jail.is_enabled <jail name>\n '''\n cmd = 'service -e'\n services = __salt__['cmd.run'](cmd, python_shell=False)\n for service in services.split('\\\\n'):\n if re.search('jail', service):\n return True\n return False", "def get_celery_app(name=None, **kwargs): # nocv\n # pylint: disable=import-error\n '''\n Function to return celery-app. Works only if celery installed.\n :param name: Application name\n :param kwargs: overrided env-settings\n :return: Celery-app object\n '''\n from celery import Celery\n prepare_environment(**kwargs)\n name = name or os.getenv(\"VST_PROJECT\")\n celery_app = Celery(name)\n celery_app.config_from_object('django.conf:settings', namespace='CELERY')\n celery_app.autodiscover_tasks()\n return celery_app", "def is_enabled():\n \"\"\"Returns ``True`` if bcrypt should be used.\"\"\"\n enabled = getattr(settings, \"BCRYPT_ENABLED\", True)\n if not enabled:\n return False\n # Are we under a test?\n if hasattr(mail, 'outbox'):\n return getattr(settings, \"BCRYPT_ENABLED_UNDER_TEST\", False)\n return True", "def enabled(job_label, runas=None):\n '''\n Return True if the named service is enabled, false otherwise\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' service.enabled <service label>\n '''\n overrides_data = dict(plistlib.readPlist(\n '/var/db/launchd.db/com.apple.launchd/overrides.plist'\n ))\n if overrides_data.get(job_label, False):\n if overrides_data[job_label]['Disabled']:\n return False\n else:\n return True\n else:\n return False", "def init_celery(project_name):\n \"\"\" init celery app without the need of redundant code \"\"\"\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', '%s.settings' % project_name)\n app = Celery(project_name)\n app.config_from_object('django.conf:settings')\n app.autodiscover_tasks(settings.INSTALLED_APPS, related_name='tasks')\n return app", "def enabled(name, **kwargs):\n '''\n Return True if the named service is enabled, false otherwise\n\n name\n Service name\n\n .. versionchanged:: 2016.3.4\n\n Support for jail (representing jid or jail name) keyword argument in kwargs\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.enabled <service name>\n '''\n jail = kwargs.get('jail', '')\n if not available(name, jail):\n log.error('Service %s not found', name)\n return False\n\n cmd = '{0} {1} rcvar'.format(_cmd(jail), name)\n\n for line in __salt__['cmd.run_stdout'](cmd, python_shell=False).splitlines():\n if '_enable=\"' not in line:\n continue\n _, state, _ = line.split('\"', 2)\n return state.lower() in ('yes', 'true', 'on', '1')\n\n # probably will never reached\n return False", "def check_enabled(self):\n \"\"\"This method will be used to verify that a plugin should execute\n given the condition of the underlying environment.\n\n The default implementation will return True if none of class.files,\n class.packages, nor class.commands is specified. If any of these is\n specified the plugin will check for the existence of any of the\n corresponding paths, packages or commands and return True if any\n are present.\n\n For SCLPlugin subclasses, it will check whether the plugin can be run\n for any of installed SCLs. 
If so, it will store names of these SCLs\n on the plugin class in addition to returning True.\n\n For plugins with more complex enablement checks this method may be\n overridden.\n \"\"\"\n # some files or packages have been specified for this package\n if any([self.files, self.packages, self.commands, self.kernel_mods,\n self.services]):\n if isinstance(self.files, six.string_types):\n self.files = [self.files]\n\n if isinstance(self.packages, six.string_types):\n self.packages = [self.packages]\n\n if isinstance(self.commands, six.string_types):\n self.commands = [self.commands]\n\n if isinstance(self.kernel_mods, six.string_types):\n self.kernel_mods = [self.kernel_mods]\n\n if isinstance(self.services, six.string_types):\n self.services = [self.services]\n\n if isinstance(self, SCLPlugin):\n # save SCLs that match files or packages\n type(self)._scls_matched = []\n for scl in self._get_scls():\n files = [f % {\"scl_name\": scl} for f in self.files]\n packages = [p % {\"scl_name\": scl} for p in self.packages]\n commands = [c % {\"scl_name\": scl} for c in self.commands]\n services = [s % {\"scl_name\": scl} for s in self.services]\n if self._check_plugin_triggers(files,\n packages,\n commands,\n services):\n type(self)._scls_matched.append(scl)\n return len(type(self)._scls_matched) > 0\n\n return self._check_plugin_triggers(self.files,\n self.packages,\n self.commands,\n self.services)\n\n if isinstance(self, SCLPlugin):\n # if files and packages weren't specified, we take all SCLs\n type(self)._scls_matched = self._get_scls()\n\n return True", "def _is_plugin_disabled(plugin):\n \"\"\" Determines if provided plugin is disabled from running for the\n active task.\n \"\"\"\n item = _registered.get(plugin.name)\n if not item:\n return False\n\n _, props = item\n return bool(props.get('disabled'))", "def schedule_enabled():\n '''\n Check the status of automatic update scheduling.\n\n :return: True if scheduling is enabled, False if disabled\n\n :rtype: bool\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' softwareupdate.schedule_enabled\n '''\n cmd = ['softwareupdate', '--schedule']\n ret = salt.utils.mac_utils.execute_return_result(cmd)\n\n enabled = ret.split()[-1]\n\n return salt.utils.mac_utils.validate_enabled(enabled) == 'on'" ]
[ 0.6862063407897949, 0.677836537361145, 0.6738221645355225, 0.6663748025894165, 0.6523762345314026, 0.6509922742843628, 0.6505083441734314, 0.6491189002990723, 0.640676736831665, 0.6372286081314087, 0.6347237825393677, 0.6333461403846741 ]
Function checkAndCreate Check if an object exists and create it if not @param key: The targeted object @param payload: The targeted object description @return RETURN: The id of the object
def checkAndCreate(self, key, payload): """ Function checkAndCreate Check if an object exists and create it if not @param key: The targeted object @param payload: The targeted object description @return RETURN: The id of the object """ if key not in self: if 'templates' in payload: templates = payload.pop('templates') self[key] = payload self.reload() return self[key]['id']
[ "def checkAndCreate(self, key, payload):\n \"\"\" Function checkAndCreate\n Check if an object exists and create it if not\n\n @param key: The targeted object\n @param payload: The targeted object description\n @return RETURN: The id of the object\n \"\"\"\n if key not in self:\n self[key] = payload\n return self[key]['id']", "def checkAndCreate(self, key, payload, domainId):\n \"\"\" Function checkAndCreate\n Check if a subnet exists and create it if not\n\n @param key: The targeted subnet\n @param payload: The targeted subnet description\n @param domainId: The domainId to be attached wiuth the subnet\n @return RETURN: The id of the subnet\n \"\"\"\n if key not in self:\n self[key] = payload\n oid = self[key]['id']\n if not oid:\n return False\n #~ Ensure subnet contains the domain\n subnetDomainIds = []\n for domain in self[key]['domains']:\n subnetDomainIds.append(domain['id'])\n if domainId not in subnetDomainIds:\n subnetDomainIds.append(domainId)\n self[key][\"domain_ids\"] = subnetDomainIds\n if len(self[key][\"domains\"]) is not len(subnetDomainIds):\n return False\n return oid", "def checkAndCreate(self, key, payload, osIds):\n \"\"\" Function checkAndCreate\n Check if an architectures exists and create it if not\n\n @param key: The targeted architectures\n @param payload: The targeted architectures description\n @param osIds: The list of os ids liked with this architecture\n @return RETURN: The id of the object\n \"\"\"\n if key not in self:\n self[key] = payload\n oid = self[key]['id']\n if not oid:\n return False\n #~ To be sure the OS list is good, we ensure our os are in the list\n for os in self[key]['operatingsystems']:\n osIds.add(os['id'])\n self[key][\"operatingsystem_ids\"] = list(osIds)\n if (len(self[key]['operatingsystems']) is not len(osIds)):\n return False\n return oid", "def checkAndCreate(self, key, payload,\n hostgroupConf,\n hostgroupParent,\n puppetClassesId):\n \"\"\" Function checkAndCreate\n check And Create procedure for an hostgroup\n - check the hostgroup is not existing\n - create the hostgroup\n - Add puppet classes from puppetClassesId\n - Add params from hostgroupConf\n\n @param key: The hostgroup name or ID\n @param payload: The description of the hostgroup\n @param hostgroupConf: The configuration of the host group from the\n foreman.conf\n @param hostgroupParent: The id of the parent hostgroup\n @param puppetClassesId: The dict of puppet classes ids in foreman\n @return RETURN: The ItemHostsGroup object of an host\n \"\"\"\n if key not in self:\n self[key] = payload\n oid = self[key]['id']\n if not oid:\n return False\n\n # Create Hostgroup classes\n if 'classes' in hostgroupConf.keys():\n classList = list()\n for c in hostgroupConf['classes']:\n classList.append(puppetClassesId[c])\n if not self[key].checkAndCreateClasses(classList):\n print(\"Failed in classes\")\n return False\n\n # Set params\n if 'params' in hostgroupConf.keys():\n if not self[key].checkAndCreateParams(hostgroupConf['params']):\n print(\"Failed in params\")\n return False\n\n return oid", "def make_create_payload(**kwargs):\n \"\"\"Create payload for upload/check-upload operations.\"\"\"\n payload = {}\n # Add non-empty arguments\n for k, v in six.iteritems(kwargs):\n if v is not None:\n payload[k] = v\n\n return payload", "def create(self, obj, payload, async=False):\n \"\"\" Function create\n Create an new object\n\n @param obj: object name ('hosts', 'puppetclasses'...)\n @param payload: the dict of the payload\n @param async: should this request be async, if true use\n 
return.result() to get the response\n @return RETURN: the server response\n \"\"\"\n self.url = self.base_url + obj\n self.method = 'POST'\n self.payload = json.dumps(payload)\n if async:\n self.method = 'POST(Async)'\n session = FuturesSession()\n self.resp = session.post(url=self.url, auth=self.auth,\n headers=self.headers, data=self.payload,\n cert=self.ca_cert)\n return self.resp\n else:\n self.resp = requests.post(url=self.url, auth=self.auth,\n headers=self.headers,\n data=self.payload, cert=self.ca_cert)\n return self.__process_resp__(obj)", "def insert(self, payload):\n \"\"\"Inserts a new record with the payload passed as an argument\n\n :param payload: The record to create (dict)\n :return:\n - Created record\n \"\"\"\n response = self.session.post(self._get_table_url(), data=json.dumps(payload))\n return self._get_content(response)", "function stateAndPayloadAreValid(config, state, payload) {\n\n // ensure that the instances array exists\n if (!Array.isArray(state[config.stateKey])) {\n console.error('State does not contain an \"' + config.stateKey + '\" array.');\n return false;\n }\n\n // ensure that the payload contains an id\n if ((typeof payload === 'undefined' ? 'undefined' : _typeof(payload)) !== 'object' || typeof payload[config.instanceKey] === 'undefined') {\n console.error('Mutation payloads must be an object with an \"' + config.instanceKey + '\" property.');\n return false;\n }\n\n return true;\n }", "def validate(self, payload, required=None, strict=None):\n '''\n Validates a given JSON payload according to the rules defiined for all\n the fields/keys in the sub-class.\n\n :param dict payload: deserialized JSON object.\n :param bool required: if every field/key is required and must be\n present in the payload.\n :param bool strict: if :py:meth:`validate` should detect and report any\n fields/keys that are present in the payload but not\n defined in the sub-class.\n\n :returns: a tuple of two items. First item is a :class:`bool`\n indicating if the payload was successfully validated and the\n second item is ``None``. If the payload was not valid, then\n then the second item is a :py:class:`dict` of errors.\n '''\n\n # replace datatypes.Function.func if not already replaced\n self._replace_string_args()\n\n required = required if required is not None else self.required\n strict = strict if strict is not None else self.strict\n\n errors = PayloadErrors()\n fields = copy.deepcopy(list(self._fields))\n\n for key, value in iteritems(payload):\n if key not in self._fields:\n if strict:\n errors[key].append(self.strict_error)\n else:\n getattr(self, key).test(key, value, payload=payload,\n errors=errors[key])\n\n # Remove the key that has been checked\n fields.remove(key)\n\n for field in fields:\n rule = getattr(self, field)\n\n if rule.required is None:\n required = required\n else:\n required = rule.required\n\n if required:\n errors[field].append(self.required_error)\n elif isinstance(rule, Function):\n rule.test(field, payload.get(field, None),\n payload=payload, errors=errors[field])\n\n return (False, errors.to_dict()) if errors.has_errors() else (True,\n None)", "def handle_create_payload(\n entity: BaseEntity,\n author_user: UserType,\n protocol_name: str,\n to_user_key: RsaKey = None,\n parent_user: UserType = None,\n) -> str:\n \"\"\"Create a payload with the given protocol.\n\n Any given user arguments must have ``private_key`` and ``handle`` attributes.\n\n :arg entity: Entity object to send. 
Can be a base entity or a protocol specific one.\n :arg author_user: User authoring the object.\n :arg protocol_name: Protocol to create payload for.\n :arg to_user_key: Public key of user private payload is being sent to, required for private payloads.\n :arg parent_user: (Optional) User object of the parent object, if there is one. This must be given for the\n Diaspora protocol if a parent object exists, so that a proper ``parent_author_signature`` can\n be generated. If given, the payload will be sent as this user.\n :returns: Built payload message (str)\n \"\"\"\n mappers = importlib.import_module(f\"federation.entities.{protocol_name}.mappers\")\n protocol = importlib.import_module(f\"federation.protocols.{protocol_name}.protocol\")\n protocol = protocol.Protocol()\n outbound_entity = mappers.get_outbound_entity(entity, author_user.private_key)\n if parent_user:\n outbound_entity.sign_with_parent(parent_user.private_key)\n send_as_user = parent_user if parent_user else author_user\n data = protocol.build_send(entity=outbound_entity, from_user=send_as_user, to_user_key=to_user_key)\n return data", "def get_or_create(cls, key, defaults={}):\n '''\n A port of functionality from the Django ORM. Defaults can be passed in\n if creating a new document is necessary. Keyword args are used to\n lookup the document. Returns a tuple of (object, created), where object\n is the retrieved or created object and created is a boolean specifying\n whether a new object was created.\n '''\n instance = cls.get(key)\n if not instance:\n created = True\n data = dict(key=key)\n data.update(defaults)\n # Do an upsert here instead of a straight create to avoid a race\n # condition with another instance creating the same record at\n # nearly the same time.\n instance = cls.update(data, data, upsert=True)\n else:\n created = False\n return instance, created", "def _add(app, endpoint, payload):\n '''\n POST a payload\n '''\n nb = _nb_obj(auth_required=True)\n try:\n return getattr(getattr(nb, app), endpoint).create(**payload)\n except RequestError as e:\n log.error('%s, %s, %s', e.req.request.headers, e.request_body, e.error)\n return False" ]
[ 0.939257025718689, 0.7752333283424377, 0.7522791028022766, 0.7026407718658447, 0.674565315246582, 0.6636437773704529, 0.6603406667709351, 0.6586929559707642, 0.653120756149292, 0.653052031993866, 0.6461232304573059, 0.6450947523117065 ]
Collect new style (44.1+) return values to old-style kv-list
def __collect_interfaces_return(interfaces): """Collect new style (44.1+) return values to old-style kv-list""" acc = [] for (interfaceName, interfaceData) in interfaces.items(): signalValues = interfaceData.get("signals", {}) for (signalName, signalValue) in signalValues.items(): pinName = "{0}.{1}".format(interfaceName, signalName) acc.append({'id': pinName, 'value': signalValue}) return acc
[ "function postProcessList( list )\r\n\t{\r\n\t\tvar children = list.children,\r\n\t\t\tchild,\r\n\t\t\tattrs,\r\n\t\t\tcount = list.children.length,\r\n\t\t\tmatch,\r\n\t\t\tmergeStyle,\r\n\t\t\tstyleTypeRegexp = /list-style-type:(.*?)(?:;|$)/,\r\n\t\t\tstylesFilter = CKEDITOR.plugins.pastefromword.filters.stylesFilter;\r\n\r\n\t\tattrs = list.attributes;\r\n\t\tif ( styleTypeRegexp.exec( attrs.style ) )\r\n\t\t\treturn;\r\n\r\n\t\tfor ( var i = 0; i < count; i++ )\r\n\t\t{\r\n\t\t\tchild = children[ i ];\r\n\r\n\t\t\tif ( child.attributes.value && Number( child.attributes.value ) == i + 1 )\r\n\t\t\t\tdelete child.attributes.value;\r\n\r\n\t\t\tmatch = styleTypeRegexp.exec( child.attributes.style );\r\n\r\n\t\t\tif ( match )\r\n\t\t\t{\r\n\t\t\t\tif ( match[ 1 ] == mergeStyle || !mergeStyle )\r\n\t\t\t\t\tmergeStyle = match[ 1 ];\r\n\t\t\t\telse\r\n\t\t\t\t{\r\n\t\t\t\t\tmergeStyle = null;\r\n\t\t\t\t\tbreak;\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\r\n\t\tif ( mergeStyle )\r\n\t\t{\r\n\t\t\tfor ( i = 0; i < count; i++ )\r\n\t\t\t{\r\n\t\t\t\tattrs = children[ i ].attributes;\r\n\t\t\t\tattrs.style && ( attrs.style = stylesFilter( [ [ 'list-style-type'] ] )( attrs.style ) || '' );\r\n\t\t\t}\r\n\r\n\t\t\tlist.addStyle( 'list-style-type', mergeStyle );\r\n\t\t}\r\n\t}", "function(prop, callback) {\n // shortcut getComputedStyle function\n var s = function(el, p) {\n // this *can* be written to be smaller - see below, but in fact it doesn't compress in gzip as well, the commented\n // out version actually *adds* 2 bytes.\n // return document.defaultView.getComputedStyle(el, \"\").getPropertyValue(p.replace(/([A-Z])/g, \"-$1\").toLowerCase());\n return document.defaultView.getComputedStyle(el, \"\").getPropertyValue(cssstyle(p));\n }\n if (callback === undefined) {\n \tvar styles = [];\n this.each(function(el) {styles.push(s(el, prop))});\n return styles;\n } else return this.each(function(el) { callback(s(el, prop)); });\n }", "function kv2List( source ) {\n var list = [];\n for ( var key in source ) {\n if ( source.hasOwnProperty( key ) ) {\n list.push( {\n k: key,\n v: source[ key ]\n } );\n }\n }\n\n return list;\n }", "def _apply_style_colors(self, colors, kwds, col_num, label):\n \"\"\"\n Manage style and color based on column number and its label.\n Returns tuple of appropriate style and kwds which \"color\" may be added.\n \"\"\"\n style = None\n if self.style is not None:\n if isinstance(self.style, list):\n try:\n style = self.style[col_num]\n except IndexError:\n pass\n elif isinstance(self.style, dict):\n style = self.style.get(label, style)\n else:\n style = self.style\n\n has_color = 'color' in kwds or self.colormap is not None\n nocolor_style = style is None or re.match('[a-z]+', style) is None\n if (has_color or self.subplots) and nocolor_style:\n kwds['color'] = colors[col_num % len(colors)]\n return style, kwds", "def execute_with_style_LEGACY(template, style, data, callback, body_subtree='body'):\n \"\"\"OBSOLETE old API.\"\"\"\n try:\n body_data = data[body_subtree]\n except KeyError:\n raise EvaluationError('Data dictionary has no subtree %r' % body_subtree)\n tokens_body = []\n template.execute(body_data, tokens_body.append)\n data[body_subtree] = tokens_body\n tokens = []\n style.execute(data, tokens.append)\n _FlattenToCallback(tokens, callback)", "def old_style_notification(notify)\n resources = resource_hash_references(notify)\n resource_type = resources.xpath(\"symbol[1]/ident/@value\").to_s.to_sym\n if resource_type.empty?\n resource_type = 
resources.xpath(\"label/@value\").to_s.chop.to_sym\n end\n resource_name = resources.xpath('string_add[1][count(../\n descendant::string_add) = 1]/tstring_content/@value').to_s\n resource_name = resources if resource_name.empty?\n { resource_name: resource_name, resource_type: resource_type }\n end", "def _get_old_style_getter_deps(cls, prop_name, _getter): # @NoSelf\n \"\"\"Checks if deps were given with argument 'deps' (only for\n old-style getters (not based on decorator).\n Checks types, and returns the value (iterable of strings)\n given with the argument 'deps'\"\"\"\n args, _, _, defaults = inspect.getargspec(_getter)\n\n try:\n idx = args.index(KWARG_NAME_DEPS) - (len(args) - len(defaults))\n # finds the corresponding value\n if idx < 0:\n raise ValueError(\"In class %s.%s logical OP getter '%s'\"\n \"must have a default value set for \"\n \"argument '%s'\" % \\\n (cls.__module__, cls.__name__,\n _getter.__name__, KWARG_NAME_DEPS))\n _deps = defaults[idx]\n # checks types\n if not hasattr(_deps, '__iter__'):\n raise TypeError(\"In logical OP getter %s.%s.%s \"\n \"default value of argument '%s' \"\n \"must be an iterable\" % \\\n (cls.__module__, cls.__name__,\n _getter.__name__, KWARG_NAME_DEPS))\n\n for dep in _deps:\n if not isinstance(dep, str):\n raise TypeError(\"In logical OP getter %s.%s.%s \"\n \"values of argument '%s' \"\n \"must be strings\" % \\\n (cls.__module__, cls.__name__,\n _getter.__name__,\n KWARG_NAME_DEPS))\n except ValueError:\n _deps = ()\n\n return _deps", "def styles\n out = selected.map do |view|\n view.rmq_data.styles\n end\n out.flatten!.uniq!\n out\n end", "def list_style(item)\n if item.type != :li\n raise \"list_style called with non-list element\"\n end\n line = element_line(item).strip\n if line.start_with?('*')\n :asterisk\n elsif line.start_with?('+')\n :plus\n elsif line.start_with?('-')\n :dash\n elsif line.match('[0-9]+\\.')\n :ordered\n elsif line.match('[0-9]+\\)')\n :ordered_paren\n else\n :unknown\n end\n end", "function computedStyle(el, prop, getComputedStyle, style) {\n getComputedStyle = window.getComputedStyle;\n style =\n // If we have getComputedStyle\n getComputedStyle ?\n // Query it\n // TODO: From CSS-Query notes, we might need (node, null) for FF\n getComputedStyle(el) :\n\n // Otherwise, we are in IE and use currentStyle\n el.currentStyle;\n if (style) {\n return style\n [\n // Switch to camelCase for CSSOM\n // DEV: Grabbed from jQuery\n // https://github.com/jquery/jquery/blob/1.9-stable/src/css.js#L191-L194\n // https://github.com/jquery/jquery/blob/1.9-stable/src/core.js#L593-L597\n prop.replace(/-(\\w)/gi, function (word, letter) {\n return letter.toUpperCase();\n })\n ];\n }\n}", "function(props, val) {\n if (/^o/.test(typeof props)) {\n for(var prop in props) {\n var prefixed = u.prfx(prop);\n if (props.hasOwnProperty(prop)) {\n this.each(function(index, el) {\n el.style[prefixed] = props[prop];\n });\n }\n }\n return this;\n }\n else {\n return val === undefined ? (this.length ? getComputedStyle(this[0])[props] : null) : this.each(function(index, el) {\n var prefixed = u.prfx(props);\n el.style[prefixed] = val;\n });\n }\n }", "function( el, props ) {\n var key, pkey;\n for ( key in props ) {\n if ( props.hasOwnProperty( key ) ) {\n pkey = pfx( key );\n if ( pkey !== null ) {\n el.style[ pkey ] = props[ key ];\n }\n }\n }\n return el;\n }" ]
[ 0.6900656819343567, 0.6850228309631348, 0.6828471422195435, 0.6798000335693359, 0.6776096224784851, 0.6761053204536438, 0.6748420000076294, 0.6712998151779175, 0.6667713522911072, 0.6658219695091248, 0.6653779149055481, 0.6639884114265442 ]
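The positive snippet in this row flattens a nested `interfaces -> signals` mapping into a flat list of `{'id': ..., 'value': ...}` records. A minimal standalone sketch of the same transformation with a made-up input payload (the interface and signal names below are illustrative only, not taken from any real response):

```python
def collect_interfaces_return(interfaces):
    """Flatten {interface: {"signals": {name: value}}} into an old-style kv-list."""
    acc = []
    for interface_name, interface_data in interfaces.items():
        for signal_name, signal_value in interface_data.get("signals", {}).items():
            acc.append({"id": "{0}.{1}".format(interface_name, signal_name),
                        "value": signal_value})
    return acc


# Hypothetical 44.1+-style payload; the interface and signal names are invented.
sample = {"endpoints": {"signals": {"url": "http://host.example", "port": 8080}}}
print(collect_interfaces_return(sample))
# [{'id': 'endpoints.url', 'value': 'http://host.example'},
#  {'id': 'endpoints.port', 'value': 8080}]
```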
Guess what api we are using and return as public api does. Private has {'id':'key', 'value':'keyvalue'} format, public has {'key':'keyvalue'}
def return_values(self): """ Guess what api we are using and return as public api does. Private has {'id':'key', 'value':'keyvalue'} format, public has {'key':'keyvalue'} """ j = self.json() #TODO: FIXME: get rid of old API when its support will be removed public_api_value = j.get('returnValues') old_private_value = j.get('endpoints') new_private_value = self.__collect_interfaces_return(j.get('interfaces', {})) retvals = new_private_value or old_private_value or public_api_value or [] # TODO: Public api hack. if self._router.public_api_in_use: return retvals return self.__parse(retvals)
[ "def api_get(uri, key=None):\n \"\"\"\n Simple API endpoint get, return only the keys we care about\n \"\"\"\n response = get_json(uri)\n\n if response:\n if type(response) == list:\n r = response[0]\n elif type(response) == dict:\n r = response\n\n if type(r) == dict:\n # Special nested value we care about\n if key == USER_LOGIN:\n return user_login(r)\n if key in r:\n return r[key]", "def wk_api_info(api_method='')\n api_method ||= ''\n fail 'Invalid wukong api' unless api_method == '' || api_method =~ WK_API_FORMAT\n if api_method.size > 0\n m = api_method.to_s.match(/^wk_([a-zA-Z0-9]+)_(.+)/)\n method_group = m[1].singularize\n method_name = m[2]\n end\n apis = api_method.size > 0 ? API_LIST.select { |a| a[:method_group]==method_group && method_name==a[:method_name] } : API_LIST\n fail 'api not found' unless apis.present?\n apis.map do |api|\n method_group = api[:method_pluralize] ? api[:method_group].pluralize : api[:method_group]\n method_name = \"wk_#{method_group}_#{api[:method_name]}\"\n \"#{method_name}, #{api_url(api)}, #{api[:args].inspect} \"\n end\n end", "def get_api_method(opt)\n case opt.class.to_s\n when \"Symbol\"\n api_method = api_methods.keys.include?(opt) ? api_methods[opt][:url] : (raise StandardError, \"The parameter you sent is not available.\")\n when \"Hash\"\n api_method = opt_from_hash(opt)\n else\n api_method = api_methods[:news][:url]\n end\n end", "function(Apis) {\n\t\treturn _(Apis)\n\t\t\t.map(function(api) {\n\t\t\t\tvar actionPairs = _.map(T.maybeToArray(api.aliases), function(alias) {\n\t\t\t\t\treturn [alias.toLowerCase(), {\n\t\t\t\t\t\tparamSchema: api.parameters.schema,\n\t\t\t\t\t\tresSchema: api.response.schema\n\t\t\t\t\t}]\n\t\t\t\t})\n\n\t\t\t\treturn actionPairs.concat([\n\t\t\t\t\t[api.id.toLowerCase(), {\n\t\t\t\t\t\tparamSchema: api.parameters.schema,\n\t\t\t\t\t\tresSchema: api.response.schema\n\t\t\t\t\t}]\n\t\t\t\t])\n\t\t\t})\n\t\t\t.flatten(true)\n\t\t\t.zipObject()\n\t\t\t.value()\n\t}", "function(name, obj){\n if (_.isArray(obj)){\n return _.map(obj, function(o){ return _.pick(o. 
B._api_objects.name); });\n } else if (!obj){\n return obj;\n } else {\n return _.pick(obj, B._api_objects.name);\n }\n }", "def get_by_api(resource_group_name, service_name, api_id, tag_id, custom_headers:nil)\n response = get_by_api_async(resource_group_name, service_name, api_id, tag_id, custom_headers:custom_headers).value!\n response.body unless response.nil?\n end", "def public_api(self,url):\n ''' template function of public api'''\n try :\n url in api_urls\n return ast.literal_eval(requests.get(base_url + api_urls.get(url)).text)\n except Exception as e:\n print(e)", "def api(gandi):\n \"\"\"Display information about API used.\"\"\"\n key_name = 'API version'\n\n result = gandi.api.info()\n result[key_name] = result.pop('api_version')\n output_generic(gandi, result, [key_name])\n\n return result", "def get_by_api_with_http_info(resource_group_name, service_name, api_id, tag_id, custom_headers:nil)\n get_by_api_async(resource_group_name, service_name, api_id, tag_id, custom_headers:custom_headers).value!\n end", "def rest_api_id\n stack_name = Jets::Naming.parent_stack_name\n return \"RestApi\" unless stack_exists?(stack_name)\n\n stack = cfn.describe_stacks(stack_name: stack_name).stacks.first\n\n api_gateway_stack_arn = lookup(stack[:outputs], \"ApiGateway\")\n\n # resources = cfn.describe_stack_resources(stack_name: api_gateway_stack_arn).stack_resources\n stack = cfn.describe_stacks(stack_name: api_gateway_stack_arn).stacks.first\n rest_api_id = lookup(stack[:outputs], \"RestApi\")\n end", "def method_missing(api, *args)\n unless GoogleAPI.discovered_apis.has_key?(api)\n GoogleAPI.logger.info \"Discovering the #{api} Google API...\"\n response = access_token.get(\"https://www.googleapis.com/discovery/v1/apis?preferred=true&name=#{api}\").parsed['items']\n super unless response # Raise a NoMethodError if Google's Discovery API does not return a good response\n discovery_url = response.first['discoveryRestUrl']\n GoogleAPI.discovered_apis[api] = access_token.get(discovery_url).parsed\n end\n\n API.new(access_token, api, GoogleAPI.discovered_apis[api]['resources'])\n end", "def api_request(action, data, request_type, binary_key = nil)\n if !binary_key.nil?\n binary_key_data = data[binary_key]\n data.delete(binary_key)\n end\n\n if data[:format].nil? || data[:format] == 'json'\n data = prepare_json_payload(data)\n else\n data[:api_key] = @api_key\n data[:format] ||= 'json'\n data[:sig] = get_signature_hash(data, @secret)\n end\n\n if !binary_key.nil?\n data[binary_key] = binary_key_data\n end\n _result = http_request(action, data, request_type, binary_key)\n\n # NOTE: don't do the unserialize here\n if data[:format] == 'json'\n begin\n unserialized = JSON.parse(_result)\n return unserialized ? unserialized : _result\n rescue JSON::JSONError => e\n return {'error' => e}\n end\n end\n _result\n end" ]
[ 0.6989791393280029, 0.6979104280471802, 0.6978617906570435, 0.6952900290489197, 0.6922489404678345, 0.6918883919715881, 0.6911982893943787, 0.6867008805274963, 0.6855074167251587, 0.6841657757759094, 0.6805800199508667, 0.6770697236061096 ]
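The `return_values` snippet in this row has to accept the same data in several shapes: a public-API `{'key': 'value'}` dict or a private-API list of `{'id': ..., 'value': ...}` pairs. A rough sketch of that normalization step, assuming only those two shapes (the key names are invented for the example):

```python
def normalize_return_values(raw):
    """Map the private [{'id': k, 'value': v}, ...] shape onto the public {k: v} shape."""
    if isinstance(raw, dict):
        return raw  # already public-API shaped
    return {item["id"]: item["value"] for item in (raw or [])}


print(normalize_return_values([{"id": "host", "value": "10.0.0.1"}]))  # {'host': '10.0.0.1'}
print(normalize_return_values({"host": "10.0.0.1"}))                   # returned unchanged
```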
Returns activitylog object severity - filter severity ('INFO', DEBUG') start/end - time or log text
def get_activitylog(self, after=None, severity=None, start=None, end=None): """ Returns activitylog object severity - filter severity ('INFO', DEBUG') start/end - time or log text """ if after: log_raw = self._router.get_instance_activitylog(org_id=self.organizationId, instance_id=self.instanceId, params={"after": after}).json() else: log_raw = self._router.get_instance_activitylog(org_id=self.organizationId, instance_id=self.instanceId).json() return ActivityLog(log_raw, severity=severity, start=start, end=end)
[ "def get_severity(self, alert):\n \"\"\"\n Get severity of correlated alert. Used to determine previous severity.\n \"\"\"\n query = {\n 'environment': alert.environment,\n 'resource': alert.resource,\n '$or': [\n {\n 'event': alert.event,\n 'severity': {'$ne': alert.severity}\n },\n {\n 'event': {'$ne': alert.event},\n 'correlate': alert.event\n }],\n 'customer': alert.customer\n }\n r = self.get_db().alerts.find_one(query, projection={'severity': 1, '_id': 0})\n return r['severity'] if r else None", "def severity(self):\n \"\"\"Retrieves the severity for the incident/incidents from the\n output response\n\n Returns:\n severity(namedtuple): List of named tuples of severity for the\n incident/incidents\n \"\"\"\n resource_list = self.traffic_incident()\n severity = namedtuple('severity', 'severity')\n if len(resource_list) == 1 and resource_list[0] is None:\n return None\n else:\n try:\n return [severity(resource['severity'])\n for resource in resource_list]\n except (KeyError, TypeError):\n return [severity(resource['Severity'])\n for resource in resource_list]", "def _severity_by_name(name):\n \"\"\"\n Return the severity integer value by it's name. If not found,\n return 'information'.\n \n :rtype: int\n \"\"\"\n for intvalue, sevname in SEVERITY.items():\n if name.lower() == sevname:\n return intvalue\n return 1", "def GetSeverityString(self, severity):\n \"\"\"Retrieves a string representation of the severity.\n\n Args:\n severity (int): severity.\n\n Returns:\n str: description of the event severity.\n \"\"\"\n if 0 <= severity < len(self._SEVERITY):\n return self._SEVERITY[severity]\n return 'Unknown {0:d}'.format(severity)", "def severity(self):\n \"\"\"\n Severity level of the event. One of ``INFO``, ``WATCH``,\n ``WARNING``, ``DISTRESS``, ``CRITICAL`` or ``SEVERE``.\n \"\"\"\n if self._proto.HasField('severity'):\n return yamcs_pb2.Event.EventSeverity.Name(self._proto.severity)\n return None", "function getSeverity(config, ruleId) {\n const rules = config && config.rules\n const ruleOptions = rules && rules[ruleId]\n const severity = Array.isArray(ruleOptions) ? ruleOptions[0] : ruleOptions\n\n switch (severity) {\n case 2:\n case \"error\":\n return 2\n\n case 1:\n case \"warn\":\n return 1\n\n default:\n return 0\n }\n}", "def date_range(data):\n \"\"\"Returns the minimum activity start time and the maximum activity end time\n from the active entities response. These dates are modified in the following\n way. The hours (and minutes and so on) are removed from the start and end\n times and a *day* is added to the end time. 
These are the dates that should\n be used in the subsequent analytics request.\n \"\"\"\n start = min([parse(d['activity_start_time']) for d in data])\n end = max([parse(d['activity_end_time']) for d in data])\n start = remove_hours(start)\n end = remove_hours(end) + timedelta(days=1)\n return start, end", "def severity( level, proc = nil, &block )\n proc ||= block\n configuration[:severities][ level.to_s.upcase ] = proc\n self\n end", "public List<LogRecord> filter(Severity severity, String text) {\n final List<LogRecord> records = new ArrayList<>();\n for (LogRecord record : this.records) {\n if (record.getSeverity().ordinal() >= severity.ordinal() && record.getMessage().contains(text))\n records.add(record);\n }\n return records;\n }", "def getSeverity(self, severity_name, projectarea_id=None,\n projectarea_name=None):\n \"\"\"Get :class:`rtcclient.models.Severity` object by its name\n\n At least either of `projectarea_id` and `projectarea_name` is given\n\n :param severity_name: the severity name\n :param projectarea_id: the :class:`rtcclient.project_area.ProjectArea`\n id\n :param projectarea_name: the project area name\n :return: the :class:`rtcclient.models.Severity` object\n :rtype: rtcclient.models.Severity\n \"\"\"\n\n self.log.debug(\"Try to get <Severity %s>\", severity_name)\n if not isinstance(severity_name,\n six.string_types) or not severity_name:\n excp_msg = \"Please specify a valid Severity name\"\n self.log.error(excp_msg)\n raise exception.BadValue(excp_msg)\n\n severities = self._getSeverities(projectarea_id=projectarea_id,\n projectarea_name=projectarea_name,\n severity_name=severity_name)\n\n if severities is not None:\n severity = severities[0]\n self.log.info(\"Find <Severity %s>\", severity)\n return severity\n\n self.log.error(\"No Severity named %s\", severity_name)\n raise exception.NotFound(\"No Severity named %s\" % severity_name)", "static Severity getSeverity(String severityName) {\n switch (severityName) {\n case \"BLOCKER\":\n case \"CRITICAL\":\n case \"MAJOR\":\n return Severity.ERROR;\n case \"MINOR\":\n return Severity.WARNING;\n case \"INFO\":\n return Severity.INFO;\n default:\n log.warn(\"Unknown severity: \" + severityName);\n }\n return Severity.WARNING;\n }", "def get_logs(severity_level, duration, log_type)\n response = rest_get(\"#{uri_for_log_type(log_type)}Entries/\")\n entries = response_handler(response)['Items']\n start_time = Time.now.utc - (duration * 3600)\n if severity_level.nil?\n entries.select { |e| Time.parse(e['Created']) > start_time }\n else\n entries.select { |e| severity_level.to_s.casecmp(e['Severity']) == 0 && Time.parse(e['Created']) > start_time }\n end\n end" ]
[ 0.6896112561225891, 0.6779490113258362, 0.6729638576507568, 0.6703992486000061, 0.6701990365982056, 0.6595064401626587, 0.6534167528152466, 0.6531382203102112, 0.6529648303985596, 0.6528253555297852, 0.6501330733299255, 0.6488714218139648 ]
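The activity-log row above passes `severity` and `start`/`end` through to an `ActivityLog` wrapper that is not shown in this excerpt. A small sketch of what such client-side filtering could look like over plain log records (the field names mirror the ones used in the snippets; the records themselves are made up):

```python
def filter_log(records, severity=None, start=None, end=None):
    """Keep records whose severity is allowed and whose time falls inside [start, end]."""
    kept = []
    for rec in records:
        if severity is not None and rec["severity"] not in severity:
            continue
        if start is not None and rec["time"] < start:
            continue
        if end is not None and rec["time"] > end:
            continue
        kept.append(rec)
    return kept


log = [
    {"time": 100, "severity": "INFO", "description": "workflow started"},
    {"time": 250, "severity": "DEBUG", "description": "signal updated"},
]
print(filter_log(log, severity=("INFO",), start=50, end=200))
# [{'time': 100, 'severity': 'INFO', 'description': 'workflow started'}]
```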
return __cached_json, if accessed withing 300 ms. This allows to optimize calls when many parameters of entity requires withing short time.
def json(self): """ return __cached_json, if accessed withing 300 ms. This allows to optimize calls when many parameters of entity requires withing short time. """ if self.fresh(): return self.__cached_json # noinspection PyAttributeOutsideInit self.__last_read_time = time.time() self.__cached_json = self._router.get_instance(org_id=self.organizationId, instance_id=self.instanceId).json() return self.__cached_json
[ "def get_cache_data(request):\n if 'init' in request.POST:\n init = bool(float(request.POST['init']))\n else:\n init = False\n active_variables = []\n if 'variables[]' in request.POST:\n active_variables = request.POST.getlist('variables[]')\n \"\"\"\n else:\n active_variables = list(\n GroupDisplayPermission.objects.filter(hmi_group__in=request.user.groups.iterator()).values_list(\n 'charts__variables', flat=True))\n active_variables += list(\n GroupDisplayPermission.objects.filter(hmi_group__in=request.user.groups.iterator()).values_list(\n 'xy_charts__variables', flat=True))\n active_variables += list(\n GroupDisplayPermission.objects.filter(hmi_group__in=request.user.groups.iterator()).values_list(\n 'control_items__variable', flat=True))\n active_variables += list(\n GroupDisplayPermission.objects.filter(hmi_group__in=request.user.groups.iterator()).values_list(\n 'custom_html_panels__variables', flat=True))\n active_variables = list(set(active_variables))\n \"\"\"\n\n active_variable_properties = []\n if 'variable_properties[]' in request.POST:\n active_variable_properties = request.POST.getlist('variable_properties[]')\n\n timestamp_from = time.time()\n if 'timestamp_from' in request.POST:\n timestamp_from = float(request.POST['timestamp_from']) / 1000.0\n\n timestamp_to = time.time()\n\n if 'timestamp_to' in request.POST:\n timestamp_to = min(timestamp_to, float(request.POST['timestamp_to']) / 1000.0)\n\n if timestamp_to == 0:\n timestamp_to = time.time()\n\n if timestamp_from == 0:\n timestamp_from == time.time() - 60\n\n if timestamp_to - timestamp_from > 120 * 60:\n timestamp_from = timestamp_to - 120 * 60\n\n #if not init:\n #timestamp_to = min(timestamp_from + 30, timestamp_to)\n\n if len(active_variables) > 0:\n data = RecordedData.objects.db_data(\n variable_ids=active_variables,\n time_min=timestamp_from,\n time_max=timestamp_to,\n time_in_ms=True,\n query_first_value=init)\n else:\n data = None\n\n if data is None:\n data = {}\n\n data['variable_properties'] = {}\n\n for item in VariableProperty.objects.filter(pk__in=active_variable_properties):\n data['variable_properties'][item.pk] = item.value()\n\n data[\"server_time\"] = time.time() * 1000\n return HttpResponse(json.dumps(data), content_type='application/json')", "def cache_key(self, **extra):\n \"\"\"\n The cache key is made out of the key/values in `query_obj`, plus any\n other key/values in `extra`\n We remove datetime bounds that are hard values, and replace them with\n the use-provided inputs to bounds, which may be time-relative (as in\n \"5 days ago\" or \"now\").\n \"\"\"\n cache_dict = self.to_dict()\n cache_dict.update(extra)\n\n for k in ['from_dttm', 'to_dttm']:\n del cache_dict[k]\n if self.time_range:\n cache_dict['time_range'] = self.time_range\n json_data = self.json_dumps(cache_dict, sort_keys=True)\n return hashlib.md5(json_data.encode('utf-8')).hexdigest()", "def cache_key(self, query_obj, **extra):\n \"\"\"\n The cache key is made out of the key/values in `query_obj`, plus any\n other key/values in `extra`.\n\n We remove datetime bounds that are hard values, and replace them with\n the use-provided inputs to bounds, which may be time-relative (as in\n \"5 days ago\" or \"now\").\n\n The `extra` arguments are currently used by time shift queries, since\n different time shifts wil differ only in the `from_dttm` and `to_dttm`\n values which are stripped.\n \"\"\"\n cache_dict = copy.copy(query_obj)\n cache_dict.update(extra)\n\n for k in ['from_dttm', 'to_dttm']:\n del cache_dict[k]\n\n 
cache_dict['time_range'] = self.form_data.get('time_range')\n cache_dict['datasource'] = self.datasource.uid\n json_data = self.json_dumps(cache_dict, sort_keys=True)\n return hashlib.md5(json_data.encode('utf-8')).hexdigest()", "def get_cached(path, cache_name=None, **kwargs):\n \"\"\"Request a resource form the API, first checking if there is a cached\n response available. Returns the parsed JSON data.\n \"\"\"\n if gw2api.cache_dir and gw2api.cache_time and cache_name is not False:\n if cache_name is None:\n cache_name = path\n cache_file = os.path.join(gw2api.cache_dir, cache_name)\n if mtime(cache_file) >= time.time() - gw2api.cache_time:\n with open(cache_file, \"r\") as fp:\n return json.load(fp)\n else:\n cache_file = None\n\n r = gw2api.session.get(gw2api.BASE_URL + path, **kwargs)\n\n if not r.ok:\n try:\n response = r.json()\n except ValueError: # pragma: no cover\n response = None\n if isinstance(response, dict) and \"text\" in response:\n r.reason = response[\"text\"]\n\n r.raise_for_status()\n data = r.json()\n\n if cache_file:\n with open(cache_file, \"w\") as fp:\n json.dump(data, fp, indent=2)\n\n return data", "def get_cached_object_or_404(model, timeout=CACHE_TIMEOUT, **kwargs):\n \"\"\"\n Shortcut that will raise Http404 if there is no object matching the query\n\n see get_cached_object for params description\n \"\"\"\n try:\n return get_cached_object(model, timeout=timeout, **kwargs)\n except ObjectDoesNotExist, e:\n raise Http404('Reason: %s' % str(e))", "def data(self):\n \"\"\"load and cache data in json format\n \"\"\"\n\n if self.is_obsolete():\n data = self.get_data()\n for datum in data:\n if 'published_parsed' in datum:\n datum['published_parsed'] = \\\n self.parse_time(datum['published_parsed'])\n\n try:\n dumped_data = json.dumps(data)\n except:\n self.update_cache(data)\n else:\n self.update_cache(dumped_data)\n return data\n\n try:\n return json.loads(self.cache_data)\n except:\n return self.cache_data\n\n return self.get_data()", "def get(self, object_type, cache_key):\n \"\"\" Query the cache for a Zenpy object \"\"\"\n if object_type not in self.mapping or self.disabled:\n return None\n cache = self.mapping[object_type]\n if cache_key in cache:\n log.debug(\"Cache HIT: [%s %s]\" % (object_type.capitalize(), cache_key))\n return cache[cache_key]\n else:\n log.debug('Cache MISS: [%s %s]' % (object_type.capitalize(), cache_key))", "def cache():\n \"\"\"Returns a 304 if an If-Modified-Since header or If-None-Match is present. Returns the same as a GET otherwise.\n ---\n tags:\n - Response inspection\n parameters:\n - in: header\n name: If-Modified-Since\n - in: header\n name: If-None-Match\n produces:\n - application/json\n responses:\n 200:\n description: Cached response\n 304:\n description: Modified\n\n \"\"\"\n is_conditional = request.headers.get(\"If-Modified-Since\") or request.headers.get(\n \"If-None-Match\"\n )\n\n if is_conditional is None:\n response = view_get()\n response.headers[\"Last-Modified\"] = http_date()\n response.headers[\"ETag\"] = uuid.uuid4().hex\n return response\n else:\n return status_code(304)", "def get_cached(self, path, cache_name, **kwargs):\n \"\"\"Request a resource form the API, first checking if there is a cached\n response available. 
Returns the parsed JSON data.\n \"\"\"\n if gw2api.cache_dir and gw2api.cache_time and cache_name:\n cache_file = os.path.join(gw2api.cache_dir, cache_name)\n if mtime(cache_file) >= time.time() - gw2api.cache_time:\n with open(cache_file, \"r\") as fp:\n tmp = json.load(fp)\n return self.make_response(tmp[\"data\"], tmp[\"meta\"])\n else:\n cache_file = None\n\n meta, data = self._get(path, **kwargs)\n\n if cache_file:\n with open(cache_file, \"w\") as fp:\n json.dump({\"meta\": meta, \"data\": data}, fp, indent=2)\n\n return self.make_response(data, meta)", "def get_object(self, queryset=None):\n \"\"\"\n Implement cache on ``get_object`` method to\n avoid repetitive calls, in POST.\n \"\"\"\n if self._cached_object is None:\n self._cached_object = super(EntryCacheMixin, self).get_object(\n queryset)\n return self._cached_object", "def get_cache(\n self,\n instance,\n translation=None,\n language=None,\n field_name=None,\n field_value=None,\n ):\n \"\"\"\n Returns translation from cache.\n \"\"\"\n is_new = bool(instance.pk is None)\n\n try:\n cached_obj = instance._linguist_translations[field_name][language]\n if not cached_obj.field_name:\n cached_obj.field_name = field_name\n if not cached_obj.language:\n cached_obj.language = language\n if not cached_obj.identifier:\n cached_obj.identifier = self.instance.linguist_identifier\n except KeyError:\n cached_obj = None\n\n if not is_new:\n if translation is None:\n try:\n translation = self.decider.objects.get(\n identifier=self.instance.linguist_identifier,\n object_id=self.instance.pk,\n language=language,\n field_name=field_name,\n )\n except self.decider.DoesNotExist:\n pass\n\n if cached_obj is None:\n if translation is not None:\n cached_obj = CachedTranslation.from_object(translation)\n else:\n cached_obj = CachedTranslation(\n instance=instance,\n language=language,\n field_name=field_name,\n field_value=field_value,\n )\n\n instance._linguist_translations[cached_obj.field_name][\n cached_obj.language\n ] = cached_obj\n\n return cached_obj", "def etag_cache(max_age, check_perms=bool):\n \"\"\"\n A decorator for caching views and handling etag conditional requests.\n\n The decorator adds headers to GET requests that help with caching: Last-\n Modified, Expires and ETag. It also handles conditional requests, when the\n client send an If-Matches header.\n\n If a cache is set, the decorator will cache GET responses, bypassing the\n dataframe serialization. 
POST requests will still benefit from the\n dataframe cache for requests that produce the same SQL.\n\n \"\"\"\n def decorator(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n # check if the user can access the resource\n check_perms(*args, **kwargs)\n\n # for POST requests we can't set cache headers, use the response\n # cache nor use conditional requests; this will still use the\n # dataframe cache in `superset/viz.py`, though.\n if request.method == 'POST':\n return f(*args, **kwargs)\n\n response = None\n if cache:\n try:\n # build the cache key from the function arguments and any\n # other additional GET arguments (like `form_data`, eg).\n key_args = list(args)\n key_kwargs = kwargs.copy()\n key_kwargs.update(request.args)\n cache_key = wrapper.make_cache_key(f, *key_args, **key_kwargs)\n response = cache.get(cache_key)\n except Exception: # pylint: disable=broad-except\n if app.debug:\n raise\n logging.exception('Exception possibly due to cache backend.')\n\n # if no response was cached, compute it using the wrapped function\n if response is None:\n response = f(*args, **kwargs)\n\n # add headers for caching: Last Modified, Expires and ETag\n response.cache_control.public = True\n response.last_modified = datetime.utcnow()\n expiration = max_age if max_age != 0 else FAR_FUTURE\n response.expires = \\\n response.last_modified + timedelta(seconds=expiration)\n response.add_etag()\n\n # if we have a cache, store the response from the request\n if cache:\n try:\n cache.set(cache_key, response, timeout=max_age)\n except Exception: # pylint: disable=broad-except\n if app.debug:\n raise\n logging.exception('Exception possibly due to cache backend.')\n\n return response.make_conditional(request)\n\n if cache:\n wrapper.uncached = f\n wrapper.cache_timeout = max_age\n wrapper.make_cache_key = \\\n cache._memoize_make_cache_key( # pylint: disable=protected-access\n make_name=None, timeout=max_age)\n\n return wrapper\n\n return decorator" ]
[ 0.7120234370231628, 0.6977940797805786, 0.6969521045684814, 0.6892980933189392, 0.6834937930107117, 0.6795302033424377, 0.6782623529434204, 0.6782003045082092, 0.6768079400062561, 0.6752720475196838, 0.6735942959785461, 0.6715708374977112 ]
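The caching row above depends on a `fresh()` helper that is not included in the excerpt; judging from the docstring it simply compares the last read time against a roughly 300 ms window. A minimal sketch of that idea, with the class name, attribute names and threshold chosen here purely for illustration:

```python
import time


class CachedJson:
    """Serve a cached copy while it is still 'fresh'; otherwise re-fetch."""

    FRESH_WINDOW = 0.3  # seconds; stands in for the ~300 ms mentioned in the docstring

    def __init__(self, fetch):
        self._fetch = fetch           # callable performing the expensive request
        self._last_read_time = 0.0
        self._cached = None

    def fresh(self):
        return (time.time() - self._last_read_time) < self.FRESH_WINDOW

    def json(self):
        if self._cached is not None and self.fresh():
            return self._cached
        self._last_read_time = time.time()
        self._cached = self._fetch()
        return self._cached


calls = []
cache = CachedJson(lambda: calls.append(1) or {"status": "Active"})
cache.json()
cache.json()          # second call lands inside the 300 ms window -> served from cache
print(len(calls))     # 1
```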
Indicated most recent update of the instance, assumption based on: - if currentWorkflow exists, its startedAt time is most recent update. - else max of workflowHistory startedAt is most recent update.
def get_most_recent_update_time(self): """ Indicated most recent update of the instance, assumption based on: - if currentWorkflow exists, its startedAt time is most recent update. - else max of workflowHistory startedAt is most recent update. """ def parse_time(t): if t: return time.gmtime(t/1000) return None try: max_wf_started_at = max([i.get('startedAt') for i in self.workflowHistory]) return parse_time(max_wf_started_at) except ValueError: return None
[ "def _is_projection_updated_instance(self):\n \"\"\"\n This method tries to guess if instance was update since last time.\n If return True, definitely Yes, if False, this means more unknown\n :return: bool\n \"\"\"\n last = self._last_workflow_started_time\n if not self._router.public_api_in_use:\n most_recent = self.get_most_recent_update_time()\n else:\n most_recent = None\n if last and most_recent:\n return last < most_recent\n return False", "public List<CdoSnapshot> getHistoricals(GlobalId globalId, CommitId timePoint, boolean withChildValueObjects, int limit) {\n argumentsAreNotNull(globalId, timePoint);\n\n return delegate.getStateHistory(globalId, QueryParamsBuilder\n .withLimit(limit)\n .withChildValueObjects(withChildValueObjects)\n .toCommitId(timePoint).build());\n }", "@RequestMapping(method = RequestMethod.POST, value = \"/workflowInstance/{woinRefNum}\",\n produces = {MediaType.APPLICATION_JSON_VALUE, MediaType.TEXT_XML_VALUE})\n public ResponseEntity<WorkflowInstanceRestModel> updateIntance( @PathVariable long woinRefNum, @RequestBody UpdateInstanceStatusForm form ){\n WorkflowInstanceState woin = facade.findWorkflowInstance( woinRefNum, true );\n if( woin == null ){\n return new ResponseEntity<>( HttpStatus.NOT_FOUND );\n }\n try{\n if( WorkflowInstanceStatus.ABORT.name().equals( form.getStatus() ) ){\n facade.abortWorkflowInstance( woinRefNum );\n }\n else if( WorkflowInstanceStatus.SUSPENDED.name().equals( form.getStatus() ) ){\n facade.suspendWorkflowInstance( woinRefNum );\n }\n else if( WorkflowInstanceStatus.EXECUTING.name().equals( form.getStatus() ) ){\n facade.resumeWorkflowInstance( woinRefNum );\n }\n else{\n return new ResponseEntity<>( HttpStatus.BAD_REQUEST );\n }\n }\n catch( UnexpectedStatusException e ){\n return new ResponseEntity<>( HttpStatus.CONFLICT );\n }\n woin = facade.findWorkflowInstance( woinRefNum, true );\n return new ResponseEntity<>( createInstanceModel( woin ), HttpStatus.OK );\n }", "def last_update_time(self) -> float:\n \"\"\"The last time at which the report was modified.\"\"\"\n stdout = self.stdout_interceptor\n stderr = self.stderr_interceptor\n\n return max([\n self._last_update_time,\n stdout.last_write_time if stdout else 0,\n stderr.last_write_time if stderr else 0,\n ])", "def get_prev_status_from_history(instance, status=None):\n \"\"\"Returns the previous status of the object. If status is set, returns the\n previous status before the object reached the status passed in.\n If instance has reached the status passed in more than once, only the last\n one is considered.\n \"\"\"\n target = status or api.get_workflow_status_of(instance)\n history = getReviewHistory(instance, reverse=True)\n history = map(lambda event: event[\"review_state\"], history)\n if target not in history or history.index(target) == len(history)-1:\n return None\n return history[history.index(target)+1]", "static function retrieveMostCurrentWorkflowJobByAgeAndStatus($workflow_key, $status, $created_after) {\n $kind_schema = self::getWorkflowJobKind();\n $store = self::store($kind_schema);\n $obsolete_time = new \\DateTime($created_after);\n $where = [\n \"workflow_key\" => $workflow_key,\n \"status\" => $status,\n \"obsolete_time\" => $obsolete_time\n ];\n syslog(LOG_INFO, __METHOD__ . \" with vars \" . 
json_encode($where));\n $str_query = \"SELECT * FROM $kind_schema \n WHERE workflow_key = @workflow_key AND status = @status AND created > @obsolete_time ORDER BY created DESC\";\n $result = $store->fetchOne($str_query, $where);\n if ($result) {\n return $result->getData();\n } else {\n return false;\n }\n }", "def most_recent(self):\n \"\"\"\n Returns the most recent copy of the instance available in the history.\n \"\"\"\n if not self.instance:\n raise TypeError(\n \"Can't use most_recent() without a {} instance.\".format(\n self.model._meta.object_name\n )\n )\n tmp = []\n excluded_fields = getattr(self.model, \"_history_excluded_fields\", [])\n\n for field in self.instance._meta.fields:\n if field.name in excluded_fields:\n continue\n if isinstance(field, models.ForeignKey):\n tmp.append(field.name + \"_id\")\n else:\n tmp.append(field.name)\n fields = tuple(tmp)\n try:\n values = self.get_queryset().values_list(*fields)[0]\n except IndexError:\n raise self.instance.DoesNotExist(\n \"%s has no historical record.\" % self.instance._meta.object_name\n )\n return self.instance.__class__(*values)", "public void fireHistoricActivityInstanceUpdate() {\n ProcessEngineConfigurationImpl configuration = Context.getProcessEngineConfiguration();\n HistoryLevel historyLevel = configuration.getHistoryLevel();\n if (historyLevel.isHistoryEventProduced(HistoryEventTypes.ACTIVITY_INSTANCE_UPDATE, this)) {\n // publish update event for current activity instance (containing the id\n // of the sub process/case)\n HistoryEventProcessor.processHistoryEvents(new HistoryEventProcessor.HistoryEventCreator() {\n @Override\n public HistoryEvent createHistoryEvent(HistoryEventProducer producer) {\n return producer.createActivityInstanceUpdateEvt(ExecutionEntity.this);\n }\n });\n }\n }", "public Calendar getLastUpdateTime() {\n if (null != lastUpdateTime && lastUpdateTime.after(Calendar.getInstance())) {\n LOG.warn(\"Last update time newer than now {} for {}\", lastUpdateTime, owner);\n return null; // Something's broken - we don't know\n }\n return lastUpdateTime;\n }", "public void setHistoryMaxSize(int historyMaxSize) {\n synchronized (mInstanceLock) {\n if (mHistoryMaxSize == historyMaxSize) {\n return;\n }\n mHistoryMaxSize = historyMaxSize;\n pruneExcessiveHistoricalRecordsIfNeeded();\n if (sortActivitiesIfNeeded()) {\n notifyChanged();\n }\n }\n }", "protected final List<HistoricVariableUpdate> getDescendingVariableUpdates(final String processInstanceId, final String processVariableName) {\n\n final List<HistoricVariableUpdate> result = new ArrayList<HistoricVariableUpdate>();\n\n final List<HistoricDetail> historicDetails = getHistoryService().createHistoricDetailQuery().variableUpdates().processInstanceId(processInstanceId)\n .orderByVariableName().asc().orderByTime().desc().list();\n\n boolean reachedTargetVariable = false;\n\n if (historicDetails != null && !historicDetails.isEmpty()) {\n for (final HistoricDetail historicDetail : historicDetails) {\n\n if (historicDetail != null && historicDetail instanceof HistoricVariableUpdate) {\n final HistoricVariableUpdate historicVariableUpdate = (HistoricVariableUpdate) historicDetail;\n final boolean isForTarget = isHistoricVariableUpdateForTargetVariable(processVariableName, historicVariableUpdate);\n\n if (isForTarget && !reachedTargetVariable) {\n reachedTargetVariable = true;\n result.add(historicVariableUpdate);\n } else if (isForTarget) {\n result.add(historicVariableUpdate);\n } else if (reachedTargetVariable) {\n break;\n }\n }\n }\n }\n\n return 
result;\n }", "private String insertOrUpdateWorkflow(Workflow workflow, boolean update) {\n\t\tPreconditions.checkNotNull(workflow, \"workflow object cannot be null\");\n\n\t\tif (workflow.getStatus().isTerminal()) {\n\t\t\tworkflow.setEndTime(System.currentTimeMillis());\n\t\t}\n\t\tList<Task> tasks = workflow.getTasks();\n\t\tworkflow.setTasks(new LinkedList<>());\n\n\t\tString payload = toJson(workflow);\n\t\t// Store the workflow object\n\t\tdynoClient.set(nsKey(WORKFLOW, workflow.getWorkflowId()), payload);\n\t\trecordRedisDaoRequests(\"storeWorkflow\", \"n/a\", workflow.getWorkflowName());\n\t\trecordRedisDaoPayloadSize(\"storeWorkflow\", payload.length(), \"n/a\", workflow.getWorkflowName());\n\t\tif (!update) {\n\t\t\t// Add to list of workflows for a workflowdef\n\t\t\tString key = nsKey(WORKFLOW_DEF_TO_WORKFLOWS, workflow.getWorkflowName(), dateStr(workflow.getCreateTime()));\n\t\t\tdynoClient.sadd(key, workflow.getWorkflowId());\n\t\t\tif (workflow.getCorrelationId() != null) {\n\t\t\t\t// Add to list of workflows for a correlationId\n\t\t\t\tdynoClient.sadd(nsKey(CORR_ID_TO_WORKFLOWS, workflow.getCorrelationId()), workflow.getWorkflowId());\n\t\t\t}\n\t\t}\n\t\t// Add or remove from the pending workflows\n\t\tif (workflow.getStatus().isTerminal()) {\n\t\t\tdynoClient.srem(nsKey(PENDING_WORKFLOWS, workflow.getWorkflowName()), workflow.getWorkflowId());\n\t\t} else {\n\t\t\tdynoClient.sadd(nsKey(PENDING_WORKFLOWS, workflow.getWorkflowName()), workflow.getWorkflowId());\n\t\t}\n\n\t\tworkflow.setTasks(tasks);\n\t\treturn workflow.getWorkflowId();\n\t}" ]
[ 0.7245467305183411, 0.6743656396865845, 0.6735358238220215, 0.67184978723526, 0.6716688871383667, 0.6702247262001038, 0.6640412211418152, 0.661932110786438, 0.6618991494178772, 0.6616625189781189, 0.6594071984291077, 0.6588082909584045 ]
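In `get_most_recent_update_time` the `startedAt` values are epoch milliseconds, which is why they are divided by 1000 before being handed to `time.gmtime`. A tiny worked example of that conversion over a hypothetical `workflowHistory` list:

```python
import time

workflow_history = [
    {"name": "launch", "startedAt": 1500000000000},       # epoch milliseconds (made up)
    {"name": "reconfigure", "startedAt": 1500000600000},
]

latest_ms = max(item.get("startedAt") for item in workflow_history)
latest = time.gmtime(latest_ms / 1000)
print(time.strftime("%Y-%m-%d %H:%M:%S UTC", latest))     # 2017-07-14 02:50:00 UTC
```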
This method tries to guess if instance was update since last time. If return True, definitely Yes, if False, this means more unknown :return: bool
def _is_projection_updated_instance(self): """ This method tries to guess if instance was update since last time. If return True, definitely Yes, if False, this means more unknown :return: bool """ last = self._last_workflow_started_time if not self._router.public_api_in_use: most_recent = self.get_most_recent_update_time() else: most_recent = None if last and most_recent: return last < most_recent return False
[ "def is_updated(self):\n \"\"\"\n Checks if a resource has been updated since last publish.\n Returns False if resource has not been published before.\n \"\"\"\n\n if not self.is_published:\n return False\n\n return sanitize_date(self.sys['published_at']) < sanitize_date(self.sys['updated_at'])", "def has_update(self):\n \"\"\"Depending on the interval:\n\n returns True if its time for an update,\n returns False if its not yet time for an update\n \"\"\"\n _time = time\n if _time() > self.next_update:\n self.update_data()\n self.next_update = _time() + self.interval\n return True\n return False", "def _has_changed(instance):\n \"\"\"\n Check if some tracked fields have changed\n \"\"\"\n for field, value in instance._original_fields.items():\n if field != 'pk' and \\\n not isinstance(instance._meta.get_field(field), ManyToManyField):\n try:\n if field in getattr(instance, '_tracked_fields', []):\n if isinstance(instance._meta.get_field(field), ForeignKey):\n if getattr(instance, '{0}_id'.format(field)) != value:\n return True\n else:\n if getattr(instance, field) != value:\n return True\n except TypeError:\n # Can't compare old and new value, should be different.\n return True\n return False", "def was_modified_since_last_sync(self):\n \"\"\"Return True if this resource was modified since last sync.\n\n None is returned if we don't know (because of missing meta data).\n \"\"\"\n info = self.get_sync_info()\n if not info:\n return None\n if self.size != info[\"s\"]:\n return True\n if self.mtime > info[\"m\"]:\n return True\n return False", "def is_training_modified(self):\n \"\"\" Returns `True` if training data\n was modified since last training.\n Returns `False` otherwise,\n or if using builtin training data.\n \"\"\"\n\n last_modified = self.trainer.get_last_modified()\n if last_modified > self.training_timestamp:\n return True\n else:\n return False", "def get_most_recent_update_time(self):\n \"\"\"\n Indicated most recent update of the instance, assumption based on:\n - if currentWorkflow exists, its startedAt time is most recent update.\n - else max of workflowHistory startedAt is most recent update.\n \"\"\"\n def parse_time(t):\n if t:\n return time.gmtime(t/1000)\n return None\n try:\n max_wf_started_at = max([i.get('startedAt') for i in self.workflowHistory])\n return parse_time(max_wf_started_at)\n except ValueError:\n return None", "def is_old(self):\n \"\"\"\n Is the object cached for too long, so it should be redownloaded?\n\n See :attr:`.DB_MAX_WAIT_TIME` and :attr:`.DB_CACHE_TIME` for details.\n\n Returns:\n bool: True if it is.\n \"\"\"\n if not self.processing_started_ts:\n return True\n\n if self.processing_ended_ts:\n return self.processing_ended_ts + DB_CACHE_TIME < time.time()\n\n # in case that processing started, but didn't ended in\n # DB_MAX_WAIT_TIME\n expected_end_ts = self.creation_ts + DB_MAX_WAIT_TIME\n if expected_end_ts < time.time():\n logger.error(\"Prosessing timeouted and properites were not set!\")\n\n return expected_end_ts < time.time()", "def isModified(self):\n \"\"\"Check if either the datastream content or profile fields have changed\n and should be saved to Fedora.\n\n :rtype: boolean\n \"\"\"\n # NOTE: only check content digest if locally cached content is set\n # (content already pulled or new content set); otherwise this\n # results in pulling content down to checksum it !\n return self.info_modified or \\\n self._content and self._content_digest() != self.digest", "def up_to_date(self):\n \"\"\" Returns True if plugin posts are up to 
date\n\n Determined by self.updated and conf.GOSCALE_POSTS_UPDATE_FREQUENCY\n \"\"\"\n# return False\n if not self.updated:\n return False\n return (utils.get_datetime_now() - self.updated).seconds < conf.GOSCALE_POSTS_UPDATE_FREQUENCY", "def hasUnsavedChanges(self):\n \"\"\" Determine if there are any edits in the GUI that have not yet been\n saved (e.g. to a file). \"\"\"\n\n # Sanity check - this case shouldn't occur\n assert self._lastSavedState is not None, \\\n \"BUG: Please report this as it should never occur.\"\n\n # Force the current GUI values into our model in memory, but don't\n # change anything. Don't save to file, don't even convert bad\n # values to their previous state in the gui. Note that this can\n # leave the GUI in a half-saved state, but since we are about to exit\n # this is OK. We only want prompting to occur if they decide to save.\n badList = self.checkSetSaveEntries(doSave=False, fleeOnBadVals=True,\n allowGuiChanges=False)\n if badList:\n return True\n\n # Then compare our data to the last known saved state. MAKE SURE\n # the LHS is the actual dict (and not 'self') to invoke the dict\n # comparison only.\n return self._lastSavedState != self._taskParsObj", "def config_updated_since(self, sentry_unit, filename, mtime,\n sleep_time=20, retry_count=30,\n retry_sleep_time=10):\n \"\"\"Check if file was modified after a given time.\n\n Args:\n sentry_unit (sentry): The sentry unit to check the file mtime on\n filename (string): The file to check mtime of\n mtime (float): The epoch time to check against\n sleep_time (int): Initial sleep time (s) before looking for file\n retry_sleep_time (int): Time (s) to sleep between retries\n retry_count (int): If file is not found, how many times to retry\n\n Returns:\n bool: True if file was modified more recently than mtime, False if\n file was modified before mtime, or if file not found.\n \"\"\"\n unit_name = sentry_unit.info['unit_name']\n self.log.debug('Checking that %s updated since %s on '\n '%s' % (filename, mtime, unit_name))\n time.sleep(sleep_time)\n file_mtime = None\n tries = 0\n while tries <= retry_count and not file_mtime:\n try:\n file_mtime = self._get_file_mtime(sentry_unit, filename)\n self.log.debug('Attempt {} to get {} file mtime on {} '\n 'OK'.format(tries, filename, unit_name))\n except IOError as e:\n # NOTE(beisner) - race avoidance, file may not exist yet.\n # https://bugs.launchpad.net/charm-helpers/+bug/1474030\n self.log.debug('Attempt {} to get {} file mtime on {} '\n 'failed\\n{}'.format(tries, filename,\n unit_name, e))\n time.sleep(retry_sleep_time)\n tries += 1\n\n if not file_mtime:\n self.log.warn('Could not determine file mtime, assuming '\n 'file does not exist')\n return False\n\n if file_mtime >= mtime:\n self.log.debug('File mtime is newer than provided mtime '\n '(%s >= %s) on %s (OK)' % (file_mtime,\n mtime, unit_name))\n return True\n else:\n self.log.warn('File mtime is older than provided mtime'\n '(%s < on %s) on %s' % (file_mtime,\n mtime, unit_name))\n return False", "def _is_updated(old_conf, new_conf):\n '''\n Compare the API results to the current statefile data\n '''\n changed = {}\n\n # Dirty json hacking to get parameters in the same format\n new_conf = _json_to_unicode(salt.utils.json.loads(\n salt.utils.json.dumps(new_conf, ensure_ascii=False)))\n old_conf = salt.utils.json.loads(salt.utils.json.dumps(old_conf, ensure_ascii=False))\n\n for key, value in old_conf.items():\n oldval = six.text_type(value).lower()\n if key in new_conf:\n newval = 
six.text_type(new_conf[key]).lower()\n if oldval == 'null' or oldval == 'none':\n oldval = ''\n if key in new_conf and newval != oldval:\n changed[key] = {'old': oldval, 'new': newval}\n return changed" ]
[ 0.7202460169792175, 0.7186499834060669, 0.7087157964706421, 0.7016710638999939, 0.7003849148750305, 0.6996637582778931, 0.6942580342292786, 0.691415548324585, 0.6844378709793091, 0.6832665205001831, 0.6798832416534424, 0.6771925091743469 ]
Find regexp in activitylog find record as if type are in description.
def find(self, item, description='', event_type=''): """ Find regexp in activitylog find record as if type are in description. """ # TODO: should be refactored, dumb logic if ': ' in item: splited = item.split(': ', 1) if splited[0] in self.TYPES: description = item.split(': ')[1] event_type = item.split(': ')[0] else: description = item else: if not description: description = item if event_type: found = [x['time'] for x in self.log if re.search(description, x['description']) and x['eventTypeText'] == event_type] else: found = [x['time'] for x in self.log if re.search(description, x['description'])] if len(found): return found raise exceptions.NotFoundError("Item '{}' is not found with (description='{}', event_type='{}')". format(item, description, event_type))
[ "def _regexp(expr, item):\n ''' REGEXP function for Sqlite\n '''\n reg = re.compile(expr)\n return reg.search(item) is not None", "def filter(self, record):\n \"\"\"\n Returns True if the record shall be logged. False otherwise.\n\n https://github.com/python/cpython/blob/2.7/Lib/logging/__init__.py#L607\n \"\"\"\n found = self._pattern.search(record.getMessage())\n return not found", "def search_regexp(self):\n \"\"\"\n Define the regexp used for the search\n \"\"\"\n if ((self.season == \"\") and (self.episode == \"\")):\n # Find serie\n try:\n print(\"%s has %s seasons (the serie is %s)\" % (self.tvdb.data['seriesname'], self.tvdb.get_season_number(), self.tvdb.data['status'].lower()))\n # print self.tvdb.data\n except:\n pass\n regexp = '^%s.*' % self.title.lower()\n elif (self.episode == \"\"):\n # Find season\n try:\n print(\"%s has %s episodes in season %s\" % (self.tvdb.data['seriesname'], self.tvdb.get_episode_number(int(self.season)), self.season))\n except:\n pass\n regexp = '^%s.*(s[0]*%s|season[\\s\\_\\-\\.]*%s).*' % (self.title.lower(), self.season, self.season)\n else:\n # Find season and episode\n try:\n print(\"%s S%sE%s name is \\\"%s\\\"\" % (self.tvdb.data['seriesname'], self.season, self.episode, self.tvdb.get_episode(int(self.season), int(self.episode))['episodename']))\n except:\n pass\n regexp = '^%s.*((s[0]*%s.*e[0]*%s)|[0]*%sx[0]*%s).*' % (self.title.lower(), self.season, self.episode, self.season, self.episode)\n return regexp", "def find_profile(self, bitarray, eep_rorg, rorg_func, rorg_type, direction=None, command=None):\n ''' Find profile and data description, matching RORG, FUNC and TYPE '''\n if not self.init_ok:\n self.logger.warn('EEP.xml not loaded!')\n return None\n\n if eep_rorg not in self.telegrams.keys():\n self.logger.warn('Cannot find rorg in EEP!')\n return None\n\n if rorg_func not in self.telegrams[eep_rorg].keys():\n self.logger.warn('Cannot find func in EEP!')\n return None\n\n if rorg_type not in self.telegrams[eep_rorg][rorg_func].keys():\n self.logger.warn('Cannot find type in EEP!')\n return None\n\n profile = self.telegrams[eep_rorg][rorg_func][rorg_type]\n\n if command:\n # multiple commands can be defined, with the command id always in same location (per RORG-FUNC-TYPE).\n eep_command = profile.find('command', recursive=False)\n # If commands are not set in EEP, or command is None,\n # get the first data as a \"best guess\".\n if not eep_command:\n return profile.find('data', recursive=False)\n\n # If eep_command is defined, so should be data.command\n return profile.find('data', {'command': str(command)}, recursive=False)\n\n # extract data description\n # the direction tag is optional\n if direction is None:\n return profile.find('data', recursive=False)\n return profile.find('data', {'direction': direction}, recursive=False)", "protected String findAll(String strPattern, String text, int grp,\n String separator) {\n\n String retval = \"\";\n boolean firstTime = true;\n\n Pattern pattern = Pattern.compile(strPattern);\n Matcher matcher = pattern.matcher(text);\n\n while (matcher.find()) {\n retval += (firstTime ? \"\" : separator) + matcher.group(grp);\n firstTime = false;\n }\n\n return retval;\n }", "def rdf_find_subject(id)\n return nil unless rdf_valid?\n rdf.each_subject do |s|\n return s if s.to_s =~ Regexp.new(id, Regexp::IGNORECASE)\n end\n nil\n end", "def find_record(relation = nil)\n if locate_id.nil? 
|| (locate_id.is_a?(::Numeric) && locate_id == 0) || (locate_id.to_s == '')\n return -1\n end\n\n dataset = load_records(relation, false)\n return -1 if dataset.blank?\n\n first_item = dataset.first\n klass = first_item.class\n\n id_field = klass.respond_to?('primary_key') ? klass.primary_key : nil\n id_field ||= first_item.respond_to?('id') ? 'id' : nil\n\n return -1 unless id_field\n if locate_id.is_a?(::Numeric)\n dataset.index{|item| item.send(id_field) == locate_id} || -1\n else\n loc_id = locate_id.to_s.downcase\n dataset.index{|item| item.send(id_field).to_s.downcase == loc_id} || -1\n end\n\n end", "def rex_search(self, regexp, flags=0, byte=False, default=NULL):\n \"\"\"\n Search the regular expression in response body.\n\n :param byte: if False then search is performed in\n `response.unicode_body()` else the rex is searched in `response.body`.\n\n Note: if you use default non-byte mode than do not forget to build your\n regular expression with re.U flag.\n\n Return found match object or None\n\n \"\"\"\n\n regexp = normalize_regexp(regexp, flags)\n match = None\n if byte:\n if not isinstance(regexp.pattern, six.text_type) or not six.PY3:\n # if six.PY3:\n # body = self.body_as_bytes()\n # else:\n # body = self.body\n match = regexp.search(self.body)\n else:\n if isinstance(regexp.pattern, six.text_type) or not six.PY3:\n ubody = self.unicode_body()\n match = regexp.search(ubody)\n if match:\n return match\n else:\n if default is NULL:\n raise DataNotFound('Could not find regexp: %s' % regexp)\n else:\n return default", "def rex_text(self, regexp, flags=0, byte=False, default=NULL):\n \"\"\"\n Search regular expression in response body and return content of first\n matching group.\n\n :param byte: if False then search is performed in\n `response.unicode_body()` else the rex is searched in `response.body`.\n \"\"\"\n\n # pylint: disable=no-member\n try:\n match = self.rex_search(regexp, flags=flags, byte=byte)\n except DataNotFound:\n if default is NULL:\n raise DataNotFound('Regexp not found')\n else:\n return default\n else:\n return normalize_space(decode_entities(match.group(1)))", "public void logMatchesFound(String regex){\n\t\tif (uniqueTextViews.size() > 0) {\n\t\t\tLog.d(LOG_TAG, \" There are only \" + uniqueTextViews.size() + \" matches of '\" + regex + \"'\");\n\t\t}\n\t\telse if(webElements.size() > 0){\n\t\t\tLog.d(LOG_TAG, \" There are only \" + webElements.size() + \" matches of '\" + regex + \"'\");\n\t\t}\n\t\tuniqueTextViews.clear();\n\t\twebElements.clear();\n\t}", "public static String getRowKeyUIDRegex(\n final List<byte[]> group_bys, \n final ByteMap<byte[][]> row_key_literals, \n final boolean explicit_tags,\n final byte[] fuzzy_key, \n final byte[] fuzzy_mask) {\n if (group_bys != null) {\n Collections.sort(group_bys, Bytes.MEMCMP);\n }\n final int prefix_width = Const.SALT_WIDTH() + TSDB.metrics_width() + \n Const.TIMESTAMP_BYTES;\n final short name_width = TSDB.tagk_width();\n final short value_width = TSDB.tagv_width();\n final short tagsize = (short) (name_width + value_width);\n // Generate a regexp for our tags. Say we have 2 tags: { 0 0 1 0 0 2 }\n // and { 4 5 6 9 8 7 }, the regexp will be:\n // \"^.{7}(?:.{6})*\\\\Q\\000\\000\\001\\000\\000\\002\\\\E(?:.{6})*\\\\Q\\004\\005\\006\\011\\010\\007\\\\E(?:.{6})*$\"\n final StringBuilder buf = new StringBuilder(\n 15 // \"^.{N}\" + \"(?:.{M})*\" + \"$\"\n + ((13 + tagsize) // \"(?:.{M})*\\\\Q\" + tagsize bytes + \"\\\\E\"\n * ((row_key_literals == null ? 
0 : row_key_literals.size()) + \n (group_bys == null ? 0 : group_bys.size() * 3))));\n // In order to avoid re-allocations, reserve a bit more w/ groups ^^^\n\n // Alright, let's build this regexp. From the beginning...\n buf.append(\"(?s)\" // Ensure we use the DOTALL flag.\n + \"^.{\")\n // ... start by skipping the salt, metric ID and timestamp.\n .append(Const.SALT_WIDTH() + TSDB.metrics_width() + Const.TIMESTAMP_BYTES)\n .append(\"}\");\n\n final Iterator<Entry<byte[], byte[][]>> it = row_key_literals == null ? \n new ByteMap<byte[][]>().iterator() : row_key_literals.iterator();\n int fuzzy_offset = Const.SALT_WIDTH() + TSDB.metrics_width();\n if (fuzzy_mask != null) {\n // make sure to skip the timestamp when scanning\n while (fuzzy_offset < prefix_width) {\n fuzzy_mask[fuzzy_offset++] = 1;\n }\n }\n \n while(it.hasNext()) {\n Entry<byte[], byte[][]> entry = it.hasNext() ? it.next() : null;\n // TODO - This look ahead may be expensive. We need to get some data around\n // whether it's faster for HBase to scan with a look ahead or simply pass\n // the rows back to the TSD for filtering.\n final boolean not_key = \n entry.getValue() != null && entry.getValue().length == 0;\n \n // Skip any number of tags.\n if (!explicit_tags) {\n buf.append(\"(?:.{\").append(tagsize).append(\"})*\");\n } else if (fuzzy_mask != null) {\n // TODO - see if we can figure out how to improve the fuzzy filter by\n // setting explicit tag values whenever we can. In testing there was\n // a conflict between the row key regex and fuzzy filter that prevented\n // results from returning properly.\n System.arraycopy(entry.getKey(), 0, fuzzy_key, fuzzy_offset, name_width);\n fuzzy_offset += name_width;\n for (int i = 0; i < value_width; i++) {\n fuzzy_mask[fuzzy_offset++] = 1;\n }\n }\n if (not_key) {\n // start the lookahead as we have a key we explicitly do not want in the\n // results\n buf.append(\"(?!\");\n }\n buf.append(\"\\\\Q\");\n \n addId(buf, entry.getKey(), true);\n if (entry.getValue() != null && entry.getValue().length > 0) { // Add a group_by.\n // We want specific IDs. List them: /(AAA|BBB|CCC|..)/\n buf.append(\"(?:\");\n for (final byte[] value_id : entry.getValue()) {\n if (value_id == null) {\n continue;\n }\n buf.append(\"\\\\Q\");\n addId(buf, value_id, true);\n buf.append('|');\n }\n // Replace the pipe of the last iteration.\n buf.setCharAt(buf.length() - 1, ')');\n } else {\n buf.append(\".{\").append(value_width).append('}'); // Any value ID.\n }\n \n if (not_key) {\n // be sure to close off the look ahead\n buf.append(\")\");\n }\n }\n // Skip any number of tags before the end.\n if (!explicit_tags) {\n buf.append(\"(?:.{\").append(tagsize).append(\"})*\");\n }\n buf.append(\"$\");\n return buf.toString();\n }", "def find_by_activity(activity_uri, include_invalidated = false)\n raise ArgumentError, 'activity_uri must be an RDF::URI' unless\n activity_uri.respond_to? :to_term\n query = SPARQL_CLIENT.select(:record)\n .where([:record,\n [RDF::PROV.wasGeneratedBy, '|', RDF::DPLA.wasRevisedBy],\n activity_uri.to_term])\n\n if include_invalidated\n query\n else\n # We need to say \"and if RDF::PROV.invalidatedAtTime is not set.\"\n #\n # The SPARQL query should be:\n #\n # PREFIX prov: <http://www.w3.org/ns/prov#>\n # SELECT * WHERE {\n # ?subject prov:wasGeneratedBy <http://xampl.org/ldp/activity/n> .\n # FILTER NOT EXISTS { ?subject prov:invalidatedAtTime ?x }\n # }\n #\n # ... However there doesn't appear to be a way of constructing\n # 'FILTER NOT EXISTS' with SPARQL::Client. 
Instead, we've managed to\n # hack the following solution together.\n #\n # SPARQL::Client#filter is labeled @private in its YARD comment (and\n # has no other documentation) but it's not private, at least for\n # now.\n query.filter \\\n 'NOT EXISTS ' \\\n '{ ?record <http://www.w3.org/ns/prov#invalidatedAtTime> ?x }'\n end\n end" ]
[ 0.6934164762496948, 0.6788412928581238, 0.6724658012390137, 0.6695852279663086, 0.6676204800605774, 0.6655965447425842, 0.6647224426269531, 0.6624881029129028, 0.6604191064834595, 0.6596809029579163, 0.6591915488243103, 0.6542932987213135 ]
Currently a small stub to create an instance of Checker for the passed ``infile`` and run its test functions through linting. Args: infile Returns: int: Number of flake8 errors raised.
def do_command_line(infile: typing.IO[str]) -> int: """ Currently a small stub to create an instance of Checker for the passed ``infile`` and run its test functions through linting. Args: infile Returns: int: Number of flake8 errors raised. """ lines = infile.readlines() tree = ast.parse(''.join(lines)) checker = Checker(tree, lines, infile.name) checker.load() errors = [] # type: typing.List[AAAError] for func in checker.all_funcs(skip_noqa=True): try: errors = list(func.check_all()) except ValidationError as error: errors = [error.to_aaa()] print(func.__str__(errors), end='') return len(errors)
[ "def check(codeString, filename, reporter=modReporter.Default, settings_path=None, **setting_overrides):\n \"\"\"Check the Python source given by codeString for unfrosted flakes.\"\"\"\n\n if not settings_path and filename:\n settings_path = os.path.dirname(os.path.abspath(filename))\n settings_path = settings_path or os.getcwd()\n\n active_settings = settings.from_path(settings_path).copy()\n for key, value in itemsview(setting_overrides):\n access_key = key.replace('not_', '').lower()\n if type(active_settings.get(access_key)) in (list, tuple):\n if key.startswith('not_'):\n active_settings[access_key] = list(set(active_settings[access_key]).difference(value))\n else:\n active_settings[access_key] = list(set(active_settings[access_key]).union(value))\n else:\n active_settings[key] = value\n active_settings.update(setting_overrides)\n\n if _should_skip(filename, active_settings.get('skip', [])):\n if active_settings.get('directly_being_checked', None) == 1:\n reporter.flake(FileSkipped(filename))\n return 1\n elif active_settings.get('verbose', False):\n ignore = active_settings.get('ignore_frosted_errors', [])\n if(not \"W200\" in ignore and not \"W201\" in ignore):\n reporter.flake(FileSkipped(filename, None, verbose=active_settings.get('verbose')))\n return 0\n\n # First, compile into an AST and handle syntax errors.\n try:\n tree = compile(codeString, filename, \"exec\", _ast.PyCF_ONLY_AST)\n except SyntaxError:\n value = sys.exc_info()[1]\n msg = value.args[0]\n\n (lineno, offset, text) = value.lineno, value.offset, value.text\n\n # If there's an encoding problem with the file, the text is None.\n if text is None:\n # Avoid using msg, since for the only known case, it contains a\n # bogus message that claims the encoding the file declared was\n # unknown.\n reporter.unexpected_error(filename, 'problem decoding source')\n else:\n reporter.flake(PythonSyntaxError(filename, msg, lineno, offset, text,\n verbose=active_settings.get('verbose')))\n return 1\n except Exception:\n reporter.unexpected_error(filename, 'problem decoding source')\n return 1\n # Okay, it's syntactically valid. 
Now check it.\n w = checker.Checker(tree, filename, None, ignore_lines=_noqa_lines(codeString), **active_settings)\n w.messages.sort(key=lambda m: m.lineno)\n for warning in w.messages:\n reporter.flake(warning)\n return len(w.messages)", "def _lint():\n \"\"\"Run lint and return an exit code.\"\"\"\n # Flake8 doesn't have an easy way to run checks using a Python function, so\n # just fork off another process to do it.\n\n # Python 3 compat:\n # - The result of subprocess call outputs are byte strings, meaning we need\n # to pass a byte string to endswith.\n project_python_files = [filename for filename in get_project_files()\n if filename.endswith(b'.py')]\n retcode = subprocess.call(\n ['flake8', '--max-complexity=10'] + project_python_files)\n if retcode == 0:\n print_success_message('No style errors')\n return retcode", "def lint(input_file, debug=False):\n \"\"\"Run the linter on the input file.\"\"\"\n options = load_options()\n\n if isinstance(input_file, string_types):\n text = input_file\n else:\n text = input_file.read()\n\n # Get the checks.\n checks = get_checks(options)\n\n # Apply all the checks.\n errors = []\n for check in checks:\n\n result = check(text)\n\n for error in result:\n (start, end, check, message, replacements) = error\n (line, column) = line_and_column(text, start)\n if not is_quoted(start, text):\n errors += [(check, message, line, column, start, end,\n end - start, \"warning\", replacements)]\n\n if len(errors) > options[\"max_errors\"]:\n break\n\n # Sort the errors by line and column number.\n errors = sorted(errors[:options[\"max_errors\"]], key=lambda e: (e[2], e[3]))\n\n return errors", "def run(files, temp_folder):\n \"Check flake8 errors in the code base.\"\n try:\n import flake8 # NOQA\n except ImportError:\n return NO_FLAKE_MSG\n try:\n from flake8.engine import get_style_guide\n except ImportError:\n # We're on a new version of flake8\n from flake8.api.legacy import get_style_guide\n\n py_files = filter_python_files(files)\n if not py_files:\n return\n DEFAULT_CONFIG = join(temp_folder, get_config_file())\n\n with change_folder(temp_folder):\n flake8_style = get_style_guide(config_file=DEFAULT_CONFIG)\n out, err = StringIO(), StringIO()\n with redirected(out, err):\n flake8_style.check_files(py_files)\n return out.getvalue().strip() + err.getvalue().strip()", "def run():\n \"\"\"\n Runs flake8 lint\n\n :return:\n A bool - if flake8 did not find any errors\n \"\"\"\n\n print('Running flake8 %s' % flake8.__version__)\n\n flake8_style = get_style_guide(config_file=os.path.join(package_root, 'tox.ini'))\n\n paths = []\n for _dir in [package_name, 'dev', 'tests']:\n for root, _, filenames in os.walk(_dir):\n for filename in filenames:\n if not filename.endswith('.py'):\n continue\n paths.append(os.path.join(root, filename))\n report = flake8_style.check_files(paths)\n success = report.total_errors == 0\n if success:\n print('OK')\n return success", "def lint(args):\n \"\"\"Run lint checks using flake8.\"\"\"\n application = get_current_application()\n if not args:\n args = [application.name, 'tests']\n args = ['flake8'] + list(args)\n run.main(args, standalone_mode=False)", "def lint(filename):\n \"\"\"Lints an INI file, returning 0 in case of success.\"\"\"\n config = ConfigParser.ConfigParser()\n try:\n config.read(filename)\n return 0\n except ConfigParser.Error as error:\n print('Error: %s' % error)\n return 1\n except:\n print('Unexpected Error')\n return 2", "def input_file(filename):\n \"\"\"\n Run all checks on a Python source file.\n \"\"\"\n 
if excluded(filename) or not filename_match(filename):\n return {}\n if options.verbose:\n message('checking ' + filename)\n options.counters['files'] = options.counters.get('files', 0) + 1\n errors = Checker(filename).check_all()\n if options.testsuite and not errors:\n message(\"%s: %s\" % (filename, \"no errors found\"))\n return errors", "def run(self) -> Generator[Tuple[int, int, str, type], None, None]:\n \"\"\"\n Yields:\n tuple (line_number: int, offset: int, text: str, check: type)\n \"\"\"\n if is_test_file(self.filename):\n self.load()\n for func in self.all_funcs():\n try:\n for error in func.check_all():\n yield (error.line_number, error.offset, error.text, Checker)\n except ValidationError as error:\n yield error.to_flake8(Checker)", "def check(codeString, filename):\n \"\"\"\n Check the Python source given by C{codeString} for flakes.\n\n @param codeString: The Python source to check.\n @type codeString: C{str}\n\n @param filename: The name of the file the source came from, used to report\n errors.\n @type filename: C{str}\n\n @return: The number of warnings emitted.\n @rtype: C{int}\n \"\"\"\n # First, compile into an AST and handle syntax errors.\n try:\n tree = compile(codeString, filename, \"exec\", ast.PyCF_ONLY_AST)\n except SyntaxError, value:\n msg = value.args[0]\n\n (lineno, offset, text) = value.lineno, value.offset, value.text\n\n # If there's an encoding problem with the file, the text is None.\n if text is None:\n # Avoid using msg, since for the only known case, it contains a\n # bogus message that claims the encoding the file declared was\n # unknown.\n sys.stderr.write(\"%s: problem decoding source\\n\" % (filename, ))\n else:\n line = text.splitlines()[-1]\n\n if offset is not None:\n offset = offset - (len(text) - len(line))\n\n sys.stderr.write('%s:%d: %s' % (filename, lineno, msg))\n sys.stderr.write(line + '\\n')\n\n if offset is not None:\n sys.stderr.write(\" \" * offset + \"^\\n\")\n\n return 1\n else:\n # Okay, it's syntactically valid. Now check it.\n w = checker.Checker(tree, filename)\n lines = codeString.split('\\n')\n messages = [message for message in w.messages\n if lines[message.lineno - 1].find('pyflakes:ignore') < 0]\n messages.sort(lambda a, b: cmp(a.lineno, b.lineno))\n false_positives = 0\n for warning in messages:\n if not (re.match('.*__init__.py', str(warning))\n and isinstance(warning, (UnusedImport, ImportStarUsed))):\n print(warning)\n else:\n false_positives += 1\n return len(messages) - false_positives", "def input_file(self, filename, lines=None, expected=None, line_offset=0):\n \"\"\"Run all checks on a Python source file.\"\"\"\n if self.options.verbose:\n print('checking %s' % filename)\n fchecker = self.checker_class(\n filename, lines=lines, options=self.options)\n return fchecker.check_all(expected=expected, line_offset=line_offset)", "def do_check_pep8(files, status):\n \"\"\"\n Run the python pep8 tool against the filst of supplied files.\n Append any linting errors to the returned status list\n\n Args:\n files (str): list of files to run pep8 against\n status (list): list of pre-receive check failures to eventually print\n to the user\n\n Returns:\n status list of current pre-redeive check failures. Might be an empty\n list.\n \"\"\"\n for file_name in files:\n\n args = ['flake8', '--max-line-length=120', '{0}'.format(file_name)]\n output = run(*args)\n\n if output:\n status.append(\"Python PEP8/Flake8: {0}: {1}\".format(file_name,\n output))\n\n return status" ]
[ 0.7110608220100403, 0.7024708390235901, 0.6943210363388062, 0.6942464113235474, 0.6909804940223694, 0.6906939148902893, 0.6896222829818726, 0.6877042651176453, 0.6864295601844788, 0.6849697828292847, 0.6821961402893066, 0.6819117665290833 ]
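The `do_command_line` helper above takes an already-open file object and returns the number of errors found, which maps naturally onto a process exit status. A minimal wiring sketch, assuming it sits in the same module as `do_command_line` (the CLI framing itself is hypothetical and not part of the source shown above):

    import sys

    # Hypothetical wiring: lint the file named on the command line and use the
    # error count as the exit status (0 means no violations were reported).
    if __name__ == '__main__':
        with open(sys.argv[1]) as infile:
            sys.exit(do_command_line(infile))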
finds the appropriate properties (spec) of a module, and sets its loader.
def find_spec(self, fullname, path, target=None): '''finds the appropriate properties (spec) of a module, and sets its loader.''' if not path: path = [os.getcwd()] if "." in fullname: name = fullname.split(".")[-1] else: name = fullname for entry in path: if os.path.isdir(os.path.join(entry, name)): # this module has child modules filename = os.path.join(entry, name, "__init__.py") submodule_locations = [os.path.join(entry, name)] else: filename = os.path.join(entry, name + ".py") submodule_locations = None if not os.path.exists(filename): continue return spec_from_file_location(fullname, filename, loader=MyLoader(filename), submodule_search_locations=submodule_locations) return None
[ "def find_module(cls, fullname, path=None):\n \"\"\"find the module on sys.path or 'path' based on sys.path_hooks and\n sys.path_importer_cache.\n This method is for python2 only\n \"\"\"\n spec = cls.find_spec(fullname, path)\n if spec is None:\n return None\n elif spec.loader is None and spec.submodule_search_locations:\n # Here we need to create a namespace loader to handle namespaces since python2 doesn't...\n return NamespaceLoader2(spec.name, spec.submodule_search_locations)\n else:\n return spec.loader", "def find_spec(cls, fullname, path=None, target=None):\n \"\"\"find the module on sys.path or 'path' based on sys.path_hooks and\n sys.path_importer_cache.\"\"\"\n if path is None:\n path = sys.path\n spec = cls._get_spec(fullname, path, target)\n if spec is None:\n return None\n elif spec.loader is None:\n namespace_path = spec.submodule_search_locations\n if namespace_path:\n # We found at least one namespace path. Return a\n # spec which can create the namespace package.\n spec.origin = 'namespace'\n spec.submodule_search_locations = _NamespacePath(fullname, namespace_path, cls._get_spec)\n return spec\n else:\n return None\n else:\n return spec", "def create_module(self, spec):\n \"\"\"Improve python2 semantics for module creation.\"\"\"\n mod = super(NamespaceLoader2, self).create_module(spec)\n # Set a few properties required by PEP 302\n # mod.__file__ = [p for p in self.path]\n # this will set mod.__repr__ to not builtin... shouldnt break anything in py2...\n # CAREFUL : get_filename present implies the module has ONE location, which is not true with namespaces\n return mod", "def find_loader(module_name)\n if module_name.nil? || EMPTY_STRING == module_name\n # Use the public environment loader\n public_environment_loader\n else\n # TODO : Later check if definition is private, and then add it to private_loader_for_module\n #\n loader = public_loader_for_module(module_name)\n if loader.nil?\n raise Puppet::ParseError, _(\"Internal Error: did not find public loader for module: '%{module_name}'\") % { module_name: module_name }\n end\n loader\n end\n end", "def _get_spec(cls, fullname, path, target=None):\n \"\"\"Find the loader or namespace_path for this module/package name.\"\"\"\n # If this ends up being a namespace package, namespace_path is\n # the list of paths that will become its __path__\n namespace_path = []\n for entry in path:\n if not isinstance(entry, (str, bytes)):\n continue\n finder = cls._path_importer_cache(entry)\n if finder is not None:\n if hasattr(finder, 'find_spec'):\n spec = finder.find_spec(fullname, target)\n else:\n spec = cls._legacy_get_spec(fullname, finder)\n if spec is None:\n continue\n if spec.loader is not None:\n return spec\n portions = spec.submodule_search_locations\n if portions is None:\n raise ImportError('spec missing loader')\n # This is possibly part of a namespace package.\n # Remember these path entries (if any) for when we\n # create a namespace package, and continue iterating\n # on path.\n namespace_path.extend(portions)\n else:\n spec = ModuleSpec(fullname, None)\n spec.submodule_search_locations = namespace_path\n return spec", "def find_module(self, fullname):\n \"\"\"Try to find a loader for the specified module, or the namespace\n package portions. 
Returns loader.\n \"\"\"\n\n spec = self.find_spec(fullname)\n if spec is None:\n return None\n\n # We need to handle the namespace case here for python2\n if spec.loader is None and len(spec.submodule_search_locations):\n spec.loader = NamespaceLoader2(spec.name, spec.submodule_search_locations)\n\n return spec.loader", "def load_module(self, fullname):\n \"\"\"Load the specified module into sys.modules and return it.\n This method is for python2 only, but implemented with backported py3 methods.\n \"\"\"\n\n if fullname in sys.modules:\n mod = sys.modules[fullname]\n self.exec_module(mod)\n # In this case we do not want to remove the module in case of error\n # Ref : https://docs.python.org/3/reference/import.html#loaders\n else:\n try:\n # Retrieving the spec to help creating module properly\n spec = spec_from_loader(fullname, self)\n\n # this will call create_module and also initialize the module properly (like for py3)\n mod = module_from_spec(spec)\n\n # as per https://docs.python.org/3/reference/import.html#loaders\n assert mod.__name__ in sys.modules\n\n self.exec_module(mod)\n # We don't ensure that the import-related module attributes get\n # set in the sys.modules replacement case. Such modules are on\n # their own.\n except Exception as exc:\n # TODO : log exception !\n # as per https://docs.python.org/3/reference/import.html#loaders\n if fullname in sys.modules:\n del sys.modules[fullname]\n raise\n\n return sys.modules[fullname]", "def find_spec(self, fullname, target=None):\n \"\"\"Try to find a spec for the specified module. Returns the\n matching spec, or None if not found.\"\"\"\n is_namespace = False\n tail_module = fullname.rpartition('.')[2]\n\n base_path = os.path.join(self.path, tail_module)\n for suffix, loader_class in self._loaders:\n init_filename = '__init__' + suffix\n init_full_path = os.path.join(base_path, init_filename)\n full_path = base_path + suffix\n if os.path.isfile(init_full_path):\n return self._get_spec(loader_class, fullname, init_full_path, [base_path], target)\n if os.path.isfile(full_path): # maybe we need more checks here (importlib filefinder checks its cache...)\n return self._get_spec(loader_class, fullname, full_path, None, target)\n else:\n # If a namespace package, return the path if we don't\n # find a module in the next section.\n is_namespace = os.path.isdir(base_path)\n\n if is_namespace:\n _verbose_message('possible namespace for {}'.format(base_path))\n spec = ModuleSpec(fullname, None)\n spec.submodule_search_locations = [base_path]\n return spec\n return None", "def module_for_loader(fxn):\n \"\"\"Decorator to handle selecting the proper module for loaders.\n The decorated function is passed the module to use instead of the module\n name. The module passed in to the function is either from sys.modules if\n it already exists or is a new module. 
If the module is new, then __name__\n is set the first argument to the method, __loader__ is set to self, and\n __package__ is set accordingly (if self.is_package() is defined) will be set\n before it is passed to the decorated function (if self.is_package() does\n not work for the module it will be set post-load).\n If an exception is raised and the decorator created the module it is\n subsequently removed from sys.modules.\n The decorator assumes that the decorated function takes the module name as\n the second argument.\n \"\"\"\n warnings.warn('The import system now takes care of this automatically.',\n DeprecationWarning, stacklevel=2)\n @functools.wraps(fxn)\n def module_for_loader_wrapper(self, fullname, *args, **kwargs):\n with _module_to_load(fullname) as module:\n module.__loader__ = self\n try:\n is_package = self.is_package(fullname)\n except (ImportError, AttributeError):\n pass\n else:\n if is_package:\n module.__package__ = fullname\n else:\n module.__package__ = fullname.rpartition('.')[0]\n # If __package__ was not set above, __import__() will do it later.\n return fxn(self, module, *args, **kwargs)\n\n return module_for_loader_wrapper", "function (name, specs) {\n \"use strict\";\n \n var path = specs.path;\n var type = (specs.type) ? specs.type : 'control';\n \n Object.defineProperty(this, 'name', {\n 'get': function () {\n return name;\n }\n });\n \n if (path.substr(0, 7) !== 'http://' || path.substr(0, 8) !== 'https://') {\n \n if (path.substr(0, 12) === 'application/') {\n \n // shift the \"application\" string\n path = path.split('/')\n path.shift();\n var componentPath = path.join('/');\n path = beyond.requireConfig.paths['application'] + '/' + componentPath + '/' + type + '.html';\n \n }\n else if (path.substr(0, 10) === 'libraries/') {\n \n var original = path;\n \n path = path.split('/');\n \n // shift the \"libraries\" string\n path.shift();\n // shift the library name\n var libraryName = path.shift();\n \n path = path.join('/');\n \n // search the library path\n var libraryPath = beyond.requireConfig.paths['libraries/' + libraryName];\n if (!libraryPath) {\n console.warn('library ' + libraryName + ' does not exist, check the module \"' + original + '\"');\n return;\n }\n \n path = libraryPath + '/' + path;\n \n var multilanguage = beyond.modules.multilanguage.get(original);\n if (multilanguage && multilanguage.indexOf(type) !== -1) {\n path += '/' + type + '/' + beyond.params.language + '.html';\n }\n else {\n path += '/' + type + '.html';\n }\n \n }\n else {\n \n var vendor;\n vendor = beyond.requireConfig.paths['libraries/vendor'];\n vendor = vendor + '/static/bower_components/';\n \n path = vendor + path;\n \n }\n \n }\n \n Object.defineProperty(this, 'path', {\n 'get': function () {\n return path;\n }\n });\n \n \n var loaded;\n Object.defineProperty(this, 'loaded', {\n 'get': function () {\n return !!loaded;\n }\n });\n \n var loading;\n Object.defineProperty(this, 'loading', {\n 'get': function () {\n return !!loading;\n }\n });\n \n var callbacks = [];\n \n this.load = function (callback) {\n \n if (loaded) {\n callback();\n return;\n }\n \n callbacks.push(callback);\n \n if (loading) return;\n \n loading = true;\n window.Polymer.Base.importHref(path, function () {\n \n loading = false;\n loaded = true;\n \n for (var i in callbacks) {\n callbacks[i]();\n }\n callbacks = [];\n \n });\n \n };\n \n }", "function polyfill(module, require) {\n if (apply(hasOwnProperty, require, [ 'moduleKeys' ])) {\n return;\n }\n let moduleIdentifier = `${ module.filename 
}`;\n if (sep !== '/') {\n moduleIdentifier = apply(replace, moduleIdentifier, [ sepGlobalPattern, '/' ]);\n if (moduleIdentifier[0] !== '/') {\n moduleIdentifier = `/${ moduleIdentifier }`;\n }\n }\n\n const keysObj = makeModuleKeys(moduleIdentifier);\n require.moduleKeys = keysObj;\n\n const { publicKey } = keysObj;\n // Export the public key.\n module.exports.publicKey = publicKey;\n module.exports[publicKeySymbol] = publicKey;\n // If the module body overrides exports, try to\n // sneak it in there too.\n let { exports, loaded } = module;\n delete module.exports;\n const properties = {\n exports: {\n enumerable: true,\n configurable: true,\n get() {\n return exports;\n },\n set(newExports) {\n exports = newExports;\n if (newExports &&\n (typeof newExports === 'object' ||\n typeof newExports === 'function')) {\n if (!apply(hasOwnProperty, exports, [ 'publicKey' ])) {\n try {\n module.exports.publicKey = publicKey;\n } catch (exc) {\n // Oh well. We tried our best.\n }\n }\n if (!apply(hasOwnProperty, exports, [ publicKeySymbol ])) {\n try {\n module.exports[publicKeySymbol] = publicKey;\n } catch (exc) {\n // Oh well. We tried our best.\n }\n }\n }\n },\n },\n };\n try {\n delete module.loaded;\n\n properties.loaded = {\n enumerable: true,\n configurable: true,\n get() {\n return loaded;\n },\n set(newLoaded) {\n loaded = newLoaded;\n if (loaded === true) {\n // Stop virtualizing\n try {\n delete module.exports;\n module.exports = exports;\n } catch (exc) {\n // Best effort.\n }\n try {\n delete module.loaded;\n module.loaded = loaded;\n } catch (exc) {\n // Best effort.\n }\n }\n },\n };\n } catch (exc) {\n // Webpack locks this down. Good job, webpack!\n }\n defineProperties(module, properties);\n}", "def find_spec(self, fullname, path, target=None):\n \"\"\"\n Claims modules that are under ipynb.fs\n \"\"\"\n if fullname.startswith(self.package_prefix):\n for path in self._get_paths(fullname):\n if os.path.exists(path):\n return ModuleSpec(\n name=fullname,\n loader=self.loader_class(fullname, path),\n origin=path,\n is_package=(path.endswith('__init__.ipynb') or path.endswith('__init__.py')),\n )" ]
[ 0.747175931930542, 0.7420939207077026, 0.7406797409057617, 0.7373334765434265, 0.7310171723365784, 0.7296516299247742, 0.7245499491691589, 0.7207825183868408, 0.7194115519523621, 0.7193523645401001, 0.7174927592277527, 0.7132523655891418 ]
import the source code, transform it before executing it so that it is known to Python.
def exec_module(self, module): '''import the source code, transform it before executing it so that it is known to Python.''' global MAIN_MODULE_NAME if module.__name__ == MAIN_MODULE_NAME: module.__name__ = "__main__" MAIN_MODULE_NAME = None with open(self.filename) as f: source = f.read() if transforms.transformers: source = transforms.transform(source) else: for line in source.split('\n'): if transforms.FROM_EXPERIMENTAL.match(line): ## transforms.transform will extract all such relevant ## lines and add them all relevant transformers source = transforms.transform(source) break exec(source, vars(module))
[ "def transform(source):\n '''Used to convert the source code, making use of known transformers.\n\n \"transformers\" are modules which must contain a function\n\n transform_source(source)\n\n which returns a tranformed source.\n Some transformers (for example, those found in the standard library\n module lib2to3) cannot cope with non-standard syntax; as a result, they\n may fail during a first attempt. We keep track of all failing\n transformers and keep retrying them until either they all succeeded\n or a fixed set of them fails twice in a row.\n '''\n source = extract_transformers_from_source(source)\n\n # Some transformer fail when multiple non-Python constructs\n # are present. So, we loop multiple times keeping track of\n # which transformations have been unsuccessfully performed.\n not_done = transformers\n while True:\n failed = {}\n for name in not_done:\n tr_module = import_transformer(name)\n try:\n source = tr_module.transform_source(source)\n except Exception as e:\n failed[name] = tr_module\n # from traceback import print_exc\n # print(\"Unexpected exception in transforms.transform\",\n # e.__class__.__name__)\n # print_exc()\n\n if not failed:\n break\n # Insanity is doing the same Tting over and overaAgain and\n # expecting different results ...\n # If the exact same set of transformations are not performed\n # twice in a row, there is no point in trying out a third time.\n if failed == not_done:\n print(\"Warning: the following transforms could not be done:\")\n for key in failed:\n print(key)\n break\n not_done = failed # attempt another pass\n\n return source", "def import_or_die(module_name, entrypoint_names):\n '''\n Import user code; return reference to usercode function.\n\n (str) -> function reference\n '''\n log_debug(\"Importing {}\".format(module_name))\n module_name = os.path.abspath(module_name)\n if module_name.endswith('.py'):\n module_name,ext = os.path.splitext(module_name)\n modname = os.path.basename(module_name)\n dirname = os.path.dirname(module_name)\n if dirname and dirname not in sys.path:\n sys.path.append(dirname)\n\n # first, try to reload code\n if modname in sys.modules:\n user_module = sys.modules.get(modname)\n user_module = importlib.reload(user_module)\n # if it isn't in sys.modules, load it for the first time, or\n # try to.\n else:\n try:\n mypaths = [ x for x in sys.path if (\"Cellar\" not in x and \"packages\" not in x)]\n # print(\"Loading {} from {} ({})\".format(modname, dirname, mypaths))\n # user_module = importlib.import_module(modname)\n user_module = importlib.__import__(modname)\n except ImportError as e:\n log_failure(\"Fatal error: couldn't import module (error: {}) while executing {}\".format(str(e), modname))\n raise ImportError(e)\n\n # if there aren't any functions to call into, then the caller\n # just wanted the module/code to be imported, and that's it.\n if not entrypoint_names:\n return\n\n existing_names = dir(user_module)\n for method in entrypoint_names:\n if method in existing_names:\n return getattr(user_module, method)\n\n if len(entrypoint_names) > 1:\n entrypoints = \"one of {}\".format(', '.join(entrypoint_names))\n else:\n entrypoints = entrypoint_names[0]\n raise ImportError(\"Required entrypoint function or symbol ({}) not found in your code\".format(entrypoints))", "def load_code(name, base_path=None, recurse=False):\n \"\"\"Load executable code from a URL or a path\"\"\"\n if '/' in name:\n return load_location(name, base_path, module=False)\n\n return importer.import_code(name, base_path, 
recurse=recurse)", "def source_to_code(self, nodes, path, *, _optimize=-1):\n \"\"\"* Convert the current source to ast \n * Apply ast transformers.\n * Compile the code.\"\"\"\n if not isinstance(nodes, ast.Module):\n nodes = ast.parse(nodes, self.path)\n if self._markdown_docstring:\n nodes = update_docstring(nodes)\n return super().source_to_code(\n ast.fix_missing_locations(self.visit(nodes)), path, _optimize=_optimize\n )", "def handle_import(self, name, compilation, rule):\n \"\"\"Implementation of the core Sass import mechanism, which just looks\n for files on disk.\n \"\"\"\n # TODO this is all not terribly well-specified by Sass. at worst,\n # it's unclear how far \"upwards\" we should be allowed to go. but i'm\n # also a little fuzzy on e.g. how relative imports work from within a\n # file that's not actually in the search path.\n # TODO i think with the new origin semantics, i've made it possible to\n # import relative to the current file even if the current file isn't\n # anywhere in the search path. is that right?\n path = PurePosixPath(name)\n\n search_exts = list(compilation.compiler.dynamic_extensions)\n if path.suffix and path.suffix in search_exts:\n basename = path.stem\n else:\n basename = path.name\n relative_to = path.parent\n search_path = [] # tuple of (origin, start_from)\n if relative_to.is_absolute():\n relative_to = PurePosixPath(*relative_to.parts[1:])\n elif rule.source_file.origin:\n # Search relative to the current file first, only if not doing an\n # absolute import\n search_path.append((\n rule.source_file.origin,\n rule.source_file.relpath.parent / relative_to,\n ))\n search_path.extend(\n (origin, relative_to)\n for origin in compilation.compiler.search_path\n )\n\n for prefix, suffix in product(('_', ''), search_exts):\n filename = prefix + basename + suffix\n for origin, relative_to in search_path:\n relpath = relative_to / filename\n # Lexically (ignoring symlinks!) eliminate .. from the part\n # of the path that exists within Sass-space. pathlib\n # deliberately doesn't do this, but os.path does.\n relpath = PurePosixPath(os.path.normpath(str(relpath)))\n\n if rule.source_file.key == (origin, relpath):\n # Avoid self-import\n # TODO is this what ruby does?\n continue\n\n path = origin / relpath\n if not path.exists():\n continue\n\n # All good!\n # TODO if this file has already been imported, we'll do the\n # source preparation twice. make it lazy.\n return SourceFile.read(origin, relpath)", "def visit_Module(self, node):\n \"\"\"\n When we normalize call, we need to add correct import for method\n to function transformation.\n\n a.max()\n\n for numpy array will become:\n\n numpy.max(a)\n\n so we have to import numpy.\n \"\"\"\n self.skip_functions = True\n self.generic_visit(node)\n self.skip_functions = False\n self.generic_visit(node)\n new_imports = self.to_import - self.globals\n imports = [ast.Import(names=[ast.alias(name=mod[17:], asname=mod)])\n for mod in new_imports]\n node.body = imports + node.body\n self.update |= bool(imports)\n return node", "def transform_flask_from_import(node):\n '''Translates a flask.ext from-style import into a non-magical import.\n\n Translates:\n from flask.ext import wtf, bcrypt as fcrypt\n Into:\n import flask_wtf as wtf, flask_bcrypt as fcrypt\n\n '''\n new_names = []\n # node.names is a list of 2-tuples. 
Each tuple consists of (name, as_name).\n # So, the import would be represented as:\n #\n # from flask.ext import wtf as ftw, admin\n #\n # node.names = [('wtf', 'ftw'), ('admin', None)]\n for (name, as_name) in node.names:\n actual_module_name = 'flask_{}'.format(name)\n new_names.append((actual_module_name, as_name or name))\n\n new_node = nodes.Import()\n copy_node_info(node, new_node)\n new_node.names = new_names\n mark_transformed(new_node)\n return new_node", "def recompile(filename):\n \"\"\"Create a .pyc by disassembling the file and assembling it again, printing\n a message that the reassembled file was loaded.\"\"\"\n # Most of the code here based on the compile.py module.\n import os\n import imp\n import marshal\n import struct\n\n f = open(filename, 'U')\n try:\n timestamp = long(os.fstat(f.fileno()).st_mtime)\n except AttributeError:\n timestamp = long(os.stat(filename).st_mtime)\n codestring = f.read()\n f.close()\n if codestring and codestring[-1] != '\\n':\n codestring = codestring + '\\n'\n try:\n codeobject = compile(codestring, filename, 'exec')\n except SyntaxError:\n print >> sys.stderr, \"Skipping %s - syntax error.\" % filename\n return\n cod = Code.from_code(codeobject)\n message = \"reassembled %r imported.\\n\" % filename\n cod.code[:0] = [ # __import__('sys').stderr.write(message)\n (LOAD_GLOBAL, '__import__'),\n (LOAD_CONST, 'sys'),\n (CALL_FUNCTION, 1),\n (LOAD_ATTR, 'stderr'),\n (LOAD_ATTR, 'write'),\n (LOAD_CONST, message),\n (CALL_FUNCTION, 1),\n (POP_TOP, None),\n ]\n codeobject2 = cod.to_code()\n fc = open(filename+'c', 'wb')\n fc.write('\\0\\0\\0\\0')\n fc.write(struct.pack('<l', timestamp))\n marshal.dump(codeobject2, fc)\n fc.flush()\n fc.seek(0, 0)\n fc.write(imp.get_magic())\n fc.close()", "def transform_import(self, node, results):\n \"\"\"Transform for the basic import case. Replaces the old\n import name with a comma separated list of its\n replacements.\n \"\"\"\n import_mod = results.get(\"module\")\n pref = import_mod.prefix\n\n names = []\n\n # create a Node list of the replacement modules\n for name in MAPPING[import_mod.value][:-1]:\n names.extend([Name(name[0], prefix=pref), Comma()])\n names.append(Name(MAPPING[import_mod.value][-1][0], prefix=pref))\n import_mod.replace(names)", "def _exec(self, is_module, entry_point_fn, module_name, file, globals, locals):\n '''\n This function should have frames tracked by unhandled exceptions (the `_exec` name is important).\n '''\n if not is_module:\n pydev_imports.execfile(file, globals, locals) # execute the script\n else:\n # treat ':' as a separator between module and entry point function\n # if there is no entry point we run we same as with -m switch. 
Otherwise we perform\n # an import and execute the entry point\n if entry_point_fn:\n mod = __import__(module_name, level=0, fromlist=[entry_point_fn], globals=globals, locals=locals)\n func = getattr(mod, entry_point_fn)\n func()\n else:\n # Run with the -m switch\n import runpy\n if hasattr(runpy, '_run_module_as_main'):\n # Newer versions of Python actually use this when the -m switch is used.\n if sys.version_info[:2] <= (2, 6):\n runpy._run_module_as_main(module_name, set_argv0=False)\n else:\n runpy._run_module_as_main(module_name, alter_argv=False)\n else:\n runpy.run_module(module_name)\n return globals", "def gen_source(self, ast, name, customize, is_lambda=False, returnNone=False):\n \"\"\"convert SyntaxTree to Python source code\"\"\"\n\n rn = self.return_none\n self.return_none = returnNone\n old_name = self.name\n self.name = name\n # if code would be empty, append 'pass'\n if len(ast) == 0:\n self.println(self.indent, 'pass')\n else:\n self.customize(customize)\n if is_lambda:\n self.write(self.traverse(ast, is_lambda=is_lambda))\n else:\n self.text = self.traverse(ast, is_lambda=is_lambda)\n self.println(self.text)\n self.name = old_name\n self.return_none = rn", "def extract_transformers_from_source(source):\n '''Scan a source for lines of the form\n from __experimental__ import transformer1 [,...]\n identifying transformers to be used. Such line is passed to the\n add_transformer function, after which it is removed from the\n code to be executed.\n '''\n lines = source.split('\\n')\n linenumbers = []\n for number, line in enumerate(lines):\n if FROM_EXPERIMENTAL.match(line):\n add_transformers(line)\n linenumbers.insert(0, number)\n\n # drop the \"fake\" import from the source code\n for number in linenumbers:\n del lines[number]\n return '\\n'.join(lines)" ]
[ 0.792877733707428, 0.7396855354309082, 0.7372424602508545, 0.7340986728668213, 0.7337856888771057, 0.7240535616874695, 0.7217997908592224, 0.716796875, 0.7163119316101074, 0.715809166431427, 0.712336003780365, 0.7094584703445435 ]
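The `find_spec` method shown a few rows earlier and this `exec_module` are the two halves of a PEP 451 import hook: a meta path finder that locates the file and a loader that transforms and executes its source. For such a hook to take effect it has to be registered on `sys.meta_path`; the toy, self-contained sketch below only illustrates where that registration happens and is not the package's own registration code:

    import sys

    class NoisyFinder:
        # Toy meta path finder: claims nothing, but shows the hook point where a
        # custom find_spec (like the one above) plugs into the import system.
        def find_spec(self, fullname, path, target=None):
            print('import requested:', fullname)
            return None  # defer to the normal import machinery

    sys.meta_path.insert(0, NoisyFinder())
    import csv  # triggers the finder for any module not already in sys.modules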
Iterate through multiple lists or arrays of equal size
def _izip(*iterables): """ Iterate through multiple lists or arrays of equal size """ # This izip routine is from itertools # izip('ABCD', 'xy') --> Ax By iterators = map(iter, iterables) while iterators: yield tuple(map(next, iterators))
[ "public static void iterate(int dimension, int n, int[] size, int[] res, int dimension2, int n2, int[] size2,\n int[] res2, CoordinateFunction func) {\n if (dimension >= n || dimension2 >= n2) {\n // stop clause\n func.process(ArrayUtil.toLongArray(res), ArrayUtil.toLongArray(res2));\n return;\n }\n\n if (size2.length != size.length) {\n if (dimension >= size.length)\n return;\n for (int i = 0; i < size[dimension]; i++) {\n if (dimension2 >= size2.length)\n break;\n for (int j = 0; j < size2[dimension2]; j++) {\n res[dimension] = i;\n res2[dimension2] = j;\n iterate(dimension + 1, n, size, res, dimension2 + 1, n2, size2, res2, func);\n }\n\n }\n } else {\n if (dimension >= size.length)\n return;\n\n for (int i = 0; i < size[dimension]; i++) {\n for (int j = 0; j < size2[dimension2]; j++) {\n if (dimension2 >= size2.length)\n break;\n res[dimension] = i;\n res2[dimension2] = j;\n iterate(dimension + 1, n, size, res, dimension2 + 1, n2, size2, res2, func);\n }\n\n }\n }\n }", "def _checkDimensionsListLike(arrays):\n \"\"\"Check that each array in a list of arrays has the same size.\n\n \"\"\"\n dim1 = len(arrays)\n dim2, dim3 = arrays[0].shape\n for aa in range(1, dim1):\n dim2_aa, dim3_aa = arrays[aa].shape\n if (dim2_aa != dim2) or (dim3_aa != dim3):\n raise _error.InvalidError(_MDPERR[\"obj_square\"])\n return dim1, dim2, dim3", "function forEach (arrayLike, iteratee) {\n for (var i = 0, len = _toArrayLength(arrayLike.length); i < len; i++) {\n iteratee(arrayLike[i], i, arrayLike);\n }\n}", "def chunks(iterable, size):\n \"\"\"\n Splits a very large list into evenly sized chunks.\n Returns an iterator of lists that are no more than the size passed in.\n \"\"\"\n it = iter(iterable)\n item = list(islice(it, size))\n while item:\n yield item\n item = list(islice(it, size))", "def minibatch(items, size=8):\n \"\"\"Iterate over batches of items. 
`size` may be an iterator,\n so that batch-size can vary on each step.\n \"\"\"\n if isinstance(size, int):\n size_ = itertools.repeat(size)\n else:\n size_ = size\n items = iter(items)\n while True:\n batch_size = next(size_)\n batch = list(itertools.islice(items, int(batch_size)))\n if len(batch) == 0:\n break\n yield list(batch)", "function eachOfArrayLike(coll, iteratee, callback) {\n callback = once(callback);\n var index = 0,\n completed = 0,\n {length} = coll,\n canceled = false;\n if (length === 0) {\n callback(null);\n }\n\n function iteratorCallback(err, value) {\n if (err === false) {\n canceled = true\n }\n if (canceled === true) return\n if (err) {\n callback(err);\n } else if ((++completed === length) || value === breakLoop) {\n callback(null);\n }\n }\n\n for (; index < length; index++) {\n iteratee(coll[index], index, onlyOnce(iteratorCallback));\n }\n}", "function(_list){\n var _result = 0;\n _u._$forEach(\n _list,function(_size){\n if (!_size) return;\n if (!_result){\n _result = _size;\n }else{\n _result = Math.min(_result,_size);\n }\n }\n );\n return _result;\n }", "def ibatch(iterable, size):\n \"\"\"Yield a series of batches from iterable, each size elements long.\"\"\"\n source = iter(iterable)\n while True:\n batch = itertools.islice(source, size)\n yield itertools.chain([next(batch)], batch)", "def batch(items, size):\n \"\"\"Batches a list into a list of lists, with sub-lists sized by a specified\n batch size.\"\"\"\n return [items[x:x + size] for x in xrange(0, len(items), size)]", "function forEach(srcObj, iteratee) {\n if (!srcObj) return srcObj;\n if (!isFun(iteratee)) throwErr('fun');\n var length = srcObj.length;\n if (length && length >= 0 && length < Math.pow(2, 53) - 1) {\n for (var i = 0; i < length; i++) iteratee(srcObj[i], i);\n } else {\n var ks = keys(srcObj), i = -1;\n while (++ i < ks.length) iteratee(srcObj[ks[i]], ks[i]);\n }\n return srcObj;\n }", "def iter_chunks(l, size):\n \"\"\"\n Returns a generator containing chunks of *size* of a list, integer or generator *l*. A *size*\n smaller than 1 results in no chunking at all.\n \"\"\"\n if isinstance(l, six.integer_types):\n l = six.moves.range(l)\n\n if is_lazy_iterable(l):\n if size < 1:\n yield list(l)\n else:\n chunk = []\n for elem in l:\n if len(chunk) < size:\n chunk.append(elem)\n else:\n yield chunk\n chunk = [elem]\n else:\n if chunk:\n yield chunk\n\n else:\n if size < 1:\n yield l\n else:\n for i in six.moves.range(0, len(l), size):\n yield l[i:i + size]", "function(xs, f) {\n for (var i = 0, len = xs.length; i < len; i++) {\n var x = xs[i];\n f(x, i, xs);\n }\n }" ]
[ 0.7043029069900513, 0.7027319073677063, 0.6970442533493042, 0.6964612007141113, 0.6948021054267883, 0.6942374110221863, 0.6927012801170349, 0.6896812319755554, 0.6895675659179688, 0.6884126663208008, 0.6859806180000305, 0.6798259019851685 ]
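As written, `_izip` is the classic Python 2 itertools `izip` recipe: it relies on `map` returning a list and on a `StopIteration` from `next()` ending the generator, so it does not work as intended on Python 3 (it yields the first tuple and then empty tuples). Under Python 3 the built-in `zip` gives the pairing behaviour the docstring describes; a quick standalone check, not part of the source above:

    # zip pairs elements positionally and stops at the shorter input,
    # matching the documented izip('ABCD', 'xy') --> Ax By behaviour.
    pairs = list(zip('ABCD', 'xy'))
    print(pairs)  # [('A', 'x'), ('B', 'y')]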
Check and convert any input scalar or array to numpy array
def _checkinput(zi, Mi, z=False, verbose=None): """ Check and convert any input scalar or array to numpy array """ # How many halo redshifts provided? zi = np.array(zi, ndmin=1, dtype=float) # How many halo masses provided? Mi = np.array(Mi, ndmin=1, dtype=float) # Check the input sizes for zi and Mi make sense, if not then exit unless # one axis is length one, then replicate values to the size of the other if (zi.size > 1) and (Mi.size > 1): if(zi.size != Mi.size): print("Error ambiguous request") print("Need individual redshifts for all haloes provided ") print("Or have all haloes at same redshift ") return(-1) elif (zi.size == 1) and (Mi.size > 1): if verbose: print("Assume zi is the same for all Mi halo masses provided") # Replicate redshift for all halo masses zi = np.ones_like(Mi)*zi[0] elif (Mi.size == 1) and (zi.size > 1): if verbose: print("Assume Mi halo masses are the same for all zi provided") # Replicate redshift for all halo masses Mi = np.ones_like(zi)*Mi[0] else: if verbose: print("A single Mi and zi provided") # Very simple test for size / type of incoming array # just in case numpy / list given if z is False: # Didn't pass anything, set zi = z lenzout = 1 else: # If something was passed, convert to 1D NumPy array z = np.array(z, ndmin=1, dtype=float) lenzout = z.size return(zi, Mi, z, zi.size, Mi.size, lenzout)
[ "def _check_inputs(z, m):\n \"\"\"Check inputs are arrays of same length or array and a scalar.\"\"\"\n try:\n nz = len(z)\n z = np.array(z)\n except TypeError:\n z = np.array([z])\n nz = len(z)\n try:\n nm = len(m)\n m = np.array(m)\n except TypeError:\n m = np.array([m])\n nm = len(m)\n\n if (z < 0).any() or (m < 0).any():\n raise ValueError('z and m must be positive')\n\n if nz != nm and nz > 1 and nm > 1:\n raise ValueError('z and m arrays must be either equal in length, \\\n OR of different length with one of length 1.')\n\n else:\n if type(z) != np.ndarray:\n z = np.array(z)\n if type(m) != np.ndarray:\n m = np.array(m)\n\n return z, m", "def _check_scalar(value):\n '''If value is a 0-dimensional array, returns the contents of value.\n Otherwise, returns value.\n '''\n if isinstance(value, np.ndarray):\n if value.ndim == 0:\n # We have a 0-dimensional array\n return value[None][0]\n return value", "def _sanitize_values(arr):\n \"\"\"\n return an ndarray for our input,\n in a platform independent manner\n \"\"\"\n\n if hasattr(arr, 'values'):\n arr = arr.values\n else:\n\n # scalar\n if is_scalar(arr):\n arr = [arr]\n\n # ndarray\n if isinstance(arr, np.ndarray):\n pass\n\n elif is_list_like(arr) and len(arr) > 0:\n arr = maybe_convert_platform(arr)\n\n else:\n arr = np.asarray(arr)\n\n return arr", "def check_array_or_list(input):\n \"\"\"Return 1D ndarray, if input can be converted and elements are\n non-negative.\"\"\"\n if type(input) != np.ndarray:\n if type(input) == list:\n output = np.array(input)\n else:\n raise TypeError('Expecting input type as ndarray or list.')\n else:\n output = input\n\n if output.ndim != 1:\n raise ValueError('Input array must have 1 dimension.')\n\n if np.sum(output < 0.) > 0:\n raise ValueError(\"Input array values cannot be negative.\")\n\n return output", "def check_inputs_not_arrays(func):\n \"\"\"\n Decorator to check inputs and throw TypeError if any of the inputs are arrays.\n Methods potentially return with silent errors if inputs are not checked.\n \"\"\"\n @wraps(func)\n def func_wrapper(self, R, z, phi, t):\n if (hasattr(R, '__len__') and len(R) > 1) \\\n or (hasattr(z, '__len__') and len(z) > 1) \\\n or (hasattr(phi, '__len__') and len(phi) > 1) \\\n or (hasattr(t, '__len__') and len(t) > 1):\n raise TypeError('Methods in SpiralArmsPotential do not accept array inputs. Please input scalars.')\n return func(self, R, z, phi, t)\n\n return func_wrapper", "def ensure_ndarray(ndarray_or_adjusted_array):\n \"\"\"\n Return the input as a numpy ndarray.\n\n This is a no-op if the input is already an ndarray. 
If the input is an\n adjusted_array, this extracts a read-only view of its internal data buffer.\n\n Parameters\n ----------\n ndarray_or_adjusted_array : numpy.ndarray | zipline.data.adjusted_array\n\n Returns\n -------\n out : The input, converted to an ndarray.\n \"\"\"\n if isinstance(ndarray_or_adjusted_array, ndarray):\n return ndarray_or_adjusted_array\n elif isinstance(ndarray_or_adjusted_array, AdjustedArray):\n return ndarray_or_adjusted_array.data\n else:\n raise TypeError(\n \"Can't convert %s to ndarray\" %\n type(ndarray_or_adjusted_array).__name__\n )", "def check_npndarray(val, dtype=None, writeable=True, verbose=True):\n \"\"\"Check if input object is a numpy array.\n\n Parameters\n ----------\n val : np.ndarray\n Input object\n\n \"\"\"\n\n if not isinstance(val, np.ndarray):\n raise TypeError('Input is not a numpy array.')\n\n if ((not isinstance(dtype, type(None))) and\n (not np.issubdtype(val.dtype, dtype))):\n raise TypeError('The numpy array elements are not of type: {}'\n ''.format(dtype))\n\n if not writeable and verbose and val.flags.writeable:\n warn('Making input data immutable.')\n\n val.flags.writeable = writeable", "def check_array(array, accept_sparse=None, dtype=\"numeric\", order=None,\n copy=False, force_all_finite=True, ensure_2d=True,\n allow_nd=False, ensure_min_samples=1, ensure_min_features=1):\n \"\"\"Input validation on an array, list, sparse matrix or similar.\n\n By default, the input is converted to an at least 2nd numpy array.\n If the dtype of the array is object, attempt converting to float,\n raising on failure.\n\n Parameters\n ----------\n array : object\n Input object to check / convert.\n\n accept_sparse : string, list of string or None (default=None)\n String[s] representing allowed sparse matrix formats, such as 'csc',\n 'csr', etc. None means that sparse matrix input will raise an error.\n If the input is sparse but not in the allowed format, it will be\n converted to the first listed format.\n\n dtype : string, type or None (default=\"numeric\")\n Data type of result. If None, the dtype of the input is preserved.\n If \"numeric\", dtype is preserved unless array.dtype is object.\n\n order : 'F', 'C' or None (default=None)\n Whether an array will be forced to be fortran or c-style.\n\n copy : boolean (default=False)\n Whether a forced copy will be triggered. If copy=False, a copy might\n be triggered by a conversion.\n\n force_all_finite : boolean (default=True)\n Whether to raise an error on np.inf and np.nan in X.\n\n ensure_2d : boolean (default=True)\n Whether to make X at least 2d.\n\n allow_nd : boolean (default=False)\n Whether to allow X.ndim > 2.\n\n ensure_min_samples : int (default=1)\n Make sure that the array has a minimum number of samples in its first\n axis (rows for a 2D array). Setting to 0 disables this check.\n\n ensure_min_features : int (default=1)\n Make sure that the 2D array has some minimum number of features\n (columns). The default value of 1 rejects empty datasets.\n This check is only enforced when the input data has effectively 2\n dimensions or is originally 1D and ``ensure_2d`` is True. 
Setting to 0\n disables this check.\n\n Returns\n -------\n X_converted : object\n The converted and validated X.\n \"\"\"\n if isinstance(accept_sparse, str):\n accept_sparse = [accept_sparse]\n\n # store whether originally we wanted numeric dtype\n dtype_numeric = dtype == \"numeric\"\n\n if sp.issparse(array):\n if dtype_numeric:\n dtype = None\n array = _ensure_sparse_format(array, accept_sparse, dtype, order,\n copy, force_all_finite)\n else:\n if ensure_2d:\n array = np.atleast_2d(array)\n if dtype_numeric:\n if hasattr(array, \"dtype\") and getattr(array.dtype, \"kind\", None) == \"O\":\n # if input is object, convert to float.\n dtype = np.float64\n else:\n dtype = None\n array = np.array(array, dtype=dtype, order=order, copy=copy)\n # make sure we actually converted to numeric:\n if dtype_numeric and array.dtype.kind == \"O\":\n array = array.astype(np.float64)\n if not allow_nd and array.ndim >= 3:\n raise ValueError(\"Found array with dim %d. Expected <= 2\" %\n array.ndim)\n if force_all_finite:\n _assert_all_finite(array)\n\n shape_repr = _shape_repr(array.shape)\n if ensure_min_samples > 0:\n n_samples = _num_samples(array)\n if n_samples < ensure_min_samples:\n raise ValueError(\"Found array with %d sample(s) (shape=%s) while a\"\n \" minimum of %d is required.\"\n % (n_samples, shape_repr, ensure_min_samples))\n\n if ensure_min_features > 0 and array.ndim == 2:\n n_features = array.shape[1]\n if n_features < ensure_min_features:\n raise ValueError(\"Found array with %d feature(s) (shape=%s) while\"\n \" a minimum of %d is required.\"\n % (n_features, shape_repr, ensure_min_features))\n return array", "def check_array(array):\n \"Converts to flattened numpy arrays and ensures its not empty.\"\n\n if len(array) < 1:\n raise ValueError('Input array is empty! Must have atleast 1 element.')\n\n return np.ma.masked_invalid(array).flatten()", "def _check_array(self, X, **kwargs):\n \"\"\"Validate the data arguments X and y.\n\n By default, NumPy arrays are converted to 1-block dask arrays.\n\n Parameters\n ----------\n X, y : array-like\n \"\"\"\n if isinstance(X, np.ndarray):\n X = da.from_array(X, X.shape)\n X = check_array(X, **kwargs)\n return X", "def _check_var(var, dtype, ndmin, name, shape=None, shape2=None):\n r\"\"\"Return variable as array of dtype, ndmin; shape-checked.\"\"\"\n if var is None:\n raise ValueError\n var = np.array(var, dtype=dtype, copy=True, ndmin=ndmin)\n if shape:\n _check_shape(var, name, shape, shape2)\n return var", "def prepare_input_data(self, X):\n \"\"\"\n Check to make sure that the input matrix and its mask of missing\n values are valid. Returns X and missing mask.\n \"\"\"\n X = check_array(X, force_all_finite=False)\n if X.dtype != \"f\" and X.dtype != \"d\":\n X = X.astype(float)\n\n self._check_input(X)\n missing_mask = np.isnan(X)\n self._check_missing_value_mask(missing_mask)\n return X, missing_mask" ]
[ 0.765717625617981, 0.7596739530563354, 0.7529251575469971, 0.7414947152137756, 0.7377985715866089, 0.729101300239563, 0.7281977534294128, 0.7256779074668884, 0.7240608930587769, 0.7231460213661194, 0.7225255966186523, 0.7208993434906006 ]
Find cosmological parameters for named cosmo in cosmology.py list
def getcosmo(cosmology): """ Find cosmological parameters for named cosmo in cosmology.py list """ defaultcosmologies = {'dragons': cg.DRAGONS(), 'wmap1': cg.WMAP1_Mill(), 'wmap3': cg.WMAP3_ML(), 'wmap5': cg.WMAP5_mean(), 'wmap7': cg.WMAP7_ML(), 'wmap9': cg.WMAP9_ML(), 'wmap1_lss': cg.WMAP1_2dF_mean(), 'wmap3_mean': cg.WMAP3_mean(), 'wmap5_ml': cg.WMAP5_ML(), 'wmap5_lss': cg.WMAP5_BAO_SN_mean(), 'wmap7_lss': cg.WMAP7_BAO_H0_mean(), 'planck13': cg.Planck_2013(), 'planck15': cg.Planck_2015()} if isinstance(cosmology, dict): # User providing their own variables cosmo = cosmology if 'A_scaling' not in cosmology.keys(): A_scaling = getAscaling(cosmology, newcosmo=True) cosmo.update({'A_scaling': A_scaling}) # Add extra variables by hand that cosmolopy requires # note that they aren't used (set to zero) for paramnames in cg.WMAP5_mean().keys(): if paramnames not in cosmology.keys(): cosmo.update({paramnames: 0}) elif cosmology.lower() in defaultcosmologies.keys(): # Load by name of cosmology instead cosmo = defaultcosmologies[cosmology.lower()] A_scaling = getAscaling(cosmology) cosmo.update({'A_scaling': A_scaling}) else: print("You haven't passed a dict of cosmological parameters ") print("OR a recognised cosmology, you gave %s" % (cosmology)) # No idea why this has to be done by hand but should be O_k = 0 cosmo = cp.distance.set_omega_k_0(cosmo) # Use the cosmology as **cosmo passed to cosmolopy routines return(cosmo)
[ "def Planck_2015(flat=False, extras=True):\n \"\"\"Planck 2015 XII: Cosmological parameters Table 4\n column Planck TT, TE, EE + lowP + lensing + ext\n from Ade et al. (2015) A&A in press (arxiv:1502.01589v1)\n\n Parameters\n ----------\n\n flat: boolean\n\n If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0\n = 0 exactly. Also sets omega_k_0 = 0 explicitly.\n\n extras: boolean\n\n If True, sets neutrino number N_nu = 0, neutrino density\n omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.\n\n \"\"\"\n omega_b_0 = 0.02230/(0.6774**2)\n cosmo = {'omega_b_0': omega_b_0,\n 'omega_M_0': 0.3089,\n 'omega_lambda_0': 0.6911,\n 'h': 0.6774,\n 'n': 0.9667,\n 'sigma_8': 0.8159,\n 'tau': 0.066,\n 'z_reion': 8.8,\n 't_0': 13.799,\n }\n if flat:\n cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']\n cosmo['omega_k_0'] = 0.0\n if extras:\n add_extras(cosmo)\n return cosmo", "def _getcosmoheader(cosmo):\n \"\"\" Output the cosmology to a string for writing to file \"\"\"\n\n cosmoheader = (\"# Cosmology (flat) Om:{0:.3f}, Ol:{1:.3f}, h:{2:.2f}, \"\n \"sigma8:{3:.3f}, ns:{4:.2f}\".format(\n cosmo['omega_M_0'], cosmo['omega_lambda_0'], cosmo['h'],\n cosmo['sigma_8'], cosmo['n']))\n\n return(cosmoheader)", "def xspec_cosmo(H0=None,q0=None,lambda_0=None):\n \"\"\"\n Define the Cosmology in use within the XSpec models. See Xspec manual for help:\n\n http://heasarc.nasa.gov/xanadu/xspec/manual/XScosmo.html\n \n All parameters can be modified or just a single parameter\n\n :param H0: the hubble constant\n :param q0:\n :param lambda_0:\n :return: Either none or the current setting (H_0, q_0, lambda_0)\n \"\"\"\n\n current_settings = _xspec.get_xscosmo()\n\n if (H0 is None) and (q0 is None) and (lambda_0 is None):\n\n return current_settings\n\n\n else:\n\n # ok, we will see what was changed by the used\n\n user_inputs = [H0, q0, lambda_0]\n\n for i, current_setting in enumerate(current_settings):\n\n if user_inputs[i] is None:\n\n # the user didn't modify this,\n # so lets keep what was already set\n\n user_inputs[i] = current_setting\n\n\n # pass this to xspec\n\n _xspec.set_xscosmo(*user_inputs)", "def _delta_sigma(**cosmo):\n \"\"\" Perturb best-fit constant of proportionality Ascaling for\n rho_crit - rho_2 relation for unknown cosmology (Correa et al 2015c)\n\n Parameters\n ----------\n cosmo : dict\n Dictionary of cosmological parameters, similar in format to:\n {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,\n 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,\n 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}\n\n Returns\n -------\n float\n The perturbed 'A' relation between rho_2 and rho_crit for the cosmology\n\n Raises\n ------\n\n \"\"\"\n\n M8_cosmo = cp.perturbation.radius_to_mass(8, **cosmo)\n perturbed_A = (0.796/cosmo['sigma_8']) * \\\n (M8_cosmo/2.5e14)**((cosmo['n']-0.963)/6)\n return(perturbed_A)", "def DRAGONS(flat=False, extras=True):\n \"\"\"DRAGONS cosmology assumes WMAP7 + BAO + H_0 mean from\n Komatsu et al. (2011) ApJS 192 18K (arxiv:1001.4538v1)\n\n Parameters\n ----------\n\n flat: boolean\n\n If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0\n = 0 exactly. 
Also sets omega_k_0 = 0 explicitly.\n\n extras: boolean\n\n If True, sets neutrino number N_nu = 0, neutrino density\n omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.\n\n \"\"\"\n omega_c_0 = 0.2292\n omega_b_0 = 0.0458\n cosmo = {'omega_b_0': omega_b_0,\n 'omega_M_0': omega_b_0 + omega_c_0,\n 'omega_lambda_0': 0.725,\n 'h': 0.702,\n 'n': 0.963,\n 'sigma_8': 0.816,\n 'tau': 0.088,\n 'z_reion': 10.6,\n 't_0': 13.76,\n }\n if flat:\n cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']\n cosmo['omega_k_0'] = 0.0\n if extras:\n add_extras(cosmo)\n return cosmo", "def get_cosmo(self, Dd, Ds_Dds):\n \"\"\"\n return the values of H0 and omega_m computed with an interpolation\n :param Dd: flat\n :param Ds_Dds: float\n :return:\n \"\"\"\n if not hasattr(self, '_f_H0') or not hasattr(self, '_f_omega_m'):\n self._make_interpolation()\n H0 = self._f_H0(Dd, Ds_Dds)\n print(H0, 'H0')\n omega_m = self._f_omega_m(Dd, Ds_Dds)\n Dd_new, Ds_Dds_new = self.cosmo2Dd_Ds_Dds(H0[0], omega_m[0])\n if abs(Dd - Dd_new)/Dd > 0.01 or abs(Ds_Dds - Ds_Dds_new)/Ds_Dds > 0.01:\n return [-1], [-1]\n else:\n return H0[0], omega_m[0]", "def _cosmoid_request(self, resource, cosmoid, **kwargs):\n \"\"\"\n Maps to the Generic API method for requests who's only parameter is ``cosmoid``\n \"\"\"\n\n params = {\n 'cosmoid': cosmoid,\n }\n params.update(kwargs)\n\n return self.make_request(resource, params)", "def get_cosmology(cosmology=None, **kwargs):\n r\"\"\"Gets an astropy cosmology class.\n\n Parameters\n ----------\n cosmology : str or astropy.cosmology.FlatLambdaCDM, optional\n The name of the cosmology to use. For the list of options, see\n :py:attr:`astropy.cosmology.parameters.available`. If None, and no\n other keyword arguments are provided, will default to\n :py:attr:`DEFAULT_COSMOLOGY`. If an instance of\n :py:class:`astropy.cosmology.FlatLambdaCDM`, will just return that.\n \\**kwargs :\n If any other keyword arguments are provided they will be passed to\n :py:attr:`astropy.cosmology.FlatLambdaCDM` to create a custom\n cosmology.\n\n Returns\n -------\n astropy.cosmology.FlatLambdaCDM\n The cosmology to use.\n\n Examples\n --------\n Use the default:\n\n >>> from pycbc.cosmology import get_cosmology\n >>> get_cosmology()\n FlatLambdaCDM(name=\"Planck15\", H0=67.7 km / (Mpc s), Om0=0.307,\n Tcmb0=2.725 K, Neff=3.05, m_nu=[0. 0. 0.06] eV,\n Ob0=0.0486)\n\n Use properties measured by WMAP instead:\n\n >>> get_cosmology(\"WMAP9\")\n FlatLambdaCDM(name=\"WMAP9\", H0=69.3 km / (Mpc s), Om0=0.286, Tcmb0=2.725 K,\n Neff=3.04, m_nu=[0. 0. 0.] 
eV, Ob0=0.0463)\n\n Create your own cosmology (see :py:class:`astropy.cosmology.FlatLambdaCDM`\n for details on the default values used):\n\n >>> get_cosmology(H0=70., Om0=0.3)\n FlatLambdaCDM(H0=70 km / (Mpc s), Om0=0.3, Tcmb0=0 K, Neff=3.04, m_nu=None,\n Ob0=None)\n\n \"\"\"\n if kwargs and cosmology is not None:\n raise ValueError(\"if providing custom cosmological parameters, do \"\n \"not provide a `cosmology` argument\")\n if isinstance(cosmology, astropy.cosmology.FlatLambdaCDM):\n # just return\n return cosmology\n if kwargs:\n cosmology = astropy.cosmology.FlatLambdaCDM(**kwargs)\n else:\n if cosmology is None:\n cosmology = DEFAULT_COSMOLOGY\n if cosmology not in astropy.cosmology.parameters.available:\n raise ValueError(\"unrecognized cosmology {}\".format(cosmology))\n cosmology = getattr(astropy.cosmology, cosmology)\n return cosmology", "def parameters(self):\n \"\"\"\n Get the dictionary of parameters (either ra,dec or l,b)\n\n :return: dictionary of parameters\n \"\"\"\n\n if self._coord_type == 'galactic':\n\n return collections.OrderedDict((('l', self.l), ('b', self.b)))\n\n else:\n\n return collections.OrderedDict((('ra', self.ra), ('dec', self.dec)))", "def get_account_cos(self, account):\n \"\"\" Fetch the cos for a given account\n\n Quite different from the original request which returns COS + various\n URL + COS + zimbraMailHost... But all other informations are accessible\n through get_account.\n\n :type account: zobjects.Account\n :rtype: zobjects.COS\n \"\"\"\n resp = self.request(\n 'GetAccountInfo', {'account': account.to_selector()})\n return zobjects.COS.from_dict(resp['cos'])", "def active_cosfi(self):\n \"\"\"\n Takes the average of all instantaneous cosfi values\n\n Returns\n -------\n float\n \"\"\"\n inst = self.load_instantaneous()\n values = [float(i['value']) for i in inst if i['key'].endswith('Cosfi')]\n return sum(values) / len(values)", "def _deriv_growth(z, **cosmo):\n \"\"\" Returns derivative of the linear growth factor at z\n for a given cosmology **cosmo \"\"\"\n\n inv_h = (cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])**(-0.5)\n fz = (1 + z) * inv_h**3\n\n deriv_g = growthfactor(z, norm=True, **cosmo)*(inv_h**2) *\\\n 1.5 * cosmo['omega_M_0'] * (1 + z)**2 -\\\n fz * growthfactor(z, norm=True, **cosmo)/_int_growth(z, **cosmo)\n\n return(deriv_g)" ]
[ 0.700488269329071, 0.6922816038131714, 0.6892850995063782, 0.6832613945007324, 0.670433521270752, 0.6544787287712097, 0.6512612104415894, 0.6474101543426514, 0.6463600993156433, 0.6381969451904297, 0.6360576152801514, 0.6343229413032532 ]
Output the cosmology to a string for writing to file
def _getcosmoheader(cosmo): """ Output the cosmology to a string for writing to file """ cosmoheader = ("# Cosmology (flat) Om:{0:.3f}, Ol:{1:.3f}, h:{2:.2f}, " "sigma8:{3:.3f}, ns:{4:.2f}".format( cosmo['omega_M_0'], cosmo['omega_lambda_0'], cosmo['h'], cosmo['sigma_8'], cosmo['n'])) return(cosmoheader)
[ "def getcosmo(cosmology):\n \"\"\" Find cosmological parameters for named cosmo in cosmology.py list \"\"\"\n\n defaultcosmologies = {'dragons': cg.DRAGONS(), 'wmap1': cg.WMAP1_Mill(),\n 'wmap3': cg.WMAP3_ML(), 'wmap5': cg.WMAP5_mean(),\n 'wmap7': cg.WMAP7_ML(), 'wmap9': cg.WMAP9_ML(),\n 'wmap1_lss': cg.WMAP1_2dF_mean(),\n 'wmap3_mean': cg.WMAP3_mean(),\n 'wmap5_ml': cg.WMAP5_ML(),\n 'wmap5_lss': cg.WMAP5_BAO_SN_mean(),\n 'wmap7_lss': cg.WMAP7_BAO_H0_mean(),\n 'planck13': cg.Planck_2013(),\n 'planck15': cg.Planck_2015()}\n\n if isinstance(cosmology, dict):\n # User providing their own variables\n cosmo = cosmology\n if 'A_scaling' not in cosmology.keys():\n A_scaling = getAscaling(cosmology, newcosmo=True)\n cosmo.update({'A_scaling': A_scaling})\n\n # Add extra variables by hand that cosmolopy requires\n # note that they aren't used (set to zero)\n for paramnames in cg.WMAP5_mean().keys():\n if paramnames not in cosmology.keys():\n cosmo.update({paramnames: 0})\n elif cosmology.lower() in defaultcosmologies.keys():\n # Load by name of cosmology instead\n cosmo = defaultcosmologies[cosmology.lower()]\n A_scaling = getAscaling(cosmology)\n cosmo.update({'A_scaling': A_scaling})\n else:\n print(\"You haven't passed a dict of cosmological parameters \")\n print(\"OR a recognised cosmology, you gave %s\" % (cosmology))\n # No idea why this has to be done by hand but should be O_k = 0\n cosmo = cp.distance.set_omega_k_0(cosmo)\n\n # Use the cosmology as **cosmo passed to cosmolopy routines\n return(cosmo)", "def to_string(self, verbose=0):\n \"\"\"String representation.\"\"\"\n lines = []\n app = lines.append\n app(\"<%s: %s>\" % (self.__class__.__name__, self.basename))\n app(\" summary: \" + self.summary.strip())\n app(\" number of valence electrons: %s\" % self.Z_val)\n app(\" maximum angular momentum: %s\" % l2str(self.l_max))\n app(\" angular momentum for local part: %s\" % l2str(self.l_local))\n app(\" XC correlation: %s\" % self.xc)\n app(\" supports spin-orbit: %s\" % self.supports_soc)\n\n if self.isnc:\n app(\" radius for non-linear core correction: %s\" % self.nlcc_radius)\n\n if self.has_hints:\n for accuracy in (\"low\", \"normal\", \"high\"):\n hint = self.hint_for_accuracy(accuracy=accuracy)\n app(\" hint for %s accuracy: %s\" % (accuracy, str(hint)))\n\n return \"\\n\".join(lines)", "def to_string(self):\n \"\"\"\n Returns a structure in mcsqs rndstr.in format.\n :return (str):\n \"\"\"\n # define coord system, use Cartesian\n output = [\"1.0 0.0 0.0\",\n \"0.0 1.0 0.0\",\n \"0.0 0.0 1.0\"]\n # add lattice vectors\n m = self.structure.lattice.matrix\n output.append(\"{:6f} {:6f} {:6f}\".format(*m[0]))\n output.append(\"{:6f} {:6f} {:6f}\".format(*m[1]))\n output.append(\"{:6f} {:6f} {:6f}\".format(*m[2]))\n # add species\n for site in self.structure:\n species_str = []\n for sp, occu in sorted(site.species.items()):\n if isinstance(sp, Specie):\n sp = sp.element\n species_str.append(\"{}={}\".format(sp, occu))\n species_str = \",\".join(species_str)\n output.append(\"{:6f} {:6f} {:6f} {}\".format(site.frac_coords[0],\n site.frac_coords[1],\n site.frac_coords[2],\n species_str))\n\n return \"\\n\".join(output)", "def runcommand(cosmology='WMAP5'):\n \"\"\" Example interface commands \"\"\"\n\n # Return the WMAP5 cosmology concentration predicted for\n # z=0 range of masses\n Mi = [1e8, 1e9, 1e10]\n zi = 0\n print(\"Concentrations for haloes of mass %s at z=%s\" % (Mi, zi))\n output = commah.run(cosmology=cosmology, zi=zi, Mi=Mi)\n\n print(output['c'].flatten())\n\n # Return the 
WMAP5 cosmology concentration predicted for\n # z=0 range of masses AND cosmological parameters\n Mi = [1e8, 1e9, 1e10]\n zi = 0\n print(\"Concentrations for haloes of mass %s at z=%s\" % (Mi, zi))\n output, cosmo = commah.run(cosmology=cosmology, zi=zi, Mi=Mi,\n retcosmo=True)\n\n print(output['c'].flatten())\n print(cosmo)\n\n # Return the WMAP5 cosmology concentration predicted for MW\n # mass (2e12 Msol) across redshift\n Mi = 2e12\n z = [0, 0.5, 1, 1.5, 2, 2.5]\n output = commah.run(cosmology=cosmology, zi=0, Mi=Mi, z=z)\n for zval in z:\n print(\"M(z=0)=%s has c(z=%s)=%s\"\n % (Mi, zval, output[output['z'] == zval]['c'].flatten()))\n\n # Return the WMAP5 cosmology concentration predicted for MW\n # mass (2e12 Msol) across redshift\n Mi = 2e12\n zi = [0, 0.5, 1, 1.5, 2, 2.5]\n output = commah.run(cosmology=cosmology, zi=zi, Mi=Mi)\n for zval in zi:\n print(\"M(z=%s)=%s has concentration %s\"\n % (zval, Mi, output[(output['zi'] == zval) &\n (output['z'] == zval)]['c'].flatten()))\n\n # Return the WMAP5 cosmology concentration and\n # rarity of high-z cluster\n Mi = 2e14\n zi = 6\n output = commah.run(cosmology=cosmology, zi=zi, Mi=Mi)\n print(\"Concentrations for haloes of mass %s at z=%s\" % (Mi, zi))\n print(output['c'].flatten())\n print(\"Mass variance sigma of haloes of mass %s at z=%s\" % (Mi, zi))\n print(output['sig'].flatten())\n print(\"Fluctuation for haloes of mass %s at z=%s\" % (Mi, zi))\n print(output['nu'].flatten())\n\n # Return the WMAP5 cosmology accretion rate prediction\n # for haloes at range of redshift and mass\n Mi = [1e8, 1e9, 1e10]\n zi = [0]\n z = [0, 0.5, 1, 1.5, 2, 2.5]\n output = commah.run(cosmology=cosmology, zi=zi, Mi=Mi, z=z)\n for Mval in Mi:\n print(\"dM/dt for halo of mass %s at z=%s across redshift %s is: \"\n % (Mval, zi, z))\n print(output[output['Mi'] == Mval]['dMdt'].flatten())\n\n # Return the WMAP5 cosmology Halo Mass History for haloes with M(z=0) = 1e8\n M = [1e8]\n z = [0, 0.5, 1, 1.5, 2, 2.5]\n print(\"Halo Mass History for z=0 mass of %s across z=%s\" % (M, z))\n output = commah.run(cosmology=cosmology, zi=0, Mi=M, z=z)\n print(output['Mz'].flatten())\n\n # Return the WMAP5 cosmology formation redshifts for haloes at\n # range of redshift and mass\n M = [1e8, 1e9, 1e10]\n z = [0]\n print(\"Formation Redshifts for haloes of mass %s at z=%s\" % (M, z))\n output = commah.run(cosmology=cosmology, zi=0, Mi=M, z=z)\n for Mval in M:\n print(output[output['Mi'] == Mval]['zf'].flatten())\n\n return(\"Done\")", "def to_xyz(self, buf=None, sort_index=True,\n index=False, header=False, float_format='{:.6f}'.format,\n overwrite=True):\n \"\"\"Write xyz-file\n\n Args:\n buf (str): StringIO-like, optional buffer to write to\n sort_index (bool): If sort_index is true, the\n :class:`~chemcoord.Cartesian`\n is sorted by the index before writing.\n float_format (one-parameter function): Formatter function\n to apply to column’s elements if they are floats.\n The result of this function must be a unicode string.\n overwrite (bool): May overwrite existing files.\n\n Returns:\n formatted : string (or unicode, depending on data and options)\n \"\"\"\n if sort_index:\n molecule_string = self.sort_index().to_string(\n header=header, index=index, float_format=float_format)\n else:\n molecule_string = self.to_string(header=header, index=index,\n float_format=float_format)\n\n # NOTE the following might be removed in the future\n # introduced because of formatting bug in pandas\n # See https://github.com/pandas-dev/pandas/issues/13032\n space = ' ' * (self.loc[:, 
'atom'].str.len().max()\n - len(self.iloc[0, 0]))\n\n output = '{n}\\n{message}\\n{alignment}{frame_string}'.format(\n n=len(self), alignment=space, frame_string=molecule_string,\n message='Created by chemcoord http://chemcoord.readthedocs.io/')\n\n if buf is not None:\n if overwrite:\n with open(buf, mode='w') as f:\n f.write(output)\n else:\n with open(buf, mode='x') as f:\n f.write(output)\n else:\n return output", "def Dump(self, key = None):\n \"\"\"\n Using the standard Python pretty printer, return the contents of the\n scons build environment as a string.\n\n If the key passed in is anything other than None, then that will\n be used as an index into the build environment dictionary and\n whatever is found there will be fed into the pretty printer. Note\n that this key is case sensitive.\n \"\"\"\n import pprint\n pp = pprint.PrettyPrinter(indent=2)\n if key:\n dict = self.Dictionary(key)\n else:\n dict = self.Dictionary()\n return pp.pformat(dict)", "def get_string(self, direct=True, vasp4_compatible=False,\n significant_figures=6):\n \"\"\"\n Returns a string to be written as a POSCAR file. By default, site\n symbols are written, which means compatibility is for vasp >= 5.\n\n Args:\n direct (bool): Whether coordinates are output in direct or\n cartesian. Defaults to True.\n vasp4_compatible (bool): Set to True to omit site symbols on 6th\n line to maintain backward vasp 4.x compatibility. Defaults\n to False.\n significant_figures (int): No. of significant figures to\n output all quantities. Defaults to 6. Note that positions are\n output in fixed point, while velocities are output in\n scientific format.\n\n Returns:\n String representation of POSCAR.\n \"\"\"\n\n # This corrects for VASP really annoying bug of crashing on lattices\n # which have triple product < 0. We will just invert the lattice\n # vectors.\n latt = self.structure.lattice\n if np.linalg.det(latt.matrix) < 0:\n latt = Lattice(-latt.matrix)\n\n format_str = \"{{:.{0}f}}\".format(significant_figures)\n lines = [self.comment, \"1.0\"]\n for v in latt.matrix:\n lines.append(\" \".join([format_str.format(c) for c in v]))\n\n if self.true_names and not vasp4_compatible:\n lines.append(\" \".join(self.site_symbols))\n lines.append(\" \".join([str(x) for x in self.natoms]))\n if self.selective_dynamics:\n lines.append(\"Selective dynamics\")\n lines.append(\"direct\" if direct else \"cartesian\")\n\n selective_dynamics = self.selective_dynamics\n for (i, site) in enumerate(self.structure):\n coords = site.frac_coords if direct else site.coords\n line = \" \".join([format_str.format(c) for c in coords])\n if selective_dynamics is not None:\n sd = [\"T\" if j else \"F\" for j in selective_dynamics[i]]\n line += \" %s %s %s\" % (sd[0], sd[1], sd[2])\n line += \" \" + site.species_string\n lines.append(line)\n\n if self.velocities:\n try:\n lines.append(\"\")\n for v in self.velocities:\n lines.append(\" \".join([format_str.format(i) for i in v]))\n except:\n warnings.warn(\"Velocities are missing or corrupted.\")\n\n if self.predictor_corrector:\n lines.append(\"\")\n if self.predictor_corrector_preamble:\n lines.append(self.predictor_corrector_preamble)\n pred = np.array(self.predictor_corrector)\n for col in range(3):\n for z in pred[:,col]:\n lines.append(\" \".join([format_str.format(i) for i in z]))\n else:\n warnings.warn(\n \"Preamble information missing or corrupt. 
\" \n \"Writing Poscar with no predictor corrector data.\")\n\n return \"\\n\".join(lines) + \"\\n\"", "def xspec_cosmo(H0=None,q0=None,lambda_0=None):\n \"\"\"\n Define the Cosmology in use within the XSpec models. See Xspec manual for help:\n\n http://heasarc.nasa.gov/xanadu/xspec/manual/XScosmo.html\n \n All parameters can be modified or just a single parameter\n\n :param H0: the hubble constant\n :param q0:\n :param lambda_0:\n :return: Either none or the current setting (H_0, q_0, lambda_0)\n \"\"\"\n\n current_settings = _xspec.get_xscosmo()\n\n if (H0 is None) and (q0 is None) and (lambda_0 is None):\n\n return current_settings\n\n\n else:\n\n # ok, we will see what was changed by the used\n\n user_inputs = [H0, q0, lambda_0]\n\n for i, current_setting in enumerate(current_settings):\n\n if user_inputs[i] is None:\n\n # the user didn't modify this,\n # so lets keep what was already set\n\n user_inputs[i] = current_setting\n\n\n # pass this to xspec\n\n _xspec.set_xscosmo(*user_inputs)", "def write_ensemble(ensemble, options):\n \"\"\"\n\tPrints out the ensemble composition at each size\n\t\"\"\"\n\n # set output file name\n size = len(ensemble)\n filename = '%s_%s_queries.csv' % (options.outname, size)\n file = os.path.join(os.getcwd(), filename)\n\n f = open(file, 'w')\n\n out = ', '.join(ensemble)\n\n f.write(out)\n\n f.close()", "def to_s\n text = ''\n\n text << tag_output_string + \"\\n\" unless tags.empty?\n text << \"#{@keyword}:#{name_output_string}\"\n text << \"\\n\" + description_output_string unless (description.nil? || description.empty?)\n text << \"\\n\\n\" + background_output_string if background\n text << \"\\n\\n\" + tests_output_string unless tests.empty?\n\n text\n end", "def format_xyz(title = \"Aims Geoemtry\")\n output = self.atoms.size.to_s + \"\\n\"\n output << \"#{title} \\n\"\n self.atoms.each{ |a| \n output << [a.species, a.x.to_s, a.y.to_s, a.z.to_s].join(\"\\t\") + \"\\n\"\n }\n output\n end", "def to_string(self):\n \"\"\"Export this namespace to a string suitable for incorporation\n in a VW example line, e.g.\n 'MetricFeatures:3.28 height:1.5 length:2.0 '\n \"\"\"\n if self._string is None:\n tokens = []\n if self.name:\n if self.scale:\n token = self.name + ':' + str(self.scale)\n else:\n token = self.name\n else:\n token = '' # Spacing element to indicate next string is a feature\n tokens.append(token)\n for label, value in self.features:\n if value is None:\n token = label\n else:\n token = label + ':' + str(value)\n tokens.append(token)\n tokens.append('') # Spacing element to separate from next pipe character\n output = ' '.join(tokens)\n if self.cache_string:\n self._string = output\n else:\n output = self._string\n return output" ]
[ 0.6981759667396545, 0.6744505763053894, 0.6708426475524902, 0.6707870960235596, 0.660865843296051, 0.6604917645454407, 0.6592658162117004, 0.6579853892326355, 0.656875491142273, 0.6565803289413452, 0.6555835604667664, 0.6547509431838989 ]
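As an aside (not part of the dataset row above): the header string that the _getcosmoheader positive passage builds can be rendered directly with the WMAP5-like parameter values quoted elsewhere in this file (Om=0.275, Ol=0.725, h=0.702, sigma_8=0.816, n=0.963). This is an illustrative sketch only; the dict literal below is filled in by hand from those docstrings.

cosmo = {'omega_M_0': 0.275, 'omega_lambda_0': 0.725, 'h': 0.702,
         'sigma_8': 0.816, 'n': 0.963}
header = ("# Cosmology (flat) Om:{0:.3f}, Ol:{1:.3f}, h:{2:.2f}, "
          "sigma8:{3:.3f}, ns:{4:.2f}".format(
              cosmo['omega_M_0'], cosmo['omega_lambda_0'], cosmo['h'],
              cosmo['sigma_8'], cosmo['n']))
print(header)  # -> "# Cosmology (flat) Om:0.275, Ol:0.725, h:0.70, sigma8:0.816, ns:0.96"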
NFW conc from Duffy 08 Table 1 for halo mass and redshift
def cduffy(z, M, vir='200crit', relaxed=True): """ NFW conc from Duffy 08 Table 1 for halo mass and redshift""" if(vir == '200crit'): if relaxed: params = [6.71, -0.091, -0.44] else: params = [5.71, -0.084, -0.47] elif(vir == 'tophat'): if relaxed: params = [9.23, -0.090, -0.69] else: params = [7.85, -0.081, -0.71] elif(vir == '200mean'): if relaxed: params = [11.93, -0.090, -0.99] else: params = [10.14, -0.081, -1.01] else: print("Didn't recognise the halo boundary definition provided %s" % (vir)) return(params[0] * ((M/(2e12/0.72))**params[1]) * ((1+z)**params[2]))
[ "def c_Duffy(z, m, h=h):\n \"\"\"Concentration from c(M) relation published in Duffy et al. (2008).\n\n Parameters\n ----------\n z : float or array_like\n Redshift(s) of halos.\n m : float or array_like\n Mass(es) of halos (m200 definition), in units of solar masses.\n h : float, optional\n Hubble parameter. Default is from Planck13.\n\n Returns\n ----------\n ndarray\n Concentration values (c200) for halos.\n\n References\n ----------\n Results from N-body simulations using WMAP5 cosmology, presented in:\n\n A.R. Duffy, J. Schaye, S.T. Kay, and C. Dalla Vecchia, \"Dark matter\n halo concentrations in the Wilkinson Microwave Anisotropy Probe year 5\n cosmology,\" Monthly Notices of the Royal Astronomical Society, Volume\n 390, Issue 1, pp. L64-L68, 2008.\n\n This calculation uses the parameters corresponding to the NFW model,\n the '200' halo definition, and the 'full' sample of halos spanning\n z = 0-2. This means the values of fitted parameters (A,B,C) = (5.71,\n -0.084,-0.47) in Table 1 of Duffy et al. (2008).\n \"\"\"\n\n z, m = _check_inputs(z, m)\n\n M_pivot = 2.e12 / h # [M_solar]\n\n A = 5.71\n B = -0.084\n C = -0.47\n\n concentration = A * ((m / M_pivot)**B) * (1 + z)**C\n\n return concentration", "def c_DuttonMaccio(z, m, h=h):\n \"\"\"Concentration from c(M) relation in Dutton & Maccio (2014).\n\n Parameters\n ----------\n z : float or array_like\n Redshift(s) of halos.\n m : float or array_like\n Mass(es) of halos (m200 definition), in units of solar masses.\n h : float, optional\n Hubble parameter. Default is from Planck13.\n\n Returns\n ----------\n ndarray\n Concentration values (c200) for halos.\n\n References\n ----------\n Calculation from Planck-based results of simulations presented in:\n\n A.A. Dutton & A.V. Maccio, \"Cold dark matter haloes in the Planck era:\n evolution of structural parameters for Einasto and NFW profiles,\"\n Monthly Notices of the Royal Astronomical Society, Volume 441, Issue 4,\n p.3359-3374, 2014.\n \"\"\"\n\n z, m = _check_inputs(z, m)\n\n a = 0.52 + 0.385 * np.exp(-0.617 * (z**1.21)) # EQ 10\n b = -0.101 + 0.026 * z # EQ 11\n\n logc200 = a + b * np.log10(m * h / (10.**12)) # EQ 7\n\n concentration = 10.**logc200\n\n return concentration", "def sigma_nfw(self):\n \"\"\"Calculate NFW surface mass density profile.\n\n Generate the surface mass density profiles of each cluster halo,\n assuming a spherical NFW model. Optionally includes the effect of\n cluster miscentering offsets, if the parent object was initialized\n with offsets.\n\n Returns\n ----------\n Quantity\n Surface mass density profiles (ndarray, in astropy.units of\n Msun/pc/pc). Each row corresponds to a single cluster halo.\n \"\"\"\n def _centered_sigma(self):\n # perfectly centered cluster case\n\n # calculate f\n bigF = np.zeros_like(self._x)\n f = np.zeros_like(self._x)\n\n numerator_arg = ((1. / self._x[self._x_small]) +\n np.sqrt((1. / (self._x[self._x_small]**2)) - 1.))\n denominator = np.sqrt(1. - (self._x[self._x_small]**2))\n bigF[self._x_small] = np.log(numerator_arg) / denominator\n\n bigF[self._x_big] = (np.arccos(1. / self._x[self._x_big]) /\n np.sqrt(self._x[self._x_big]**2 - 1.))\n\n f = (1. - bigF) / (self._x**2 - 1.)\n f[self._x_one] = 1. / 3.\n if np.isnan(np.sum(f)) or np.isinf(np.sum(f)):\n print('\\nERROR: f is not all real\\n')\n\n # calculate & return centered profiles\n if f.ndim == 2:\n sigma = 2. * self._rs_dc_rcrit * f\n else:\n rs_dc_rcrit_4D = self._rs_dc_rcrit.T.reshape(1, 1,\n f.shape[2],\n f.shape[3])\n sigma = 2. 
* rs_dc_rcrit_4D * f\n\n return sigma\n\n def _offset_sigma(self):\n\n # size of \"x\" arrays to integrate over\n numRoff = self._numRoff\n numTh = self._numTh\n\n numRbins = self._nbins\n maxsig = self._sigmaoffset.value.max()\n\n # inner/outer bin edges\n roff_1D = np.linspace(0., 4. * maxsig, numRoff)\n theta_1D = np.linspace(0., 2. * np.pi, numTh)\n rMpc_1D = self._rbins.value\n\n # reshape for broadcasting: (numTh,numRoff,numRbins)\n theta = theta_1D.reshape(numTh, 1, 1)\n roff = roff_1D.reshape(1, numRoff, 1)\n rMpc = rMpc_1D.reshape(1, 1, numRbins)\n\n r_eq13 = np.sqrt(rMpc ** 2 + roff ** 2 -\n 2. * rMpc * roff * np.cos(theta))\n\n # 3D array r_eq13 -> 4D dimensionless radius (nlens)\n _set_dimensionless_radius(self, radii=r_eq13, integration=True)\n\n sigma = _centered_sigma(self)\n inner_integrand = sigma.value / (2. * np.pi)\n\n # INTEGRATE OVER theta\n sigma_of_RgivenRoff = simps(inner_integrand, x=theta_1D, axis=0,\n even='first')\n\n # theta is gone, now dimensions are: (numRoff,numRbins,nlens)\n sig_off_3D = self._sigmaoffset.value.reshape(1, 1, self._nlens)\n roff_v2 = roff_1D.reshape(numRoff, 1, 1)\n PofRoff = (roff_v2 / (sig_off_3D**2) *\n np.exp(-0.5 * (roff_v2 / sig_off_3D)**2))\n\n dbl_integrand = sigma_of_RgivenRoff * PofRoff\n\n # INTEGRATE OVER Roff\n # (integration axis=0 after theta is gone).\n sigma_smoothed = simps(dbl_integrand, x=roff_1D, axis=0,\n even='first')\n\n # reset _x to correspond to input rbins (default)\n _set_dimensionless_radius(self)\n\n sigma_sm = np.array(sigma_smoothed.T) * units.solMass / units.pc**2\n\n return sigma_sm\n\n if self._sigmaoffset is None:\n finalsigma = _centered_sigma(self)\n elif np.abs(self._sigmaoffset).sum() == 0:\n finalsigma = _centered_sigma(self)\n else:\n finalsigma = _offset_sigma(self)\n self._sigma_sm = finalsigma\n\n return finalsigma", "def deltasigma_nfw(self):\n \"\"\"Calculate NFW differential surface mass density profile.\n\n Generate the differential surface mass density profiles of each cluster\n halo, assuming a spherical NFW model. Optionally includes the effect of\n cluster miscentering offsets, if the parent object was initialized\n with offsets.\n\n Returns\n ----------\n Quantity\n Differential surface mass density profiles (ndarray, in\n astropy.units of Msun/pc/pc). Each row corresponds to a single\n cluster halo.\n \"\"\"\n def _centered_dsigma(self):\n # calculate g\n\n firstpart = np.zeros_like(self._x)\n secondpart = np.zeros_like(self._x)\n g = np.zeros_like(self._x)\n\n small_1a = 4. / self._x[self._x_small]**2\n small_1b = 2. / (self._x[self._x_small]**2 - 1.)\n small_1c = np.sqrt(1. - self._x[self._x_small]**2)\n firstpart[self._x_small] = (small_1a + small_1b) / small_1c\n\n big_1a = 8. / (self._x[self._x_big]**2 *\n np.sqrt(self._x[self._x_big]**2 - 1.))\n big_1b = 4. / ((self._x[self._x_big]**2 - 1.)**1.5)\n firstpart[self._x_big] = big_1a + big_1b\n\n small_2a = np.sqrt((1. - self._x[self._x_small]) /\n (1. + self._x[self._x_small]))\n secondpart[self._x_small] = np.log((1. + small_2a) /\n (1. - small_2a))\n\n big_2a = self._x[self._x_big] - 1.\n big_2b = 1. + self._x[self._x_big]\n secondpart[self._x_big] = np.arctan(np.sqrt(big_2a / big_2b))\n\n both_3a = (4. / (self._x**2)) * np.log(self._x / 2.)\n both_3b = 2. / (self._x**2 - 1.)\n g = firstpart * secondpart + both_3a - both_3b\n\n g[self._x_one] = (10. / 3.) + 4. 
* np.log(0.5)\n\n if np.isnan(np.sum(g)) or np.isinf(np.sum(g)):\n print('\\nERROR: g is not all real\\n', g)\n\n # calculate & return centered profile\n deltasigma = self._rs_dc_rcrit * g\n\n return deltasigma\n\n def _offset_dsigma(self):\n original_rbins = self._rbins.value\n\n # if offset sigma was already calculated, use it!\n try:\n sigma_sm_rbins = self._sigma_sm\n except AttributeError:\n sigma_sm_rbins = self.sigma_nfw()\n\n innermost_sampling = 1.e-10 # stable for anything below 1e-5\n inner_prec = self._numRinner\n r_inner = np.linspace(innermost_sampling,\n original_rbins.min(),\n endpoint=False, num=inner_prec)\n outer_prec = self._factorRouter * self._nbins\n r_outer = np.linspace(original_rbins.min(),\n original_rbins.max(),\n endpoint=False, num=outer_prec + 1)[1:]\n r_ext_unordered = np.hstack([r_inner, r_outer, original_rbins])\n r_extended = np.sort(r_ext_unordered)\n\n # set temporary extended rbins, nbins, x, rs_dc_rcrit array\n self._rbins = r_extended * units.Mpc\n self._nbins = self._rbins.shape[0]\n _set_dimensionless_radius(self) # uses _rbins, _nlens\n rs_dc_rcrit = self._rs * self._delta_c * self._rho_crit\n self._rs_dc_rcrit = rs_dc_rcrit.reshape(self._nlens,\n 1).repeat(self._nbins, 1)\n\n sigma_sm_extended = self.sigma_nfw()\n mean_inside_sigma_sm = np.zeros([self._nlens,\n original_rbins.shape[0]])\n\n for i, r in enumerate(original_rbins):\n index_of_rbin = np.where(r_extended == r)[0][0]\n x = r_extended[0:index_of_rbin + 1]\n y = sigma_sm_extended[:, 0:index_of_rbin + 1] * x\n\n integral = simps(y, x=x, axis=-1, even='first')\n\n # average of sigma_sm at r < rbin\n mean_inside_sigma_sm[:, i] = (2. / r**2) * integral\n\n mean_inside_sigma_sm = mean_inside_sigma_sm * (units.Msun /\n units.pc**2)\n\n # reset original rbins, nbins, x\n self._rbins = original_rbins * units.Mpc\n self._nbins = self._rbins.shape[0]\n _set_dimensionless_radius(self)\n rs_dc_rcrit = self._rs * self._delta_c * self._rho_crit\n self._rs_dc_rcrit = rs_dc_rcrit.reshape(self._nlens,\n 1).repeat(self._nbins, 1)\n self._sigma_sm = sigma_sm_rbins # reset to original sigma_sm\n\n dsigma_sm = mean_inside_sigma_sm - sigma_sm_rbins\n\n return dsigma_sm\n\n if self._sigmaoffset is None:\n finaldeltasigma = _centered_dsigma(self)\n elif np.abs(self._sigmaoffset).sum() == 0:\n finaldeltasigma = _centered_dsigma(self)\n else:\n finaldeltasigma = _offset_dsigma(self)\n\n return finaldeltasigma", "def nfwAlpha(self, R, Rs, rho0, r_trunc, ax_x, ax_y):\n \"\"\"\n deflection angel of NFW profile along the projection to coordinate axis\n\n :param R: radius of interest\n :type R: float/numpy array\n :param Rs: scale radius\n :type Rs: float\n :param rho0: density normalization (characteristic density)\n :type rho0: float\n :param r200: radius of (sub)halo\n :type r200: float>0\n :param axis: projection to either x- or y-axis\n :type axis: same as R\n :return: Epsilon(R) projected density at radius R\n \"\"\"\n if isinstance(R, int) or isinstance(R, float):\n R = max(R, 0.00001)\n else:\n R[R <= 0.00001] = 0.00001\n\n x = R / Rs\n tau = float(r_trunc) / Rs\n gx = self._g(x, tau)\n a = 4 * rho0 * Rs * gx / x ** 2\n return a * ax_x, a * ax_y", "def cnfwAlpha(self, R, Rs, rho0, r_core, ax_x, ax_y):\n \"\"\"\n deflection angel of NFW profile along the projection to coordinate axis\n\n :param R: radius of interest\n :type R: float/numpy array\n :param Rs: scale radius\n :type Rs: float\n :param rho0: density normalization (characteristic density)\n :type rho0: float\n :param r200: radius of (sub)halo\n :type 
r200: float>0\n :param axis: projection to either x- or y-axis\n :type axis: same as R\n :return: Epsilon(R) projected density at radius R\n \"\"\"\n if isinstance(R, int) or isinstance(R, float):\n R = max(R, 0.00001)\n else:\n R[R <= 0.00001] = 0.00001\n\n x = R / Rs\n b = r_core * Rs ** -1\n b = max(b, 0.000001)\n gx = self._G(x, b)\n\n a = 4*rho0*Rs*gx/x**2\n return a * ax_x, a * ax_y", "def c_M_z(self, M, z):\n \"\"\"\n fitting function of http://moriond.in2p3.fr/J08/proceedings/duffy.pdf for the mass and redshift dependence of the concentration parameter\n\n :param M: halo mass in M_sun/h\n :type M: float or numpy array\n :param z: redshift\n :type z: float >0\n :return: concentration parameter as float\n \"\"\"\n # fitted parameter values\n A = 5.22\n B = -0.072\n C = -0.42\n M_pivot = 2.*10**12\n return A*(M/M_pivot)**B*(1+z)**C", "def _nfw_func(self, x):\n \"\"\"\n Classic NFW function in terms of arctanh and arctan\n :param x: r/Rs\n :return:\n \"\"\"\n\n c = 0.000001\n\n if isinstance(x, np.ndarray):\n x[np.where(x<c)] = c\n nfwvals = np.ones_like(x)\n inds1 = np.where(x < 1)\n inds2 = np.where(x > 1)\n\n nfwvals[inds1] = (1 - x[inds1] ** 2) ** -.5 * np.arctanh((1 - x[inds1] ** 2) ** .5)\n nfwvals[inds2] = (x[inds2] ** 2 - 1) ** -.5 * np.arctan((x[inds2] ** 2 - 1) ** .5)\n\n return nfwvals\n\n elif isinstance(x, float) or isinstance(x, int):\n x = max(x, c)\n if x == 1:\n return 1\n if x < 1:\n return (1 - x ** 2) ** -.5 * np.arctanh((1 - x ** 2) ** .5)\n else:\n return (x ** 2 - 1) ** -.5 * np.arctan((x ** 2 - 1) ** .5)", "def calc_nfw(self, rbins, offsets=None, numTh=200, numRoff=200,\n numRinner=20, factorRouter=3):\n \"\"\"Calculates Sigma and DeltaSigma profiles.\n\n Generates the surface mass density (sigma_nfw attribute of parent\n object) and differential surface mass density (deltasigma_nfw\n attribute of parent object) profiles of each cluster, assuming a\n spherical NFW model. Optionally includes the effect of cluster\n miscentering offsets.\n\n Parameters\n ----------\n rbins : array_like\n Radial bins (in Mpc) for calculating cluster profiles. Should\n be 1D, optionally with astropy.units of Mpc.\n offsets : array_like, optional\n Parameter describing the width (in Mpc) of the Gaussian\n distribution of miscentering offsets. Should be 1D, optionally\n with astropy.units of Mpc.\n\n Other Parameters\n -------------------\n numTh : int, optional\n Parameter to pass to SurfaceMassDensity(). Number of bins to\n use for integration over theta, for calculating offset profiles\n (no effect for offsets=None). Default 200.\n numRoff : int, optional\n Parameter to pass to SurfaceMassDensity(). Number of bins to\n use for integration over R_off, for calculating offset profiles\n (no effect for offsets=None). Default 200.\n numRinner : int, optional\n Parameter to pass to SurfaceMassDensity(). Number of bins at\n r < min(rbins) to use for integration over Sigma(<r), for\n calculating DeltaSigma (no effect for Sigma ever, and no effect\n for DeltaSigma if offsets=None). Default 20.\n factorRouter : int, optional\n Parameter to pass to SurfaceMassDensity(). Factor increase over\n number of rbins, at min(r) < r < max(r), of bins that will be\n used at for integration over Sigma(<r), for calculating\n DeltaSigma (no effect for Sigma, and no effect for DeltaSigma\n if offsets=None). 
Default 3.\n \"\"\"\n if offsets is None:\n self._sigoffset = np.zeros(self.number) * units.Mpc\n else:\n self._sigoffset = utils.check_units_and_type(offsets, units.Mpc,\n num=self.number)\n\n self.rbins = utils.check_units_and_type(rbins, units.Mpc)\n\n rhoc = self._rho_crit.to(units.Msun / units.pc**2 / units.Mpc)\n smd = SurfaceMassDensity(self.rs, self.delta_c, rhoc,\n offsets=self._sigoffset,\n rbins=self.rbins,\n numTh=numTh,\n numRoff=numRoff,\n numRinner=numRinner,\n factorRouter=factorRouter)\n\n self.sigma_nfw = smd.sigma_nfw()\n self.deltasigma_nfw = smd.deltasigma_nfw()", "def Sun_Duffey_Peng(x, rhol, rhog, sigma, m, D, P, Pc, g=g):\n r'''Calculates void fraction in two-phase flow according to the model of \n [1]_ as given in [2]_ and [3]_.\n \n .. math::\n \\alpha = \\frac{x}{\\rho_g}\\left[C_0\\left(\\frac{x}{\\rho_g} + \\frac{1-x}\n {\\rho_l}\\right) +\\frac{v_{gm}}{G} \\right]^{-1}\n \n .. math::\n v_{gm} = 1.41\\left[\\frac{g\\sigma(\\rho_l-\\rho_g)}{\\rho_l^2}\\right]^{0.25}\n \n .. math::\n C_0 = \\left(0.82 + 0.18\\frac{P}{P_c}\\right)^{-1}\n \n Parameters\n ----------\n x : float\n Quality at the specific tube interval []\n rhol : float\n Density of the liquid [kg/m^3]\n rhog : float\n Density of the gas [kg/m^3]\n sigma : float\n Surface tension of liquid [N/m]\n m : float\n Mass flow rate of both phases, [kg/s]\n D : float\n Diameter of the channel, [m]\n P : float\n Pressure of the fluid, [Pa]\n Pc : float\n Critical pressure of the fluid, [Pa]\n g : float, optional\n Acceleration due to gravity, [m/s^2]\n\n Returns\n -------\n alpha : float\n Void fraction (area of gas / total area of channel), [-]\n\n Notes\n -----\n \n Examples\n --------\n >>> Sun_Duffey_Peng(0.4, 800., 2.5, sigma=0.02, m=1, D=0.3, P=1E5, Pc=7E6)\n 0.7696546506515833\n\n References\n ----------\n .. [1] K.H. Sun, R.B. Duffey, C.M. Peng, A thermal-hydraulic analysis of\n core uncover, in: Proceedings of the 19th National Heat Transfer \n Conference, Experimental and Analytical Modeling of LWR Safety \n Experiments, 1980, pp. 1-10. Orlando, Florida, USA.\n .. [2] Xu, Yu, and Xiande Fang. \"Correlations of Void Fraction for Two-\n Phase Refrigerant Flow in Pipes.\" Applied Thermal Engineering 64, no. \n 1-2 (March 2014): 242–51. doi:10.1016/j.applthermaleng.2013.12.032. \n .. [3] Woldesemayat, Melkamu A., and Afshin J. Ghajar. \"Comparison of Void \n Fraction Correlations for Different Flow Patterns in Horizontal and \n Upward Inclined Pipes.\" International Journal of Multiphase Flow 33, \n no. 4 (April 2007): 347-370. 
doi:10.1016/j.ijmultiphaseflow.2006.09.004.\n '''\n G = m/(pi/4*D**2)\n C0 = (0.82 + 0.18*P/Pc)**-1\n vgm = 1.41*(g*sigma*(rhol-rhog)/rhol**2)**0.25\n return x/rhog*(C0*(x/rhog + (1-x)/rhol) + vgm/G)**-1", "def nfwParam_physical(self, M, c):\n \"\"\"\n returns the NFW parameters in physical units\n :param M: physical mass in M_sun\n :param c: concentration\n :return:\n \"\"\"\n r200 = self.nfw_param.r200_M(M * self.h) / self.h * self.a_z(self.z_lens) # physical radius r200\n rho0 = self.nfw_param.rho0_c(c) * self.h**2 / self.a_z(self.z_lens)**3 # physical density in M_sun/Mpc**3\n Rs = r200/c\n return rho0, Rs, r200", "private static double getDTHalogenF(double[] resultsH) {\n double result = 0.0;\n double SE = resultsH[0];\n double PSC = resultsH[2];\n double PIC = resultsH[3];\n double ETP = resultsH[4];\n double SPC = resultsH[5];\n double COUNTR = resultsH[6];\n double COUNTAr = resultsH[7];\n\n //\t\tSystem.out.println(\"SE : \"+SE+\", PE : \"+PE+\", PSC : \"+PSC+\", PIC : \"+PIC+\", ETP : \"+ETP+\", SPC : \"+SPC+\", COUNTR : \"+COUNTR+\", COUNTAr : \"+COUNTAr);\n //model leastMedSq\n result = 0.272 * SE + 13.5814 * PSC + -4.4765 * PIC + -0.4937 * ETP + 0.0095 * SPC + -0.3706 * COUNTR + 0.5172\n * COUNTAr + 12.4183;\n return result;\n }" ]
[ 0.7682119011878967, 0.7114080190658569, 0.690197765827179, 0.6887338757514954, 0.6878402829170227, 0.6870186924934387, 0.6866941452026367, 0.6808981895446777, 0.6693978309631348, 0.6669307351112366, 0.6632845997810364, 0.6630220413208008 ]
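For reference (again outside the dataset row itself): the cduffy positive passage above is a single power law from Duffy et al. (2008), and it can be evaluated standalone. The sketch below reuses the relaxed '200crit' parameters (6.71, -0.091, -0.44) and the 2e12/0.72 pivot mass exactly as they appear in that passage; the helper name duffy_c and the 1e12 Msun test mass are only illustrative.

A, B, C = 6.71, -0.091, -0.44      # relaxed '200crit' parameters from the passage above
M_pivot = 2e12 / 0.72              # pivot mass in Msun, as written in cduffy

def duffy_c(z, M):
    # c(M, z) = A * (M / M_pivot)**B * (1 + z)**C
    return A * (M / M_pivot)**B * (1 + z)**C

print(duffy_c(0.0, 1e12))          # ~7.4 for a 1e12 Msun halo at z = 0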
Perturb best-fit constant of proportionality Ascaling for rho_crit - rho_2 relation for unknown cosmology (Correa et al 2015c) Parameters ---------- cosmo : dict Dictionary of cosmological parameters, similar in format to: {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275, 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0, 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6} Returns ------- float The perturbed 'A' relation between rho_2 and rho_crit for the cosmology Raises ------
def _delta_sigma(**cosmo): """ Perturb best-fit constant of proportionality Ascaling for rho_crit - rho_2 relation for unknown cosmology (Correa et al 2015c) Parameters ---------- cosmo : dict Dictionary of cosmological parameters, similar in format to: {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275, 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0, 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6} Returns ------- float The perturbed 'A' relation between rho_2 and rho_crit for the cosmology Raises ------ """ M8_cosmo = cp.perturbation.radius_to_mass(8, **cosmo) perturbed_A = (0.796/cosmo['sigma_8']) * \ (M8_cosmo/2.5e14)**((cosmo['n']-0.963)/6) return(perturbed_A)
[ "def getcosmo(cosmology):\n \"\"\" Find cosmological parameters for named cosmo in cosmology.py list \"\"\"\n\n defaultcosmologies = {'dragons': cg.DRAGONS(), 'wmap1': cg.WMAP1_Mill(),\n 'wmap3': cg.WMAP3_ML(), 'wmap5': cg.WMAP5_mean(),\n 'wmap7': cg.WMAP7_ML(), 'wmap9': cg.WMAP9_ML(),\n 'wmap1_lss': cg.WMAP1_2dF_mean(),\n 'wmap3_mean': cg.WMAP3_mean(),\n 'wmap5_ml': cg.WMAP5_ML(),\n 'wmap5_lss': cg.WMAP5_BAO_SN_mean(),\n 'wmap7_lss': cg.WMAP7_BAO_H0_mean(),\n 'planck13': cg.Planck_2013(),\n 'planck15': cg.Planck_2015()}\n\n if isinstance(cosmology, dict):\n # User providing their own variables\n cosmo = cosmology\n if 'A_scaling' not in cosmology.keys():\n A_scaling = getAscaling(cosmology, newcosmo=True)\n cosmo.update({'A_scaling': A_scaling})\n\n # Add extra variables by hand that cosmolopy requires\n # note that they aren't used (set to zero)\n for paramnames in cg.WMAP5_mean().keys():\n if paramnames not in cosmology.keys():\n cosmo.update({paramnames: 0})\n elif cosmology.lower() in defaultcosmologies.keys():\n # Load by name of cosmology instead\n cosmo = defaultcosmologies[cosmology.lower()]\n A_scaling = getAscaling(cosmology)\n cosmo.update({'A_scaling': A_scaling})\n else:\n print(\"You haven't passed a dict of cosmological parameters \")\n print(\"OR a recognised cosmology, you gave %s\" % (cosmology))\n # No idea why this has to be done by hand but should be O_k = 0\n cosmo = cp.distance.set_omega_k_0(cosmo)\n\n # Use the cosmology as **cosmo passed to cosmolopy routines\n return(cosmo)", "def _minimize_c(c, z=0, a_tilde=1, b_tilde=-1,\n Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):\n \"\"\" Trial function to solve 2 eqns (17 and 18) from Correa et al. (2015c)\n for 1 unknown, i.e. concentration, returned by a minimisation call \"\"\"\n\n # Fn 1 (LHS of Eqn 18)\n\n Y1 = np.log(2) - 0.5\n Yc = np.log(1+c) - c/(1+c)\n f1 = Y1/Yc\n\n # Fn 2 (RHS of Eqn 18)\n\n # Eqn 14 - Define the mean inner density\n rho_2 = 200 * c**3 * Y1 / Yc\n\n # Eqn 17 rearranged to solve for Formation Redshift\n # essentially when universe had rho_2 density\n zf = (((1 + z)**3 + omega_lambda_0/omega_M_0) *\n (rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1\n\n # RHS of Eqn 19\n f2 = ((1 + zf - z)**a_tilde) * np.exp((zf - z) * b_tilde)\n\n # LHS - RHS should be zero for the correct concentration\n return(f1-f2)", "def getAscaling(cosmology, newcosmo=None):\n \"\"\" Returns the normalisation constant between\n Rho_-2 and Rho_mean(z_formation) for a given cosmology\n\n Parameters\n ----------\n cosmology : str or dict\n Can be named cosmology, default WMAP7 (aka DRAGONS), or\n DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15\n or dictionary similar in format to:\n {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,\n 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,\n 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}\n newcosmo : str, optional\n If cosmology is not from predefined list have to perturbation\n A_scaling variable. 
Defaults to None.\n\n Returns\n -------\n float\n The scaled 'A' relation between rho_2 and rho_crit for the cosmology\n\n \"\"\"\n # Values from Correa 15c\n defaultcosmologies = {'dragons': 887, 'wmap1': 853, 'wmap3': 850,\n 'wmap5': 887, 'wmap7': 887, 'wmap9': 950,\n 'wmap1_lss': 853, 'wmap3_mean': 850,\n 'wmap5_ml': 887, 'wmap5_lss': 887,\n 'wmap7_lss': 887,\n 'planck13': 880, 'planck15': 880}\n\n if newcosmo:\n # Scale from default WMAP5 cosmology using Correa et al 14b eqn C1\n A_scaling = defaultcosmologies['wmap5'] * _delta_sigma(**cosmology)\n else:\n if cosmology.lower() in defaultcosmologies.keys():\n A_scaling = defaultcosmologies[cosmology.lower()]\n else:\n print(\"Error, don't recognise your cosmology for A_scaling \")\n print(\"You provided %s\" % (cosmology))\n\n return(A_scaling)", "def Planck_2015(flat=False, extras=True):\n \"\"\"Planck 2015 XII: Cosmological parameters Table 4\n column Planck TT, TE, EE + lowP + lensing + ext\n from Ade et al. (2015) A&A in press (arxiv:1502.01589v1)\n\n Parameters\n ----------\n\n flat: boolean\n\n If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0\n = 0 exactly. Also sets omega_k_0 = 0 explicitly.\n\n extras: boolean\n\n If True, sets neutrino number N_nu = 0, neutrino density\n omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.\n\n \"\"\"\n omega_b_0 = 0.02230/(0.6774**2)\n cosmo = {'omega_b_0': omega_b_0,\n 'omega_M_0': 0.3089,\n 'omega_lambda_0': 0.6911,\n 'h': 0.6774,\n 'n': 0.9667,\n 'sigma_8': 0.8159,\n 'tau': 0.066,\n 'z_reion': 8.8,\n 't_0': 13.799,\n }\n if flat:\n cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']\n cosmo['omega_k_0'] = 0.0\n if extras:\n add_extras(cosmo)\n return cosmo", "def _deriv_growth(z, **cosmo):\n \"\"\" Returns derivative of the linear growth factor at z\n for a given cosmology **cosmo \"\"\"\n\n inv_h = (cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])**(-0.5)\n fz = (1 + z) * inv_h**3\n\n deriv_g = growthfactor(z, norm=True, **cosmo)*(inv_h**2) *\\\n 1.5 * cosmo['omega_M_0'] * (1 + z)**2 -\\\n fz * growthfactor(z, norm=True, **cosmo)/_int_growth(z, **cosmo)\n\n return(deriv_g)", "def oortC(self,R,romberg=False,nsigma=None,phi=0.):\n \"\"\"\n NAME:\n\n oortC\n\n PURPOSE:\n\n calculate the Oort function C\n\n INPUT:\n\n R - radius at which to calculate C (can be Quantity)\n\n OPTIONAL INPUT:\n\n nsigma - number of sigma to integrate the velocities over\n\n KEYWORDS:\n\n romberg - if True, use a romberg integrator (default: False)\n\n OUTPUT:\n\n Oort C at R\n\n HISTORY:\n\n 2011-04-19 - Written - Bovy (NYU)\n\n BUGS:\n\n could be made more efficient, e.g., surfacemass is calculated multiple times\n we know this is zero, but it is calculated anyway (bug or feature?)\n\n \"\"\"\n #2C= -meanvR/R-dmeanvphi/R/dphi+dmeanvR/dR\n meanvr= self.meanvR(R,romberg=romberg,nsigma=nsigma,phi=phi,\n use_physical=False)\n dmeanvphiRdphi= 0. 
#We know this, since the DF does not depend on phi\n surfmass= self._vmomentsurfacemass(R,0,0,phi=phi,romberg=romberg,nsigma=nsigma)\n dmeanvRdR= self._vmomentsurfacemass(R,1,0,deriv='R',phi=phi,romberg=romberg,nsigma=nsigma)/\\\n surfmass #other terms is zero because f is even in vR\n return 0.5*(-meanvr/R-dmeanvphiRdphi/R+dmeanvRdR)", "def Get_rhos(dur, **kwargs):\n '''\n Returns the value of the stellar density for a given transit\n duration :py:obj:`dur`, given\n the :py:class:`everest.pysyzygy` transit :py:obj:`kwargs`.\n\n '''\n if ps is None:\n raise Exception(\"Unable to import `pysyzygy`.\")\n\n assert dur >= 0.01 and dur <= 0.5, \"Invalid value for the duration.\"\n\n def Dur(rhos, **kwargs):\n t0 = kwargs.get('t0', 0.)\n time = np.linspace(t0 - 0.5, t0 + 0.5, 1000)\n try:\n t = time[np.where(ps.Transit(rhos=rhos, **kwargs)(time) < 1)]\n except:\n return 0.\n return t[-1] - t[0]\n\n def DiffSq(rhos):\n return (dur - Dur(rhos, **kwargs)) ** 2\n\n return fmin(DiffSq, [0.2], disp=False)", "def Pgas(rho,T,mu):\n ''' \n P = R/mu * rho * T\n\n Parameters\n ----------\n mu : float\n Mean molecular weight\n rho : float\n Density [cgs]\n T : float\n Temperature [K]\n\n '''\n R = old_div(boltzmann_constant, atomic_mass_unit)\n return (old_div(R,mu)) * rho * T", "def DRAGONS(flat=False, extras=True):\n \"\"\"DRAGONS cosmology assumes WMAP7 + BAO + H_0 mean from\n Komatsu et al. (2011) ApJS 192 18K (arxiv:1001.4538v1)\n\n Parameters\n ----------\n\n flat: boolean\n\n If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0\n = 0 exactly. Also sets omega_k_0 = 0 explicitly.\n\n extras: boolean\n\n If True, sets neutrino number N_nu = 0, neutrino density\n omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.\n\n \"\"\"\n omega_c_0 = 0.2292\n omega_b_0 = 0.0458\n cosmo = {'omega_b_0': omega_b_0,\n 'omega_M_0': omega_b_0 + omega_c_0,\n 'omega_lambda_0': 0.725,\n 'h': 0.702,\n 'n': 0.963,\n 'sigma_8': 0.816,\n 'tau': 0.088,\n 'z_reion': 10.6,\n 't_0': 13.76,\n }\n if flat:\n cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']\n cosmo['omega_k_0'] = 0.0\n if extras:\n add_extras(cosmo)\n return cosmo", "def ccor2(alt, r, h1, zh, h2):\n '''\n/* CHEMISTRY/DISSOCIATION CORRECTION FOR MSIS MODELS\n * ALT - altitude\n * R - target ratio\n * H1 - transition scale length\n * ZH - altitude of 1/2 R\n * H2 - transition scale length #2 ?\n */\n '''\n e1 = (alt - zh) / h1;\n e2 = (alt - zh) / h2;\n if ((e1 > 70.0) or (e2 > 70)): # pragma: no cover\n return 1.0 # exp(0)\n if ((e1 < -70) and (e2 < -70)): # pragma: no cover\n return exp(r)\n ex1 = exp(e1);\n ex2 = exp(e2);\n ccor2v = r / (1.0 + 0.5 * (ex1 + ex2));\n return exp(ccor2v);", "def rho0_c(self, c):\n \"\"\"\n computes density normalization as a function of concentration parameter\n :return: density normalization in h^2/Mpc^3 (comoving)\n \"\"\"\n return 200./3*self.rhoc*c**3/(np.log(1.+c)-c/(1.+c))", "def _update_rho(self, k, r, s):\n \"\"\"\n Patched version of :func:`sporco.admm.admm.ADMM.update_rho`.\"\"\"\n\n if self.opt['AutoRho', 'Enabled']:\n tau = self.rho_tau\n mu = self.rho_mu\n xi = self.rho_xi\n if k != 0 and cp.mod(k + 1, self.opt['AutoRho', 'Period']) == 0:\n if self.opt['AutoRho', 'AutoScaling']:\n if s == 0.0 or r == 0.0:\n rhomlt = tau\n else:\n rhomlt = cp.sqrt(r / (s * xi) if r > s * xi\n else (s * xi) / r)\n if rhomlt > tau:\n rhomlt = tau\n else:\n rhomlt = tau\n rsf = 1.0\n if r > xi * mu * s:\n rsf = rhomlt\n elif s > (mu / xi) * r:\n rsf = 1.0 / rhomlt\n self.rho *= float(rsf)\n self.U /= rsf\n if rsf != 1.0:\n 
self.rhochange()" ]
[ 0.7133972644805908, 0.6964385509490967, 0.686434805393219, 0.677892804145813, 0.674342930316925, 0.673985481262207, 0.6658235192298889, 0.6623619198799133, 0.6607682704925537, 0.6578128933906555, 0.6576458215713501, 0.6519357562065125 ]
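A note on the _delta_sigma row above: once M8 is known, the perturbation is plain arithmetic. In the actual code M8 comes from cp.perturbation.radius_to_mass(8, **cosmo); the value below is a hypothetical stand-in used only to show the scaling, with the sigma_8 and n values taken from the docstring's example cosmology.

sigma_8, n = 0.816, 0.963          # WMAP5-like values quoted in the docstring
M8 = 2.5e14                        # hypothetical placeholder for radius_to_mass(8, **cosmo)
perturbed_A = (0.796 / sigma_8) * (M8 / 2.5e14)**((n - 0.963) / 6)
print(perturbed_A)                 # ~0.975; note that with n = 0.963 the M8 factor drops out entirely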
Returns the normalisation constant between Rho_-2 and Rho_mean(z_formation) for a given cosmology Parameters ---------- cosmology : str or dict Can be named cosmology, default WMAP7 (aka DRAGONS), or DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15 or dictionary similar in format to: {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275, 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0, 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6} newcosmo : str, optional If cosmology is not from predefined list have to perturbation A_scaling variable. Defaults to None. Returns ------- float The scaled 'A' relation between rho_2 and rho_crit for the cosmology
def getAscaling(cosmology, newcosmo=None): """ Returns the normalisation constant between Rho_-2 and Rho_mean(z_formation) for a given cosmology Parameters ---------- cosmology : str or dict Can be named cosmology, default WMAP7 (aka DRAGONS), or DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15 or dictionary similar in format to: {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275, 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0, 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6} newcosmo : str, optional If cosmology is not from predefined list have to perturbation A_scaling variable. Defaults to None. Returns ------- float The scaled 'A' relation between rho_2 and rho_crit for the cosmology """ # Values from Correa 15c defaultcosmologies = {'dragons': 887, 'wmap1': 853, 'wmap3': 850, 'wmap5': 887, 'wmap7': 887, 'wmap9': 950, 'wmap1_lss': 853, 'wmap3_mean': 850, 'wmap5_ml': 887, 'wmap5_lss': 887, 'wmap7_lss': 887, 'planck13': 880, 'planck15': 880} if newcosmo: # Scale from default WMAP5 cosmology using Correa et al 14b eqn C1 A_scaling = defaultcosmologies['wmap5'] * _delta_sigma(**cosmology) else: if cosmology.lower() in defaultcosmologies.keys(): A_scaling = defaultcosmologies[cosmology.lower()] else: print("Error, don't recognise your cosmology for A_scaling ") print("You provided %s" % (cosmology)) return(A_scaling)
[ "def _delta_sigma(**cosmo):\n \"\"\" Perturb best-fit constant of proportionality Ascaling for\n rho_crit - rho_2 relation for unknown cosmology (Correa et al 2015c)\n\n Parameters\n ----------\n cosmo : dict\n Dictionary of cosmological parameters, similar in format to:\n {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,\n 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,\n 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}\n\n Returns\n -------\n float\n The perturbed 'A' relation between rho_2 and rho_crit for the cosmology\n\n Raises\n ------\n\n \"\"\"\n\n M8_cosmo = cp.perturbation.radius_to_mass(8, **cosmo)\n perturbed_A = (0.796/cosmo['sigma_8']) * \\\n (M8_cosmo/2.5e14)**((cosmo['n']-0.963)/6)\n return(perturbed_A)", "def getcosmo(cosmology):\n \"\"\" Find cosmological parameters for named cosmo in cosmology.py list \"\"\"\n\n defaultcosmologies = {'dragons': cg.DRAGONS(), 'wmap1': cg.WMAP1_Mill(),\n 'wmap3': cg.WMAP3_ML(), 'wmap5': cg.WMAP5_mean(),\n 'wmap7': cg.WMAP7_ML(), 'wmap9': cg.WMAP9_ML(),\n 'wmap1_lss': cg.WMAP1_2dF_mean(),\n 'wmap3_mean': cg.WMAP3_mean(),\n 'wmap5_ml': cg.WMAP5_ML(),\n 'wmap5_lss': cg.WMAP5_BAO_SN_mean(),\n 'wmap7_lss': cg.WMAP7_BAO_H0_mean(),\n 'planck13': cg.Planck_2013(),\n 'planck15': cg.Planck_2015()}\n\n if isinstance(cosmology, dict):\n # User providing their own variables\n cosmo = cosmology\n if 'A_scaling' not in cosmology.keys():\n A_scaling = getAscaling(cosmology, newcosmo=True)\n cosmo.update({'A_scaling': A_scaling})\n\n # Add extra variables by hand that cosmolopy requires\n # note that they aren't used (set to zero)\n for paramnames in cg.WMAP5_mean().keys():\n if paramnames not in cosmology.keys():\n cosmo.update({paramnames: 0})\n elif cosmology.lower() in defaultcosmologies.keys():\n # Load by name of cosmology instead\n cosmo = defaultcosmologies[cosmology.lower()]\n A_scaling = getAscaling(cosmology)\n cosmo.update({'A_scaling': A_scaling})\n else:\n print(\"You haven't passed a dict of cosmological parameters \")\n print(\"OR a recognised cosmology, you gave %s\" % (cosmology))\n # No idea why this has to be done by hand but should be O_k = 0\n cosmo = cp.distance.set_omega_k_0(cosmo)\n\n # Use the cosmology as **cosmo passed to cosmolopy routines\n return(cosmo)", "def _deriv_growth(z, **cosmo):\n \"\"\" Returns derivative of the linear growth factor at z\n for a given cosmology **cosmo \"\"\"\n\n inv_h = (cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])**(-0.5)\n fz = (1 + z) * inv_h**3\n\n deriv_g = growthfactor(z, norm=True, **cosmo)*(inv_h**2) *\\\n 1.5 * cosmo['omega_M_0'] * (1 + z)**2 -\\\n fz * growthfactor(z, norm=True, **cosmo)/_int_growth(z, **cosmo)\n\n return(deriv_g)", "def _minimize_c(c, z=0, a_tilde=1, b_tilde=-1,\n Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):\n \"\"\" Trial function to solve 2 eqns (17 and 18) from Correa et al. (2015c)\n for 1 unknown, i.e. 
concentration, returned by a minimisation call \"\"\"\n\n # Fn 1 (LHS of Eqn 18)\n\n Y1 = np.log(2) - 0.5\n Yc = np.log(1+c) - c/(1+c)\n f1 = Y1/Yc\n\n # Fn 2 (RHS of Eqn 18)\n\n # Eqn 14 - Define the mean inner density\n rho_2 = 200 * c**3 * Y1 / Yc\n\n # Eqn 17 rearranged to solve for Formation Redshift\n # essentially when universe had rho_2 density\n zf = (((1 + z)**3 + omega_lambda_0/omega_M_0) *\n (rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1\n\n # RHS of Eqn 19\n f2 = ((1 + zf - z)**a_tilde) * np.exp((zf - z) * b_tilde)\n\n # LHS - RHS should be zero for the correct concentration\n return(f1-f2)", "def DRAGONS(flat=False, extras=True):\n \"\"\"DRAGONS cosmology assumes WMAP7 + BAO + H_0 mean from\n Komatsu et al. (2011) ApJS 192 18K (arxiv:1001.4538v1)\n\n Parameters\n ----------\n\n flat: boolean\n\n If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0\n = 0 exactly. Also sets omega_k_0 = 0 explicitly.\n\n extras: boolean\n\n If True, sets neutrino number N_nu = 0, neutrino density\n omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.\n\n \"\"\"\n omega_c_0 = 0.2292\n omega_b_0 = 0.0458\n cosmo = {'omega_b_0': omega_b_0,\n 'omega_M_0': omega_b_0 + omega_c_0,\n 'omega_lambda_0': 0.725,\n 'h': 0.702,\n 'n': 0.963,\n 'sigma_8': 0.816,\n 'tau': 0.088,\n 'z_reion': 10.6,\n 't_0': 13.76,\n }\n if flat:\n cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']\n cosmo['omega_k_0'] = 0.0\n if extras:\n add_extras(cosmo)\n return cosmo", "def _getcosmoheader(cosmo):\n \"\"\" Output the cosmology to a string for writing to file \"\"\"\n\n cosmoheader = (\"# Cosmology (flat) Om:{0:.3f}, Ol:{1:.3f}, h:{2:.2f}, \"\n \"sigma8:{3:.3f}, ns:{4:.2f}\".format(\n cosmo['omega_M_0'], cosmo['omega_lambda_0'], cosmo['h'],\n cosmo['sigma_8'], cosmo['n']))\n\n return(cosmoheader)", "def ccor2(alt, r, h1, zh, h2):\n '''\n/* CHEMISTRY/DISSOCIATION CORRECTION FOR MSIS MODELS\n * ALT - altitude\n * R - target ratio\n * H1 - transition scale length\n * ZH - altitude of 1/2 R\n * H2 - transition scale length #2 ?\n */\n '''\n e1 = (alt - zh) / h1;\n e2 = (alt - zh) / h2;\n if ((e1 > 70.0) or (e2 > 70)): # pragma: no cover\n return 1.0 # exp(0)\n if ((e1 < -70) and (e2 < -70)): # pragma: no cover\n return exp(r)\n ex1 = exp(e1);\n ex2 = exp(e2);\n ccor2v = r / (1.0 + 0.5 * (ex1 + ex2));\n return exp(ccor2v);", "def oortC(self,R,romberg=False,nsigma=None,phi=0.):\n \"\"\"\n NAME:\n\n oortC\n\n PURPOSE:\n\n calculate the Oort function C\n\n INPUT:\n\n R - radius at which to calculate C (can be Quantity)\n\n OPTIONAL INPUT:\n\n nsigma - number of sigma to integrate the velocities over\n\n KEYWORDS:\n\n romberg - if True, use a romberg integrator (default: False)\n\n OUTPUT:\n\n Oort C at R\n\n HISTORY:\n\n 2011-04-19 - Written - Bovy (NYU)\n\n BUGS:\n\n could be made more efficient, e.g., surfacemass is calculated multiple times\n we know this is zero, but it is calculated anyway (bug or feature?)\n\n \"\"\"\n #2C= -meanvR/R-dmeanvphi/R/dphi+dmeanvR/dR\n meanvr= self.meanvR(R,romberg=romberg,nsigma=nsigma,phi=phi,\n use_physical=False)\n dmeanvphiRdphi= 0. 
#We know this, since the DF does not depend on phi\n surfmass= self._vmomentsurfacemass(R,0,0,phi=phi,romberg=romberg,nsigma=nsigma)\n dmeanvRdR= self._vmomentsurfacemass(R,1,0,deriv='R',phi=phi,romberg=romberg,nsigma=nsigma)/\\\n surfmass #other terms is zero because f is even in vR\n return 0.5*(-meanvr/R-dmeanvphiRdphi/R+dmeanvRdR)", "def nu_mu_converter(rho, mu=None, nu=None):\n r'''Calculates either kinematic or dynamic viscosity, depending on inputs.\n Used when one type of viscosity is known as well as density, to obtain\n the other type. Raises an error if both types of viscosity or neither type\n of viscosity is provided.\n\n .. math::\n \\nu = \\frac{\\mu}{\\rho}\n\n .. math::\n \\mu = \\nu\\rho\n\n Parameters\n ----------\n rho : float\n Density, [kg/m^3]\n mu : float, optional\n Dynamic viscosity, [Pa*s]\n nu : float, optional\n Kinematic viscosity, [m^2/s]\n\n Returns\n -------\n mu or nu : float\n Dynamic viscosity, Pa*s or Kinematic viscosity, m^2/s\n\n Examples\n --------\n >>> nu_mu_converter(998., nu=1.0E-6)\n 0.000998\n\n References\n ----------\n .. [1] Cengel, Yunus, and John Cimbala. Fluid Mechanics: Fundamentals and\n Applications. Boston: McGraw Hill Higher Education, 2006.\n '''\n if (nu and mu) or not rho or (not nu and not mu):\n raise Exception('Inputs must be rho and one of mu and nu.')\n if mu:\n return mu/rho\n elif nu:\n return nu*rho", "def Rz_to_coshucosv(R,z,delta=1.,oblate=False):\n \"\"\"\n NAME:\n\n Rz_to_coshucosv\n\n PURPOSE:\n\n calculate prolate confocal cosh(u) and cos(v) coordinates from R,z, and delta\n\n INPUT:\n\n R - radius\n\n z - height\n\n delta= focus\n\n oblate= (False) if True, compute oblate confocal coordinates instead of prolate\n OUTPUT:\n\n (cosh(u),cos(v))\n\n HISTORY:\n\n 2012-11-27 - Written - Bovy (IAS)\n\n 2017-10-11 - Added oblate coordinates - Bovy (UofT)\n\n \"\"\"\n if oblate:\n d12= (R+delta)**2.+z**2.\n d22= (R-delta)**2.+z**2.\n else:\n d12= (z+delta)**2.+R**2.\n d22= (z-delta)**2.+R**2.\n coshu= 0.5/delta*(sc.sqrt(d12)+sc.sqrt(d22))\n cosv= 0.5/delta*(sc.sqrt(d12)-sc.sqrt(d22))\n if oblate: # cosv is currently really sinv\n cosv= sc.sqrt(1.-cosv**2.)\n return (coshu,cosv)", "def cosmological_quantity_from_redshift(z, quantity, strip_unit=True,\n **kwargs):\n r\"\"\"Returns the value of a cosmological quantity (e.g., age) at a redshift.\n\n Parameters\n ----------\n z : float\n The redshift.\n quantity : str\n The name of the quantity to get. The name may be any attribute of\n :py:class:`astropy.cosmology.FlatLambdaCDM`.\n strip_unit : bool, optional\n Just return the value of the quantity, sans units. Default is True.\n \\**kwargs :\n All other keyword args are passed to :py:func:`get_cosmology` to\n select a cosmology. If none provided, will use\n :py:attr:`DEFAULT_COSMOLOGY`.\n\n Returns\n -------\n float or astropy.units.quantity :\n The value of the quantity at the requested value. If ``strip_unit`` is\n ``True``, will return the value. Otherwise, will return the value with\n units.\n \"\"\"\n cosmology = get_cosmology(**kwargs)\n val = getattr(cosmology, quantity)(z)\n if strip_unit:\n val = val.value\n return val", "def rho0_c(self, c):\n \"\"\"\n computes density normalization as a function of concentration parameter\n :return: density normalization in h^2/Mpc^3 (comoving)\n \"\"\"\n return 200./3*self.rhoc*c**3/(np.log(1.+c)-c/(1.+c))" ]
[ 0.7695544958114624, 0.7592920660972595, 0.695005476474762, 0.6937953233718872, 0.6598114967346191, 0.6570345759391785, 0.6557053923606873, 0.6541890501976013, 0.6424201726913452, 0.6384008526802063, 0.6375933885574341, 0.6364959478378296 ]
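For orientation, the positive passage above reduces to a table lookup of the Correa et al. (2015c) normalisation constant for a named cosmology, with a perturbative fallback for user-supplied parameter dictionaries. Below is a minimal sketch of the named-lookup path only, with the constants copied from the passage; the names `A_SCALING` and `lookup_ascaling` and the raise-on-unknown behaviour are illustrative assumptions (the quoted function prints a warning instead of raising).

```python
# Sketch of the named-cosmology A_scaling lookup; values copied from the
# passage above. Raising on an unknown name is an assumption, not the
# behaviour of the quoted function.
A_SCALING = {'dragons': 887, 'wmap1': 853, 'wmap3': 850, 'wmap5': 887,
             'wmap7': 887, 'wmap9': 950, 'planck13': 880, 'planck15': 880}

def lookup_ascaling(name):
    try:
        return A_SCALING[name.lower()]
    except KeyError:
        raise KeyError("Unrecognised cosmology %r for A_scaling" % name)

print(lookup_ascaling('WMAP5'))   # 887
```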
Returns integral of the linear growth factor from z=200 to z=z
def _int_growth(z, **cosmo):
    """ Returns integral of the linear growth factor from z=200 to z=z """

    zmax = 200

    if hasattr(z, "__len__"):
        for zval in z:
            assert(zval < zmax)
    else:
        assert(z < zmax)

    y, yerr = scipy.integrate.quad(
        lambda z: (1 + z)/(cosmo['omega_M_0']*(1 + z)**3 +
                           cosmo['omega_lambda_0'])**(1.5),
        z, zmax)

    return(y)
[ "def _deriv_growth(z, **cosmo):\n \"\"\" Returns derivative of the linear growth factor at z\n for a given cosmology **cosmo \"\"\"\n\n inv_h = (cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])**(-0.5)\n fz = (1 + z) * inv_h**3\n\n deriv_g = growthfactor(z, norm=True, **cosmo)*(inv_h**2) *\\\n 1.5 * cosmo['omega_M_0'] * (1 + z)**2 -\\\n fz * growthfactor(z, norm=True, **cosmo)/_int_growth(z, **cosmo)\n\n return(deriv_g)", "def _JzIntegrand(z,Ez,pot):\n \"\"\"The J_z integrand\"\"\"\n return nu.sqrt(2.*(Ez-potentialVertical(z,pot)))", "def _g(self, z):\n \"\"\"Helper function to solve Frank copula.\n\n This functions encapsulates :math:`g_z = e^{-\\\\theta z} - 1` used on Frank copulas.\n\n Argument:\n z: np.ndarray\n\n Returns:\n np.ndarray\n \"\"\"\n return np.exp(np.multiply(-self.theta, z)) - 1", "def get_total_spatial_integral(self, z=None): \n \"\"\"\n Returns the total integral (for 2D functions) or the integral over the spatial components (for 3D functions).\n needs to be implemented in subclasses.\n\n :return: an array of values of the integral (same dimension as z).\n \"\"\"\n\n dL= self.l_max.value-self.l_min.value if self.l_max.value > self.l_min.value else 360 + self.l_max.value - self.l_max.value\n\n #integral -inf to inf exp(-b**2 / 2*sigma_b**2 ) db = sqrt(2pi)*sigma_b \n #Note that K refers to the peak diffuse flux (at b = 0) per square degree.\n integral = np.sqrt( 2*np.pi ) * self.sigma_b.value * self.K.value * dL \n\n if isinstance( z, u.Quantity):\n z = z.value\n return integral * np.power( 180. / np.pi, -2 ) * np.ones_like( z )", "def _forceInt(x,y,z,dens,b2,c2,i,glx=None,glw=None):\n \"\"\"Integral that gives the force in x,y,z\"\"\"\n def integrand(s):\n t= 1/s**2.-1.\n return dens(numpy.sqrt(x**2./(1.+t)+y**2./(b2+t)+z**2./(c2+t)))\\\n *(x/(1.+t)*(i==0)+y/(b2+t)*(i==1)+z/(c2+t)*(i==2))\\\n /numpy.sqrt((1.+(b2-1.)*s**2.)*(1.+(c2-1.)*s**2.))\n if glx is None:\n return integrate.quad(integrand,0.,1.)[0] \n else:\n return numpy.sum(glw*integrand(glx))", "public static double gaussCdf(double z) {\n // input = z-value (-inf to +inf)\n // output = p under Normal curve from -inf to z\n // e.g., if z = 0.0, function returns 0.5000\n // ACM Algorithm #209\n double y; // 209 scratch variable\n double p; // result. called ‘z’ in 209\n double w; // 209 scratch variable\n\n if (z == 0.0) {\n p = 0.0;\n }\n else {\n y = Math.abs(z) / 2.0;\n if (y >= 3.0) {\n p = 1.0;\n }\n else if (y < 1.0) {\n w = y * y;\n p = ((((((((0.000124818987 * w\n - 0.001075204047) * w + 0.005198775019) * w\n - 0.019198292004) * w + 0.059054035642) * w\n - 0.151968751364) * w + 0.319152932694) * w\n - 0.531923007300) * w + 0.797884560593) * y * 2.0;\n }\n else {\n y = y - 2.0;\n p = (((((((((((((-0.000045255659 * y\n + 0.000152529290) * y - 0.000019538132) * y\n - 0.000676904986) * y + 0.001390604284) * y\n - 0.000794620820) * y - 0.002034254874) * y\n + 0.006549791214) * y - 0.010557625006) * y\n + 0.011630447319) * y - 0.009279453341) * y\n + 0.005353579108) * y - 0.002141268741) * y\n + 0.000535310849) * y + 0.999936657524;\n }\n }\n\n if (z > 0.0) {\n return (p + 1.0) / 2.0;\n }\n \n return (1.0 - p) / 2.0;\n }", "public static double GaussCdf(double z)\n {\n // input = z-value (-inf to +inf)\n // output = p under Normal curve from -inf to z\n // e.g., if z = 0.0, function returns 0.5000\n // ACM Algorithm #209\n double y; // 209 scratch variable\n double p; // result. 
called ‘z’ in 209\n double w; // 209 scratch variable\n\n if (z == 0.0)\n {\n p = 0.0;\n }\n else\n {\n y = Math.abs(z) / 2.0;\n if (y >= 3.0)\n {\n p = 1.0;\n }\n else if (y < 1.0)\n {\n w = y * y;\n p = ((((((((0.000124818987 * w\n - 0.001075204047) * w + 0.005198775019) * w\n - 0.019198292004) * w + 0.059054035642) * w\n - 0.151968751364) * w + 0.319152932694) * w\n - 0.531923007300) * w + 0.797884560593) * y * 2.0;\n }\n else\n {\n y = y - 2.0;\n p = (((((((((((((-0.000045255659 * y\n + 0.000152529290) * y - 0.000019538132) * y\n - 0.000676904986) * y + 0.001390604284) * y\n - 0.000794620820) * y - 0.002034254874) * y\n + 0.006549791214) * y - 0.010557625006) * y\n + 0.011630447319) * y - 0.009279453341) * y\n + 0.005353579108) * y - 0.002141268741) * y\n + 0.000535310849) * y + 0.999936657524;\n }\n }\n\n if (z > 0.0)\n {\n return (p + 1.0) / 2.0;\n }\n\n return (1.0 - p) / 2.0;\n }", "def _potInt(x,y,z,psi,b2,c2,glx=None,glw=None):\n \"\"\"int_0^\\infty [psi(m)-psi(\\infy)]/sqrt([1+tau]x[b^2+tau]x[c^2+tau])dtau\"\"\"\n def integrand(s):\n t= 1/s**2.-1.\n return psi(numpy.sqrt(x**2./(1.+t)+y**2./(b2+t)+z**2./(c2+t)))\\\n /numpy.sqrt((1.+(b2-1.)*s**2.)*(1.+(c2-1.)*s**2.))\n if glx is None:\n return integrate.quad(integrand,0.,1.)[0] \n else:\n return numpy.sum(glw*integrand(glx))", "def _forceInt(x,y,z,a2,b2,c2,n,i):\n \"\"\"Integral involved in the force at (x,y,z)\n integrates 1/A B^n (x_i/(tau+a_i)) where\n A = sqrt((tau+a)(tau+b)(tau+c)) and B = (1-x^2/(tau+a)-y^2/(tau+b)-z^2/(tau+c))\n from lambda to infty with respect to tau.\n The lower limit lambda is given by lowerlim function.\n \"\"\"\n def integrand(tau):\n return (x*(i==0) + y*(i==1) + z*(i==2))/(a2*(i==0) + b2*(i==1) + c2*(i==2) + tau) * \\\n _FracInt(x, y, z, a2, b2, c2, tau, n)\n return integrate.quad(integrand, lowerlim(x**2, y**2, z**2, a2, b2, c2), np.inf, epsabs=1e-12)[0]", "def _jmomentsurfaceIntegrand(vz,vR,vT,R,z,df,sigmaR1,gamma,sigmaz1,n,m,o): #pragma: no cover because this is too slow; a warning is shown\n \"\"\"Internal function that is the integrand for the vmomentsurface mass integration\"\"\"\n return df(R,vR*sigmaR1,vT*sigmaR1*gamma,z,vz*sigmaz1,use_physical=False,\n func= (lambda x,y,z: x**n*y**m*z**o))", "def get_total_spatial_integral(self, z=None): \n \"\"\"\n Returns the total integral (for 2D functions) or the integral over the spatial components (for 3D functions).\n needs to be implemented in subclasses.\n\n :return: an array of values of the integral (same dimension as z).\n \"\"\"\n\n if isinstance( z, u.Quantity):\n z = z.value\n return np.ones_like( z )", "public static double logGamma(double Z) {\n double S = 1.0 + 76.18009173/Z-86.50532033/(Z+1.0)+24.01409822/(Z+2.0)-1.231739516/(Z+3.0)+0.00120858003/(Z+4.0)-0.00000536382/(Z+5.0);\n double LG = (Z-0.5)*Math.log(Z+4.5)-(Z+4.5)+Math.log(S*2.50662827465);\n \n return LG;\n }" ]
[ 0.7333215475082397, 0.7216445803642273, 0.7022789716720581, 0.6984469890594482, 0.6969737410545349, 0.6935060024261475, 0.6865224838256836, 0.6841789484024048, 0.6812206506729126, 0.6792978644371033, 0.6761254072189331, 0.6734105944633484 ]
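The integral in the positive passage is the standard linear-growth integral, the integral from z up to 200 of (1 + z') / E(z')^3 dz' with E(z)^2 = omega_M_0 (1 + z)^3 + omega_lambda_0; note that the quad call integrates upward from z to zmax = 200 even though the docstring phrases it the other way round. A self-contained sketch with placeholder density parameters (0.25 / 0.75, assumed rather than taken from the dataset) and SciPy assumed available:

```python
# Sketch: evaluating the growth-factor integral for one scalar redshift.
# omega_M_0 = 0.25 and omega_lambda_0 = 0.75 are placeholder values.
import scipy.integrate

def int_growth(z, omega_M_0=0.25, omega_lambda_0=0.75, zmax=200):
    integrand = lambda zp: (1 + zp) / (omega_M_0*(1 + zp)**3 + omega_lambda_0)**1.5
    y, _err = scipy.integrate.quad(integrand, z, zmax)
    return y

print(int_growth(0.0))   # integral of (1+z')/E(z')^3 from z=0 up to z=200
```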
Returns derivative of the linear growth factor at z for a given cosmology **cosmo
def _deriv_growth(z, **cosmo):
    """ Returns derivative of the linear growth factor at z
    for a given cosmology **cosmo """

    inv_h = (cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])**(-0.5)
    fz = (1 + z) * inv_h**3

    deriv_g = growthfactor(z, norm=True, **cosmo)*(inv_h**2) *\
        1.5 * cosmo['omega_M_0'] * (1 + z)**2 -\
        fz * growthfactor(z, norm=True, **cosmo)/_int_growth(z, **cosmo)

    return(deriv_g)
[ "def _int_growth(z, **cosmo):\n \"\"\" Returns integral of the linear growth factor from z=200 to z=z \"\"\"\n\n zmax = 200\n\n if hasattr(z, \"__len__\"):\n for zval in z:\n assert(zval < zmax)\n else:\n assert(z < zmax)\n\n y, yerr = scipy.integrate.quad(\n lambda z: (1 + z)/(cosmo['omega_M_0']*(1 + z)**3 +\n cosmo['omega_lambda_0'])**(1.5),\n z, zmax)\n\n return(y)", "def getcosmo(cosmology):\n \"\"\" Find cosmological parameters for named cosmo in cosmology.py list \"\"\"\n\n defaultcosmologies = {'dragons': cg.DRAGONS(), 'wmap1': cg.WMAP1_Mill(),\n 'wmap3': cg.WMAP3_ML(), 'wmap5': cg.WMAP5_mean(),\n 'wmap7': cg.WMAP7_ML(), 'wmap9': cg.WMAP9_ML(),\n 'wmap1_lss': cg.WMAP1_2dF_mean(),\n 'wmap3_mean': cg.WMAP3_mean(),\n 'wmap5_ml': cg.WMAP5_ML(),\n 'wmap5_lss': cg.WMAP5_BAO_SN_mean(),\n 'wmap7_lss': cg.WMAP7_BAO_H0_mean(),\n 'planck13': cg.Planck_2013(),\n 'planck15': cg.Planck_2015()}\n\n if isinstance(cosmology, dict):\n # User providing their own variables\n cosmo = cosmology\n if 'A_scaling' not in cosmology.keys():\n A_scaling = getAscaling(cosmology, newcosmo=True)\n cosmo.update({'A_scaling': A_scaling})\n\n # Add extra variables by hand that cosmolopy requires\n # note that they aren't used (set to zero)\n for paramnames in cg.WMAP5_mean().keys():\n if paramnames not in cosmology.keys():\n cosmo.update({paramnames: 0})\n elif cosmology.lower() in defaultcosmologies.keys():\n # Load by name of cosmology instead\n cosmo = defaultcosmologies[cosmology.lower()]\n A_scaling = getAscaling(cosmology)\n cosmo.update({'A_scaling': A_scaling})\n else:\n print(\"You haven't passed a dict of cosmological parameters \")\n print(\"OR a recognised cosmology, you gave %s\" % (cosmology))\n # No idea why this has to be done by hand but should be O_k = 0\n cosmo = cp.distance.set_omega_k_0(cosmo)\n\n # Use the cosmology as **cosmo passed to cosmolopy routines\n return(cosmo)", "def _minimize_c(c, z=0, a_tilde=1, b_tilde=-1,\n Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):\n \"\"\" Trial function to solve 2 eqns (17 and 18) from Correa et al. (2015c)\n for 1 unknown, i.e. 
concentration, returned by a minimisation call \"\"\"\n\n # Fn 1 (LHS of Eqn 18)\n\n Y1 = np.log(2) - 0.5\n Yc = np.log(1+c) - c/(1+c)\n f1 = Y1/Yc\n\n # Fn 2 (RHS of Eqn 18)\n\n # Eqn 14 - Define the mean inner density\n rho_2 = 200 * c**3 * Y1 / Yc\n\n # Eqn 17 rearranged to solve for Formation Redshift\n # essentially when universe had rho_2 density\n zf = (((1 + z)**3 + omega_lambda_0/omega_M_0) *\n (rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1\n\n # RHS of Eqn 19\n f2 = ((1 + zf - z)**a_tilde) * np.exp((zf - z) * b_tilde)\n\n # LHS - RHS should be zero for the correct concentration\n return(f1-f2)", "def _delta_sigma(**cosmo):\n \"\"\" Perturb best-fit constant of proportionality Ascaling for\n rho_crit - rho_2 relation for unknown cosmology (Correa et al 2015c)\n\n Parameters\n ----------\n cosmo : dict\n Dictionary of cosmological parameters, similar in format to:\n {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,\n 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,\n 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}\n\n Returns\n -------\n float\n The perturbed 'A' relation between rho_2 and rho_crit for the cosmology\n\n Raises\n ------\n\n \"\"\"\n\n M8_cosmo = cp.perturbation.radius_to_mass(8, **cosmo)\n perturbed_A = (0.796/cosmo['sigma_8']) * \\\n (M8_cosmo/2.5e14)**((cosmo['n']-0.963)/6)\n return(perturbed_A)", "def growthfactor(z, norm=True, **cosmo):\n \"\"\" Returns linear growth factor at a given redshift, normalised to z=0\n by default, for a given cosmology\n\n Parameters\n ----------\n\n z : float or numpy array\n The redshift at which the growth factor should be calculated\n norm : boolean, optional\n If true then normalise the growth factor to z=0 case defaults True\n cosmo : dict\n Dictionary of cosmological parameters, similar in format to:\n {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,\n 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,\n 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}\n\n Returns\n -------\n float or numpy array\n The growth factor at a range of redshifts 'z'\n\n Raises\n ------\n\n \"\"\"\n H = np.sqrt(cosmo['omega_M_0'] * (1 + z)**3 +\n cosmo['omega_lambda_0'])\n growthval = H * _int_growth(z, **cosmo)\n if norm:\n growthval /= _int_growth(0, **cosmo)\n\n return(growthval)", "def cosmological_quantity_from_redshift(z, quantity, strip_unit=True,\n **kwargs):\n r\"\"\"Returns the value of a cosmological quantity (e.g., age) at a redshift.\n\n Parameters\n ----------\n z : float\n The redshift.\n quantity : str\n The name of the quantity to get. The name may be any attribute of\n :py:class:`astropy.cosmology.FlatLambdaCDM`.\n strip_unit : bool, optional\n Just return the value of the quantity, sans units. Default is True.\n \\**kwargs :\n All other keyword args are passed to :py:func:`get_cosmology` to\n select a cosmology. If none provided, will use\n :py:attr:`DEFAULT_COSMOLOGY`.\n\n Returns\n -------\n float or astropy.units.quantity :\n The value of the quantity at the requested value. If ``strip_unit`` is\n ``True``, will return the value. 
Otherwise, will return the value with\n units.\n \"\"\"\n cosmology = get_cosmology(**kwargs)\n val = getattr(cosmology, quantity)(z)\n if strip_unit:\n val = val.value\n return val", "def _Rzderiv(self,R,z,phi=0.,t=0.): #pragma: no cover\n \"\"\"\n NAME:\n _Rzderiv\n PURPOSE:\n evaluate the mixed R,z derivative for this potential\n INPUT:\n R - Galactocentric cylindrical radius\n z - vertical height\n phi - azimuth\n t - time\n OUTPUT:\n d2phi/dR/dz\n HISTORY:\n 2016-12-26 - Written - Bovy (UofT/CCA)\n \"\"\"\n raise AttributeError\n # Implementation above does not work bc SCF.Rzderiv is not implemented\n r= numpy.sqrt(R**2.+z**2.)\n out= self._scf.Rzderiv(R,z,phi=phi,use_physical=False)\n for a,ds,d2s,H,dH in zip(self._Sigma_amp,self._dsigmadR,\n self._d2SigmadR2,self._Hz,self._dHzdz):\n out+= 4.*numpy.pi*a*(H(z)*R*z/r**2.*(d2s(r)-ds(r)/r)\n +ds(r)*dH(z)*R/r)\n return out", "def _denom(self, R, z):\n \"\"\"\n NAME:\n _denom\n PURPOSE:\n evaluate R^2 + (a + |z|)^2 which is used in the denominator\n of most equations\n INPUT:\n R - Cylindrical Galactocentric radius\n z - vertical height\n OUTPUT:\n R^2 + (a + |z|)^2\n HISTORY:\n 2016-05-09 - Written - Aladdin \n \"\"\"\n return (R**2. + (self._a + nu.fabs(z))**2.)", "def derivativeZ(self,mLvl,pLvl,MedShk):\n '''\n Evaluate the derivative of consumption and medical care with respect to\n medical need shock at given levels of market resources, permanent income,\n and medical need shocks.\n\n Parameters\n ----------\n mLvl : np.array\n Market resource levels.\n pLvl : np.array\n Permanent income levels; should be same size as mLvl.\n MedShk : np.array\n Medical need shocks; should be same size as mLvl.\n\n Returns\n -------\n dcdShk : np.array\n Derivative of consumption with respect to medical need for each\n point in (xLvl,MedShk).\n dMeddShk : np.array\n Derivative of medical care with respect to medical need for each\n point in (xLvl,MedShk).\n '''\n xLvl = self.xFunc(mLvl,pLvl,MedShk)\n dxdShk = self.xFunc.derivativeZ(mLvl,pLvl,MedShk)\n dcdx = self.cFunc.derivativeX(xLvl,MedShk)\n dcdShk = dxdShk*dcdx + self.cFunc.derivativeY(xLvl,MedShk)\n dMeddShk = (dxdShk - dcdShk)/self.MedPrice\n return dcdShk,dMeddShk", "def _z2deriv(self,R,z,phi=0.,t=0.):\n \"\"\"\n NAME:\n _z2deriv\n PURPOSE:\n evaluate the second vertical derivative for this potential\n INPUT:\n R - Galactocentric cylindrical radius\n z - vertical height\n phi - azimuth\n t - time\n OUTPUT:\n the second vertical derivative\n HISTORY:\n 2016-05-13 - Written - Aladdin \n \"\"\"\n a = self._a\n return self._denom(R, z)**-1.5 - 3. * (a + nu.fabs(z))**2. 
* self._denom(R, z)**-2.5", "def _z2deriv(self,R,z,phi=0.,t=0.): #pragma: no cover\n \"\"\"\n NAME:\n _z2deriv\n PURPOSE:\n evaluate the second vertical derivative for this potential\n INPUT:\n R - Galactocentric cylindrical radius\n z - vertical height\n phi - azimuth\n t - time\n OUTPUT:\n the second vertical derivative\n HISTORY:\n 2016-12-26 - Written - Bovy (UofT/CCA)\n \"\"\"\n raise AttributeError\n # Implementation above does not work bc SCF.z2deriv is not implemented\n r= numpy.sqrt(R**2.+z**2.)\n out= self._scf.z2deriv(R,z,phi=phi,use_physical=False)\n for a,s,ds,d2s,h,H,dH in zip(self._Sigma_amp,\n self._Sigma,self._dSigmadR,self._d2SigmadR2,\n self._hz,self._Hz,self._dHzdz):\n out+= 4.*numpy.pi*a*(H(z)/r**2.*(d2s(r)*z**2.+ds(r)*R**2./r)\n +2.*ds(r)*dH(z)*z/r+s(r)*h(z))\n return out", "def Rzderiv(self,R,Z,phi=0.,t=0.):\n \"\"\"\n NAME:\n\n Rzderiv\n\n PURPOSE:\n\n evaluate the mixed R,z derivative\n\n INPUT:\n\n R - Galactocentric radius (can be Quantity)\n\n Z - vertical height (can be Quantity)\n\n phi - Galactocentric azimuth (can be Quantity)\n\n t - time (can be Quantity)\n\n OUTPUT:\n\n d2phi/dz/dR\n\n HISTORY:\n\n 2013-08-26 - Written - Bovy (IAS)\n\n \"\"\"\n try:\n return self._amp*self._Rzderiv(R,Z,phi=phi,t=t)\n except AttributeError: #pragma: no cover\n raise PotentialError(\"'_Rzderiv' function not implemented for this potential\")" ]
[ 0.8032103180885315, 0.7147066593170166, 0.7002454996109009, 0.6993910074234009, 0.6919756531715393, 0.6883453726768494, 0.6882690191268921, 0.6829437017440796, 0.6825976371765137, 0.6817625164985657, 0.6801071763038635, 0.6786118745803833 ]
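The expression in the positive passage is the redshift derivative of the normalised growth factor D(z)/D(0). A quick way to convince yourself is to compare it against a central finite difference, as in the sketch below (placeholder flat-LCDM parameters, scalar z only, NumPy and SciPy assumed available):

```python
# Sketch: cross-check of the analytic growth-factor derivative against a
# central finite difference. Parameters are placeholders, not dataset values.
import numpy as np
import scipy.integrate

OM, OL = 0.25, 0.75

def _int_growth(z, zmax=200):
    f = lambda zp: (1 + zp) / (OM*(1 + zp)**3 + OL)**1.5
    return scipy.integrate.quad(f, z, zmax)[0]

def growth(z):
    # normalised linear growth factor D(z)/D(0)
    return np.sqrt(OM*(1 + z)**3 + OL) * _int_growth(z) / _int_growth(0)

def deriv_growth(z):
    # same algebra as the passage, written out for scalars
    inv_h = (OM*(1 + z)**3 + OL)**(-0.5)
    fz = (1 + z) * inv_h**3
    return (growth(z)*inv_h**2 * 1.5*OM*(1 + z)**2
            - fz*growth(z)/_int_growth(z))

z, dz = 1.0, 1e-4
print(deriv_growth(z), (growth(z + dz) - growth(z - dz)) / (2*dz))  # should agree
```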
Returns linear growth factor at a given redshift, normalised to z=0
by default, for a given cosmology

Parameters
----------
z : float or numpy array
    The redshift at which the growth factor should be calculated
norm : boolean, optional
    If true then normalise the growth factor to z=0 case defaults True
cosmo : dict
    Dictionary of cosmological parameters, similar in format to:
    {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
    'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
    'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

Returns
-------
float or numpy array
    The growth factor at a range of redshifts 'z'

Raises
------
def growthfactor(z, norm=True, **cosmo):
    """ Returns linear growth factor at a given redshift, normalised to z=0
    by default, for a given cosmology

    Parameters
    ----------
    z : float or numpy array
        The redshift at which the growth factor should be calculated
    norm : boolean, optional
        If true then normalise the growth factor to z=0 case defaults True
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

    Returns
    -------
    float or numpy array
        The growth factor at a range of redshifts 'z'

    Raises
    ------

    """
    H = np.sqrt(cosmo['omega_M_0'] * (1 + z)**3 +
                cosmo['omega_lambda_0'])
    growthval = H * _int_growth(z, **cosmo)
    if norm:
        growthval /= _int_growth(0, **cosmo)

    return(growthval)
[ "def _deriv_growth(z, **cosmo):\n \"\"\" Returns derivative of the linear growth factor at z\n for a given cosmology **cosmo \"\"\"\n\n inv_h = (cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])**(-0.5)\n fz = (1 + z) * inv_h**3\n\n deriv_g = growthfactor(z, norm=True, **cosmo)*(inv_h**2) *\\\n 1.5 * cosmo['omega_M_0'] * (1 + z)**2 -\\\n fz * growthfactor(z, norm=True, **cosmo)/_int_growth(z, **cosmo)\n\n return(deriv_g)", "def _int_growth(z, **cosmo):\n \"\"\" Returns integral of the linear growth factor from z=200 to z=z \"\"\"\n\n zmax = 200\n\n if hasattr(z, \"__len__\"):\n for zval in z:\n assert(zval < zmax)\n else:\n assert(z < zmax)\n\n y, yerr = scipy.integrate.quad(\n lambda z: (1 + z)/(cosmo['omega_M_0']*(1 + z)**3 +\n cosmo['omega_lambda_0'])**(1.5),\n z, zmax)\n\n return(y)", "def getcosmo(cosmology):\n \"\"\" Find cosmological parameters for named cosmo in cosmology.py list \"\"\"\n\n defaultcosmologies = {'dragons': cg.DRAGONS(), 'wmap1': cg.WMAP1_Mill(),\n 'wmap3': cg.WMAP3_ML(), 'wmap5': cg.WMAP5_mean(),\n 'wmap7': cg.WMAP7_ML(), 'wmap9': cg.WMAP9_ML(),\n 'wmap1_lss': cg.WMAP1_2dF_mean(),\n 'wmap3_mean': cg.WMAP3_mean(),\n 'wmap5_ml': cg.WMAP5_ML(),\n 'wmap5_lss': cg.WMAP5_BAO_SN_mean(),\n 'wmap7_lss': cg.WMAP7_BAO_H0_mean(),\n 'planck13': cg.Planck_2013(),\n 'planck15': cg.Planck_2015()}\n\n if isinstance(cosmology, dict):\n # User providing their own variables\n cosmo = cosmology\n if 'A_scaling' not in cosmology.keys():\n A_scaling = getAscaling(cosmology, newcosmo=True)\n cosmo.update({'A_scaling': A_scaling})\n\n # Add extra variables by hand that cosmolopy requires\n # note that they aren't used (set to zero)\n for paramnames in cg.WMAP5_mean().keys():\n if paramnames not in cosmology.keys():\n cosmo.update({paramnames: 0})\n elif cosmology.lower() in defaultcosmologies.keys():\n # Load by name of cosmology instead\n cosmo = defaultcosmologies[cosmology.lower()]\n A_scaling = getAscaling(cosmology)\n cosmo.update({'A_scaling': A_scaling})\n else:\n print(\"You haven't passed a dict of cosmological parameters \")\n print(\"OR a recognised cosmology, you gave %s\" % (cosmology))\n # No idea why this has to be done by hand but should be O_k = 0\n cosmo = cp.distance.set_omega_k_0(cosmo)\n\n # Use the cosmology as **cosmo passed to cosmolopy routines\n return(cosmo)", "def cosmological_quantity_from_redshift(z, quantity, strip_unit=True,\n **kwargs):\n r\"\"\"Returns the value of a cosmological quantity (e.g., age) at a redshift.\n\n Parameters\n ----------\n z : float\n The redshift.\n quantity : str\n The name of the quantity to get. The name may be any attribute of\n :py:class:`astropy.cosmology.FlatLambdaCDM`.\n strip_unit : bool, optional\n Just return the value of the quantity, sans units. Default is True.\n \\**kwargs :\n All other keyword args are passed to :py:func:`get_cosmology` to\n select a cosmology. If none provided, will use\n :py:attr:`DEFAULT_COSMOLOGY`.\n\n Returns\n -------\n float or astropy.units.quantity :\n The value of the quantity at the requested value. If ``strip_unit`` is\n ``True``, will return the value. Otherwise, will return the value with\n units.\n \"\"\"\n cosmology = get_cosmology(**kwargs)\n val = getattr(cosmology, quantity)(z)\n if strip_unit:\n val = val.value\n return val", "def _minimize_c(c, z=0, a_tilde=1, b_tilde=-1,\n Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):\n \"\"\" Trial function to solve 2 eqns (17 and 18) from Correa et al. (2015c)\n for 1 unknown, i.e. 
concentration, returned by a minimisation call \"\"\"\n\n # Fn 1 (LHS of Eqn 18)\n\n Y1 = np.log(2) - 0.5\n Yc = np.log(1+c) - c/(1+c)\n f1 = Y1/Yc\n\n # Fn 2 (RHS of Eqn 18)\n\n # Eqn 14 - Define the mean inner density\n rho_2 = 200 * c**3 * Y1 / Yc\n\n # Eqn 17 rearranged to solve for Formation Redshift\n # essentially when universe had rho_2 density\n zf = (((1 + z)**3 + omega_lambda_0/omega_M_0) *\n (rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1\n\n # RHS of Eqn 19\n f2 = ((1 + zf - z)**a_tilde) * np.exp((zf - z) * b_tilde)\n\n # LHS - RHS should be zero for the correct concentration\n return(f1-f2)", "def _delta_sigma(**cosmo):\n \"\"\" Perturb best-fit constant of proportionality Ascaling for\n rho_crit - rho_2 relation for unknown cosmology (Correa et al 2015c)\n\n Parameters\n ----------\n cosmo : dict\n Dictionary of cosmological parameters, similar in format to:\n {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,\n 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,\n 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}\n\n Returns\n -------\n float\n The perturbed 'A' relation between rho_2 and rho_crit for the cosmology\n\n Raises\n ------\n\n \"\"\"\n\n M8_cosmo = cp.perturbation.radius_to_mass(8, **cosmo)\n perturbed_A = (0.796/cosmo['sigma_8']) * \\\n (M8_cosmo/2.5e14)**((cosmo['n']-0.963)/6)\n return(perturbed_A)", "def astro_redshifts(min_z, max_z, nsamples):\n '''Sample the redshifts for sources, with redshift\n independent rate, using standard cosmology\n\n Parameters\n ----------\n min_z: float\n Minimum redshift\n max_z: float\n Maximum redshift\n nsamples: int\n Number of samples\n\n Returns\n -------\n z_astro: array\n nsamples of redshift, between min_z, max_z, by standard cosmology\n '''\n\n dz, fac = 0.001, 3.0\n # use interpolation instead of directly estimating all the pdfz for rndz\n V = quad(contracted_dVdc, 0., max_z)[0]\n zbins = np.arange(min_z, max_z + dz/2., dz)\n zcenter = (zbins[:-1] + zbins[1:]) / 2\n pdfz = cosmo.differential_comoving_volume(zcenter).value/(1+zcenter)/V\n\n int_pdf = interp1d(zcenter, pdfz, bounds_error=False, fill_value=0)\n\n rndz = np.random.uniform(min_z, max_z, int(fac*nsamples))\n pdf_zs = int_pdf(rndz)\n maxpdf = max(pdf_zs)\n rndn = np.random.uniform(0, 1, int(fac*nsamples)) * maxpdf\n diff = pdf_zs - rndn\n idx = np.where(diff > 0)\n z_astro = rndz[idx]\n\n np.random.shuffle(z_astro)\n z_astro.resize(nsamples)\n\n return z_astro", "def estimate_hz(self,R,z,dz=10.**-8.,**kwargs):\n \"\"\"\n NAME:\n\n estimate_hz\n\n PURPOSE:\n\n estimate the exponential scale height at R\n\n INPUT:\n\n R - Galactocentric radius (can be Quantity)\n\n dz - z range to use (can be Quantity)\n\n density kwargs\n\n OUTPUT:\n\n estimated hz\n\n HISTORY:\n\n 2012-08-30 - Written - Bovy (IAS)\n\n 2013-01-28 - Re-written - Bovy\n\n \"\"\"\n if z == 0.:\n zs= [z,z+dz]\n else:\n zs= [z-dz/2.,z+dz/2.]\n sf= numpy.array([self.density(R,zz,use_physical=False,\n **kwargs) for zz in zs])\n lsf= numpy.log(sf)\n return -dz/(lsf[1]-lsf[0])", "def c_M_z(self, M, z):\n \"\"\"\n fitting function of http://moriond.in2p3.fr/J08/proceedings/duffy.pdf for the mass and redshift dependence of the concentration parameter\n\n :param M: halo mass in M_sun/h\n :type M: float or numpy array\n :param z: redshift\n :type z: float >0\n :return: concentration parameter as float\n \"\"\"\n # fitted parameter values\n A = 5.22\n B = -0.072\n C = -0.42\n M_pivot = 2.*10**12\n return A*(M/M_pivot)**B*(1+z)**C", "def Onu(self, z):\n 
\"\"\"\n Returns the sum of :func:`~classylss.binding.Background.Omega_ncdm`\n and :func:`~classylss.binding.Background.Omega_ur`.\n \"\"\"\n return self.bg.Omega_ncdm(z) + self.bg.Omega_ur(z)", "def calculate_z1pt0(vs30):\n '''\n Reads an array of vs30 values (in m/s) and\n returns the depth to the 1.0 km/s velocity horizon (in m)\n Ref: Chiou & Youngs (2014) California model\n :param vs30: the shear wave velocity (in m/s) at a depth of 30m\n '''\n c1 = 571 ** 4.\n c2 = 1360.0 ** 4.\n return numpy.exp((-7.15 / 4.0) * numpy.log((vs30 ** 4. + c1) / (c2 + c1)))", "def get_residual(self, z, compute_norm=False):\n r'''Compute residual.\n\n For a given :math:`z\\in\\mathbb{C}^N`, the residual\n\n .. math::\n\n r = M M_l ( b - A z )\n\n is computed. If ``compute_norm == True``, then also the absolute\n residual norm\n\n .. math::\n\n \\| M M_l (b-Az)\\|_{M^{-1}}\n\n is computed.\n\n :param z: approximate solution with ``z.shape == (N, 1)``.\n :param compute_norm: (bool, optional) pass ``True`` if also the norm\n of the residual should be computed.\n '''\n if z is None:\n if compute_norm:\n return self.MMlb, self.Mlb, self.MMlb_norm\n return self.MMlb, self.Mlb\n r = self.b - self.A*z\n Mlr = self.Ml*r\n MMlr = self.M*Mlr\n if compute_norm:\n return MMlr, Mlr, utils.norm(Mlr, MMlr, ip_B=self.ip_B)\n return MMlr, Mlr" ]
[ 0.7684199213981628, 0.74662846326828, 0.6595988869667053, 0.6529549360275269, 0.6396779417991638, 0.6359613537788391, 0.6332837343215942, 0.6323447823524475, 0.6304025650024414, 0.6291844248771667, 0.6244160532951355, 0.6230838894844055 ]
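By construction, the normalised growth factor in the positive passage equals 1 at z = 0 and falls towards higher redshift. A compact, scalar-only sketch with placeholder parameters follows; the function name matches the passage, but this is an illustration rather than the library's implementation:

```python
# Sketch: normalised linear growth factor for a few redshifts.
# Placeholder flat-LCDM parameters; scalar z only (quad needs scalar limits).
import numpy as np
import scipy.integrate

def growthfactor(z, omega_M_0=0.25, omega_lambda_0=0.75, norm=True):
    integrand = lambda zp: (1 + zp) / (omega_M_0*(1 + zp)**3 + omega_lambda_0)**1.5
    int_growth = lambda zz: scipy.integrate.quad(integrand, zz, 200)[0]
    g = np.sqrt(omega_M_0*(1 + z)**3 + omega_lambda_0) * int_growth(z)
    return g / int_growth(0) if norm else g

for z in (0.0, 0.5, 1.0, 2.0):
    print(z, round(growthfactor(z), 4))   # first line prints 0.0 1.0
```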
Trial function to solve 2 eqns (17 and 18) from Correa et al. (2015c) for 1 unknown, i.e. concentration, returned by a minimisation call
def _minimize_c(c, z=0, a_tilde=1, b_tilde=-1,
                Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
    """ Trial function to solve 2 eqns (17 and 18) from Correa et al. (2015c)
    for 1 unknown, i.e. concentration, returned by a minimisation call """

    # Fn 1 (LHS of Eqn 18)

    Y1 = np.log(2) - 0.5
    Yc = np.log(1+c) - c/(1+c)
    f1 = Y1/Yc

    # Fn 2 (RHS of Eqn 18)

    # Eqn 14 - Define the mean inner density
    rho_2 = 200 * c**3 * Y1 / Yc

    # Eqn 17 rearranged to solve for Formation Redshift
    # essentially when universe had rho_2 density
    zf = (((1 + z)**3 + omega_lambda_0/omega_M_0) *
          (rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1

    # RHS of Eqn 19
    f2 = ((1 + zf - z)**a_tilde) * np.exp((zf - z) * b_tilde)

    # LHS - RHS should be zero for the correct concentration
    return(f1-f2)
[ "def pressision_try(orbitals, U, beta, step):\n \"\"\"perform a better initial guess of lambda\n no improvement\"\"\"\n mu, lam = main(orbitals, U, beta, step)\n mu2, lam2 = linspace(0, U*orbitals, step), zeros(step)\n for i in range(99):\n lam2[i+1] = fsolve(restriction, lam2[i], (mu2[i+1], orbitals, U, beta))\n plot(mu2, 2*orbitals*fermi_dist(-(mu2+lam2), beta), label='Test guess')\n legend(loc=0)", "def onecons_qcqp(z, f, tol=1e-6):\n \"\"\" Solves a nonconvex problem\n minimize ||x-z||_2^2\n subject to f(x) = x^T P x + q^T x + r ~ 0\n where the relation ~ is given by f.relop (either <= or ==)\n \"\"\"\n\n # if constraint is ineq and z is feasible: z is the solution\n if f.relop == '<=' and f.eval(z) <= 0:\n return z\n\n if f.eigh is None:\n Psymm = (f.P + f.P.T)/2.\n f.eigh = LA.eigh(np.asarray(Psymm.todense()))\n\n lmb, Q = f.eigh\n zhat = Q.T.dot(z)\n qhat = Q.T.dot(f.qarray)\n\n # now solve a transformed problem\n # minimize ||xhat - zhat||_2^2\n # subject to sum(lmb_i xhat_i^2) + qhat^T xhat + r = 0\n # constraint is now equality from\n # complementary slackness\n xhat = lambda nu: -np.divide(nu*qhat-2*zhat, 2*(1+nu*lmb))\n phi = lambda xhat: lmb.dot(np.power(xhat, 2)) + qhat.dot(xhat) + f.r\n\n s = -np.inf\n e = np.inf\n for l in lmb:\n if l > 0: s = max(s, -1./l)\n if l < 0: e = min(e, -1./l)\n if s == -np.inf:\n s = -1.\n while phi(xhat(s)) <= 0: s *= 2.\n if e == np.inf:\n e = 1.\n while phi(xhat(e)) >= 0: e *= 2.\n while e-s > tol:\n m = (s+e)/2.\n p = phi(xhat(m))\n if p > 0: s = m\n elif p < 0: e = m\n else:\n s = e = m\n break\n nu = (s+e)/2.\n return Q.dot(xhat(nu))", "function qt(df, lowerTail, logp) {\n lowerTail = lowerTail !== false;\n logp = logp === true;\n\n if (utils.hasNaN(df)) { return function(p) { return NaN; }; }\n return utils.qhelper(lowerTail, logp, -Infinity, Infinity, function(p) {\n // var pp;\n\n // two-tailed prob, pp = 2 * min(p, 1-p)\n // pp = 2 * (logp ? Math.min(Math.exp(p), -expm1(p))\n // : Math.min(p, 1 - p));\n if (df <= 0) { return NaN; }\n // Using the solver on whole range (possibly inefficient but works)\n // if (df < 1) {\n return utils.binSearchSolve(function(x) {\n return pt(df, lowerTail, logp)(x);\n }, p);\n // }\n\n // if (df > 1e20) {\n // return qnorm(0, 1, lowerTail, logp)(p);\n // }\n\n // TODO: Could consider optimizing for df close to 1 or 2\n\n });\n }", "def _u0Eq(logu,delta,pot,E,Lz22):\n \"\"\"The equation that needs to be minimized to find u0\"\"\"\n u= numpy.exp(logu)\n sinh2u= numpy.sinh(u)**2.\n cosh2u= numpy.cosh(u)**2.\n dU= cosh2u*actionAngleStaeckel.potentialStaeckel(u,numpy.pi/2.,pot,delta)\n return -(E*sinh2u-dU-Lz22/delta**2./sinh2u)", "def solve(self, reaction_1, reaction_2):\n \"\"\"Return the flux coupling between two reactions\n\n The flux coupling is returned as a tuple indicating the minimum and\n maximum value of the v1/v2 reaction flux ratio. 
A value of None as\n either the minimum or maximum indicates that the interval is unbounded\n in that direction.\n \"\"\"\n # Update objective for reaction_1\n self._prob.set_objective(self._vbow(reaction_1))\n\n # Update constraint for reaction_2\n if self._reaction_constr is not None:\n self._reaction_constr.delete()\n\n self._reaction_constr, = self._prob.add_linear_constraints(\n self._vbow(reaction_2) == 1)\n\n results = []\n for sense in (lp.ObjectiveSense.Minimize, lp.ObjectiveSense.Maximize):\n try:\n result = self._prob.solve(sense)\n except lp.SolverError:\n results.append(None)\n else:\n results.append(result.get_value(self._vbow(reaction_1)))\n\n return tuple(results)", "def onepara(R):\n \"\"\"Converts an ill-conditioned correlation matrix\n into well-conditioned matrix with one common\n correlation coefficient\n\n Parameters:\n -----------\n R : ndarray\n an illconditioned correlation matrix,\n e.g. oxyba.illcond_corrmat\n\n Return:\n -------\n cmat : ndarray\n DxD matrix with +1 as diagonal elements\n and 1 common coefficient for all other\n relations.\n \"\"\"\n import numpy as np\n import warnings\n\n d = R.shape[0]\n\n if d < 2:\n raise Exception((\n \"More than one variable is required.\"\n \"Supply at least a 2x2 matrix.\"))\n\n # the explicit solution\n x = (np.sum(R) + np.trace(R)) / (d**2 - d)\n\n if x < (-1. / (d - 1)) or x > 1:\n warnings.warn(\"No analytic solution found x={:.8f}\".format(x))\n return None\n else:\n C = np.eye(d)\n C[np.logical_not(C)] = x\n return C", "def findmin(psr,method='Nelder-Mead',history=False,formbats=False,renormalize=True,bounds={},**kwargs):\n \"\"\"Use scipy.optimize.minimize to find minimum-chisq timing solution,\n passing through all extra options. Resets psr[...].val to the final solution,\n and returns the final chisq. 
Will use chisq gradient if method requires it.\n Ignores deleted points.\"\"\"\n\n ctr, err = psr.vals(), psr.errs()\n\n # to avoid losing precision, we're searching in units of parameter errors\n \n if numpy.any(err == 0.0):\n print(\"Warning: one or more fit parameters have zero a priori error, and won't be searched.\")\n\n hloc, hval = [], []\n\n def func(xs):\n psr.vals([c + x*e for x,c,e in zip(xs,ctr,err)])\n\n ret = chisq(psr,formbats=formbats)\n\n if numpy.isnan(ret):\n print(\"Warning: chisq is nan at {0}.\".format(psr.vals()))\n\n if history:\n hloc.append(psr.vals())\n hval.append(ret)\n\n return ret\n\n def dfunc(xs):\n psr.vals([c + x*e for x,c,e in zip(xs,ctr,err)])\n\n dc = dchisq(psr,formbats=formbats,renormalize=renormalize)\n ret = numpy.array([d*e for d,e in zip(dc,err)],'d')\n\n return ret\n\n opts = kwargs.copy()\n\n if method not in ['Nelder-Mead','Powell']:\n opts['jac'] = dfunc\n\n if method in ['L-BFGS-B']:\n opts['bounds'] = [(float((bounds[par][0] - ctr[i])/err[i]),\n float((bounds[par][1] - ctr[i])/err[i])) if par in bounds else (None,None)\n for i,par in enumerate(psr.pars())]\n\n res = scipy.optimize.minimize(func,[0.0]*len(ctr),method=method,**opts)\n\n if hasattr(res,'message'):\n print(res.message)\n\n # this will also set parameters to the minloc\n minchisq = func(res.x)\n\n if history:\n return minchisq, numpy.array(hval), numpy.array(hloc)\n else: \n return minchisq", "def ccor2(alt, r, h1, zh, h2):\n '''\n/* CHEMISTRY/DISSOCIATION CORRECTION FOR MSIS MODELS\n * ALT - altitude\n * R - target ratio\n * H1 - transition scale length\n * ZH - altitude of 1/2 R\n * H2 - transition scale length #2 ?\n */\n '''\n e1 = (alt - zh) / h1;\n e2 = (alt - zh) / h2;\n if ((e1 > 70.0) or (e2 > 70)): # pragma: no cover\n return 1.0 # exp(0)\n if ((e1 < -70) and (e2 < -70)): # pragma: no cover\n return exp(r)\n ex1 = exp(e1);\n ex2 = exp(e2);\n ccor2v = r / (1.0 + 0.5 * (ex1 + ex2));\n return exp(ccor2v);", "function (Y) {\n\t\t\tvar N = this.N;\n\t\t\tvar dim = this.dim; // dim of output space\n\t\t\tvar P = this.P;\n\n\t\t\tvar pmul = this.iter < 100 ? 
4 : 1; // trick that helps with local optima\n\n\t\t\t// compute current Q distribution, unnormalized first\n\t\t\tvar Qu = zeros(N * N);\n\t\t\tvar qsum = 0.0;\n\t\t\tfor (var i = 0; i < N; i++) {\n\t\t\t\tfor (var j = i + 1; j < N; j++) {\n\t\t\t\t\tvar dsum = 0.0;\n\t\t\t\t\tfor (var d = 0; d < dim; d++) {\n\t\t\t\t\t\tvar dhere = Y[i][d] - Y[j][d];\n\t\t\t\t\t\tdsum += dhere * dhere;\n\t\t\t\t\t}\n\t\t\t\t\tvar qu = 1.0 / (1.0 + dsum); // Student t-distribution\n\t\t\t\t\tQu[i * N + j] = qu;\n\t\t\t\t\tQu[j * N + i] = qu;\n\t\t\t\t\tqsum += 2 * qu;\n\t\t\t\t}\n\t\t\t}\n\t\t\t// normalize Q distribution to sum to 1\n\t\t\tvar NN = N * N;\n\t\t\tvar Q = zeros(NN);\n\t\t\tfor (var q = 0; q < NN; q++) {\n\t\t\t\tQ[q] = Math.max(Qu[q] / qsum, 1e-100);\n\t\t\t}\n\n\t\t\tvar cost = 0.0;\n\t\t\tvar grad = [];\n\t\t\tfor (var i = 0; i < N; i++) {\n\t\t\t\tvar gsum = new Array(dim); // init grad for point i\n\t\t\t\tfor (var d = 0; d < dim; d++) {\n\t\t\t\t\tgsum[d] = 0.0;\n\t\t\t\t}\n\t\t\t\tfor (var j = 0; j < N; j++) {\n\t\t\t\t\tcost += -P[i * N + j] * Math.log(Q[i * N + j]); // accumulate cost (the non-constant portion at least...)\n\t\t\t\t\tvar premult = 4 * (pmul * P[i * N + j] - Q[i * N + j]) * Qu[i * N + j];\n\t\t\t\t\tfor (var d = 0; d < dim; d++) {\n\t\t\t\t\t\tgsum[d] += premult * (Y[i][d] - Y[j][d]);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tgrad.push(gsum);\n\t\t\t}\n\n\t\t\treturn {\n\t\t\t\tcost: cost,\n\t\t\t\tgrad: grad\n\t\t\t};\n\t\t}", "def solve_full(z, Fval, DPhival, G, A): \n M, N=G.shape\n P, N=A.shape\n\n \"\"\"Total number of inequality constraints\"\"\"\n m=M \n\n \"\"\"Primal variable\"\"\"\n x=z[0:N]\n\n \"\"\"Multiplier for equality constraints\"\"\"\n nu=z[N:N+P]\n\n \"\"\"Multiplier for inequality constraints\"\"\"\n l=z[N+P:N+P+M]\n\n \"\"\"Slacks\"\"\"\n s=z[N+P+M:]\n\n \"\"\"Dual infeasibility\"\"\"\n rd = Fval[0:N]\n \n \"\"\"Primal infeasibility\"\"\"\n rp1 = Fval[N:N+P]\n rp2 = Fval[N+P:N+P+M]\n\n \"\"\"Centrality\"\"\"\n rc = Fval[N+P+M:]\n\n \"\"\"Sigma matrix\"\"\"\n SIG = np.diag(l/s)\n\n \"\"\"Condensed system\"\"\"\n if issparse(DPhival):\n if not issparse(A):\n A = csr_matrix(A) \n H = DPhival + mydot(G.T, mydot(SIG, G))\n J = bmat([[H, A.T], [A, None]])\n else:\n if issparse(A):\n A = A.toarray()\n J = np.zeros((N+P, N+P))\n J[0:N, 0:N] = DPhival + mydot(G.T, mydot(SIG, G)) \n J[0:N, N:] = A.T\n J[N:, 0:N] = A\n\n b1 = -rd - mydot(G.T, mydot(SIG, rp2)) + mydot(G.T, rc/s)\n b2 = -rp1\n b = np.hstack((b1, b2))\n\n \"\"\"Prepare iterative solve via MINRES\"\"\"\n sign = np.zeros(N+P)\n sign[0:N/2] = 1.0\n sign[N/2:] = -1.0\n S = diags(sign, 0)\n J_new = mydot(S, csr_matrix(J))\n b_new = mydot(S, b)\n\n dJ_new = np.abs(J_new.diagonal())\n dPc = np.ones(J_new.shape[0])\n ind = (dJ_new > 0.0)\n dPc[ind] = 1.0/dJ_new[ind]\n Pc = diags(dPc, 0) \n dxnu, info = minres(J_new, b_new, tol=1e-8, M=Pc)\n \n # dxnu = solve(J, b)\n dx = dxnu[0:N]\n dnu = dxnu[N:]\n\n \"\"\"Obtain search directions for l and s\"\"\"\n ds = -rp2 - mydot(G, dx)\n dl = -mydot(SIG, ds) - rc/s\n\n dz = np.hstack((dx, dnu, dl, ds))\n return dz", "def primal_dual_solve(func, x0, Dfunc, A, b, G, h, args=(), tol=1e-10,\n maxiter=100, show_progress=True, full_output=False):\n \"\"\"Wrap calls to function and Jacobian\"\"\"\n fcalls, func = wrap_function(func, args)\n Dfcalls, Dfunc = wrap_function(Dfunc, args)\n\n M, N = G.shape\n P, N = A.shape\n\n \"\"\"Total number of inequality constraints\"\"\"\n m = M\n\n def gap(z):\n r\"\"\"Gap-function\"\"\"\n l = z[N+P:N+P+M]\n s = z[N+P+M:]\n return 
mydot(l, s)/m\n\n def centrality(z):\n r\"\"\"Centrality function\"\"\"\n l = z[N+P:N+P+M]\n s = z[N+P+M:]\n return np.min(l*s)\n\n def KKT(z, sigma=0.0):\n r\"\"\"KKT system (possible perturbed).\"\"\"\n\n \"\"\"Primal variable\"\"\"\n x = z[0:N]\n\n \"\"\"Multiplier for equality constraints\"\"\"\n nu = z[N:N+P]\n\n \"\"\"Multiplier for inequality constraints\"\"\"\n l = z[N+P:N+P+M]\n\n \"\"\"Slacks\"\"\"\n s = z[N+P+M:]\n\n \"\"\"Evaluate objective function\"\"\"\n F = func(x)\n\n \"\"\"Dual infeasibility\"\"\"\n rdual = F+mydot(A.transpose(), nu)+mydot(G.transpose(), l)\n\n \"\"\"Primal infeasibilities\"\"\"\n rprim1 = mydot(A, x)-b\n rprim2 = mydot(G, x)-h+s\n\n \"\"\"Complementary slackness (perturbed)\"\"\"\n mu = gap(z)\n rslack = l*s-sigma*mu\n\n return np.hstack((rdual, rprim1, rprim2, rslack))\n\n def step_fast(z, KKTval, LU, G, A, mu, beta, gamma, alpha0):\n r\"\"\"Affine scaling step.\"\"\"\n dz = solve_factorized(z, KKTval, LU, G, A)\n\n \"\"\"Reduce step length until slacks s and multipliers l are positive\"\"\"\n alpha = 1.0*alpha_0\n k = 0\n for k in range(10):\n z_new = z + alpha*dz\n if np.all( z_new[N+P:] > 0.0 ):\n break\n alpha *= 0.5\n k += 1\n if k == 10 - 1:\n raise RuntimeError(\"Maximum steplength reduction reached\")\n\n \"\"\"Reduce step length until iterates lie in correct neighborhood\"\"\"\n for k in range(10):\n z_new = z + alpha*dz\n KKTval_new = KKT(z_new)\n dual = mynorm(KKTval_new[0:N])\n prim = mynorm(KKTval_new[N:N+P+M])\n mu_new = gap(z_new)\n cent_new = centrality(z_new)\n\n if (dual <= beta*mu_new and prim <= beta*mu_new and\n cent_new >= gamma*mu_new):\n break\n alpha *= 0.5\n # alpha *= 0.95\n if k == 10 - 1:\n raise RuntimeError(\"Maximum steplength reduction reached\")\n return z_new, mu_new\n\n def step_safe(z, KKTval, LU, G, A, mu, beta, gamma):\n r\"\"\"Centering step.\"\"\"\n dz = solve_factorized(z, KKTval, LU, G, A)\n\n \"\"\"Reduce step length until slacks s and multipliers l are positive\"\"\"\n alpha = 1.0\n k = 0\n for k in range(10):\n z_new = z + alpha*dz\n if np.all( z_new[N+P:] > 0.0 ):\n break\n alpha *= 0.5\n k += 1\n if k == 10 - 1:\n raise RuntimeError(\"Maximum steplength reduction (pos.) 
reached\")\n\n \"\"\"Reduce step length until iterates lie in correct neighborhood\n and mu fulfills Armijo condition\"\"\"\n k = 0\n for k in range(10):\n z_new = z+alpha*dz\n KKTval_new = KKT(z_new, sigma=SIGMA)\n dual = mynorm(KKTval_new[0:N])\n prim = mynorm(KKTval_new[N:N+P+M])\n mu_new = gap(z_new)\n cent_new = centrality(z_new)\n\n if (dual <= beta*mu_new and prim <= beta*mu_new and\n cent_new >= gamma*mu_new and\n mu_new<=(1.0-KAPPA*alpha*(1.0-SIGMA))*mu):\n break\n alpha *= 0.5\n if k == 10 - 1:\n raise RuntimeError(\"Maximum steplength reduction reached\")\n return z_new, mu_new\n\n \"\"\"INITIALIZATION\"\"\"\n\n \"\"\"Initial Slacks for inequality constraints\"\"\"\n s0 = -1.0*(mydot(G, x0)-h)\n\n \"\"\"Initial multipliers for inequality constraints\"\"\"\n l0 = 1.0*np.ones(M)\n\n \"\"\"Initial multipliers for equality constraints\"\"\"\n nu0 = np.zeros(P)\n\n \"\"\"Initial point\"\"\"\n z0 = np.hstack((x0, nu0, l0, s0))\n\n \"\"\"Initial KKT-values\"\"\"\n KKTval0 = KKT(z0, sigma=0.0)\n mu0 = gap(z0)\n dual0 = mynorm(KKTval0[0:N])\n prim0 = mynorm(KKTval0[N:N+P+M])\n\n \"\"\"Initial neighborhood\"\"\"\n beta = BETA * np.sqrt(dual0**2 + prim0**2)/mu0\n gamma = 1.0 * GAMMA_MAX\n\n \"\"\"Number of fast steps\"\"\"\n t = 0\n\n \"\"\"Number of iterations\"\"\"\n n = 0\n\n \"\"\"Dummy variable for step type\"\"\"\n step_type = \" \"\n\n if show_progress:\n print(\"%s %s %s %s %s %s %s\" %(\"iter\", \"gap\", \"dual\", \"primal\",\n \"min\", \"max\", \"step\"))\n \"\"\"MAIN LOOP\"\"\"\n z = z0\n x = z0[0:N]\n KKTval = KKTval0\n dual = dual0\n prim = prim0\n mu = mu0\n Dfunc_val = Dfunc(x)\n LU = factor(z, Dfunc_val, G, A)\n\n if full_output:\n info = {'z': []}\n info['z'].append(z)\n\n for n in range(maxiter):\n if show_progress:\n l=z[N+P:N+P+M]\n s=z[N+P+M:]\n print(\"%i %.6e %.6e %.6e %.6e %.6e %s\" %(n+1, mu, dual, prim,\n np.min(l*s), np.max(l*s),\n step_type))\n \"\"\"Attempt fast step\"\"\"\n beta_new = (1.0 + GAMMA_BAR**(t+1)) * beta\n gamma_new = GAMMA_MIN + GAMMA_BAR**(t+1)*(GAMMA_MAX - GAMMA_MIN)\n alpha_0 = 1.0 - np.sqrt(mu)/GAMMA_BAR**t\n\n if alpha_0 > 0.0:\n z_new, mu_new = step_fast(z, KKTval, LU, G, A, mu,\n beta_new, gamma_new, alpha_0)\n if mu_new < RHO * mu:\n \"\"\"Fast successful\"\"\"\n z = z_new\n mu = mu_new\n beta = beta_new\n gamma = gamma_new\n t += 1\n step_type = \"f\"\n else:\n \"\"\"Perturbed right-had side\"\"\"\n KKTval_pert = 1.0*KKTval\n KKTval_pert[N+P+M:] -= SIGMA * mu\n z, mu = step_safe(z, KKTval_pert, LU, G, A, mu, beta, gamma)\n step_type = \"s\"\n else:\n \"\"\"Perturbed right-hand side\"\"\"\n KKTval_pert = 1.0*KKTval\n KKTval_pert[N+P+M:] -= SIGMA * mu\n z, mu = step_safe(z, KKTval_pert, LU, G, A, mu, beta, gamma)\n step_type = \"s\"\n\n \"\"\"Compute new iterates\"\"\"\n KKTval = KKT(z, sigma=0.0)\n dual = mynorm(KKTval[0:N])\n prim = mynorm(KKTval[N:N+P+M])\n x = z[0:N]\n Dfunc_val = Dfunc(x)\n LU = factor(z, Dfunc_val, G, A)\n if full_output:\n info['z'].append(z)\n if mu < tol and dual < tol and prim < tol:\n break\n if n == maxiter - 1:\n raise RuntimeError(\"Maximum number of iterations reached\")\n\n if show_progress:\n l=z[N+P:N+P+M]\n s=z[N+P+M:]\n print(\"%i %.6e %.6e %.6e %.6e %.6e %s\" %(n+1, mu, dual, prim,\n np.min(l*s), np.max(l*s),\n step_type))\n if full_output:\n return z[0:N], info\n else:\n return z[0:N]", "def cost_min2(self, alpha):\n \"\"\"Residual formulation, Hessian is a low-rank update of the identity.\n \"\"\"\n n = self.V.dim()\n ax = alpha[:n]\n ay = alpha[n:]\n\n # ml = pyamg.ruge_stuben_solver(self.L)\n # # ml 
= pyamg.smoothed_aggregation_solver(self.L)\n # print(ml)\n # print()\n # print(self.L)\n # print()\n # x = ml.solve(ax, tol=1e-10)\n # print('residual: {}'.format(numpy.linalg.norm(ax - self.L*x)))\n # print()\n # print(ax)\n # print()\n # print(x)\n # exit(1)\n\n # x = sparse.linalg.spsolve(self.L, ax)\n # print('residual: {}'.format(numpy.linalg.norm(ax - self.L*x)))\n # exit(1)\n\n q2, r2 = self.get_q2_r2(ax, ay)\n\n Lax = self.L * ax\n Lay = self.L * ay\n\n out = [\n 0.5 * numpy.dot(Lax, Lax),\n 0.5 * numpy.dot(Lay, Lay),\n 0.5 * numpy.dot(q2 - 1, q2 - 1),\n 0.5 * numpy.dot(r2, r2),\n ]\n\n if self.num_f_eval % 10000 == 0:\n print(\"{:7d} {:e} {:e} {:e} {:e}\".format(self.num_f_eval, *out))\n\n self.num_f_eval += 1\n return numpy.sum(out)" ]
[ 0.6632798910140991, 0.6630605459213257, 0.6583905220031738, 0.6583086848258972, 0.6556440591812134, 0.6548488140106201, 0.6548237800598145, 0.6544963717460632, 0.6540391445159912, 0.653718113899231, 0.6535154581069946, 0.6519917845726013 ]
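The trial function above is meant to be driven by a bracketing root finder: the COM routine quoted among the negatives of the next record calls scipy.optimize.brentq(_minimize_c, 2, 1000, ...). The sketch below reproduces that pattern with arbitrary placeholder values for a_tilde and b_tilde (in practice they are computed per halo mass and redshift, which is not shown here):

```python
# Sketch: root-finding on the trial function to recover a halo concentration.
# a_tilde = 1.6 and b_tilde = -0.7 are arbitrary placeholders.
import numpy as np
import scipy.optimize

def minimize_c(c, z=0, a_tilde=1, b_tilde=-1,
               Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
    Y1 = np.log(2) - 0.5
    Yc = np.log(1 + c) - c/(1 + c)
    f1 = Y1/Yc                                   # LHS of eqn 18
    rho_2 = 200 * c**3 * Y1 / Yc                 # eqn 14, mean inner density
    zf = (((1 + z)**3 + omega_lambda_0/omega_M_0) *
          (rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1
    f2 = ((1 + zf - z)**a_tilde) * np.exp((zf - z) * b_tilde)
    return f1 - f2                               # zero at the right concentration

c = scipy.optimize.brentq(minimize_c, 2, 1000, args=(0.0, 1.6, -0.7))
print(c)
```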
Rearrange eqn 18 from Correa et al (2015c) to return
formation redshift for a concentration at a given redshift

Parameters
----------
c : float / numpy array
    Concentration of halo
z : float / numpy array
    Redshift of halo with concentration c
Ascaling : float
    Cosmological dependent scaling between densities, use function
    getAscaling('WMAP5') if unsure. Default is 900.
omega_M_0 : float
    Mass density of the universe. Default is 0.25
omega_lambda_0 : float
    Dark Energy density of the universe. Default is 0.75

Returns
-------
zf : float / numpy array
    Formation redshift for halo of concentration 'c' at redshift 'z'
def formationz(c, z, Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):
    """ Rearrange eqn 18 from Correa et al (2015c) to return
    formation redshift for a concentration at a given redshift

    Parameters
    ----------
    c : float / numpy array
        Concentration of halo
    z : float / numpy array
        Redshift of halo with concentration c
    Ascaling : float
        Cosmological dependent scaling between densities, use function
        getAscaling('WMAP5') if unsure. Default is 900.
    omega_M_0 : float
        Mass density of the universe. Default is 0.25
    omega_lambda_0 : float
        Dark Energy density of the universe. Default is 0.75

    Returns
    -------
    zf : float / numpy array
        Formation redshift for halo of concentration 'c' at redshift 'z'

    """
    Y1 = np.log(2) - 0.5
    Yc = np.log(1+c) - c/(1+c)

    rho_2 = 200*(c**3)*Y1/Yc

    zf = (((1+z)**3 + omega_lambda_0/omega_M_0) *
          (rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1

    return(zf)
[ "def _minimize_c(c, z=0, a_tilde=1, b_tilde=-1,\n Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):\n \"\"\" Trial function to solve 2 eqns (17 and 18) from Correa et al. (2015c)\n for 1 unknown, i.e. concentration, returned by a minimisation call \"\"\"\n\n # Fn 1 (LHS of Eqn 18)\n\n Y1 = np.log(2) - 0.5\n Yc = np.log(1+c) - c/(1+c)\n f1 = Y1/Yc\n\n # Fn 2 (RHS of Eqn 18)\n\n # Eqn 14 - Define the mean inner density\n rho_2 = 200 * c**3 * Y1 / Yc\n\n # Eqn 17 rearranged to solve for Formation Redshift\n # essentially when universe had rho_2 density\n zf = (((1 + z)**3 + omega_lambda_0/omega_M_0) *\n (rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1\n\n # RHS of Eqn 19\n f2 = ((1 + zf - z)**a_tilde) * np.exp((zf - z) * b_tilde)\n\n # LHS - RHS should be zero for the correct concentration\n return(f1-f2)", "def c_M_z(self, M, z):\n \"\"\"\n fitting function of http://moriond.in2p3.fr/J08/proceedings/duffy.pdf for the mass and redshift dependence of the concentration parameter\n\n :param M: halo mass in M_sun/h\n :type M: float or numpy array\n :param z: redshift\n :type z: float >0\n :return: concentration parameter as float\n \"\"\"\n # fitted parameter values\n A = 5.22\n B = -0.072\n C = -0.42\n M_pivot = 2.*10**12\n return A*(M/M_pivot)**B*(1+z)**C", "def COM(z, M, **cosmo):\n \"\"\" Calculate concentration for halo of mass 'M' at redshift 'z'\n\n Parameters\n ----------\n z : float / numpy array\n Redshift to find concentration of halo\n M : float / numpy array\n Halo mass at redshift 'z'. Must be same size as 'z'\n cosmo : dict\n Dictionary of cosmological parameters, similar in format to:\n {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,\n 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,\n 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}\n\n Returns\n -------\n (c_array, sig_array, nu_array, zf_array) : float / numpy arrays\n of equivalent size to 'z' and 'M'. 
Variables are\n Concentration, Mass Variance 'sigma' this corresponds too,\n the dimnesionless fluctuation this represents and formation redshift\n\n \"\"\"\n # Check that z and M are arrays\n z = np.array(z, ndmin=1, dtype=float)\n M = np.array(M, ndmin=1, dtype=float)\n\n # Create array\n c_array = np.empty_like(z)\n sig_array = np.empty_like(z)\n nu_array = np.empty_like(z)\n zf_array = np.empty_like(z)\n\n for i_ind, (zval, Mval) in enumerate(_izip(z, M)):\n # Evaluate the indices at each redshift and mass combination\n # that you want a concentration for, different to MAH which\n # uses one a_tilde and b_tilde at the starting redshift only\n a_tilde, b_tilde = calc_ab(zval, Mval, **cosmo)\n\n # Minimize equation to solve for 1 unknown, 'c'\n c = scipy.optimize.brentq(_minimize_c, 2, 1000,\n args=(zval, a_tilde, b_tilde,\n cosmo['A_scaling'], cosmo['omega_M_0'],\n cosmo['omega_lambda_0']))\n\n if np.isclose(c, 0):\n print(\"Error solving for concentration with given redshift and \"\n \"(probably) too small a mass\")\n c = -1\n sig = -1\n nu = -1\n zf = -1\n else:\n # Calculate formation redshift for this concentration,\n # redshift at which the scale radius = virial radius: z_-2\n zf = formationz(c, zval, Ascaling=cosmo['A_scaling'],\n omega_M_0=cosmo['omega_M_0'],\n omega_lambda_0=cosmo['omega_lambda_0'])\n\n R_Mass = cp.perturbation.mass_to_radius(Mval, **cosmo)\n\n sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)\n nu = 1.686/(sig*growthfactor(zval, norm=True, **cosmo))\n\n c_array[i_ind] = c\n sig_array[i_ind] = sig\n nu_array[i_ind] = nu\n zf_array[i_ind] = zf\n\n return(c_array, sig_array, nu_array, zf_array)", "def _deriv_growth(z, **cosmo):\n \"\"\" Returns derivative of the linear growth factor at z\n for a given cosmology **cosmo \"\"\"\n\n inv_h = (cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])**(-0.5)\n fz = (1 + z) * inv_h**3\n\n deriv_g = growthfactor(z, norm=True, **cosmo)*(inv_h**2) *\\\n 1.5 * cosmo['omega_M_0'] * (1 + z)**2 -\\\n fz * growthfactor(z, norm=True, **cosmo)/_int_growth(z, **cosmo)\n\n return(deriv_g)", "def Onu(self, z):\n \"\"\"\n Returns the sum of :func:`~classylss.binding.Background.Omega_ncdm`\n and :func:`~classylss.binding.Background.Omega_ur`.\n \"\"\"\n return self.bg.Omega_ncdm(z) + self.bg.Omega_ur(z)", "def redshift(self, z):\n \"\"\"Apply redshift to the flat spectrum.\n\n Unlike :meth:`SourceSpectrum.redshift`, the redshifted spectrum\n remains an analytic flat source.\n\n Parameters\n ----------\n z : number\n Redshift value.\n\n Returns\n -------\n ans : `FlatSpectrum`\n\n \"\"\"\n tmp = SourceSpectrum.redshift(self, z)\n ans = FlatSpectrum(tmp.flux.max(), fluxunits=tmp.fluxunits)\n return ans", "def c_DuttonMaccio(z, m, h=h):\n \"\"\"Concentration from c(M) relation in Dutton & Maccio (2014).\n\n Parameters\n ----------\n z : float or array_like\n Redshift(s) of halos.\n m : float or array_like\n Mass(es) of halos (m200 definition), in units of solar masses.\n h : float, optional\n Hubble parameter. Default is from Planck13.\n\n Returns\n ----------\n ndarray\n Concentration values (c200) for halos.\n\n References\n ----------\n Calculation from Planck-based results of simulations presented in:\n\n A.A. Dutton & A.V. 
Maccio, \"Cold dark matter haloes in the Planck era:\n evolution of structural parameters for Einasto and NFW profiles,\"\n Monthly Notices of the Royal Astronomical Society, Volume 441, Issue 4,\n p.3359-3374, 2014.\n \"\"\"\n\n z, m = _check_inputs(z, m)\n\n a = 0.52 + 0.385 * np.exp(-0.617 * (z**1.21)) # EQ 10\n b = -0.101 + 0.026 * z # EQ 11\n\n logc200 = a + b * np.log10(m * h / (10.**12)) # EQ 7\n\n concentration = 10.**logc200\n\n return concentration", "def cduffy(z, M, vir='200crit', relaxed=True):\n \"\"\" NFW conc from Duffy 08 Table 1 for halo mass and redshift\"\"\"\n\n if(vir == '200crit'):\n if relaxed:\n params = [6.71, -0.091, -0.44]\n else:\n params = [5.71, -0.084, -0.47]\n elif(vir == 'tophat'):\n if relaxed:\n params = [9.23, -0.090, -0.69]\n else:\n params = [7.85, -0.081, -0.71]\n elif(vir == '200mean'):\n if relaxed:\n params = [11.93, -0.090, -0.99]\n else:\n params = [10.14, -0.081, -1.01]\n else:\n print(\"Didn't recognise the halo boundary definition provided %s\"\n % (vir))\n\n return(params[0] * ((M/(2e12/0.72))**params[1]) * ((1+z)**params[2]))", "public static double GaussCdf(double z)\n {\n // input = z-value (-inf to +inf)\n // output = p under Normal curve from -inf to z\n // e.g., if z = 0.0, function returns 0.5000\n // ACM Algorithm #209\n double y; // 209 scratch variable\n double p; // result. called ‘z’ in 209\n double w; // 209 scratch variable\n\n if (z == 0.0)\n {\n p = 0.0;\n }\n else\n {\n y = Math.abs(z) / 2.0;\n if (y >= 3.0)\n {\n p = 1.0;\n }\n else if (y < 1.0)\n {\n w = y * y;\n p = ((((((((0.000124818987 * w\n - 0.001075204047) * w + 0.005198775019) * w\n - 0.019198292004) * w + 0.059054035642) * w\n - 0.151968751364) * w + 0.319152932694) * w\n - 0.531923007300) * w + 0.797884560593) * y * 2.0;\n }\n else\n {\n y = y - 2.0;\n p = (((((((((((((-0.000045255659 * y\n + 0.000152529290) * y - 0.000019538132) * y\n - 0.000676904986) * y + 0.001390604284) * y\n - 0.000794620820) * y - 0.002034254874) * y\n + 0.006549791214) * y - 0.010557625006) * y\n + 0.011630447319) * y - 0.009279453341) * y\n + 0.005353579108) * y - 0.002141268741) * y\n + 0.000535310849) * y + 0.999936657524;\n }\n }\n\n if (z > 0.0)\n {\n return (p + 1.0) / 2.0;\n }\n\n return (1.0 - p) / 2.0;\n }", "public static double gaussCdf(double z) {\n // input = z-value (-inf to +inf)\n // output = p under Normal curve from -inf to z\n // e.g., if z = 0.0, function returns 0.5000\n // ACM Algorithm #209\n double y; // 209 scratch variable\n double p; // result. 
called ‘z’ in 209\n double w; // 209 scratch variable\n\n if (z == 0.0) {\n p = 0.0;\n }\n else {\n y = Math.abs(z) / 2.0;\n if (y >= 3.0) {\n p = 1.0;\n }\n else if (y < 1.0) {\n w = y * y;\n p = ((((((((0.000124818987 * w\n - 0.001075204047) * w + 0.005198775019) * w\n - 0.019198292004) * w + 0.059054035642) * w\n - 0.151968751364) * w + 0.319152932694) * w\n - 0.531923007300) * w + 0.797884560593) * y * 2.0;\n }\n else {\n y = y - 2.0;\n p = (((((((((((((-0.000045255659 * y\n + 0.000152529290) * y - 0.000019538132) * y\n - 0.000676904986) * y + 0.001390604284) * y\n - 0.000794620820) * y - 0.002034254874) * y\n + 0.006549791214) * y - 0.010557625006) * y\n + 0.011630447319) * y - 0.009279453341) * y\n + 0.005353579108) * y - 0.002141268741) * y\n + 0.000535310849) * y + 0.999936657524;\n }\n }\n\n if (z > 0.0) {\n return (p + 1.0) / 2.0;\n }\n \n return (1.0 - p) / 2.0;\n }", "def rho0_c(self, c):\n \"\"\"\n computes density normalization as a function of concentration parameter\n :return: density normalization in h^2/Mpc^3 (comoving)\n \"\"\"\n return 200./3*self.rhoc*c**3/(np.log(1.+c)-c/(1.+c))", "def flattening(self,R,z):\n \"\"\"\n \n NAME:\n \n flattening\n \n PURPOSE:\n \n calculate the potential flattening, defined as sqrt(fabs(z/R F_R/F_z))\n \n INPUT:\n \n R - Galactocentric radius (can be Quantity)\n\n z - height (can be Quantity)\n \n OUTPUT:\n \n flattening\n \n HISTORY:\n \n 2012-09-13 - Written - Bovy (IAS)\n \n \"\"\"\n return nu.sqrt(nu.fabs(z/R*self.Rforce(R,z,use_physical=False)\\\n /self.zforce(R,z,use_physical=False)))" ]
[ 0.7827358245849609, 0.7338221073150635, 0.6758645176887512, 0.6744975447654724, 0.6709538102149963, 0.6624672412872314, 0.6606255173683167, 0.6586936712265015, 0.6488917469978333, 0.6486613750457764, 0.6458823084831238, 0.6458626389503479 ]
Calculate growth rate indices a_tilde and b_tilde Parameters ---------- zi : float Redshift Mi : float Halo mass at redshift 'zi' cosmo : dict Dictionary of cosmological parameters, similar in format to: {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275, 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0, 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6} Returns ------- (a_tilde, b_tilde) : float
def calc_ab(zi, Mi, **cosmo):
    """ Calculate growth rate indices a_tilde and b_tilde

    Parameters
    ----------
    zi : float
        Redshift
    Mi : float
        Halo mass at redshift 'zi'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

    Returns
    -------
    (a_tilde, b_tilde) : float

    """
    # When zi = 0, the a_tilde becomes alpha and b_tilde becomes beta

    # Eqn 23 of Correa et al 2015a (analytically solve from Eqn 16 and 17)
    # Arbitrary formation redshift, z_-2 in COM is more physically motivated
    zf = -0.0064 * (np.log10(Mi))**2 + 0.0237 * (np.log10(Mi)) + 1.8837

    # Eqn 22 of Correa et al 2015a
    q = 4.137 * zf**(-0.9476)

    # Radius of a mass Mi
    R_Mass = cp.perturbation.mass_to_radius(Mi, **cosmo)  # [Mpc]
    # Radius of a mass Mi/q
    Rq_Mass = cp.perturbation.mass_to_radius(Mi/q, **cosmo)  # [Mpc]

    # Mass variance 'sigma' evaluated at z=0 to a good approximation
    sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)  # dimensionless
    sigq, err_sigq = cp.perturbation.sigma_r(Rq_Mass, 0, **cosmo)  # dimensionless

    f = (sigq**2 - sig**2)**(-0.5)

    # Eqn 9 and 10 from Correa et al 2015c
    # (generalised to zi from Correa et al 2015a's z=0 special case)
    # a_tilde is power law growth rate
    a_tilde = (np.sqrt(2/np.pi) * 1.686 * _deriv_growth(zi, **cosmo) /
               growthfactor(zi, norm=True, **cosmo)**2 + 1)*f

    # b_tilde is exponential growth rate
    b_tilde = -f

    return(a_tilde, b_tilde)
[ "def _deriv_growth(z, **cosmo):\n \"\"\" Returns derivative of the linear growth factor at z\n for a given cosmology **cosmo \"\"\"\n\n inv_h = (cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])**(-0.5)\n fz = (1 + z) * inv_h**3\n\n deriv_g = growthfactor(z, norm=True, **cosmo)*(inv_h**2) *\\\n 1.5 * cosmo['omega_M_0'] * (1 + z)**2 -\\\n fz * growthfactor(z, norm=True, **cosmo)/_int_growth(z, **cosmo)\n\n return(deriv_g)", "def _int_growth(z, **cosmo):\n \"\"\" Returns integral of the linear growth factor from z=200 to z=z \"\"\"\n\n zmax = 200\n\n if hasattr(z, \"__len__\"):\n for zval in z:\n assert(zval < zmax)\n else:\n assert(z < zmax)\n\n y, yerr = scipy.integrate.quad(\n lambda z: (1 + z)/(cosmo['omega_M_0']*(1 + z)**3 +\n cosmo['omega_lambda_0'])**(1.5),\n z, zmax)\n\n return(y)", "def _minimize_c(c, z=0, a_tilde=1, b_tilde=-1,\n Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):\n \"\"\" Trial function to solve 2 eqns (17 and 18) from Correa et al. (2015c)\n for 1 unknown, i.e. concentration, returned by a minimisation call \"\"\"\n\n # Fn 1 (LHS of Eqn 18)\n\n Y1 = np.log(2) - 0.5\n Yc = np.log(1+c) - c/(1+c)\n f1 = Y1/Yc\n\n # Fn 2 (RHS of Eqn 18)\n\n # Eqn 14 - Define the mean inner density\n rho_2 = 200 * c**3 * Y1 / Yc\n\n # Eqn 17 rearranged to solve for Formation Redshift\n # essentially when universe had rho_2 density\n zf = (((1 + z)**3 + omega_lambda_0/omega_M_0) *\n (rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1\n\n # RHS of Eqn 19\n f2 = ((1 + zf - z)**a_tilde) * np.exp((zf - z) * b_tilde)\n\n # LHS - RHS should be zero for the correct concentration\n return(f1-f2)", "def getcosmo(cosmology):\n \"\"\" Find cosmological parameters for named cosmo in cosmology.py list \"\"\"\n\n defaultcosmologies = {'dragons': cg.DRAGONS(), 'wmap1': cg.WMAP1_Mill(),\n 'wmap3': cg.WMAP3_ML(), 'wmap5': cg.WMAP5_mean(),\n 'wmap7': cg.WMAP7_ML(), 'wmap9': cg.WMAP9_ML(),\n 'wmap1_lss': cg.WMAP1_2dF_mean(),\n 'wmap3_mean': cg.WMAP3_mean(),\n 'wmap5_ml': cg.WMAP5_ML(),\n 'wmap5_lss': cg.WMAP5_BAO_SN_mean(),\n 'wmap7_lss': cg.WMAP7_BAO_H0_mean(),\n 'planck13': cg.Planck_2013(),\n 'planck15': cg.Planck_2015()}\n\n if isinstance(cosmology, dict):\n # User providing their own variables\n cosmo = cosmology\n if 'A_scaling' not in cosmology.keys():\n A_scaling = getAscaling(cosmology, newcosmo=True)\n cosmo.update({'A_scaling': A_scaling})\n\n # Add extra variables by hand that cosmolopy requires\n # note that they aren't used (set to zero)\n for paramnames in cg.WMAP5_mean().keys():\n if paramnames not in cosmology.keys():\n cosmo.update({paramnames: 0})\n elif cosmology.lower() in defaultcosmologies.keys():\n # Load by name of cosmology instead\n cosmo = defaultcosmologies[cosmology.lower()]\n A_scaling = getAscaling(cosmology)\n cosmo.update({'A_scaling': A_scaling})\n else:\n print(\"You haven't passed a dict of cosmological parameters \")\n print(\"OR a recognised cosmology, you gave %s\" % (cosmology))\n # No idea why this has to be done by hand but should be O_k = 0\n cosmo = cp.distance.set_omega_k_0(cosmo)\n\n # Use the cosmology as **cosmo passed to cosmolopy routines\n return(cosmo)", "def _compute(self, funcTilde, R, z, phi):\n \"\"\"\n NAME:\n _compute\n PURPOSE:\n evaluate the NxLxM density or potential\n INPUT:\n funcTidle - must be _rhoTilde or _phiTilde\n R - Cylindrical Galactocentric radius\n z - vertical height\n phi - azimuth\n OUTPUT:\n An NxLxM density or potential at (R,z, phi)\n HISTORY:\n 2016-05-18 - Written - Aladdin \n \"\"\"\n Acos, Asin = self._Acos, 
self._Asin\n N, L, M = Acos.shape \n r, theta, phi = bovy_coords.cyl_to_spher(R,z,phi)\n \n \n \n PP = lpmn(M-1,L-1,nu.cos(theta))[0].T ##Get the Legendre polynomials\n func_tilde = funcTilde(r, N, L) ## Tilde of the function of interest \n \n func = nu.zeros((N,L,M), float) ## The function of interest (density or potential)\n \n m = nu.arange(0, M)[nu.newaxis, nu.newaxis, :]\n mcos = nu.cos(m*phi)\n msin = nu.sin(m*phi)\n func = func_tilde[:,:,None]*(Acos[:,:,:]*mcos + Asin[:,:,:]*msin)*PP[None,:,:]\n return func", "def c_M_z(self, M, z):\n \"\"\"\n fitting function of http://moriond.in2p3.fr/J08/proceedings/duffy.pdf for the mass and redshift dependence of the concentration parameter\n\n :param M: halo mass in M_sun/h\n :type M: float or numpy array\n :param z: redshift\n :type z: float >0\n :return: concentration parameter as float\n \"\"\"\n # fitted parameter values\n A = 5.22\n B = -0.072\n C = -0.42\n M_pivot = 2.*10**12\n return A*(M/M_pivot)**B*(1+z)**C", "def astro_redshifts(min_z, max_z, nsamples):\n '''Sample the redshifts for sources, with redshift\n independent rate, using standard cosmology\n\n Parameters\n ----------\n min_z: float\n Minimum redshift\n max_z: float\n Maximum redshift\n nsamples: int\n Number of samples\n\n Returns\n -------\n z_astro: array\n nsamples of redshift, between min_z, max_z, by standard cosmology\n '''\n\n dz, fac = 0.001, 3.0\n # use interpolation instead of directly estimating all the pdfz for rndz\n V = quad(contracted_dVdc, 0., max_z)[0]\n zbins = np.arange(min_z, max_z + dz/2., dz)\n zcenter = (zbins[:-1] + zbins[1:]) / 2\n pdfz = cosmo.differential_comoving_volume(zcenter).value/(1+zcenter)/V\n\n int_pdf = interp1d(zcenter, pdfz, bounds_error=False, fill_value=0)\n\n rndz = np.random.uniform(min_z, max_z, int(fac*nsamples))\n pdf_zs = int_pdf(rndz)\n maxpdf = max(pdf_zs)\n rndn = np.random.uniform(0, 1, int(fac*nsamples)) * maxpdf\n diff = pdf_zs - rndn\n idx = np.where(diff > 0)\n z_astro = rndz[idx]\n\n np.random.shuffle(z_astro)\n z_astro.resize(nsamples)\n\n return z_astro", "def _delta_sigma(**cosmo):\n \"\"\" Perturb best-fit constant of proportionality Ascaling for\n rho_crit - rho_2 relation for unknown cosmology (Correa et al 2015c)\n\n Parameters\n ----------\n cosmo : dict\n Dictionary of cosmological parameters, similar in format to:\n {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,\n 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,\n 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}\n\n Returns\n -------\n float\n The perturbed 'A' relation between rho_2 and rho_crit for the cosmology\n\n Raises\n ------\n\n \"\"\"\n\n M8_cosmo = cp.perturbation.radius_to_mass(8, **cosmo)\n perturbed_A = (0.796/cosmo['sigma_8']) * \\\n (M8_cosmo/2.5e14)**((cosmo['n']-0.963)/6)\n return(perturbed_A)", "def MAH(z, zi, Mi, **cosmo):\n \"\"\" Calculate mass accretion history by looping function acc_rate\n over redshift steps 'z' for halo of mass 'Mi' at redshift 'zi'\n\n Parameters\n ----------\n z : float / numpy array\n Redshift to output MAH over. 
Note zi<z always\n zi : float\n Redshift\n Mi : float\n Halo mass at redshift 'zi'\n cosmo : dict\n Dictionary of cosmological parameters, similar in format to:\n {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,\n 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,\n 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}\n\n Returns\n -------\n (dMdt, Mz) : float / numpy arrays of equivalent size to 'z'\n Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'\n\n \"\"\"\n\n # Ensure that z is a 1D NumPy array\n z = np.array(z, ndmin=1, dtype=float)\n\n # Create a full array\n dMdt_array = np.empty_like(z)\n Mz_array = np.empty_like(z)\n\n for i_ind, zval in enumerate(z):\n # Solve the accretion rate and halo mass at each redshift step\n dMdt, Mz = acc_rate(zval, zi, Mi, **cosmo)\n\n dMdt_array[i_ind] = dMdt\n Mz_array[i_ind] = Mz\n\n return(dMdt_array, Mz_array)", "def cond_mi(x, y, z):\n '''\n compute and return the mutual information between x and y given z, I(x, y | z)\n\n inputs:\n -------\n x, y, z: iterables with discrete symbols\n\n output:\n -------\n mi: float\n\n implementation notes:\n ---------------------\n I(x, y | z) = H(x | z) - H(x | y, z)\n = H(x, z) - H(z) - ( H(x, y, z) - H(y,z) )\n = H(x, z) + H(y, z) - H(z) - H(x, y, z)\n '''\n # dict.values() returns a view object that has to be converted to a list before being converted to an array\n probXZ = symbols_to_prob(combine_symbols(x, z)).prob()\n probYZ = symbols_to_prob(combine_symbols(y, z)).prob()\n probXYZ =symbols_to_prob(combine_symbols(x, y, z)).prob()\n probZ = symbols_to_prob(z).prob()\n\n return entropy(prob=probXZ) + entropy(prob=probYZ) - entropy(prob=probXYZ) - entropy(prob=probZ)", "def _dens(self, R, z, phi=0., t=0.):\n \"\"\"\n NAME:\n _dens\n PURPOSE:\n evaluate the density at (R,z, phi)\n INPUT:\n R - Cylindrical Galactocentric radius\n z - vertical height\n phi - azimuth\n t - time\n OUTPUT:\n density at (R,z, phi)\n HISTORY:\n 2016-05-17 - Written - Aladdin \n \"\"\"\n if not self.isNonAxi and phi is None:\n phi= 0.\n return self._computeArray(self._rhoTilde, R,z,phi)", "def COM(z, M, **cosmo):\n \"\"\" Calculate concentration for halo of mass 'M' at redshift 'z'\n\n Parameters\n ----------\n z : float / numpy array\n Redshift to find concentration of halo\n M : float / numpy array\n Halo mass at redshift 'z'. Must be same size as 'z'\n cosmo : dict\n Dictionary of cosmological parameters, similar in format to:\n {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,\n 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,\n 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}\n\n Returns\n -------\n (c_array, sig_array, nu_array, zf_array) : float / numpy arrays\n of equivalent size to 'z' and 'M'. 
Variables are\n Concentration, Mass Variance 'sigma' this corresponds too,\n the dimnesionless fluctuation this represents and formation redshift\n\n \"\"\"\n # Check that z and M are arrays\n z = np.array(z, ndmin=1, dtype=float)\n M = np.array(M, ndmin=1, dtype=float)\n\n # Create array\n c_array = np.empty_like(z)\n sig_array = np.empty_like(z)\n nu_array = np.empty_like(z)\n zf_array = np.empty_like(z)\n\n for i_ind, (zval, Mval) in enumerate(_izip(z, M)):\n # Evaluate the indices at each redshift and mass combination\n # that you want a concentration for, different to MAH which\n # uses one a_tilde and b_tilde at the starting redshift only\n a_tilde, b_tilde = calc_ab(zval, Mval, **cosmo)\n\n # Minimize equation to solve for 1 unknown, 'c'\n c = scipy.optimize.brentq(_minimize_c, 2, 1000,\n args=(zval, a_tilde, b_tilde,\n cosmo['A_scaling'], cosmo['omega_M_0'],\n cosmo['omega_lambda_0']))\n\n if np.isclose(c, 0):\n print(\"Error solving for concentration with given redshift and \"\n \"(probably) too small a mass\")\n c = -1\n sig = -1\n nu = -1\n zf = -1\n else:\n # Calculate formation redshift for this concentration,\n # redshift at which the scale radius = virial radius: z_-2\n zf = formationz(c, zval, Ascaling=cosmo['A_scaling'],\n omega_M_0=cosmo['omega_M_0'],\n omega_lambda_0=cosmo['omega_lambda_0'])\n\n R_Mass = cp.perturbation.mass_to_radius(Mval, **cosmo)\n\n sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)\n nu = 1.686/(sig*growthfactor(zval, norm=True, **cosmo))\n\n c_array[i_ind] = c\n sig_array[i_ind] = sig\n nu_array[i_ind] = nu\n zf_array[i_ind] = zf\n\n return(c_array, sig_array, nu_array, zf_array)" ]
[ 0.6879026889801025, 0.6802611947059631, 0.6461231708526611, 0.6325393915176392, 0.6288246512413025, 0.6253081560134888, 0.6240700483322144, 0.6204337477684021, 0.6187481880187988, 0.6157792210578918, 0.6153101325035095, 0.613068163394928 ]
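The calc_ab entry above combines two closed-form fits (Eqns 22 and 23 of Correa et al. 2015a) with mass-variance and growth-factor terms that need cosmolopy. As a minimal numpy sketch of the same structure, with the sigma and growth-factor terms replaced by clearly hypothetical placeholder values rather than the real cosmolopy calls:

import numpy as np

Mi = 1e12                                                             # halo mass [Msol]
zf = -0.0064 * (np.log10(Mi))**2 + 0.0237 * (np.log10(Mi)) + 1.8837   # Eqn 23
q = 4.137 * zf**(-0.9476)                                             # Eqn 22

# Hypothetical placeholders: in calc_ab these come from
# cp.perturbation.sigma_r and _deriv_growth / growthfactor
sig, sigq = 2.0, 2.5            # sigma(Mi), sigma(Mi/q) at z=0, with sigq > sig
dDdz_over_D2 = -0.5             # D'(zi) / D(zi)**2

f = (sigq**2 - sig**2)**(-0.5)
a_tilde = (np.sqrt(2/np.pi) * 1.686 * dDdz_over_D2 + 1) * f           # Eqn 9
b_tilde = -f                                                          # Eqn 10

This is a sketch under the stated placeholder assumptions, not a substitute for the full calculation in the row above.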
Calculate accretion rate and mass history of a halo at any
redshift 'z' with mass 'Mi' at a lower redshift 'zi'

Parameters
----------
z : float
    Redshift to solve acc_rate / mass history. Note zi<z
zi : float
    Redshift
Mi : float
    Halo mass at redshift 'zi'
cosmo : dict
    Dictionary of cosmological parameters, similar in format to:
    {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
    'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
    'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

Returns
-------
(dMdt, Mz) : float
    Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'
def acc_rate(z, zi, Mi, **cosmo):
    """ Calculate accretion rate and mass history of a halo at any
    redshift 'z' with mass 'Mi' at a lower redshift 'zi'

    Parameters
    ----------
    z : float
        Redshift to solve acc_rate / mass history. Note zi<z
    zi : float
        Redshift
    Mi : float
        Halo mass at redshift 'zi'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

    Returns
    -------
    (dMdt, Mz) : float
        Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'

    """
    # Find parameters a_tilde and b_tilde for initial redshift
    # use Eqn 9 and 10 of Correa et al. (2015c)
    a_tilde, b_tilde = calc_ab(zi, Mi, **cosmo)

    # Halo mass at z, in Msol
    # use Eqn 8 in Correa et al. (2015c)
    Mz = Mi * ((1 + z - zi)**a_tilde) * (np.exp(b_tilde * (z - zi)))

    # Accretion rate at z, Msol yr^-1
    # use Eqn 11 from Correa et al. (2015c)
    dMdt = 71.6 * (Mz/1e12) * (cosmo['h']/0.7) *\
        (-a_tilde / (1 + z - zi) - b_tilde) * (1 + z) *\
        np.sqrt(cosmo['omega_M_0']*(1 + z)**3+cosmo['omega_lambda_0'])

    return(dMdt, Mz)
[ "def MAH(z, zi, Mi, **cosmo):\n \"\"\" Calculate mass accretion history by looping function acc_rate\n over redshift steps 'z' for halo of mass 'Mi' at redshift 'zi'\n\n Parameters\n ----------\n z : float / numpy array\n Redshift to output MAH over. Note zi<z always\n zi : float\n Redshift\n Mi : float\n Halo mass at redshift 'zi'\n cosmo : dict\n Dictionary of cosmological parameters, similar in format to:\n {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,\n 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,\n 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}\n\n Returns\n -------\n (dMdt, Mz) : float / numpy arrays of equivalent size to 'z'\n Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'\n\n \"\"\"\n\n # Ensure that z is a 1D NumPy array\n z = np.array(z, ndmin=1, dtype=float)\n\n # Create a full array\n dMdt_array = np.empty_like(z)\n Mz_array = np.empty_like(z)\n\n for i_ind, zval in enumerate(z):\n # Solve the accretion rate and halo mass at each redshift step\n dMdt, Mz = acc_rate(zval, zi, Mi, **cosmo)\n\n dMdt_array[i_ind] = dMdt\n Mz_array[i_ind] = Mz\n\n return(dMdt_array, Mz_array)", "def c_M_z(self, M, z):\n \"\"\"\n fitting function of http://moriond.in2p3.fr/J08/proceedings/duffy.pdf for the mass and redshift dependence of the concentration parameter\n\n :param M: halo mass in M_sun/h\n :type M: float or numpy array\n :param z: redshift\n :type z: float >0\n :return: concentration parameter as float\n \"\"\"\n # fitted parameter values\n A = 5.22\n B = -0.072\n C = -0.42\n M_pivot = 2.*10**12\n return A*(M/M_pivot)**B*(1+z)**C", "def _deriv_growth(z, **cosmo):\n \"\"\" Returns derivative of the linear growth factor at z\n for a given cosmology **cosmo \"\"\"\n\n inv_h = (cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])**(-0.5)\n fz = (1 + z) * inv_h**3\n\n deriv_g = growthfactor(z, norm=True, **cosmo)*(inv_h**2) *\\\n 1.5 * cosmo['omega_M_0'] * (1 + z)**2 -\\\n fz * growthfactor(z, norm=True, **cosmo)/_int_growth(z, **cosmo)\n\n return(deriv_g)", "def COM(z, M, **cosmo):\n \"\"\" Calculate concentration for halo of mass 'M' at redshift 'z'\n\n Parameters\n ----------\n z : float / numpy array\n Redshift to find concentration of halo\n M : float / numpy array\n Halo mass at redshift 'z'. Must be same size as 'z'\n cosmo : dict\n Dictionary of cosmological parameters, similar in format to:\n {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,\n 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,\n 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}\n\n Returns\n -------\n (c_array, sig_array, nu_array, zf_array) : float / numpy arrays\n of equivalent size to 'z' and 'M'. 
Variables are\n Concentration, Mass Variance 'sigma' this corresponds too,\n the dimnesionless fluctuation this represents and formation redshift\n\n \"\"\"\n # Check that z and M are arrays\n z = np.array(z, ndmin=1, dtype=float)\n M = np.array(M, ndmin=1, dtype=float)\n\n # Create array\n c_array = np.empty_like(z)\n sig_array = np.empty_like(z)\n nu_array = np.empty_like(z)\n zf_array = np.empty_like(z)\n\n for i_ind, (zval, Mval) in enumerate(_izip(z, M)):\n # Evaluate the indices at each redshift and mass combination\n # that you want a concentration for, different to MAH which\n # uses one a_tilde and b_tilde at the starting redshift only\n a_tilde, b_tilde = calc_ab(zval, Mval, **cosmo)\n\n # Minimize equation to solve for 1 unknown, 'c'\n c = scipy.optimize.brentq(_minimize_c, 2, 1000,\n args=(zval, a_tilde, b_tilde,\n cosmo['A_scaling'], cosmo['omega_M_0'],\n cosmo['omega_lambda_0']))\n\n if np.isclose(c, 0):\n print(\"Error solving for concentration with given redshift and \"\n \"(probably) too small a mass\")\n c = -1\n sig = -1\n nu = -1\n zf = -1\n else:\n # Calculate formation redshift for this concentration,\n # redshift at which the scale radius = virial radius: z_-2\n zf = formationz(c, zval, Ascaling=cosmo['A_scaling'],\n omega_M_0=cosmo['omega_M_0'],\n omega_lambda_0=cosmo['omega_lambda_0'])\n\n R_Mass = cp.perturbation.mass_to_radius(Mval, **cosmo)\n\n sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)\n nu = 1.686/(sig*growthfactor(zval, norm=True, **cosmo))\n\n c_array[i_ind] = c\n sig_array[i_ind] = sig\n nu_array[i_ind] = nu\n zf_array[i_ind] = zf\n\n return(c_array, sig_array, nu_array, zf_array)", "def c_DuttonMaccio(z, m, h=h):\n \"\"\"Concentration from c(M) relation in Dutton & Maccio (2014).\n\n Parameters\n ----------\n z : float or array_like\n Redshift(s) of halos.\n m : float or array_like\n Mass(es) of halos (m200 definition), in units of solar masses.\n h : float, optional\n Hubble parameter. Default is from Planck13.\n\n Returns\n ----------\n ndarray\n Concentration values (c200) for halos.\n\n References\n ----------\n Calculation from Planck-based results of simulations presented in:\n\n A.A. Dutton & A.V. Maccio, \"Cold dark matter haloes in the Planck era:\n evolution of structural parameters for Einasto and NFW profiles,\"\n Monthly Notices of the Royal Astronomical Society, Volume 441, Issue 4,\n p.3359-3374, 2014.\n \"\"\"\n\n z, m = _check_inputs(z, m)\n\n a = 0.52 + 0.385 * np.exp(-0.617 * (z**1.21)) # EQ 10\n b = -0.101 + 0.026 * z # EQ 11\n\n logc200 = a + b * np.log10(m * h / (10.**12)) # EQ 7\n\n concentration = 10.**logc200\n\n return concentration", "def cosmological_quantity_from_redshift(z, quantity, strip_unit=True,\n **kwargs):\n r\"\"\"Returns the value of a cosmological quantity (e.g., age) at a redshift.\n\n Parameters\n ----------\n z : float\n The redshift.\n quantity : str\n The name of the quantity to get. The name may be any attribute of\n :py:class:`astropy.cosmology.FlatLambdaCDM`.\n strip_unit : bool, optional\n Just return the value of the quantity, sans units. Default is True.\n \\**kwargs :\n All other keyword args are passed to :py:func:`get_cosmology` to\n select a cosmology. If none provided, will use\n :py:attr:`DEFAULT_COSMOLOGY`.\n\n Returns\n -------\n float or astropy.units.quantity :\n The value of the quantity at the requested value. If ``strip_unit`` is\n ``True``, will return the value. 
Otherwise, will return the value with\n units.\n \"\"\"\n cosmology = get_cosmology(**kwargs)\n val = getattr(cosmology, quantity)(z)\n if strip_unit:\n val = val.value\n return val", "def _minimize_c(c, z=0, a_tilde=1, b_tilde=-1,\n Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):\n \"\"\" Trial function to solve 2 eqns (17 and 18) from Correa et al. (2015c)\n for 1 unknown, i.e. concentration, returned by a minimisation call \"\"\"\n\n # Fn 1 (LHS of Eqn 18)\n\n Y1 = np.log(2) - 0.5\n Yc = np.log(1+c) - c/(1+c)\n f1 = Y1/Yc\n\n # Fn 2 (RHS of Eqn 18)\n\n # Eqn 14 - Define the mean inner density\n rho_2 = 200 * c**3 * Y1 / Yc\n\n # Eqn 17 rearranged to solve for Formation Redshift\n # essentially when universe had rho_2 density\n zf = (((1 + z)**3 + omega_lambda_0/omega_M_0) *\n (rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1\n\n # RHS of Eqn 19\n f2 = ((1 + zf - z)**a_tilde) * np.exp((zf - z) * b_tilde)\n\n # LHS - RHS should be zero for the correct concentration\n return(f1-f2)", "def Onu(self, z):\n \"\"\"\n Returns the sum of :func:`~classylss.binding.Background.Omega_ncdm`\n and :func:`~classylss.binding.Background.Omega_ur`.\n \"\"\"\n return self.bg.Omega_ncdm(z) + self.bg.Omega_ur(z)", "def cduffy(z, M, vir='200crit', relaxed=True):\n \"\"\" NFW conc from Duffy 08 Table 1 for halo mass and redshift\"\"\"\n\n if(vir == '200crit'):\n if relaxed:\n params = [6.71, -0.091, -0.44]\n else:\n params = [5.71, -0.084, -0.47]\n elif(vir == 'tophat'):\n if relaxed:\n params = [9.23, -0.090, -0.69]\n else:\n params = [7.85, -0.081, -0.71]\n elif(vir == '200mean'):\n if relaxed:\n params = [11.93, -0.090, -0.99]\n else:\n params = [10.14, -0.081, -1.01]\n else:\n print(\"Didn't recognise the halo boundary definition provided %s\"\n % (vir))\n\n return(params[0] * ((M/(2e12/0.72))**params[1]) * ((1+z)**params[2]))", "def astro_redshifts(min_z, max_z, nsamples):\n '''Sample the redshifts for sources, with redshift\n independent rate, using standard cosmology\n\n Parameters\n ----------\n min_z: float\n Minimum redshift\n max_z: float\n Maximum redshift\n nsamples: int\n Number of samples\n\n Returns\n -------\n z_astro: array\n nsamples of redshift, between min_z, max_z, by standard cosmology\n '''\n\n dz, fac = 0.001, 3.0\n # use interpolation instead of directly estimating all the pdfz for rndz\n V = quad(contracted_dVdc, 0., max_z)[0]\n zbins = np.arange(min_z, max_z + dz/2., dz)\n zcenter = (zbins[:-1] + zbins[1:]) / 2\n pdfz = cosmo.differential_comoving_volume(zcenter).value/(1+zcenter)/V\n\n int_pdf = interp1d(zcenter, pdfz, bounds_error=False, fill_value=0)\n\n rndz = np.random.uniform(min_z, max_z, int(fac*nsamples))\n pdf_zs = int_pdf(rndz)\n maxpdf = max(pdf_zs)\n rndn = np.random.uniform(0, 1, int(fac*nsamples)) * maxpdf\n diff = pdf_zs - rndn\n idx = np.where(diff > 0)\n z_astro = rndz[idx]\n\n np.random.shuffle(z_astro)\n z_astro.resize(nsamples)\n\n return z_astro", "def cond_mi(x, y, z):\n '''\n compute and return the mutual information between x and y given z, I(x, y | z)\n\n inputs:\n -------\n x, y, z: iterables with discrete symbols\n\n output:\n -------\n mi: float\n\n implementation notes:\n ---------------------\n I(x, y | z) = H(x | z) - H(x | y, z)\n = H(x, z) - H(z) - ( H(x, y, z) - H(y,z) )\n = H(x, z) + H(y, z) - H(z) - H(x, y, z)\n '''\n # dict.values() returns a view object that has to be converted to a list before being converted to an array\n probXZ = symbols_to_prob(combine_symbols(x, z)).prob()\n probYZ = symbols_to_prob(combine_symbols(y, 
z)).prob()\n probXYZ =symbols_to_prob(combine_symbols(x, y, z)).prob()\n probZ = symbols_to_prob(z).prob()\n\n return entropy(prob=probXZ) + entropy(prob=probYZ) - entropy(prob=probXYZ) - entropy(prob=probZ)", "def profileMain(self, M, z):\n \"\"\"\n returns all needed parameter (in comoving units modulo h) to draw the profile of the main halo\n r200 in co-moving Mpc/h\n rho_s in h^2/Mpc^3 (co-moving)\n Rs in Mpc/h co-moving\n c unit less\n \"\"\"\n c = self.c_M_z(M, z)\n r200 = self.r200_M(M)\n rho0 = self.rho0_c(c)\n Rs = r200/c\n return r200, rho0, c, Rs" ]
[ 0.744875967502594, 0.6870329976081848, 0.6793551445007324, 0.668692946434021, 0.6536861658096313, 0.6517687439918518, 0.6454139947891235, 0.63826584815979, 0.6368201971054077, 0.6361081600189209, 0.6345521211624146, 0.6290093064308167 ]
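Once (a_tilde, b_tilde) are known, the acc_rate entry above reduces to two closed-form expressions. A self-contained sketch of Eqn 8 and Eqn 11 follows; the (a_tilde, b_tilde) values are hypothetical stand-ins for what calc_ab would return, not fitted results:

import numpy as np

zi, Mi = 0.0, 1e12                        # starting redshift and halo mass [Msol]
a_tilde, b_tilde = 0.25, -0.75            # hypothetical; from calc_ab in practice
h, omega_M_0, omega_lambda_0 = 0.702, 0.275, 0.725
z = 1.0                                   # output redshift, zi < z

Mz = Mi * (1 + z - zi)**a_tilde * np.exp(b_tilde * (z - zi))           # Eqn 8 [Msol]
dMdt = (71.6 * (Mz / 1e12) * (h / 0.7)
        * (-a_tilde / (1 + z - zi) - b_tilde) * (1 + z)
        * np.sqrt(omega_M_0 * (1 + z)**3 + omega_lambda_0))            # Eqn 11 [Msol/yr]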
Calculate mass accretion history by looping function acc_rate over redshift steps 'z' for halo of mass 'Mi' at redshift 'zi' Parameters ---------- z : float / numpy array Redshift to output MAH over. Note zi<z always zi : float Redshift Mi : float Halo mass at redshift 'zi' cosmo : dict Dictionary of cosmological parameters, similar in format to: {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275, 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0, 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6} Returns ------- (dMdt, Mz) : float / numpy arrays of equivalent size to 'z' Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'
def MAH(z, zi, Mi, **cosmo): """ Calculate mass accretion history by looping function acc_rate over redshift steps 'z' for halo of mass 'Mi' at redshift 'zi' Parameters ---------- z : float / numpy array Redshift to output MAH over. Note zi<z always zi : float Redshift Mi : float Halo mass at redshift 'zi' cosmo : dict Dictionary of cosmological parameters, similar in format to: {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275, 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0, 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6} Returns ------- (dMdt, Mz) : float / numpy arrays of equivalent size to 'z' Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z' """ # Ensure that z is a 1D NumPy array z = np.array(z, ndmin=1, dtype=float) # Create a full array dMdt_array = np.empty_like(z) Mz_array = np.empty_like(z) for i_ind, zval in enumerate(z): # Solve the accretion rate and halo mass at each redshift step dMdt, Mz = acc_rate(zval, zi, Mi, **cosmo) dMdt_array[i_ind] = dMdt Mz_array[i_ind] = Mz return(dMdt_array, Mz_array)
[ "def acc_rate(z, zi, Mi, **cosmo):\n \"\"\" Calculate accretion rate and mass history of a halo at any\n redshift 'z' with mass 'Mi' at a lower redshift 'z'\n\n Parameters\n ----------\n z : float\n Redshift to solve acc_rate / mass history. Note zi<z\n zi : float\n Redshift\n Mi : float\n Halo mass at redshift 'zi'\n cosmo : dict\n Dictionary of cosmological parameters, similar in format to:\n {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,\n 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,\n 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}\n\n Returns\n -------\n (dMdt, Mz) : float\n Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'\n\n \"\"\"\n # Find parameters a_tilde and b_tilde for initial redshift\n # use Eqn 9 and 10 of Correa et al. (2015c)\n a_tilde, b_tilde = calc_ab(zi, Mi, **cosmo)\n\n # Halo mass at z, in Msol\n # use Eqn 8 in Correa et al. (2015c)\n Mz = Mi * ((1 + z - zi)**a_tilde) * (np.exp(b_tilde * (z - zi)))\n\n # Accretion rate at z, Msol yr^-1\n # use Eqn 11 from Correa et al. (2015c)\n dMdt = 71.6 * (Mz/1e12) * (cosmo['h']/0.7) *\\\n (-a_tilde / (1 + z - zi) - b_tilde) * (1 + z) *\\\n np.sqrt(cosmo['omega_M_0']*(1 + z)**3+cosmo['omega_lambda_0'])\n\n return(dMdt, Mz)", "def c_M_z(self, M, z):\n \"\"\"\n fitting function of http://moriond.in2p3.fr/J08/proceedings/duffy.pdf for the mass and redshift dependence of the concentration parameter\n\n :param M: halo mass in M_sun/h\n :type M: float or numpy array\n :param z: redshift\n :type z: float >0\n :return: concentration parameter as float\n \"\"\"\n # fitted parameter values\n A = 5.22\n B = -0.072\n C = -0.42\n M_pivot = 2.*10**12\n return A*(M/M_pivot)**B*(1+z)**C", "def _deriv_growth(z, **cosmo):\n \"\"\" Returns derivative of the linear growth factor at z\n for a given cosmology **cosmo \"\"\"\n\n inv_h = (cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])**(-0.5)\n fz = (1 + z) * inv_h**3\n\n deriv_g = growthfactor(z, norm=True, **cosmo)*(inv_h**2) *\\\n 1.5 * cosmo['omega_M_0'] * (1 + z)**2 -\\\n fz * growthfactor(z, norm=True, **cosmo)/_int_growth(z, **cosmo)\n\n return(deriv_g)", "def COM(z, M, **cosmo):\n \"\"\" Calculate concentration for halo of mass 'M' at redshift 'z'\n\n Parameters\n ----------\n z : float / numpy array\n Redshift to find concentration of halo\n M : float / numpy array\n Halo mass at redshift 'z'. Must be same size as 'z'\n cosmo : dict\n Dictionary of cosmological parameters, similar in format to:\n {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,\n 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,\n 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}\n\n Returns\n -------\n (c_array, sig_array, nu_array, zf_array) : float / numpy arrays\n of equivalent size to 'z' and 'M'. 
Variables are\n Concentration, Mass Variance 'sigma' this corresponds too,\n the dimnesionless fluctuation this represents and formation redshift\n\n \"\"\"\n # Check that z and M are arrays\n z = np.array(z, ndmin=1, dtype=float)\n M = np.array(M, ndmin=1, dtype=float)\n\n # Create array\n c_array = np.empty_like(z)\n sig_array = np.empty_like(z)\n nu_array = np.empty_like(z)\n zf_array = np.empty_like(z)\n\n for i_ind, (zval, Mval) in enumerate(_izip(z, M)):\n # Evaluate the indices at each redshift and mass combination\n # that you want a concentration for, different to MAH which\n # uses one a_tilde and b_tilde at the starting redshift only\n a_tilde, b_tilde = calc_ab(zval, Mval, **cosmo)\n\n # Minimize equation to solve for 1 unknown, 'c'\n c = scipy.optimize.brentq(_minimize_c, 2, 1000,\n args=(zval, a_tilde, b_tilde,\n cosmo['A_scaling'], cosmo['omega_M_0'],\n cosmo['omega_lambda_0']))\n\n if np.isclose(c, 0):\n print(\"Error solving for concentration with given redshift and \"\n \"(probably) too small a mass\")\n c = -1\n sig = -1\n nu = -1\n zf = -1\n else:\n # Calculate formation redshift for this concentration,\n # redshift at which the scale radius = virial radius: z_-2\n zf = formationz(c, zval, Ascaling=cosmo['A_scaling'],\n omega_M_0=cosmo['omega_M_0'],\n omega_lambda_0=cosmo['omega_lambda_0'])\n\n R_Mass = cp.perturbation.mass_to_radius(Mval, **cosmo)\n\n sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)\n nu = 1.686/(sig*growthfactor(zval, norm=True, **cosmo))\n\n c_array[i_ind] = c\n sig_array[i_ind] = sig\n nu_array[i_ind] = nu\n zf_array[i_ind] = zf\n\n return(c_array, sig_array, nu_array, zf_array)", "def zlma(series, window=20, min_periods=None, kind=\"ema\"):\n \"\"\"\n John Ehlers' Zero lag (exponential) moving average\n https://en.wikipedia.org/wiki/Zero_lag_exponential_moving_average\n \"\"\"\n min_periods = window if min_periods is None else min_periods\n\n lag = (window - 1) // 2\n series = 2 * series - series.shift(lag)\n if kind in ['ewm', 'ema']:\n return wma(series, lag, min_periods)\n elif kind == \"hma\":\n return hma(series, lag, min_periods)\n return sma(series, lag, min_periods)", "def astro_redshifts(min_z, max_z, nsamples):\n '''Sample the redshifts for sources, with redshift\n independent rate, using standard cosmology\n\n Parameters\n ----------\n min_z: float\n Minimum redshift\n max_z: float\n Maximum redshift\n nsamples: int\n Number of samples\n\n Returns\n -------\n z_astro: array\n nsamples of redshift, between min_z, max_z, by standard cosmology\n '''\n\n dz, fac = 0.001, 3.0\n # use interpolation instead of directly estimating all the pdfz for rndz\n V = quad(contracted_dVdc, 0., max_z)[0]\n zbins = np.arange(min_z, max_z + dz/2., dz)\n zcenter = (zbins[:-1] + zbins[1:]) / 2\n pdfz = cosmo.differential_comoving_volume(zcenter).value/(1+zcenter)/V\n\n int_pdf = interp1d(zcenter, pdfz, bounds_error=False, fill_value=0)\n\n rndz = np.random.uniform(min_z, max_z, int(fac*nsamples))\n pdf_zs = int_pdf(rndz)\n maxpdf = max(pdf_zs)\n rndn = np.random.uniform(0, 1, int(fac*nsamples)) * maxpdf\n diff = pdf_zs - rndn\n idx = np.where(diff > 0)\n z_astro = rndz[idx]\n\n np.random.shuffle(z_astro)\n z_astro.resize(nsamples)\n\n return z_astro", "def c_DuttonMaccio(z, m, h=h):\n \"\"\"Concentration from c(M) relation in Dutton & Maccio (2014).\n\n Parameters\n ----------\n z : float or array_like\n Redshift(s) of halos.\n m : float or array_like\n Mass(es) of halos (m200 definition), in units of solar masses.\n h : float, optional\n Hubble 
parameter. Default is from Planck13.\n\n Returns\n ----------\n ndarray\n Concentration values (c200) for halos.\n\n References\n ----------\n Calculation from Planck-based results of simulations presented in:\n\n A.A. Dutton & A.V. Maccio, \"Cold dark matter haloes in the Planck era:\n evolution of structural parameters for Einasto and NFW profiles,\"\n Monthly Notices of the Royal Astronomical Society, Volume 441, Issue 4,\n p.3359-3374, 2014.\n \"\"\"\n\n z, m = _check_inputs(z, m)\n\n a = 0.52 + 0.385 * np.exp(-0.617 * (z**1.21)) # EQ 10\n b = -0.101 + 0.026 * z # EQ 11\n\n logc200 = a + b * np.log10(m * h / (10.**12)) # EQ 7\n\n concentration = 10.**logc200\n\n return concentration", "def cosmological_quantity_from_redshift(z, quantity, strip_unit=True,\n **kwargs):\n r\"\"\"Returns the value of a cosmological quantity (e.g., age) at a redshift.\n\n Parameters\n ----------\n z : float\n The redshift.\n quantity : str\n The name of the quantity to get. The name may be any attribute of\n :py:class:`astropy.cosmology.FlatLambdaCDM`.\n strip_unit : bool, optional\n Just return the value of the quantity, sans units. Default is True.\n \\**kwargs :\n All other keyword args are passed to :py:func:`get_cosmology` to\n select a cosmology. If none provided, will use\n :py:attr:`DEFAULT_COSMOLOGY`.\n\n Returns\n -------\n float or astropy.units.quantity :\n The value of the quantity at the requested value. If ``strip_unit`` is\n ``True``, will return the value. Otherwise, will return the value with\n units.\n \"\"\"\n cosmology = get_cosmology(**kwargs)\n val = getattr(cosmology, quantity)(z)\n if strip_unit:\n val = val.value\n return val", "def cduffy(z, M, vir='200crit', relaxed=True):\n \"\"\" NFW conc from Duffy 08 Table 1 for halo mass and redshift\"\"\"\n\n if(vir == '200crit'):\n if relaxed:\n params = [6.71, -0.091, -0.44]\n else:\n params = [5.71, -0.084, -0.47]\n elif(vir == 'tophat'):\n if relaxed:\n params = [9.23, -0.090, -0.69]\n else:\n params = [7.85, -0.081, -0.71]\n elif(vir == '200mean'):\n if relaxed:\n params = [11.93, -0.090, -0.99]\n else:\n params = [10.14, -0.081, -1.01]\n else:\n print(\"Didn't recognise the halo boundary definition provided %s\"\n % (vir))\n\n return(params[0] * ((M/(2e12/0.72))**params[1]) * ((1+z)**params[2]))", "def profileMain(self, M, z):\n \"\"\"\n returns all needed parameter (in comoving units modulo h) to draw the profile of the main halo\n r200 in co-moving Mpc/h\n rho_s in h^2/Mpc^3 (co-moving)\n Rs in Mpc/h co-moving\n c unit less\n \"\"\"\n c = self.c_M_z(M, z)\n r200 = self.r200_M(M)\n rho0 = self.rho0_c(c)\n Rs = r200/c\n return r200, rho0, c, Rs", "def _int_growth(z, **cosmo):\n \"\"\" Returns integral of the linear growth factor from z=200 to z=z \"\"\"\n\n zmax = 200\n\n if hasattr(z, \"__len__\"):\n for zval in z:\n assert(zval < zmax)\n else:\n assert(z < zmax)\n\n y, yerr = scipy.integrate.quad(\n lambda z: (1 + z)/(cosmo['omega_M_0']*(1 + z)**3 +\n cosmo['omega_lambda_0'])**(1.5),\n z, zmax)\n\n return(y)", "def getcosmo(cosmology):\n \"\"\" Find cosmological parameters for named cosmo in cosmology.py list \"\"\"\n\n defaultcosmologies = {'dragons': cg.DRAGONS(), 'wmap1': cg.WMAP1_Mill(),\n 'wmap3': cg.WMAP3_ML(), 'wmap5': cg.WMAP5_mean(),\n 'wmap7': cg.WMAP7_ML(), 'wmap9': cg.WMAP9_ML(),\n 'wmap1_lss': cg.WMAP1_2dF_mean(),\n 'wmap3_mean': cg.WMAP3_mean(),\n 'wmap5_ml': cg.WMAP5_ML(),\n 'wmap5_lss': cg.WMAP5_BAO_SN_mean(),\n 'wmap7_lss': cg.WMAP7_BAO_H0_mean(),\n 'planck13': cg.Planck_2013(),\n 'planck15': cg.Planck_2015()}\n\n 
if isinstance(cosmology, dict):\n # User providing their own variables\n cosmo = cosmology\n if 'A_scaling' not in cosmology.keys():\n A_scaling = getAscaling(cosmology, newcosmo=True)\n cosmo.update({'A_scaling': A_scaling})\n\n # Add extra variables by hand that cosmolopy requires\n # note that they aren't used (set to zero)\n for paramnames in cg.WMAP5_mean().keys():\n if paramnames not in cosmology.keys():\n cosmo.update({paramnames: 0})\n elif cosmology.lower() in defaultcosmologies.keys():\n # Load by name of cosmology instead\n cosmo = defaultcosmologies[cosmology.lower()]\n A_scaling = getAscaling(cosmology)\n cosmo.update({'A_scaling': A_scaling})\n else:\n print(\"You haven't passed a dict of cosmological parameters \")\n print(\"OR a recognised cosmology, you gave %s\" % (cosmology))\n # No idea why this has to be done by hand but should be O_k = 0\n cosmo = cp.distance.set_omega_k_0(cosmo)\n\n # Use the cosmology as **cosmo passed to cosmolopy routines\n return(cosmo)" ]
[ 0.6949988007545471, 0.6794018149375916, 0.6675459742546082, 0.6660123467445374, 0.6598291397094727, 0.6488460302352905, 0.646306037902832, 0.6451810002326965, 0.6433944702148438, 0.639427661895752, 0.6363394856452942, 0.63124018907547 ]
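Because MAH evaluates a_tilde and b_tilde once at the starting redshift zi, each pass of its loop is purely elementwise; under that observation the same history can be sketched with array operations (again with hypothetical a_tilde, b_tilde):

import numpy as np

zi, Mi = 0.0, 1e12
a_tilde, b_tilde = 0.25, -0.75            # hypothetical; from calc_ab in practice
h, omega_M_0, omega_lambda_0 = 0.702, 0.275, 0.725

z = np.linspace(zi, 6.0, 25)              # output redshifts, zi <= z
Mz = Mi * (1 + z - zi)**a_tilde * np.exp(b_tilde * (z - zi))
dMdt = (71.6 * (Mz / 1e12) * (h / 0.7)
        * (-a_tilde / (1 + z - zi) - b_tilde) * (1 + z)
        * np.sqrt(omega_M_0 * (1 + z)**3 + omega_lambda_0))

The explicit loop in MAH keeps the per-step call structure identical to acc_rate, which is easier to read when the two functions sit side by side.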
Calculate concentration for halo of mass 'M' at redshift 'z'

Parameters
----------
z : float / numpy array
    Redshift to find concentration of halo
M : float / numpy array
    Halo mass at redshift 'z'. Must be same size as 'z'
cosmo : dict
    Dictionary of cosmological parameters, similar in format to:
    {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
    'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
    'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

Returns
-------
(c_array, sig_array, nu_array, zf_array) : float / numpy arrays
    of equivalent size to 'z' and 'M'. Variables are
    Concentration, the mass variance 'sigma' this corresponds to,
    the dimensionless fluctuation this represents, and formation redshift
def COM(z, M, **cosmo):
    """ Calculate concentration for halo of mass 'M' at redshift 'z'

    Parameters
    ----------
    z : float / numpy array
        Redshift to find concentration of halo
    M : float / numpy array
        Halo mass at redshift 'z'. Must be same size as 'z'
    cosmo : dict
        Dictionary of cosmological parameters, similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}

    Returns
    -------
    (c_array, sig_array, nu_array, zf_array) : float / numpy arrays
        of equivalent size to 'z' and 'M'. Variables are
        Concentration, the mass variance 'sigma' this corresponds to,
        the dimensionless fluctuation this represents, and formation redshift

    """
    # Check that z and M are arrays
    z = np.array(z, ndmin=1, dtype=float)
    M = np.array(M, ndmin=1, dtype=float)

    # Create output arrays
    c_array = np.empty_like(z)
    sig_array = np.empty_like(z)
    nu_array = np.empty_like(z)
    zf_array = np.empty_like(z)

    for i_ind, (zval, Mval) in enumerate(_izip(z, M)):
        # Evaluate the indices at each redshift and mass combination
        # that you want a concentration for, different to MAH which
        # uses one a_tilde and b_tilde at the starting redshift only
        a_tilde, b_tilde = calc_ab(zval, Mval, **cosmo)

        # Minimize equation to solve for 1 unknown, 'c'
        c = scipy.optimize.brentq(_minimize_c, 2, 1000,
                                  args=(zval, a_tilde, b_tilde,
                                        cosmo['A_scaling'], cosmo['omega_M_0'],
                                        cosmo['omega_lambda_0']))

        if np.isclose(c, 0):
            print("Error solving for concentration with given redshift and "
                  "(probably) too small a mass")
            c = -1
            sig = -1
            nu = -1
            zf = -1
        else:
            # Calculate formation redshift for this concentration,
            # redshift at which the scale radius = virial radius: z_-2
            zf = formationz(c, zval, Ascaling=cosmo['A_scaling'],
                            omega_M_0=cosmo['omega_M_0'],
                            omega_lambda_0=cosmo['omega_lambda_0'])

            R_Mass = cp.perturbation.mass_to_radius(Mval, **cosmo)

            sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)
            nu = 1.686/(sig*growthfactor(zval, norm=True, **cosmo))

        c_array[i_ind] = c
        sig_array[i_ind] = sig
        nu_array[i_ind] = nu
        zf_array[i_ind] = zf

    return(c_array, sig_array, nu_array, zf_array)
[ "def c_M_z(self, M, z):\n \"\"\"\n fitting function of http://moriond.in2p3.fr/J08/proceedings/duffy.pdf for the mass and redshift dependence of the concentration parameter\n\n :param M: halo mass in M_sun/h\n :type M: float or numpy array\n :param z: redshift\n :type z: float >0\n :return: concentration parameter as float\n \"\"\"\n # fitted parameter values\n A = 5.22\n B = -0.072\n C = -0.42\n M_pivot = 2.*10**12\n return A*(M/M_pivot)**B*(1+z)**C", "def c_DuttonMaccio(z, m, h=h):\n \"\"\"Concentration from c(M) relation in Dutton & Maccio (2014).\n\n Parameters\n ----------\n z : float or array_like\n Redshift(s) of halos.\n m : float or array_like\n Mass(es) of halos (m200 definition), in units of solar masses.\n h : float, optional\n Hubble parameter. Default is from Planck13.\n\n Returns\n ----------\n ndarray\n Concentration values (c200) for halos.\n\n References\n ----------\n Calculation from Planck-based results of simulations presented in:\n\n A.A. Dutton & A.V. Maccio, \"Cold dark matter haloes in the Planck era:\n evolution of structural parameters for Einasto and NFW profiles,\"\n Monthly Notices of the Royal Astronomical Society, Volume 441, Issue 4,\n p.3359-3374, 2014.\n \"\"\"\n\n z, m = _check_inputs(z, m)\n\n a = 0.52 + 0.385 * np.exp(-0.617 * (z**1.21)) # EQ 10\n b = -0.101 + 0.026 * z # EQ 11\n\n logc200 = a + b * np.log10(m * h / (10.**12)) # EQ 7\n\n concentration = 10.**logc200\n\n return concentration", "def _minimize_c(c, z=0, a_tilde=1, b_tilde=-1,\n Ascaling=900, omega_M_0=0.25, omega_lambda_0=0.75):\n \"\"\" Trial function to solve 2 eqns (17 and 18) from Correa et al. (2015c)\n for 1 unknown, i.e. concentration, returned by a minimisation call \"\"\"\n\n # Fn 1 (LHS of Eqn 18)\n\n Y1 = np.log(2) - 0.5\n Yc = np.log(1+c) - c/(1+c)\n f1 = Y1/Yc\n\n # Fn 2 (RHS of Eqn 18)\n\n # Eqn 14 - Define the mean inner density\n rho_2 = 200 * c**3 * Y1 / Yc\n\n # Eqn 17 rearranged to solve for Formation Redshift\n # essentially when universe had rho_2 density\n zf = (((1 + z)**3 + omega_lambda_0/omega_M_0) *\n (rho_2/Ascaling) - omega_lambda_0/omega_M_0)**(1/3) - 1\n\n # RHS of Eqn 19\n f2 = ((1 + zf - z)**a_tilde) * np.exp((zf - z) * b_tilde)\n\n # LHS - RHS should be zero for the correct concentration\n return(f1-f2)", "def c_Prada(z, m, h=h, Om_M=Om_M, Om_L=Om_L):\n \"\"\"Concentration from c(M) relation published in Prada et al. (2012).\n\n Parameters\n ----------\n z : float or array_like\n Redshift(s) of halos.\n m : float or array_like\n Mass(es) of halos (m200 definition), in units of solar masses.\n h : float, optional\n Hubble parameter. Default is from Planck13.\n Om_M : float, optional\n Matter density parameter. Default is from Planck13.\n Om_L : float, optional\n Cosmological constant density parameter. Default is from Planck13.\n\n Returns\n ----------\n ndarray\n Concentration values (c200) for halos.\n\n Notes\n ----------\n This c(M) relation is somewhat controversial, due to its upturn in\n concentration for high masses (normally we expect concentration to\n decrease with increasing mass). See the reference below for discussion.\n\n References\n ----------\n Calculation based on results of N-body simulations presented in:\n\n F. Prada, A.A. Klypin, A.J. Cuesta, J.E. Betancort-Rijo, and J.\n Primack, \"Halo concentrations in the standard Lambda cold dark matter\n cosmology,\" Monthly Notices of the Royal Astronomical Society, Volume\n 423, Issue 4, pp. 3018-3030, 2012.\n \"\"\"\n\n z, m = _check_inputs(z, m)\n\n # EQ 13\n x = (1. / (1. 
+ z)) * (Om_L / Om_M)**(1. / 3.)\n\n # EQ 12\n intEQ12 = np.zeros(len(x)) # integral\n for i in range(len(x)):\n # v is integration variable\n temp = integrate.quad(lambda v: (v / (1 + v**3.))**(1.5), 0, x[i])\n intEQ12[i] = temp[0]\n\n Da = 2.5 * ((Om_M / Om_L)**(1. / 3.)) * (np.sqrt(1. + x**3.) /\n (x**(1.5))) * intEQ12\n\n # EQ 23\n y = (1.e+12) / (h * m)\n sigma = Da * (16.9 * y**0.41) / (1. + (1.102 * y**0.2) + (6.22 * y**0.333))\n\n # EQ 21 & 22 (constants)\n c0 = 3.681\n c1 = 5.033\n alpha = 6.948\n x0 = 0.424\n s0 = 1.047 # sigma_0^-1\n s1 = 1.646 # sigma_1^-1\n beta = 7.386\n x1 = 0.526\n\n # EQ 19 & 20\n cmin = c0 + (c1 - c0) * ((1. / np.pi) * np.arctan(alpha * (x - x0)) + 0.5)\n smin = s0 + (s1 - s0) * ((1. / np.pi) * np.arctan(beta * (x - x1)) + 0.5)\n\n # EQ 18\n cmin1393 = c0 + (c1 - c0) * ((1. / np.pi) * np.arctan(alpha *\n (1.393 - x0)) + 0.5)\n smin1393 = s0 + (s1 - s0) * ((1. / np.pi) * np.arctan(beta *\n (1.393 - x1)) + 0.5)\n B0 = cmin / cmin1393\n B1 = smin / smin1393\n\n # EQ 15\n sigma_prime = B1 * sigma\n\n # EQ 17\n A = 2.881\n b = 1.257\n c = 1.022\n d = 0.06\n\n # EQ 16\n Cs = A * ((sigma_prime / b)**c + 1.) * np.exp(d / (sigma_prime**2.))\n\n # EQ 14\n concentration = B0 * Cs\n\n return concentration", "def c_Duffy(z, m, h=h):\n \"\"\"Concentration from c(M) relation published in Duffy et al. (2008).\n\n Parameters\n ----------\n z : float or array_like\n Redshift(s) of halos.\n m : float or array_like\n Mass(es) of halos (m200 definition), in units of solar masses.\n h : float, optional\n Hubble parameter. Default is from Planck13.\n\n Returns\n ----------\n ndarray\n Concentration values (c200) for halos.\n\n References\n ----------\n Results from N-body simulations using WMAP5 cosmology, presented in:\n\n A.R. Duffy, J. Schaye, S.T. Kay, and C. Dalla Vecchia, \"Dark matter\n halo concentrations in the Wilkinson Microwave Anisotropy Probe year 5\n cosmology,\" Monthly Notices of the Royal Astronomical Society, Volume\n 390, Issue 1, pp. L64-L68, 2008.\n\n This calculation uses the parameters corresponding to the NFW model,\n the '200' halo definition, and the 'full' sample of halos spanning\n z = 0-2. This means the values of fitted parameters (A,B,C) = (5.71,\n -0.084,-0.47) in Table 1 of Duffy et al. 
(2008).\n \"\"\"\n\n z, m = _check_inputs(z, m)\n\n M_pivot = 2.e12 / h # [M_solar]\n\n A = 5.71\n B = -0.084\n C = -0.47\n\n concentration = A * ((m / M_pivot)**B) * (1 + z)**C\n\n return concentration", "def cduffy(z, M, vir='200crit', relaxed=True):\n \"\"\" NFW conc from Duffy 08 Table 1 for halo mass and redshift\"\"\"\n\n if(vir == '200crit'):\n if relaxed:\n params = [6.71, -0.091, -0.44]\n else:\n params = [5.71, -0.084, -0.47]\n elif(vir == 'tophat'):\n if relaxed:\n params = [9.23, -0.090, -0.69]\n else:\n params = [7.85, -0.081, -0.71]\n elif(vir == '200mean'):\n if relaxed:\n params = [11.93, -0.090, -0.99]\n else:\n params = [10.14, -0.081, -1.01]\n else:\n print(\"Didn't recognise the halo boundary definition provided %s\"\n % (vir))\n\n return(params[0] * ((M/(2e12/0.72))**params[1]) * ((1+z)**params[2]))", "def _delta_sigma(**cosmo):\n \"\"\" Perturb best-fit constant of proportionality Ascaling for\n rho_crit - rho_2 relation for unknown cosmology (Correa et al 2015c)\n\n Parameters\n ----------\n cosmo : dict\n Dictionary of cosmological parameters, similar in format to:\n {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,\n 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,\n 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}\n\n Returns\n -------\n float\n The perturbed 'A' relation between rho_2 and rho_crit for the cosmology\n\n Raises\n ------\n\n \"\"\"\n\n M8_cosmo = cp.perturbation.radius_to_mass(8, **cosmo)\n perturbed_A = (0.796/cosmo['sigma_8']) * \\\n (M8_cosmo/2.5e14)**((cosmo['n']-0.963)/6)\n return(perturbed_A)", "def MAH(z, zi, Mi, **cosmo):\n \"\"\" Calculate mass accretion history by looping function acc_rate\n over redshift steps 'z' for halo of mass 'Mi' at redshift 'zi'\n\n Parameters\n ----------\n z : float / numpy array\n Redshift to output MAH over. 
Note zi<z always\n zi : float\n Redshift\n Mi : float\n Halo mass at redshift 'zi'\n cosmo : dict\n Dictionary of cosmological parameters, similar in format to:\n {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,\n 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,\n 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}\n\n Returns\n -------\n (dMdt, Mz) : float / numpy arrays of equivalent size to 'z'\n Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'\n\n \"\"\"\n\n # Ensure that z is a 1D NumPy array\n z = np.array(z, ndmin=1, dtype=float)\n\n # Create a full array\n dMdt_array = np.empty_like(z)\n Mz_array = np.empty_like(z)\n\n for i_ind, zval in enumerate(z):\n # Solve the accretion rate and halo mass at each redshift step\n dMdt, Mz = acc_rate(zval, zi, Mi, **cosmo)\n\n dMdt_array[i_ind] = dMdt\n Mz_array[i_ind] = Mz\n\n return(dMdt_array, Mz_array)", "def astro_redshifts(min_z, max_z, nsamples):\n '''Sample the redshifts for sources, with redshift\n independent rate, using standard cosmology\n\n Parameters\n ----------\n min_z: float\n Minimum redshift\n max_z: float\n Maximum redshift\n nsamples: int\n Number of samples\n\n Returns\n -------\n z_astro: array\n nsamples of redshift, between min_z, max_z, by standard cosmology\n '''\n\n dz, fac = 0.001, 3.0\n # use interpolation instead of directly estimating all the pdfz for rndz\n V = quad(contracted_dVdc, 0., max_z)[0]\n zbins = np.arange(min_z, max_z + dz/2., dz)\n zcenter = (zbins[:-1] + zbins[1:]) / 2\n pdfz = cosmo.differential_comoving_volume(zcenter).value/(1+zcenter)/V\n\n int_pdf = interp1d(zcenter, pdfz, bounds_error=False, fill_value=0)\n\n rndz = np.random.uniform(min_z, max_z, int(fac*nsamples))\n pdf_zs = int_pdf(rndz)\n maxpdf = max(pdf_zs)\n rndn = np.random.uniform(0, 1, int(fac*nsamples)) * maxpdf\n diff = pdf_zs - rndn\n idx = np.where(diff > 0)\n z_astro = rndz[idx]\n\n np.random.shuffle(z_astro)\n z_astro.resize(nsamples)\n\n return z_astro", "def cosmological_quantity_from_redshift(z, quantity, strip_unit=True,\n **kwargs):\n r\"\"\"Returns the value of a cosmological quantity (e.g., age) at a redshift.\n\n Parameters\n ----------\n z : float\n The redshift.\n quantity : str\n The name of the quantity to get. The name may be any attribute of\n :py:class:`astropy.cosmology.FlatLambdaCDM`.\n strip_unit : bool, optional\n Just return the value of the quantity, sans units. Default is True.\n \\**kwargs :\n All other keyword args are passed to :py:func:`get_cosmology` to\n select a cosmology. If none provided, will use\n :py:attr:`DEFAULT_COSMOLOGY`.\n\n Returns\n -------\n float or astropy.units.quantity :\n The value of the quantity at the requested value. If ``strip_unit`` is\n ``True``, will return the value. 
Otherwise, will return the value with\n units.\n \"\"\"\n cosmology = get_cosmology(**kwargs)\n val = getattr(cosmology, quantity)(z)\n if strip_unit:\n val = val.value\n return val", "def _deriv_growth(z, **cosmo):\n \"\"\" Returns derivative of the linear growth factor at z\n for a given cosmology **cosmo \"\"\"\n\n inv_h = (cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])**(-0.5)\n fz = (1 + z) * inv_h**3\n\n deriv_g = growthfactor(z, norm=True, **cosmo)*(inv_h**2) *\\\n 1.5 * cosmo['omega_M_0'] * (1 + z)**2 -\\\n fz * growthfactor(z, norm=True, **cosmo)/_int_growth(z, **cosmo)\n\n return(deriv_g)", "def profileMain(self, M, z):\n \"\"\"\n returns all needed parameter (in comoving units modulo h) to draw the profile of the main halo\n r200 in co-moving Mpc/h\n rho_s in h^2/Mpc^3 (co-moving)\n Rs in Mpc/h co-moving\n c unit less\n \"\"\"\n c = self.c_M_z(M, z)\n r200 = self.r200_M(M)\n rho0 = self.rho0_c(c)\n Rs = r200/c\n return r200, rho0, c, Rs" ]
[ 0.8011312484741211, 0.7321945428848267, 0.7050241827964783, 0.6927163600921631, 0.6824684739112854, 0.681324303150177, 0.6761322617530823, 0.669061541557312, 0.6628347039222717, 0.6624162793159485, 0.6606134176254272, 0.6575406193733215 ]
Run commah code on halo of mass 'Mi' at redshift 'zi' with accretion and profile history at higher redshifts 'z' This is based on Correa et al. (2015a,b,c) Parameters ---------- cosmology : str or dict Can be named cosmology, default WMAP7 (aka DRAGONS), or DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15 or dictionary similar in format to: {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275, 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0, 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6} zi : float / numpy array, optional Redshift at which halo has mass 'Mi'. If float then all halo masses 'Mi' are assumed to be at this redshift. If array but Mi is float, then this halo mass is used across all starting redshifts. If both Mi and zi are arrays then they have to be the same size for one - to - one correspondence between halo mass and the redshift at which it has that mass. Default is 0. Mi : float / numpy array, optional Halo mass 'Mi' at a redshift 'zi'. If float then all redshifts 'zi' are solved for this halo mass. If array but zi is float, then this redshift is applied to all halo masses. If both Mi and zi are arrays then they have to be the same size for one - to - one correspondence between halo mass and the redshift at which it has that mass. Default is 1e12 Msol. z : float / numpy array, optional Redshift to solve commah code at. Must have zi<z else these steps are skipped. Default is False, meaning commah is solved at z=zi com : bool, optional If true then solve for concentration-mass, default is True. mah : bool, optional If true then solve for accretion rate and halo mass history, default is True. filename : bool / str, optional If str is passed this is used as a filename for output of commah verbose : bool, optional If true then give comments, default is None. retcosmo : bool, optional Return cosmological parameters used as a dict if retcosmo = True, default is None. Returns ------- dataset : structured dataset dataset contains structured columns of size (size(Mi) > size(z)) by size(z) If mah = True and com = False then columns are ('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float) where 'zi' is the starting redshift, 'Mi' is halo mass at zi 'z' is output redshift (NB z>zi), 'dMdt' is accretion rate [Msol/yr] and 'Mz' is the halo mass at 'z' for a halo which was 'Mi' massive at starting redshift 'zi' If mah = False and com = True then columns are ('zi',float),('Mi',float),('z',float),('c',float),('sig',float),('nu',float),('zf',float) where 'zi' is the starting redshift, 'Mi' is halo mass at zi 'z' is output redshift (NB z>zi), 'c' is NFW concentration of halo at the redshift 'z', 'sig' is the mass variance 'sigma', 'nu' is the dimensionless fluctuation for halo mass 'Mi' at 'zi', 'zf' is the formation redshift for a halo of mass 'Mi' at redshift 'zi' If mah = True and com = True then columns are: ('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float), ('c',float),('sig',float),('nu',float),('zf',float) file : structured dataset with name 'filename' if passed Raises ------ Output -1 If com = False and mah = False as user has to select something. Output -1 If 'zi' and 'Mi' are arrays of unequal size. Impossible to match corresponding masses and redshifts of output. Examples -------- Examples should be written in doctest format, and should illustrate how to use the function. 
>>> import examples >>> examples.runcommands() # A series of ways to query structured dataset >>> examples.plotcommands() # Examples to plot data
def run(cosmology, zi=0, Mi=1e12, z=False, com=True, mah=True, filename=None, verbose=None, retcosmo=None): """ Run commah code on halo of mass 'Mi' at redshift 'zi' with accretion and profile history at higher redshifts 'z' This is based on Correa et al. (2015a,b,c) Parameters ---------- cosmology : str or dict Can be named cosmology, default WMAP7 (aka DRAGONS), or DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15 or dictionary similar in format to: {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275, 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0, 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6} zi : float / numpy array, optional Redshift at which halo has mass 'Mi'. If float then all halo masses 'Mi' are assumed to be at this redshift. If array but Mi is float, then this halo mass is used across all starting redshifts. If both Mi and zi are arrays then they have to be the same size for one - to - one correspondence between halo mass and the redshift at which it has that mass. Default is 0. Mi : float / numpy array, optional Halo mass 'Mi' at a redshift 'zi'. If float then all redshifts 'zi' are solved for this halo mass. If array but zi is float, then this redshift is applied to all halo masses. If both Mi and zi are arrays then they have to be the same size for one - to - one correspondence between halo mass and the redshift at which it has that mass. Default is 1e12 Msol. z : float / numpy array, optional Redshift to solve commah code at. Must have zi<z else these steps are skipped. Default is False, meaning commah is solved at z=zi com : bool, optional If true then solve for concentration-mass, default is True. mah : bool, optional If true then solve for accretion rate and halo mass history, default is True. filename : bool / str, optional If str is passed this is used as a filename for output of commah verbose : bool, optional If true then give comments, default is None. retcosmo : bool, optional Return cosmological parameters used as a dict if retcosmo = True, default is None. Returns ------- dataset : structured dataset dataset contains structured columns of size (size(Mi) > size(z)) by size(z) If mah = True and com = False then columns are ('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float) where 'zi' is the starting redshift, 'Mi' is halo mass at zi 'z' is output redshift (NB z>zi), 'dMdt' is accretion rate [Msol/yr] and 'Mz' is the halo mass at 'z' for a halo which was 'Mi' massive at starting redshift 'zi' If mah = False and com = True then columns are ('zi',float),('Mi',float),('z',float),('c',float),('sig',float),('nu',float),('zf',float) where 'zi' is the starting redshift, 'Mi' is halo mass at zi 'z' is output redshift (NB z>zi), 'c' is NFW concentration of halo at the redshift 'z', 'sig' is the mass variance 'sigma', 'nu' is the dimensionless fluctuation for halo mass 'Mi' at 'zi', 'zf' is the formation redshift for a halo of mass 'Mi' at redshift 'zi' If mah = True and com = True then columns are: ('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float), ('c',float),('sig',float),('nu',float),('zf',float) file : structured dataset with name 'filename' if passed Raises ------ Output -1 If com = False and mah = False as user has to select something. Output -1 If 'zi' and 'Mi' are arrays of unequal size. Impossible to match corresponding masses and redshifts of output. Examples -------- Examples should be written in doctest format, and should illustrate how to use the function. 
>>> import examples >>> examples.runcommands() # A series of ways to query structured dataset >>> examples.plotcommands() # Examples to plot data """ # Check user choices... if not com and not mah: print("User has to choose com=True and / or mah=True ") return(-1) # Convert arrays / lists to np.array # and inflate redshift / mass axis # to match each other for later loop results = _checkinput(zi, Mi, z=z, verbose=verbose) # Return if results is -1 if(results == -1): return(-1) # If not, unpack the returned iterable else: zi, Mi, z, lenz, lenm, lenzout = results # At this point we will have lenm objects to iterate over # Get the cosmological parameters for the given cosmology cosmo = getcosmo(cosmology) # Create output file if desired if filename: print("Output to file %r" % (filename)) fout = open(filename, 'wb') # Create the structured dataset try: if mah and com: if verbose: print("Output requested is zi, Mi, z, dMdt, Mz, c, sig, nu, " "zf") if filename: fout.write(_getcosmoheader(cosmo)+'\n') fout.write("# Initial z - Initial Halo - Output z - " " Accretion - Final Halo - concentration - " " Mass - Peak - Formation z "+'\n') fout.write("# - mass - -" " rate - mass - - " " Variance - Height - "+'\n') fout.write("# - (M200) - - " " (dM/dt) - (M200) - - " " (sigma) - (nu) - "+'\n') fout.write("# - [Msol] - - " " [Msol/yr] - [Msol] - - " " - - "+'\n') dataset = np.zeros((lenm, lenzout), dtype=[('zi', float), ('Mi', float), ('z', float), ('dMdt', float), ('Mz', float), ('c', float), ('sig', float), ('nu', float), ('zf', float)]) elif mah: if verbose: print("Output requested is zi, Mi, z, dMdt, Mz") if filename: fout.write(_getcosmoheader(cosmo)+'\n') fout.write("# Initial z - Initial Halo - Output z -" " Accretion - Final Halo "+'\n') fout.write("# - mass - -" " rate - mass "+'\n') fout.write("# - (M200) - -" " (dm/dt) - (M200) "+'\n') fout.write("# - [Msol] - -" " [Msol/yr] - [Msol] "+'\n') dataset = np.zeros((lenm, lenzout), dtype=[('zi', float), ('Mi', float), ('z', float), ('dMdt', float), ('Mz', float)]) else: if verbose: print("Output requested is zi, Mi, z, c, sig, nu, zf") if filename: fout.write(_getcosmoheader(cosmo)+'\n') fout.write("# Initial z - Initial Halo - Output z - " " concentration - " " Mass - Peak - Formation z "+'\n') fout.write("# - mass - -" " -" " Variance - Height - "+'\n') fout.write("# - (M200) - - " " - " " (sigma) - (nu) - "+'\n') fout.write("# - [Msol] - - " " - " " - - "+'\n') dataset = np.zeros((lenm, lenzout), dtype=[('zi', float), ('Mi', float), ('z', float), ('c', float), ('sig', float), ('nu', float), ('zf', float)]) # Now loop over the combination of initial redshift and halo mamss for i_ind, (zval, Mval) in enumerate(_izip(zi, Mi)): if verbose: print("Output Halo of Mass Mi=%s at zi=%s" % (Mval, zval)) # For a given halo mass Mi at redshift zi need to know # output redshifts 'z' # Check that all requested redshifts are greater than # input redshift, except if z is False, in which case # only solve z at zi, i.e. 
remove a loop if z is False: ztemp = np.array(zval, ndmin=1, dtype=float) else: ztemp = np.array(z[z >= zval], dtype=float) # Loop over the output redshifts if ztemp.size: # Return accretion rates and halo mass progenitors at # redshifts 'z' for object of mass Mi at zi dMdt, Mz = MAH(ztemp, zval, Mval, **cosmo) if mah and com: # More expensive to return concentrations c, sig, nu, zf = COM(ztemp, Mz, **cosmo) # Save all arrays for j_ind, j_val in enumerate(ztemp): dataset[i_ind, j_ind] =\ (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind], c[j_ind], sig[j_ind], nu[j_ind], zf[j_ind]) if filename: fout.write( "{}, {}, {}, {}, {}, {}, {}, {}, {} \n".format( zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind], c[j_ind], sig[j_ind], nu[j_ind], zf[j_ind])) elif mah: # Save only MAH arrays for j_ind, j_val in enumerate(ztemp): dataset[i_ind, j_ind] =\ (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind]) if filename: fout.write("{}, {}, {}, {}, {} \n".format( zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind])) else: # Output only COM arrays c, sig, nu, zf = COM(ztemp, Mz, **cosmo) # For any halo mass Mi at redshift zi # solve for c, sig, nu and zf for j_ind, j_val in enumerate(ztemp): dataset[i_ind, j_ind] =\ (zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind], nu[j_ind], zf[j_ind]) if filename: fout.write("{}, {}, {}, {}, {}, {}, {} \n".format( zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind], nu[j_ind], zf[j_ind])) # Make sure to close the file if it was opened finally: fout.close() if filename else None if retcosmo: return(dataset, cosmo) else: return(dataset)
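A brief usage sketch for the run() interface above, assembled from the calls demonstrated in the example functions listed below; it assumes the commah package is importable and behaves as documented, and the specific masses and redshifts are purely illustrative.

import commah  # assumes the package containing run() above is installed

# Concentration-mass relation for three halo masses at z = 0,
# using one of the accepted named cosmologies.
output = commah.run(cosmology='WMAP5', zi=0, Mi=[1e8, 1e9, 1e10])
print(output['c'].flatten())      # NFW concentrations, one per input mass

# Mass accretion history only (mah=True, com=False) for a Milky-Way-like
# halo, evaluated at several output redshifts z >= zi.
output = commah.run(cosmology='WMAP5', zi=0, Mi=2e12,
                    z=[0, 0.5, 1.0, 1.5, 2.0], com=False, mah=True)
print(output['dMdt'].flatten())   # accretion rates [Msol/yr]
print(output['Mz'].flatten())     # progenitor halo masses [Msol]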
[ "def runcommand(cosmology='WMAP5'):\n \"\"\" Example interface commands \"\"\"\n\n # Return the WMAP5 cosmology concentration predicted for\n # z=0 range of masses\n Mi = [1e8, 1e9, 1e10]\n zi = 0\n print(\"Concentrations for haloes of mass %s at z=%s\" % (Mi, zi))\n output = commah.run(cosmology=cosmology, zi=zi, Mi=Mi)\n\n print(output['c'].flatten())\n\n # Return the WMAP5 cosmology concentration predicted for\n # z=0 range of masses AND cosmological parameters\n Mi = [1e8, 1e9, 1e10]\n zi = 0\n print(\"Concentrations for haloes of mass %s at z=%s\" % (Mi, zi))\n output, cosmo = commah.run(cosmology=cosmology, zi=zi, Mi=Mi,\n retcosmo=True)\n\n print(output['c'].flatten())\n print(cosmo)\n\n # Return the WMAP5 cosmology concentration predicted for MW\n # mass (2e12 Msol) across redshift\n Mi = 2e12\n z = [0, 0.5, 1, 1.5, 2, 2.5]\n output = commah.run(cosmology=cosmology, zi=0, Mi=Mi, z=z)\n for zval in z:\n print(\"M(z=0)=%s has c(z=%s)=%s\"\n % (Mi, zval, output[output['z'] == zval]['c'].flatten()))\n\n # Return the WMAP5 cosmology concentration predicted for MW\n # mass (2e12 Msol) across redshift\n Mi = 2e12\n zi = [0, 0.5, 1, 1.5, 2, 2.5]\n output = commah.run(cosmology=cosmology, zi=zi, Mi=Mi)\n for zval in zi:\n print(\"M(z=%s)=%s has concentration %s\"\n % (zval, Mi, output[(output['zi'] == zval) &\n (output['z'] == zval)]['c'].flatten()))\n\n # Return the WMAP5 cosmology concentration and\n # rarity of high-z cluster\n Mi = 2e14\n zi = 6\n output = commah.run(cosmology=cosmology, zi=zi, Mi=Mi)\n print(\"Concentrations for haloes of mass %s at z=%s\" % (Mi, zi))\n print(output['c'].flatten())\n print(\"Mass variance sigma of haloes of mass %s at z=%s\" % (Mi, zi))\n print(output['sig'].flatten())\n print(\"Fluctuation for haloes of mass %s at z=%s\" % (Mi, zi))\n print(output['nu'].flatten())\n\n # Return the WMAP5 cosmology accretion rate prediction\n # for haloes at range of redshift and mass\n Mi = [1e8, 1e9, 1e10]\n zi = [0]\n z = [0, 0.5, 1, 1.5, 2, 2.5]\n output = commah.run(cosmology=cosmology, zi=zi, Mi=Mi, z=z)\n for Mval in Mi:\n print(\"dM/dt for halo of mass %s at z=%s across redshift %s is: \"\n % (Mval, zi, z))\n print(output[output['Mi'] == Mval]['dMdt'].flatten())\n\n # Return the WMAP5 cosmology Halo Mass History for haloes with M(z=0) = 1e8\n M = [1e8]\n z = [0, 0.5, 1, 1.5, 2, 2.5]\n print(\"Halo Mass History for z=0 mass of %s across z=%s\" % (M, z))\n output = commah.run(cosmology=cosmology, zi=0, Mi=M, z=z)\n print(output['Mz'].flatten())\n\n # Return the WMAP5 cosmology formation redshifts for haloes at\n # range of redshift and mass\n M = [1e8, 1e9, 1e10]\n z = [0]\n print(\"Formation Redshifts for haloes of mass %s at z=%s\" % (M, z))\n output = commah.run(cosmology=cosmology, zi=0, Mi=M, z=z)\n for Mval in M:\n print(output[output['Mi'] == Mval]['zf'].flatten())\n\n return(\"Done\")", "def getcosmo(cosmology):\n \"\"\" Find cosmological parameters for named cosmo in cosmology.py list \"\"\"\n\n defaultcosmologies = {'dragons': cg.DRAGONS(), 'wmap1': cg.WMAP1_Mill(),\n 'wmap3': cg.WMAP3_ML(), 'wmap5': cg.WMAP5_mean(),\n 'wmap7': cg.WMAP7_ML(), 'wmap9': cg.WMAP9_ML(),\n 'wmap1_lss': cg.WMAP1_2dF_mean(),\n 'wmap3_mean': cg.WMAP3_mean(),\n 'wmap5_ml': cg.WMAP5_ML(),\n 'wmap5_lss': cg.WMAP5_BAO_SN_mean(),\n 'wmap7_lss': cg.WMAP7_BAO_H0_mean(),\n 'planck13': cg.Planck_2013(),\n 'planck15': cg.Planck_2015()}\n\n if isinstance(cosmology, dict):\n # User providing their own variables\n cosmo = cosmology\n if 'A_scaling' not in cosmology.keys():\n A_scaling = 
getAscaling(cosmology, newcosmo=True)\n cosmo.update({'A_scaling': A_scaling})\n\n # Add extra variables by hand that cosmolopy requires\n # note that they aren't used (set to zero)\n for paramnames in cg.WMAP5_mean().keys():\n if paramnames not in cosmology.keys():\n cosmo.update({paramnames: 0})\n elif cosmology.lower() in defaultcosmologies.keys():\n # Load by name of cosmology instead\n cosmo = defaultcosmologies[cosmology.lower()]\n A_scaling = getAscaling(cosmology)\n cosmo.update({'A_scaling': A_scaling})\n else:\n print(\"You haven't passed a dict of cosmological parameters \")\n print(\"OR a recognised cosmology, you gave %s\" % (cosmology))\n # No idea why this has to be done by hand but should be O_k = 0\n cosmo = cp.distance.set_omega_k_0(cosmo)\n\n # Use the cosmology as **cosmo passed to cosmolopy routines\n return(cosmo)", "def plotcommand(cosmology='WMAP5', plotname=None):\n \"\"\" Example ways to interrogate the dataset and plot the commah output \"\"\"\n\n # Plot the c-M relation as a functon of redshift\n xarray = 10**(np.arange(1, 15, 0.2))\n yval = 'c'\n\n # Specify the redshift range\n zarray = np.arange(0, 5, 0.5)\n\n xtitle = r\"Halo Mass (M$_{sol}$)\"\n ytitle = r\"Concentration\"\n linelabel = \"z=\"\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_xlabel(xtitle)\n ax.set_ylabel(ytitle)\n plt.ylim([2, 30])\n\n colors = cm.rainbow(np.linspace(0, 1, len(zarray)))\n\n for zind, zval in enumerate(zarray):\n output = commah.run(cosmology=cosmology, zi=zval, Mi=xarray)\n\n # Access the column yval from the data file\n yarray = output[yval].flatten()\n\n # Plot each line in turn with different colour\n ax.plot(xarray, yarray, label=linelabel+str(zval), color=colors[zind])\n # Overplot the D08 predictions in black\n ax.plot(xarray, commah.commah.cduffy(zval, xarray), color=\"black\")\n\n ax.set_xscale('log')\n ax.set_yscale('log')\n\n leg = ax.legend(loc=1)\n # Make box totally transparent\n leg.get_frame().set_alpha(0)\n leg.get_frame().set_edgecolor('white')\n for label in leg.get_texts():\n label.set_fontsize('small') # the font size\n for label in leg.get_lines():\n label.set_linewidth(4) # the legend line width\n\n if plotname:\n fig.tight_layout(pad=0.2)\n print(\"Plotting to '%s_CM_relation.png'\" % (plotname))\n fig.savefig(plotname+\"_CM_relation.png\", dpi=fig.dpi*5)\n else:\n plt.show()\n\n # Plot the c-z relation as a function of mass (so always Mz=M0)\n xarray = 10**(np.arange(0, 1, 0.05)) - 1\n yval = 'c'\n\n # Specify the mass range\n zarray = 10**np.arange(6, 14, 2)\n\n xtitle = r\"Redshift\"\n ytitle = r\"NFW Concentration\"\n linelabel = r\"log$_{10}$ M$_{z}$(M$_{sol}$)=\"\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_xlabel(xtitle)\n ax.set_ylabel(ytitle)\n colors = cm.rainbow(np.linspace(0, 1, len(zarray)))\n\n for zind, zval in enumerate(zarray):\n output = commah.run(cosmology=cosmology, zi=xarray, Mi=zval)\n\n # Access the column yval from the data file\n yarray = output[yval].flatten()\n\n # Plot each line in turn with different colours\n ax.plot(xarray, yarray,\n label=linelabel+\"{0:.1f}\".format(np.log10(zval)),\n color=colors[zind],)\n\n leg = ax.legend(loc=1)\n # Make box totally transparent\n leg.get_frame().set_alpha(0)\n leg.get_frame().set_edgecolor('white')\n for label in leg.get_texts():\n label.set_fontsize('small') # the font size\n for label in leg.get_lines():\n label.set_linewidth(4) # the legend line width\n\n if plotname:\n fig.tight_layout(pad=0.2)\n print(\"Plotting to '%s_Cz_relation.png'\" % 
(plotname))\n fig.savefig(plotname+\"_Cz_relation.png\", dpi=fig.dpi*5)\n else:\n plt.show()\n\n # Plot the zf-z relation for different masses (so always Mz=M0)\n xarray = 10**(np.arange(0, 1, 0.05)) - 1\n yval = 'zf'\n\n # Specify the mass range\n zarray = 10**np.arange(6, 14, 2)\n\n xtitle = r\"Redshift\"\n ytitle = r\"Formation Redshift\"\n linelabel = r\"log$_{10}$ M$_{z}$(M$_{sol}$)=\"\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_xlabel(xtitle)\n ax.set_ylabel(ytitle)\n colors = cm.rainbow(np.linspace(0, 1, len(zarray)))\n\n for zind, zval in enumerate(zarray):\n output = commah.run(cosmology=cosmology, zi=xarray, Mi=zval)\n\n yarray = output[yval].flatten()\n\n # Plot each line in turn with different colour\n ax.plot(xarray, yarray,\n label=linelabel+\"{0:.1f}\".format(np.log10(zval)),\n color=colors[zind],)\n\n leg = ax.legend(loc=2)\n # Make box totally transparent\n leg.get_frame().set_alpha(0)\n leg.get_frame().set_edgecolor('white')\n for label in leg.get_texts():\n label.set_fontsize('small') # the font size\n for label in leg.get_lines():\n label.set_linewidth(4) # the legend line width\n\n if plotname:\n fig.tight_layout(pad=0.2)\n print(\"Plotting to '%s_zfz_relation.png'\" % (plotname))\n fig.savefig(plotname+\"_zfz_relation.png\", dpi=fig.dpi*5)\n else:\n plt.show()\n\n # Plot the dM/dt-z relation for different masses (so always Mz=M0)\n xarray = 10**(np.arange(0, 1, 0.05)) - 1\n yval = 'dMdt'\n\n # Specify the mass range\n zarray = 10**np.arange(10, 14, 0.5)\n\n xtitle = r\"log$_{10}$ (1+z)\"\n ytitle = r\"log$_{10}$ Accretion Rate M$_{sol}$ yr$^{-1}$\"\n linelabel = r\"log$_{10}$ M$_z$(M$_{sol}$)=\"\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_xlabel(xtitle)\n ax.set_ylabel(ytitle)\n colors = cm.rainbow(np.linspace(0, 1, len(zarray)))\n\n cosmo = commah.getcosmo(cosmology)\n for zind, zval in enumerate(zarray):\n output = commah.run(cosmology=cosmology, zi=xarray, Mi=zval,\n com=False, mah=True)\n\n yarray = output[yval].flatten()\n\n # Plot each line in turn with different colour\n ax.plot(np.log10(xarray+1.), np.log10(yarray),\n label=linelabel+\"{0:.1f}\".format(np.log10(zval)),\n color=colors[zind],)\n\n # Plot the semi-analytic approximate formula from Correa et al 2015b\n semianalytic_approx = 71.6 * (zval / 1e12) * (cosmo['h'] / 0.7) *\\\n (-0.24 + 0.75 * (xarray + 1)) * np.sqrt(\n cosmo['omega_M_0'] * (xarray + 1)**3 + cosmo['omega_lambda_0'])\n\n ax.plot(np.log10(xarray + 1), np.log10(semianalytic_approx),\n color='black')\n\n leg = ax.legend(loc=2)\n # Make box totally transparent\n leg.get_frame().set_alpha(0)\n leg.get_frame().set_edgecolor('white')\n for label in leg.get_texts():\n label.set_fontsize('small') # the font size\n for label in leg.get_lines():\n label.set_linewidth(4) # the legend line width\n\n if plotname:\n fig.tight_layout(pad=0.2)\n print(\"Plotting to '%s_dMdtz_relation.png'\" % (plotname))\n fig.savefig(plotname+\"_dMdtz_relation.png\", dpi=fig.dpi*5)\n else:\n plt.show()\n\n # Plot the dMdt-M relation as a function of redshift\n xarray = 10**(np.arange(10, 14, 0.5))\n yval = 'dMdt'\n\n # Specify the redshift range\n zarray = np.arange(0, 5, 0.5)\n\n xtitle = r\"Halo Mass M$_{sol}$\"\n ytitle = r\"Accretion Rate M$_{sol}$ yr$^{-1}$\"\n linelabel = \"z=\"\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_xlabel(xtitle)\n ax.set_ylabel(ytitle)\n colors = cm.rainbow(np.linspace(0, 1, len(zarray)))\n\n for zind, zval in enumerate(zarray):\n output = commah.run(cosmology=cosmology, zi=zval, Mi=xarray,\n 
com=False, mah=True)\n\n yarray = output[yval].flatten()\n\n # Plot each line in turn with different colour\n ax.plot(xarray, yarray, label=linelabel+str(zval),\n color=colors[zind],)\n\n ax.set_xscale('log')\n ax.set_yscale('log')\n\n leg = ax.legend(loc=2)\n # Make box totally transparent\n leg.get_frame().set_alpha(0)\n leg.get_frame().set_edgecolor('white')\n for label in leg.get_texts():\n label.set_fontsize('small') # the font size\n for label in leg.get_lines():\n label.set_linewidth(4) # the legend line width\n\n if plotname:\n fig.tight_layout(pad=0.2)\n print(\"Plotting to '%s_MAH_M_relation.png'\" % (plotname))\n fig.savefig(plotname+\"_MAH_M_relation.png\", dpi=fig.dpi*5)\n else:\n plt.show()\n\n # Plot the (dM/M)dt-M relation as a function of redshift\n xarray = 10**(np.arange(10, 14, 0.5))\n yval = 'dMdt'\n\n # Specify the redshift range\n zarray = np.arange(0, 5, 0.5)\n\n xtitle = r\"Halo Mass M$_{sol}$\"\n ytitle = r\"Specific Accretion Rate yr$^{-1}$\"\n linelabel = \"z=\"\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_xlabel(xtitle)\n ax.set_ylabel(ytitle)\n colors = cm.rainbow(np.linspace(0, 1, len(zarray)))\n\n for zind, zval in enumerate(zarray):\n output = commah.run(cosmology=cosmology, zi=zval, Mi=xarray,\n mah=True, com=False)\n\n yarray = output[yval].flatten()\n\n # Plot each line in turn with different colour\n ax.plot(xarray, yarray/xarray, label=linelabel+str(zval),\n color=colors[zind],)\n\n ax.set_xscale('log')\n ax.set_yscale('log')\n\n leg = ax.legend(loc=1)\n # Make box totally transparent\n leg.get_frame().set_alpha(0)\n leg.get_frame().set_edgecolor('white')\n for label in leg.get_texts():\n label.set_fontsize('small') # the font size\n for label in leg.get_lines():\n label.set_linewidth(4) # the legend line width\n\n if plotname:\n fig.tight_layout(pad=0.2)\n print(\"Plotting to '%s_specificMAH_M_relation.png'\" % (plotname))\n fig.savefig(plotname+\"_specificMAH_M_relation.png\", dpi=fig.dpi*5)\n else:\n plt.show()\n\n # Plot the Mz-z relation as a function of mass\n # (so mass is decreasing to zero as z-> inf)\n xarray = 10**(np.arange(0, 1, 0.05)) - 1\n yval = 'Mz'\n\n # Specify the mass range\n zarray = 10**np.arange(10, 14, 0.5)\n\n xtitle = r\"Redshift\"\n ytitle = r\"M(z) (M$_{sol}$)\"\n linelabel = r\"log$_{10}$ M$_{0}$(M$_{sol}$)=\"\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_xlabel(xtitle)\n ax.set_ylabel(ytitle)\n colors = cm.rainbow(np.linspace(0, 1, len(zarray)))\n\n for zind, zval in enumerate(zarray):\n output = commah.run(cosmology=cosmology, zi=0, Mi=zval, z=xarray)\n\n yarray = output[yval].flatten()\n\n # Plot each line in turn with different colour\n ax.plot(xarray, yarray,\n label=linelabel+\"{0:.1f}\".format(np.log10(zval)),\n color=colors[zind],)\n\n ax.set_yscale('log')\n\n leg = ax.legend(loc=1)\n # Make box totally transparent\n leg.get_frame().set_alpha(0)\n leg.get_frame().set_edgecolor('white')\n for label in leg.get_texts():\n label.set_fontsize('small') # the font size\n for label in leg.get_lines():\n label.set_linewidth(4) # the legend line width\n\n if plotname:\n fig.tight_layout(pad=0.2)\n print(\"Plotting to '%s_Mzz_relation.png'\" % (plotname))\n fig.savefig(plotname+\"_Mzz_relation.png\", dpi=fig.dpi*5)\n else:\n plt.show()\n\n # Plot the Mz/M0-z relation as a function of mass\n xarray = 10**(np.arange(0, 1, 0.02)) - 1\n yval = 'Mz'\n\n # Specify the mass range\n zarray = 10**np.arange(10, 14, 0.5)\n\n xtitle = r\"Redshift\"\n ytitle = r\"log$_{10}$ M(z)/M$_{0}$\"\n linelabel = 
r\"log$_{10}$ M$_{0}$(M$_{sol}$)=\"\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_xlabel(xtitle)\n ax.set_ylabel(ytitle)\n colors = cm.rainbow(np.linspace(0, 1, len(zarray)))\n\n for zind, zval in enumerate(zarray):\n output = commah.run(cosmology=cosmology, zi=0, Mi=zval, z=xarray)\n\n yarray = output[yval].flatten()\n\n # Plot each line in turn with different colour\n ax.plot(xarray, np.log10(yarray/zval),\n label=linelabel+\"{0:.1f}\".format(np.log10(zval)),\n color=colors[zind],)\n\n leg = ax.legend(loc=3)\n # Make box totally transparent\n leg.get_frame().set_alpha(0)\n leg.get_frame().set_edgecolor('white')\n for label in leg.get_texts():\n label.set_fontsize('small') # the font size\n for label in leg.get_lines():\n label.set_linewidth(4) # the legend line width\n\n if plotname:\n fig.tight_layout(pad=0.2)\n print(\"Plotting to '%s_MzM0z_relation.png'\" % (plotname))\n fig.savefig(plotname+\"_MzM0z_relation.png\", dpi=fig.dpi*5)\n else:\n plt.show()\n\n return(\"Done\")", "def COM(z, M, **cosmo):\n \"\"\" Calculate concentration for halo of mass 'M' at redshift 'z'\n\n Parameters\n ----------\n z : float / numpy array\n Redshift to find concentration of halo\n M : float / numpy array\n Halo mass at redshift 'z'. Must be same size as 'z'\n cosmo : dict\n Dictionary of cosmological parameters, similar in format to:\n {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,\n 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,\n 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}\n\n Returns\n -------\n (c_array, sig_array, nu_array, zf_array) : float / numpy arrays\n of equivalent size to 'z' and 'M'. Variables are\n Concentration, Mass Variance 'sigma' this corresponds too,\n the dimnesionless fluctuation this represents and formation redshift\n\n \"\"\"\n # Check that z and M are arrays\n z = np.array(z, ndmin=1, dtype=float)\n M = np.array(M, ndmin=1, dtype=float)\n\n # Create array\n c_array = np.empty_like(z)\n sig_array = np.empty_like(z)\n nu_array = np.empty_like(z)\n zf_array = np.empty_like(z)\n\n for i_ind, (zval, Mval) in enumerate(_izip(z, M)):\n # Evaluate the indices at each redshift and mass combination\n # that you want a concentration for, different to MAH which\n # uses one a_tilde and b_tilde at the starting redshift only\n a_tilde, b_tilde = calc_ab(zval, Mval, **cosmo)\n\n # Minimize equation to solve for 1 unknown, 'c'\n c = scipy.optimize.brentq(_minimize_c, 2, 1000,\n args=(zval, a_tilde, b_tilde,\n cosmo['A_scaling'], cosmo['omega_M_0'],\n cosmo['omega_lambda_0']))\n\n if np.isclose(c, 0):\n print(\"Error solving for concentration with given redshift and \"\n \"(probably) too small a mass\")\n c = -1\n sig = -1\n nu = -1\n zf = -1\n else:\n # Calculate formation redshift for this concentration,\n # redshift at which the scale radius = virial radius: z_-2\n zf = formationz(c, zval, Ascaling=cosmo['A_scaling'],\n omega_M_0=cosmo['omega_M_0'],\n omega_lambda_0=cosmo['omega_lambda_0'])\n\n R_Mass = cp.perturbation.mass_to_radius(Mval, **cosmo)\n\n sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)\n nu = 1.686/(sig*growthfactor(zval, norm=True, **cosmo))\n\n c_array[i_ind] = c\n sig_array[i_ind] = sig\n nu_array[i_ind] = nu\n zf_array[i_ind] = zf\n\n return(c_array, sig_array, nu_array, zf_array)", "def astro_redshifts(min_z, max_z, nsamples):\n '''Sample the redshifts for sources, with redshift\n independent rate, using standard cosmology\n\n Parameters\n ----------\n min_z: float\n Minimum redshift\n max_z: 
float\n Maximum redshift\n nsamples: int\n Number of samples\n\n Returns\n -------\n z_astro: array\n nsamples of redshift, between min_z, max_z, by standard cosmology\n '''\n\n dz, fac = 0.001, 3.0\n # use interpolation instead of directly estimating all the pdfz for rndz\n V = quad(contracted_dVdc, 0., max_z)[0]\n zbins = np.arange(min_z, max_z + dz/2., dz)\n zcenter = (zbins[:-1] + zbins[1:]) / 2\n pdfz = cosmo.differential_comoving_volume(zcenter).value/(1+zcenter)/V\n\n int_pdf = interp1d(zcenter, pdfz, bounds_error=False, fill_value=0)\n\n rndz = np.random.uniform(min_z, max_z, int(fac*nsamples))\n pdf_zs = int_pdf(rndz)\n maxpdf = max(pdf_zs)\n rndn = np.random.uniform(0, 1, int(fac*nsamples)) * maxpdf\n diff = pdf_zs - rndn\n idx = np.where(diff > 0)\n z_astro = rndz[idx]\n\n np.random.shuffle(z_astro)\n z_astro.resize(nsamples)\n\n return z_astro", "def _getcosmoheader(cosmo):\n \"\"\" Output the cosmology to a string for writing to file \"\"\"\n\n cosmoheader = (\"# Cosmology (flat) Om:{0:.3f}, Ol:{1:.3f}, h:{2:.2f}, \"\n \"sigma8:{3:.3f}, ns:{4:.2f}\".format(\n cosmo['omega_M_0'], cosmo['omega_lambda_0'], cosmo['h'],\n cosmo['sigma_8'], cosmo['n']))\n\n return(cosmoheader)", "def xspec_cosmo(H0=None,q0=None,lambda_0=None):\n \"\"\"\n Define the Cosmology in use within the XSpec models. See Xspec manual for help:\n\n http://heasarc.nasa.gov/xanadu/xspec/manual/XScosmo.html\n \n All parameters can be modified or just a single parameter\n\n :param H0: the hubble constant\n :param q0:\n :param lambda_0:\n :return: Either none or the current setting (H_0, q_0, lambda_0)\n \"\"\"\n\n current_settings = _xspec.get_xscosmo()\n\n if (H0 is None) and (q0 is None) and (lambda_0 is None):\n\n return current_settings\n\n\n else:\n\n # ok, we will see what was changed by the used\n\n user_inputs = [H0, q0, lambda_0]\n\n for i, current_setting in enumerate(current_settings):\n\n if user_inputs[i] is None:\n\n # the user didn't modify this,\n # so lets keep what was already set\n\n user_inputs[i] = current_setting\n\n\n # pass this to xspec\n\n _xspec.set_xscosmo(*user_inputs)", "def MAH(z, zi, Mi, **cosmo):\n \"\"\" Calculate mass accretion history by looping function acc_rate\n over redshift steps 'z' for halo of mass 'Mi' at redshift 'zi'\n\n Parameters\n ----------\n z : float / numpy array\n Redshift to output MAH over. Note zi<z always\n zi : float\n Redshift\n Mi : float\n Halo mass at redshift 'zi'\n cosmo : dict\n Dictionary of cosmological parameters, similar in format to:\n {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,\n 'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,\n 'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}\n\n Returns\n -------\n (dMdt, Mz) : float / numpy arrays of equivalent size to 'z'\n Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'\n\n \"\"\"\n\n # Ensure that z is a 1D NumPy array\n z = np.array(z, ndmin=1, dtype=float)\n\n # Create a full array\n dMdt_array = np.empty_like(z)\n Mz_array = np.empty_like(z)\n\n for i_ind, zval in enumerate(z):\n # Solve the accretion rate and halo mass at each redshift step\n dMdt, Mz = acc_rate(zval, zi, Mi, **cosmo)\n\n dMdt_array[i_ind] = dMdt\n Mz_array[i_ind] = Mz\n\n return(dMdt_array, Mz_array)", "def cosmological_quantity_from_redshift(z, quantity, strip_unit=True,\n **kwargs):\n r\"\"\"Returns the value of a cosmological quantity (e.g., age) at a redshift.\n\n Parameters\n ----------\n z : float\n The redshift.\n quantity : str\n The name of the quantity to get. 
The name may be any attribute of\n :py:class:`astropy.cosmology.FlatLambdaCDM`.\n strip_unit : bool, optional\n Just return the value of the quantity, sans units. Default is True.\n \\**kwargs :\n All other keyword args are passed to :py:func:`get_cosmology` to\n select a cosmology. If none provided, will use\n :py:attr:`DEFAULT_COSMOLOGY`.\n\n Returns\n -------\n float or astropy.units.quantity :\n The value of the quantity at the requested value. If ``strip_unit`` is\n ``True``, will return the value. Otherwise, will return the value with\n units.\n \"\"\"\n cosmology = get_cosmology(**kwargs)\n val = getattr(cosmology, quantity)(z)\n if strip_unit:\n val = val.value\n return val", "def _deriv_growth(z, **cosmo):\n \"\"\" Returns derivative of the linear growth factor at z\n for a given cosmology **cosmo \"\"\"\n\n inv_h = (cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])**(-0.5)\n fz = (1 + z) * inv_h**3\n\n deriv_g = growthfactor(z, norm=True, **cosmo)*(inv_h**2) *\\\n 1.5 * cosmo['omega_M_0'] * (1 + z)**2 -\\\n fz * growthfactor(z, norm=True, **cosmo)/_int_growth(z, **cosmo)\n\n return(deriv_g)", "def DRAGONS(flat=False, extras=True):\n \"\"\"DRAGONS cosmology assumes WMAP7 + BAO + H_0 mean from\n Komatsu et al. (2011) ApJS 192 18K (arxiv:1001.4538v1)\n\n Parameters\n ----------\n\n flat: boolean\n\n If True, sets omega_lambda_0 = 1 - omega_M_0 to ensure omega_k_0\n = 0 exactly. Also sets omega_k_0 = 0 explicitly.\n\n extras: boolean\n\n If True, sets neutrino number N_nu = 0, neutrino density\n omega_n_0 = 0.0, Helium mass fraction Y_He = 0.24.\n\n \"\"\"\n omega_c_0 = 0.2292\n omega_b_0 = 0.0458\n cosmo = {'omega_b_0': omega_b_0,\n 'omega_M_0': omega_b_0 + omega_c_0,\n 'omega_lambda_0': 0.725,\n 'h': 0.702,\n 'n': 0.963,\n 'sigma_8': 0.816,\n 'tau': 0.088,\n 'z_reion': 10.6,\n 't_0': 13.76,\n }\n if flat:\n cosmo['omega_lambda_0'] = 1 - cosmo['omega_M_0']\n cosmo['omega_k_0'] = 0.0\n if extras:\n add_extras(cosmo)\n return cosmo", "def get_cosmology(cosmology=None, **kwargs):\n r\"\"\"Gets an astropy cosmology class.\n\n Parameters\n ----------\n cosmology : str or astropy.cosmology.FlatLambdaCDM, optional\n The name of the cosmology to use. For the list of options, see\n :py:attr:`astropy.cosmology.parameters.available`. If None, and no\n other keyword arguments are provided, will default to\n :py:attr:`DEFAULT_COSMOLOGY`. If an instance of\n :py:class:`astropy.cosmology.FlatLambdaCDM`, will just return that.\n \\**kwargs :\n If any other keyword arguments are provided they will be passed to\n :py:attr:`astropy.cosmology.FlatLambdaCDM` to create a custom\n cosmology.\n\n Returns\n -------\n astropy.cosmology.FlatLambdaCDM\n The cosmology to use.\n\n Examples\n --------\n Use the default:\n\n >>> from pycbc.cosmology import get_cosmology\n >>> get_cosmology()\n FlatLambdaCDM(name=\"Planck15\", H0=67.7 km / (Mpc s), Om0=0.307,\n Tcmb0=2.725 K, Neff=3.05, m_nu=[0. 0. 0.06] eV,\n Ob0=0.0486)\n\n Use properties measured by WMAP instead:\n\n >>> get_cosmology(\"WMAP9\")\n FlatLambdaCDM(name=\"WMAP9\", H0=69.3 km / (Mpc s), Om0=0.286, Tcmb0=2.725 K,\n Neff=3.04, m_nu=[0. 0. 0.] 
eV, Ob0=0.0463)\n\n Create your own cosmology (see :py:class:`astropy.cosmology.FlatLambdaCDM`\n for details on the default values used):\n\n >>> get_cosmology(H0=70., Om0=0.3)\n FlatLambdaCDM(H0=70 km / (Mpc s), Om0=0.3, Tcmb0=0 K, Neff=3.04, m_nu=None,\n Ob0=None)\n\n \"\"\"\n if kwargs and cosmology is not None:\n raise ValueError(\"if providing custom cosmological parameters, do \"\n \"not provide a `cosmology` argument\")\n if isinstance(cosmology, astropy.cosmology.FlatLambdaCDM):\n # just return\n return cosmology\n if kwargs:\n cosmology = astropy.cosmology.FlatLambdaCDM(**kwargs)\n else:\n if cosmology is None:\n cosmology = DEFAULT_COSMOLOGY\n if cosmology not in astropy.cosmology.parameters.available:\n raise ValueError(\"unrecognized cosmology {}\".format(cosmology))\n cosmology = getattr(astropy.cosmology, cosmology)\n return cosmology" ]
[ 0.7582313418388367, 0.7539766430854797, 0.7074388265609741, 0.7062006592750549, 0.6907287240028381, 0.6845294833183289, 0.6767992377281189, 0.6750814318656921, 0.6739107966423035, 0.6689210534095764, 0.665745198726654, 0.6627588272094727 ]
Load a configuration and keep it alive for the given context :param config_path: path to a configuration file
def load(config_path: str):
    """
    Load a configuration and keep it alive for the given context

    :param config_path: path to a configuration file
    """
    # we bind the config to _ to keep it alive
    if os.path.splitext(config_path)[1] in ('.yaml', '.yml'):
        _ = load_yaml_configuration(config_path, translator=PipelineTranslator())
    elif os.path.splitext(config_path)[1] == '.py':
        _ = load_python_configuration(config_path)
    else:
        raise ValueError('Unknown configuration extension: %r' % os.path.splitext(config_path)[1])
    yield
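The body above yields exactly once, which suggests it is meant to be driven as a context manager. The sketch below is one hedged way to use it, assuming load() is wrapped with contextlib.contextmanager (the decorator itself is not shown in the snippet); 'pipeline.yaml' and run_pipeline() are placeholder names.

import contextlib

# If load() is not already decorated elsewhere, wrap it explicitly.
keep_config_alive = contextlib.contextmanager(load)

with keep_config_alive('pipeline.yaml'):
    # The parsed configuration stays referenced (alive) for this block
    # and is released when the block exits.
    run_pipeline()  # placeholder for work that relies on the loaded config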
[ "def load(self, config_file=None, path=None):\n \"\"\"Loads the configuration from a specific file\n\n :param config_file: the name of the config file\n :param path: the path to the config file\n \"\"\"\n if config_file is None:\n if path is None:\n path, config_file = split(resource_filename(__name__, CONFIG_FILE))\n else:\n config_file = CONFIG_FILE\n super(Config, self).load(config_file, path)", "def load(self, config_path=None):\n \"\"\" Load and parse the configuration file using pyyaml\n\n :param config_path: An optional file path, file handle, or byte string\n for the configuration file.\n\n \"\"\"\n\n self._reset_config()\n\n if not config_path:\n config_path = os.path.join(\n appdirs.user_config_dir(self._app_name),\n self._config_file\n )\n\n try:\n # First check if it's file like. If it is, pyyaml can load it.\n # I'm checking type instead of catching exceptions to keep the\n # exception handling simple\n if hasattr(config_path, 'read'):\n config = yaml.safe_load(config_path)\n else:\n # If it isn't, it's a path. We have to open it first, otherwise\n # pyyaml will try to read it as yaml\n with open(config_path, 'rb') as config_file:\n config = yaml.safe_load(config_file)\n except EnvironmentError:\n raise exceptions.ConfigNotFoundError('Couldn\\'t read config at {config_path}'.format(\n config_path=str(config_path),\n ))\n except yaml.YAMLError:\n raise exceptions.ConfigSyntaxError('Error parsing YAML')\n\n if 'printer' in config:\n self._printer_config = config['printer']\n self._printer_name = self._printer_config.pop('type').title()\n\n if not self._printer_name or not hasattr(printer, self._printer_name):\n raise exceptions.ConfigSyntaxError(\n 'Printer type \"{printer_name}\" is invalid'.format(\n printer_name=self._printer_name,\n )\n )\n\n self._has_loaded = True", "def load_config(self, config_path=None):\n \"\"\"Load configuration from the specified path, or self.config_path\"\"\"\n if config_path is None:\n config_path = self.config_path\n else:\n self.config_path = config_path\n\n config = ConfigParser.SafeConfigParser(self.DEFAULT_SUBSTITUTIONS,\n allow_no_value=True)\n # Avoid the configparser automatically lowercasing keys\n config.optionxform = str\n self.initialize_config(config)\n try:\n with open(config_path) as f:\n config.readfp(f)\n except (IOError, ConfigParser.Error):\n _log.exception(\"Ignoring config from %s due to error.\", config_path)\n return False\n\n self.config = config\n self.reload_modules()\n return True", "def setup_configuration(config_path):\n \"\"\"Loads the core configuration from the specified path and uses its content for further setup\n\n :param config_path: Path to the core config file\n \"\"\"\n if config_path is not None:\n config_path, config_file = filesystem.separate_folder_path_and_file_name(config_path)\n global_config.load(config_file=config_file, path=config_path)\n else:\n global_config.load(path=config_path)\n\n # Initialize libraries\n core_singletons.library_manager.initialize()", "def load_config(config_file, default_config_file, **kwargs):\n '''load the configuration\n\n Configuration variables are delivered to applications exclusively through\n the environment. 
They get into the environment either from a specified\n configuration file, from a default configuration file, from an environment\n variable, or from a kwarg specified to this function.\n\n Configuration variables are applied with the following precedence (lowest\n to highest):\n\n - config file: the format of the config file is a typical environment file\n with individual lines like DATALAKE_FOO=bar. By convention, all variables\n start with either DATALAKE_ or AWS_.\n\n - environment variables: The variable names are the same as what would be\n written in a config file.\n\n - kwargs: additional configuration variables to apply, subject to some\n conventions. Specifically, kwargs are lowercase. A kwarg called `foo`\n maps to a configuration variable called `DATALAKE_FOO`. The only\n exception to this is a kwarg that starts with `aws_`. That is, a kwarg\n called `aws_baz` would map to a configuration variable called `AWS_BAZ`.\n\n\n Args:\n\n - config_file: the configuration file to load. If it is None,\n default_config_file will be examined. If it is not None and does not\n exist an InsufficientConfiguration exception is thrown.\n\n - default_config_file: the file to try if config_file is None. If\n default_config_file is None or does not exist, it is simply ignored. No\n exceptions are thrown.\n\n - kwargs: key=value pairs.\n\n '''\n if config_file and not os.path.exists(config_file):\n msg = 'config file {} does not exist'.format(config_file)\n raise InsufficientConfiguration(msg)\n\n if config_file is None and \\\n default_config_file is not None and \\\n os.path.exists(default_config_file):\n config_file = default_config_file\n\n if config_file is not None:\n load_dotenv(config_file)\n\n _update_environment(**kwargs)", "def load(self, file_path):\n ''' Load configuration from a specific file '''\n self.clear()\n self.__config = self.read_file(file_path)", "def _load(self, file_path: Text) -> None:\n \"\"\"\n Load the configuration from a plain Python file. This file is executed\n on its own.\n\n Only keys matching the CONFIG_ATTR will be loaded. 
Basically, it's\n CONFIG_KEYS_LIKE_THIS.\n\n :param file_path: Path to the file to load\n \"\"\"\n\n # noinspection PyUnresolvedReferences\n module_ = types.ModuleType('settings')\n module_.__file__ = file_path\n\n try:\n with open(file_path, encoding='utf-8') as f:\n exec(compile(f.read(), file_path, 'exec'), module_.__dict__)\n except IOError as e:\n e.strerror = 'Unable to load configuration file ({})'\\\n .format(e.strerror)\n raise\n\n for key in dir(module_):\n if CONFIG_ATTR.match(key):\n self[key] = getattr(module_, key)", "def load(config_file):\n \"\"\"\n Processes and loads config file.\n \"\"\"\n with open(config_file, \"r\") as f:\n\n def env_get():\n return dict(os.environ)\n tmpl = Template(f.read())\n return Config(yaml.load(tmpl.render(**env_get())))", "def import_config(config_path):\n \"\"\"Import a Config from a given path, relative to the current directory.\n\n The module specified by the config file must contain a variable called `configuration` that is\n assigned to a Config object.\n \"\"\"\n if not os.path.isfile(config_path):\n raise ConfigBuilderError(\n 'Could not find config file: ' + config_path)\n loader = importlib.machinery.SourceFileLoader(config_path, config_path)\n module = loader.load_module()\n\n if not hasattr(module, 'config') or not isinstance(module.config, Config):\n raise ConfigBuilderError(\n 'Could not load config file \"{}\": config files must contain '\n 'a variable called \"config\" that is '\n 'assigned to a Config object.'.format(config_path))\n return module.config", "def load_config(self, config_path=None):\n \"\"\"\n Load application configuration from a file and merge it with the default\n configuration.\n\n If the ``FEDORA_MESSAGING_CONF`` environment variable is set to a\n filesystem path, the configuration will be loaded from that location.\n Otherwise, the path defaults to ``/etc/fedora-messaging/config.toml``.\n \"\"\"\n self.loaded = True\n config = copy.deepcopy(DEFAULTS)\n\n if config_path is None:\n if \"FEDORA_MESSAGING_CONF\" in os.environ:\n config_path = os.environ[\"FEDORA_MESSAGING_CONF\"]\n else:\n config_path = \"/etc/fedora-messaging/config.toml\"\n\n if os.path.exists(config_path):\n _log.info(\"Loading configuration from {}\".format(config_path))\n with open(config_path) as fd:\n try:\n file_config = toml.load(fd)\n for key in file_config:\n config[key.lower()] = file_config[key]\n except toml.TomlDecodeError as e:\n msg = \"Failed to parse {}: error at line {}, column {}: {}\".format(\n config_path, e.lineno, e.colno, e.msg\n )\n raise exceptions.ConfigurationException(msg)\n else:\n _log.info(\"The configuration file, {}, does not exist.\".format(config_path))\n\n self.update(config)\n self._validate()\n return self", "def load_config(self, filepath=None):\n \"\"\"\n checks if the file is a valid config file\n Args:\n filepath:\n\n \"\"\"\n\n # load config or default if invalid\n\n def load_settings(filepath):\n \"\"\"\n loads a old_gui settings file (a json dictionary)\n - path_to_file: path to file that contains the dictionary\n\n Returns:\n - instruments: depth 1 dictionary where keys are instrument names and values are instances of instruments\n - scripts: depth 1 dictionary where keys are script names and values are instances of scripts\n - probes: depth 1 dictionary where to be decided....?\n \"\"\"\n\n instruments_loaded = {}\n probes_loaded = {}\n scripts_loaded = {}\n\n if filepath and os.path.isfile(filepath):\n in_data = load_b26_file(filepath)\n\n instruments = in_data['instruments'] if 'instruments' 
in in_data else {}\n scripts = in_data['scripts'] if 'scripts' in in_data else {}\n probes = in_data['probes'] if 'probes' in in_data else {}\n\n try:\n instruments_loaded, failed = Instrument.load_and_append(instruments)\n if len(failed) > 0:\n print(('WARNING! Following instruments could not be loaded: ', failed))\n\n scripts_loaded, failed, instruments_loaded = Script.load_and_append(\n script_dict=scripts,\n instruments=instruments_loaded,\n log_function=self.log,\n data_path=self.gui_settings['data_folder'])\n\n if len(failed) > 0:\n print(('WARNING! Following scripts could not be loaded: ', failed))\n\n probes_loaded, failed, instruments_loadeds = Probe.load_and_append(\n probe_dict=probes,\n probes=probes_loaded,\n instruments=instruments_loaded)\n\n self.log('Successfully loaded from previous save.')\n except ImportError:\n self.log('Could not load instruments or scripts from file.')\n self.log('Opening with blank GUI.')\n return instruments_loaded, scripts_loaded, probes_loaded\n\n config = None\n\n try:\n config = load_b26_file(filepath)\n config_settings = config['gui_settings']\n if config_settings['gui_settings'] != filepath:\n print((\n 'WARNING path to settings file ({:s}) in config file is different from path of settings file ({:s})'.format(\n config_settings['gui_settings'], filepath)))\n config_settings['gui_settings'] = filepath\n except Exception as e:\n if filepath:\n self.log('The filepath was invalid --- could not load settings. Loading blank GUI.')\n config_settings = self._DEFAULT_CONFIG\n\n\n for x in self._DEFAULT_CONFIG.keys():\n if x in config_settings:\n if not os.path.exists(config_settings[x]):\n try:\n os.makedirs(config_settings[x])\n except Exception:\n config_settings[x] = self._DEFAULT_CONFIG[x]\n os.makedirs(config_settings[x])\n print(('WARNING: failed validating or creating path: set to default path'.format(config_settings[x])))\n else:\n config_settings[x] = self._DEFAULT_CONFIG[x]\n os.makedirs(config_settings[x])\n print(('WARNING: path {:s} not specified set to default {:s}'.format(x, config_settings[x])))\n\n # check if file_name is a valid filename\n if filepath is not None and os.path.exists(os.path.dirname(filepath)):\n config_settings['gui_settings'] = filepath\n\n self.gui_settings = config_settings\n\n if(config):\n self.gui_settings_hidden = config['gui_settings_hidden']\n else:\n self.gui_settings_hidden['script_source_folder'] = ''\n\n self.instruments, self.scripts, self.probes = load_settings(filepath)\n\n\n self.refresh_tree(self.tree_gui_settings, self.gui_settings)\n self.refresh_tree(self.tree_scripts, self.scripts)\n self.refresh_tree(self.tree_settings, self.instruments)\n\n self._hide_parameters(filepath)", "def load(self, filepath):\n # type: (str) -> None\n \"\"\"Load configuration from existing file.\n\n :param str filepath: Path to existing config file.\n :raises: ValueError if supplied config file is invalid.\n \"\"\"\n try:\n self._config.read(filepath)\n import ast\n self.connection.timeout = \\\n self._config.getint(\"Connection\", \"timeout\")\n self.connection.verify = \\\n self._config.getboolean(\"Connection\", \"verify\")\n self.connection.cert = \\\n self._config.get(\"Connection\", \"cert\")\n\n self.proxies.proxies = \\\n ast.literal_eval(self._config.get(\"Proxies\", \"proxies\"))\n self.proxies.use_env_settings = \\\n self._config.getboolean(\"Proxies\", \"env_settings\")\n\n self.redirect_policy.allow = \\\n self._config.getboolean(\"RedirectPolicy\", \"allow\")\n self.redirect_policy.max_redirects = \\\n 
self._config.getint(\"RedirectPolicy\", \"max_redirects\")\n\n except (ValueError, EnvironmentError, NoOptionError):\n error = \"Supplied config file incompatible.\"\n raise_with_traceback(ValueError, error)\n finally:\n self._clear_config()" ]
[ 0.7743517160415649, 0.7668439149856567, 0.7407531142234802, 0.7327948808670044, 0.7264970541000366, 0.7258405089378357, 0.7225270867347717, 0.7186398506164551, 0.7185409665107727, 0.715469479560852, 0.7152398228645325, 0.7120245695114136 ]
Replaces instances of repeat n: by for __VAR_i in range(n): where __VAR_i is a string that does not appear elsewhere in the code sample.
def transform_source(text):
    '''Replaces instances of
        repeat n:
    by
        for __VAR_i in range(n):
    where __VAR_i is a string that does not appear elsewhere
    in the code sample.
    '''
    loop_keyword = 'repeat'

    nb = text.count(loop_keyword)
    if nb == 0:
        return text

    var_names = get_unique_variable_names(text, nb)

    toks = tokenize.generate_tokens(StringIO(text).readline)
    result = []
    replacing_keyword = False
    for toktype, tokvalue, _, _, _ in toks:
        if toktype == tokenize.NAME and tokvalue == loop_keyword:
            result.extend([
                (tokenize.NAME, 'for'),
                (tokenize.NAME, var_names.pop()),
                (tokenize.NAME, 'in'),
                (tokenize.NAME, 'range'),
                (tokenize.OP, '(')
            ])
            replacing_keyword = True
        elif replacing_keyword and tokvalue == ':':
            result.extend([
                (tokenize.OP, ')'),
                (tokenize.OP, ':')
            ])
            replacing_keyword = False
        else:
            result.append((toktype, tokvalue))
    return tokenize.untokenize(result)
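A small demonstration of the transform above, assuming it lives in a module alongside the get_unique_variable_names helper shown in a later entry, with tokenize and StringIO imported as used in the function. Note that untokenize() normalises spacing, so the exact whitespace of the output may differ.

sample = (
    "repeat 3:\n"
    "    print('hello')\n"
)
print(transform_source(sample))
# Expected output, up to spacing and the exact generated name:
#   for __VAR_0 in range (3 ):
#       print ('hello')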
[ "def varReplace(basedir, raw, vars, lookup_fatal=True, depth=0, expand_lists=False):\n ''' Perform variable replacement of $variables in string raw using vars dictionary '''\n # this code originally from yum\n\n if (depth > 20):\n raise errors.AnsibleError(\"template recursion depth exceeded\")\n\n done = [] # Completed chunks to return\n\n while raw:\n m = _varFind(basedir, raw, vars, lookup_fatal, depth)\n if not m:\n done.append(raw)\n break\n\n # Determine replacement value (if unknown variable then preserve\n # original)\n\n replacement = m['replacement']\n if expand_lists and isinstance(replacement, (list, tuple)):\n replacement = \",\".join(replacement)\n if isinstance(replacement, (str, unicode)):\n replacement = varReplace(basedir, replacement, vars, lookup_fatal, depth=depth+1, expand_lists=expand_lists)\n if replacement is None:\n replacement = raw[m['start']:m['end']]\n\n start, end = m['start'], m['end']\n done.append(raw[:start]) # Keep stuff leading up to token\n done.append(unicode(replacement)) # Append replacement value\n raw = raw[end:] # Continue with remainder of string\n\n return ''.join(done)", "def var_replace(self, text):\n \"\"\"Replaces all instances of @VAR with their values in the specified text.\n \"\"\"\n result = text\n for var in self._vardict:\n result = result.replace(\"@{}\".format(var), self._vardict[var])\n return result", "function variableReplacementBuilder(v) {\n var pattern,\n name = v.name,\n from = v.from,\n to = v.to;\n\n if (util.isUndefined(to)) {\n grunt.log.error().error('\"to\" must be defined in a variable replacement instruction');\n return false;\n }\n\n if (util.isUndefined(name) && util.isUndefined(from)) {\n grunt.log.error().error('one of \"name\" or \"from\" must be defined in a variable replacement instruction');\n return false;\n }\n\n if (!util.isUndefined(name) && !util.isString(name) && !util.isRegex(name)) {\n grunt.log.error().error('\"name\" must be a string or a regex in a variable replacement instruction');\n return false;\n }\n\n if (!util.isUndefined(from) && !util.isString(from) && !util.isNumber(from) && !util.isBoolean(from)) {\n grunt.log.error().error('\"from\" must be a string, a number or a boolean in a variable replacement instruction');\n return false;\n }\n\n if (!util.isString(to) && !util.isNumber(to) && !util.isBoolean(to)) {\n grunt.log.error().error('\"to\" must be a string, a number or a boolean in a variable replacement instruction');\n return false;\n }\n\n pattern = buildVariableReplacementPattern(name, from, to);\n\n return {\n pattern: pattern,\n replacement: function (match, p1, p2, p3) {\n grunt.log\n .debug('matching variable replacement'.bold)\n .debug(('match=' + match).cyan)\n .debug(('captured groups=' + grunt.log.wordlist([p1, p2, p3])).cyan);\n\n return p1 + to + p3;\n }\n };\n }", "def expand_variables_to_segments(v, Nt):\n ''' expands contextual variables v, by repeating each instance as specified in Nt '''\n N_v = len(np.atleast_1d(v[0]))\n return np.concatenate([np.full((Nt[i], N_v), v[i]) for i in np.arange(len(v))])", "private static void fixUninitializedVarDeclarations(Node n, Node containingBlock) {\n // Inner loop structure must already have logic to initialize its\n // variables. 
In particular FOR-IN structures must not be modified.\n if (NodeUtil.isLoopStructure(n)) {\n return;\n }\n\n if ((n.isVar() || n.isLet()) && n.hasOneChild()) {\n Node name = n.getFirstChild();\n // It isn't initialized.\n if (!name.hasChildren()) {\n Node srcLocation = name;\n name.addChildToBack(NodeUtil.newUndefinedNode(srcLocation));\n containingBlock.addChildToFront(n.detach());\n }\n return;\n }\n\n for (Node c = n.getFirstChild(); c != null; c = c.getNext()) {\n fixUninitializedVarDeclarations(c, containingBlock);\n }\n }", "private static function ReplaceVars(){\n $oThis = self::CreateInstanceIfNotExists();\n\n $aP = array();\n $iOffset = 0;\n\n while(preg_match('/(\\{\\$(.*?)\\})/i', $oThis->sBuffer, $aMatches, PREG_OFFSET_CAPTURE, $iOffset)){\n $mVar = $aMatches[2][0];\n\n if(Storage::Has($mVar)){\n $mTo = Storage::Get($mVar);\n\n if(is_bool($mTo))\n $mTo = ($mTo) ? \"true\" : \"false\";\n\n $mTo = html_entity_decode($mTo, ENT_NOQUOTES/*, Config::Read(\"l10n.charset\")*/);\n $oThis->sBuffer = str_replace($aMatches[0][0], $mTo, $oThis->sBuffer);\n }\n\n $iOffset = $aMatches[0][1]+strlen($aMatches[0][0]);\n }\n }", "def replace_wrep(t:str) -> str:\n \"Replace word repetitions in `t`.\"\n def _replace_wrep(m:Collection[str]) -> str:\n c,cc = m.groups()\n return f' {TK_WREP} {len(cc.split())+1} {c} '\n re_wrep = re.compile(r'(\\b\\w+\\W+)(\\1{3,})')\n return re_wrep.sub(_replace_wrep, t)", "def set_var_slice(self, name, start, count, var):\n \"\"\"\n Overwrite the values in variable name with data\n from var, in the range (start:start+count).\n Start, count can be integers for rank 1, and can be\n tuples of integers for higher ranks.\n For some implementations it can be equivalent and more efficient to do:\n `get_var(name)[start[0]:start[0]+count[0], ..., start[n]:start[n]+count[n]] = var`\n \"\"\"\n tmp = self.get_var(name).copy()\n # sometimes we want to slice in 1 dimension, sometimes in more\n # always slice in arrays\n start = np.atleast_1d(start)\n count = np.atleast_1d(count)\n slices = [np.s_[i:(i+n)] for i,n in zip(start, count)]\n tmp[slices] = var\n self.set_var(name, tmp)", "def replace_rep(t:str) -> str:\n \"Replace repetitions at the character level in `t`.\"\n def _replace_rep(m:Collection[str]) -> str:\n c,cc = m.groups()\n return f' {TK_REP} {len(cc)+1} {c} '\n re_rep = re.compile(r'(\\S)(\\1{3,})')\n return re_rep.sub(_replace_rep, t)", "def addrs_for_name(self, n):\n \"\"\"\n Returns addresses that contain expressions that contain a variable named `n`.\n \"\"\"\n if n not in self._name_mapping:\n return\n\n self._mark_updated_mapping(self._name_mapping, n)\n\n to_discard = set()\n for e in self._name_mapping[n]:\n try:\n if n in self[e].object.variables: yield e\n else: to_discard.add(e)\n except KeyError:\n to_discard.add(e)\n self._name_mapping[n] -= to_discard", "def varvalu(self, varn=None):\n '''\n $foo\n $foo.bar\n $foo.bar()\n $foo[0]\n $foo.bar(10)\n '''\n\n self.ignore(whitespace)\n\n if varn is None:\n varn = self.varname()\n\n varv = s_ast.VarValue(kids=[varn])\n\n # handle derefs and calls...\n while self.more():\n\n if self.nextstr('.'):\n varv = self.varderef(varv)\n continue\n\n if self.nextstr('('):\n varv = self.varcall(varv)\n continue\n\n #if self.nextstr('['):\n #varv = self.varslice(varv)\n\n break\n\n return varv", "def _rebind_variables(self, new_inputs):\n \"\"\"\n Return self._expr with all variables rebound to the indices implied by\n new_inputs.\n \"\"\"\n expr = self._expr\n\n # If we have 11+ variables, some of our variable names may 
be\n # substrings of other variable names. For example, we might have x_1,\n # x_10, and x_100. By enumerating in reverse order, we ensure that\n # every variable name which is a substring of another variable name is\n # processed after the variable of which it is a substring. This\n # guarantees that the substitution of any given variable index only\n # ever affects exactly its own index. For example, if we have variables\n # with indices going up to 100, we will process all of the x_1xx names\n # before x_1x, which will be before x_1, so the substitution of x_1\n # will not affect x_1x, which will not affect x_1xx.\n for idx, input_ in reversed(list(enumerate(self.inputs))):\n old_varname = \"x_%d\" % idx\n # Temporarily rebind to x_temp_N so that we don't overwrite the\n # same value multiple times.\n temp_new_varname = \"x_temp_%d\" % new_inputs.index(input_)\n expr = expr.replace(old_varname, temp_new_varname)\n # Clear out the temp variables now that we've finished iteration.\n return expr.replace(\"_temp_\", \"_\")" ]
[ 0.7174105644226074, 0.706699550151825, 0.6893872618675232, 0.6843791604042053, 0.6755014657974243, 0.6703273057937622, 0.6679925918579102, 0.665018618106842, 0.6648368239402771, 0.6638390421867371, 0.6626368165016174, 0.6604301333427429 ]
returns a list of possible variable names that are not found in the original text.
def get_unique_variable_names(text, nb):
    '''returns a list of possible variable names that are not found in the original text.'''
    base_name = '__VAR_'
    var_names = []
    i = 0
    j = 0
    while j < nb:
        tentative_name = base_name + str(i)
        if text.count(tentative_name) == 0 and tentative_name not in ALL_NAMES:
            var_names.append(tentative_name)
            ALL_NAMES.append(tentative_name)
            j += 1
        i += 1
    return var_names
[ "def get_variables(text):\n \"\"\"Extracts variables that can be used in templating engines.\n\n Each variable is defined on a single line in the following way:\n\n ~ var: text\n\n The ~ must be at the start of a newline, followed by at least one\n space. var can be any sequence of characters that does not contain\n a \":\". text can be any sequence of characters.\n\n RETURNS:\n text -- str; text with all variable definitions removed\n variables -- dict; variable to value mappings\n \"\"\"\n variables = {var: value for var, value in re_vars.findall(text)}\n text = re_vars.sub('', text)\n return text, variables", "public String[] getExcluded()\n {\n return\n IteratorUtils.toArray(\n IteratorUtils.transformedIterator(\n excludedVars_.iterator(),\n VarNamePattern::toString),\n String.class);\n }", "private void extractVariableNames() {\n if (expression == null) {\n throw new IllegalArgumentException(\"The expression was null\");\n }\n\n for (final List<String> exp_list : JEXL_ENGINE.getVariables(expression)) {\n for (final String variable : exp_list) {\n names.add(variable);\n }\n }\n }", "private void checkForUnknownVariables(TokenList tokens) {\n TokenList.Token t = tokens.getFirst();\n while( t != null ) {\n if( t.getType() == Type.WORD )\n throw new ParseError(\"Unknown variable on right side. \"+t.getWord());\n t = t.next;\n }\n }", "def _varFind(basedir, text, vars, lookup_fatal, depth=0):\n ''' Searches for a variable in text and finds its replacement in vars\n\n The variables can have two formats;\n - simple, $ followed by alphanumerics and/or underscores\n - complex, ${ followed by alphanumerics, underscores, periods, braces and brackets, ended by a }\n\n Examples:\n - $variable: simple variable that will have vars['variable'] as its replacement\n - ${variable.complex}: complex variable that will have vars['variable']['complex'] as its replacement\n - $variable.complex: simple variable, identical to the first, .complex ignored\n\n Complex variables are broken into parts by separating on periods, except if enclosed in {}.\n ${variable.{fully.qualified.domain}} would be parsed as two parts, variable and fully.qualified.domain,\n whereas ${variable.fully.qualified.domain} would be parsed as four parts.\n\n Returns a dict(replacement=<value in vars>, start=<index into text where the variable stated>,\n end=<index into text where the variable ends>)\n or None if no variable could be found in text. If replacement is None, it should be replaced with the\n original data in the caller.\n '''\n\n start = text.find(\"$\")\n if start == -1:\n return None\n # $ as last character\n if start + 1 == len(text):\n return None\n # Escaped var\n if start > 0 and text[start - 1] == '\\\\':\n return {'replacement': '$', 'start': start - 1, 'end': start + 1}\n\n var_start = start + 1\n if text[var_start] == '{':\n is_complex = True\n brace_level = 1\n var_start += 1\n else:\n is_complex = False\n brace_level = 1\n # is_lookup is true for $FILE(...) 
and friends\n is_lookup = False\n lookup_plugin_name = None\n end = var_start\n # part_start is an index of where the current part started\n part_start = var_start\n space = vars\n while end < len(text) and (((is_lookup or is_complex) and brace_level > 0) or (not is_complex and not is_lookup)):\n if text[end].isalnum() or text[end] == '_':\n pass\n elif not is_complex and not is_lookup and text[end] == '(' and text[part_start:end].isupper():\n is_lookup = True\n lookup_plugin_name = text[part_start:end]\n part_start = end + 1\n elif is_lookup and text[end] == '(':\n brace_level += 1\n elif is_lookup and text[end] == ')':\n brace_level -= 1\n elif is_lookup:\n # lookups are allowed arbitrary contents\n pass\n elif is_complex and text[end] == '{':\n brace_level += 1\n elif is_complex and text[end] == '}':\n brace_level -= 1\n elif is_complex and text[end] in ('$', '[', ']'):\n pass\n elif is_complex and text[end] == '.':\n if brace_level == 1:\n space = _varFindLimitSpace(basedir, vars, space, text[part_start:end], lookup_fatal, depth)\n part_start = end + 1\n else:\n # This breaks out of the loop on non-variable name characters\n break\n end += 1\n var_end = end\n # Handle \"This has $ in it\"\n if var_end == part_start:\n return {'replacement': None, 'start': start, 'end': end}\n\n # Handle lookup plugins\n if is_lookup:\n # When basedir is None, handle lookup plugins later\n if basedir is None:\n return {'replacement': None, 'start': start, 'end': end}\n var_end -= 1\n \tfrom cirruscluster.ext.ansible import utils\n args = text[part_start:var_end]\n if lookup_plugin_name == 'LOOKUP':\n lookup_plugin_name, args = args.split(\",\", 1)\n args = args.strip()\n # args have to be templated\n args = varReplace(basedir, args, vars, depth=depth+1, expand_lists=True)\n instance = utils.plugins.lookup_loader.get(lookup_plugin_name.lower(), basedir=basedir)\n if instance is not None:\n try:\n replacement = instance.run(args, inject=vars)\n except errors.AnsibleError:\n if not lookup_fatal:\n replacement = None\n else:\n raise\n else:\n replacement = None\n return {'replacement': replacement, 'start': start, 'end': end}\n\n if is_complex:\n var_end -= 1\n if text[var_end] != '}' or brace_level != 0:\n return None\n space = _varFindLimitSpace(basedir, vars, space, text[part_start:var_end], lookup_fatal, depth)\n return {'replacement': space, 'start': start, 'end': end}", "def _var_names(var_names, data):\n \"\"\"Handle var_names input across arviz.\n\n Parameters\n ----------\n var_names: str, list, or None\n data : xarray.Dataset\n Posterior data in an xarray\n Returns\n -------\n var_name: list or None\n \"\"\"\n if var_names is not None:\n\n if isinstance(var_names, str):\n var_names = [var_names]\n\n if isinstance(data, (list, tuple)):\n all_vars = []\n for dataset in data:\n dataset_vars = list(dataset.data_vars)\n for var in dataset_vars:\n if var not in all_vars:\n all_vars.append(var)\n else:\n all_vars = list(data.data_vars)\n\n excluded_vars = [i[1:] for i in var_names if i.startswith(\"~\") and i not in all_vars]\n\n all_vars_tilde = [i for i in all_vars if i.startswith(\"~\")]\n\n if all_vars_tilde:\n warnings.warn(\n \"\"\"ArviZ treats '~' as a negation character for variable selection.\n Your model has variables names starting with '~', {0}. 
Please double check\n your results to ensure all variables are included\"\"\".format(\n \", \".join(all_vars_tilde)\n )\n )\n\n if excluded_vars:\n var_names = [i for i in all_vars if i not in excluded_vars]\n\n return var_names", "def extract_variables(href):\n \"\"\"Return a list of variable names used in a URI template.\"\"\"\n\n patterns = [re.sub(r'\\*|:\\d+', '', pattern)\n for pattern in re.findall(r'{[\\+#\\./;\\?&]?([^}]+)*}', href)]\n variables = []\n for pattern in patterns:\n for part in pattern.split(\",\"):\n if not part in variables:\n variables.append(part)\n\n return variables", "def _variable_components(self, variable):\n \"\"\"Get all components (sub-categories) of a variable\n\n For `variable='foo'`, return `['foo|bar']`, but don't include\n `'foo|bar|baz'`, which is a sub-sub-category\"\"\"\n var_list = pd.Series(self.data.variable.unique())\n return var_list[pattern_match(var_list, '{}|*'.format(variable), 0)]", "def optional_names(self):\n \"\"\"\n Get a list of the variables that are defined, but not required\n \"\"\"\n for name, var in self.items():\n if var.get('optional', False):\n yield name", "def input_variables(self, exclude_specials=True):\n \"\"\"\n Get all variables that have never been written to.\n\n :return: A list of variables that are never written to.\n \"\"\"\n\n def has_write_access(accesses):\n return any(acc for acc in accesses if acc.access_type == 'write')\n\n def has_read_access(accesses):\n return any(acc for acc in accesses if acc.access_type == 'read')\n\n input_variables = [ ]\n\n for variable, accesses in self._variable_accesses.items():\n if not has_write_access(accesses) and has_read_access(accesses):\n if not exclude_specials or not variable.category:\n input_variables.append(variable)\n\n return input_variables", "private Map<PyExpr, PyExpr> collectVarNameListAndToPyExprMap() {\n Map<PyExpr, PyExpr> nodePyVarToPyExprMap = new LinkedHashMap<>();\n for (Map.Entry<String, MsgSubstUnitNode> entry : msgNode.getVarNameToRepNodeMap().entrySet()) {\n MsgSubstUnitNode substUnitNode = entry.getValue();\n PyExpr substPyExpr = null;\n\n if (substUnitNode instanceof MsgPlaceholderNode) {\n SoyNode phInitialNode = ((AbstractParentSoyNode<?>) substUnitNode).getChild(0);\n\n if (phInitialNode instanceof PrintNode\n || phInitialNode instanceof CallNode\n || phInitialNode instanceof RawTextNode) {\n substPyExpr =\n PyExprUtils.concatPyExprs(genPyExprsVisitor.exec(phInitialNode)).toPyString();\n }\n\n // when the placeholder is generated by HTML tags\n if (phInitialNode instanceof MsgHtmlTagNode) {\n substPyExpr =\n PyExprUtils.concatPyExprs(\n genPyExprsVisitor.execOnChildren((ParentSoyNode<?>) phInitialNode))\n .toPyString();\n }\n } else if (substUnitNode instanceof MsgPluralNode) {\n // Translates {@link MsgPluralNode#pluralExpr} into a Python lookup expression.\n // Note that {@code pluralExpr} represents the soy expression of the {@code plural} attr,\n // i.e. 
the {@code $numDrafts} in {@code {plural $numDrafts}...{/plural}}.\n substPyExpr = translateToPyExprVisitor.exec(((MsgPluralNode) substUnitNode).getExpr());\n } else if (substUnitNode instanceof MsgSelectNode) {\n substPyExpr = translateToPyExprVisitor.exec(((MsgSelectNode) substUnitNode).getExpr());\n }\n\n if (substPyExpr != null) {\n nodePyVarToPyExprMap.put(new PyStringExpr(\"'\" + entry.getKey() + \"'\"), substPyExpr);\n }\n }\n\n return nodePyVarToPyExprMap;\n }", "def _postfix_varlist(self, line):\n \"\"\"Returns a dictionary of the global variable names (keys) and their local \n name for the lambda function.\n \"\"\"\n els = line.split()\n result = {}\n if len(els) >= 2:\n defvars = [v for v in els if \"=\" in v]\n varlist = []\n for dvar in defvars:\n gvar, lvar = dvar.split(\"=\")\n if lvar != \"\":\n result[lvar] = gvar\n return result" ]
[ 0.7655473351478577, 0.7517821192741394, 0.7387765645980835, 0.737966775894165, 0.7293704152107239, 0.7250773906707764, 0.7249011397361755, 0.722808301448822, 0.7225084900856018, 0.7214174270629883, 0.719914436340332, 0.7185519933700562 ]
Get a release by tag
def tag(self, tag):
        """Get a release by tag
        """
        url = '%s/tags/%s' % (self, tag)
        response = self.http.get(url, auth=self.auth)
        response.raise_for_status()
        return response.json()
[ "public static function get_release_by_tag(\n\t\t$project,\n\t\t$tag,\n\t\t$args = []\n\t) {\n\t\t$request_url = sprintf(\n\t\t\tself::API_ROOT . 'repos/%s/releases/tags/%s',\n\t\t\t$project,\n\t\t\t$tag\n\t\t);\n\n\t\t$args['per_page'] = 100;\n\n\t\tlist( $body, $headers ) = self::request( $request_url, $args );\n\n\t\treturn $body;\n\t}", "public function getReleaseByTagName(string $tag): array\n {\n return $this->getApi()->request($this->getApi()->sprintf('/repos/:owner/:repo/releases/tags/:tag',\n $this->getRepositories()->getOwner(), $this->getRepositories()->getRepo(), $tag));\n }", "func (s *RepositoriesService) GetReleaseByTag(ctx context.Context, owner, repo, tag string) (*RepositoryRelease, *Response, error) {\n\tu := fmt.Sprintf(\"repos/%s/%s/releases/tags/%s\", owner, repo, tag)\n\treturn s.getSingleRelease(ctx, u)\n}", "public function getByTag($user, $repo, $tag)\n\t{\n\t\t// Build the request path.\n\t\t$path = \"/repos/$user/$repo/releases/tags/$tag\";\n\n\t\t// Send the request.\n\t\treturn $this->processResponse($this->client->get($this->fetchUrl($path)));\n\t}", "def get_release(self, id):\n \"\"\"\n :calls: `GET /repos/:owner/:repo/releases/:id <https://developer.github.com/v3/repos/releases/#get-a-single-release>`_\n :param id: int (release id), str (tag name)\n :rtype: None or :class:`github.GitRelease.GitRelease`\n \"\"\"\n if isinstance(id, int):\n headers, data = self._requester.requestJsonAndCheck(\n \"GET\",\n self.url + \"/releases/\" + str(id)\n )\n return github.GitRelease.GitRelease(self._requester, headers, data, completed=True)\n elif isinstance(id, (str, unicode)):\n headers, data = self._requester.requestJsonAndCheck(\n \"GET\",\n self.url + \"/releases/tags/\" + id\n )\n return github.GitRelease.GitRelease(self._requester, headers, data, completed=True)", "def release_to_tag(self, release_id):\n \"\"\"\n Shortcut to translate a release identifier to a tag name.\n\n :param release_id: A :attr:`Release.identifier` value (a string).\n :returns: A tag name (a string).\n :raises: :exc:`~exceptions.TypeError` when :attr:`release_scheme` isn't\n 'tags'.\n \"\"\"\n self.ensure_release_scheme('tags')\n return self.releases[release_id].revision.tag", "public Release updateRelease(Object projectIdOrPath, String tagName, String releaseNotes) throws GitLabApiException {\n Form formData = new GitLabApiForm().withParam(\"description\", releaseNotes);\n Response response = put(Response.Status.OK, formData.asMap(),\n \"projects\", getProjectIdOrPath(projectIdOrPath), \"repository\", \"tags\", tagName, \"release\");\n return (response.readEntity(Release.class));\n }", "public Release createRelease(Object projectIdOrPath, String tagName, String releaseNotes) throws GitLabApiException {\n Form formData = new GitLabApiForm().withParam(\"description\", releaseNotes);\n Response response = post(Response.Status.CREATED, formData.asMap(),\n \"projects\", getProjectIdOrPath(projectIdOrPath), \"repository\", \"tags\", tagName, \"release\");\n return (response.readEntity(Release.class));\n }", "def find_package_by_tag(tag)\n if tag.has_ref?\n find_package(tag.repo, tag.ref)\n else\n find_package(tag.repo)\n end\n end", "async function getReleaseInfo(context, childTags) {\n const tagShas = [];\n\n const releasesBySha = await fetchAllReleases(context, release => {\n if (childTags.has(release.tag_name)) {\n // put in reverse order\n // later releases come first,\n // but we want to iterate beginning oldest releases first\n tagShas.unshift(release.target_commitish);\n // 
tagSha.push(release.target_commitish);\n }\n });\n\n return {releasesBySha, tagShas};\n}", "function tag() {\n var v = 'v' + pkg.version;\n var message = 'Release ' + v;\n\n return gulp.src([\"./package.json\", \"./bower.json\"])\n .pipe(git.commit(message))\n .pipe(git.tag(v, message))\n ;\n // .pipe(git.push(remote, branch, {args: '--tags'}))\n}", "function tagRelease(done) {\n fs.readFile(config.pkg, 'utf-8', (err, content) => {\n if (err) {\n done(err);\n return;\n }\n\n const pkg = JSON.parse(content);\n const version = pkg.version;\n git.tag(`v${version}`, `release: tag version ${version}`, done);\n });\n}" ]
[ 0.8034539818763733, 0.7926703095436096, 0.7689692378044128, 0.7525080442428589, 0.749724268913269, 0.7357184886932373, 0.7194510698318481, 0.7184587121009827, 0.716830849647522, 0.7133541107177734, 0.7089478373527527, 0.70732581615448 ]
Assets for a given release
def release_assets(self, release):
        """Assets for a given release
        """
        release = self.as_id(release)
        return self.get_list(url='%s/%s/assets' % (self, release))
[ "def asset(self, id):\n \"\"\"Returns a single Asset.\n\n :param int id: (required), id of the asset\n :returns: :class:`Asset <github3.repos.release.Asset>`\n \"\"\"\n data = None\n if int(id) > 0:\n url = self._build_url('releases', 'assets', str(id),\n base_url=self._api)\n data = self._json(self._get(url, headers=Release.CUSTOM_HEADERS),\n 200)\n return Asset(data, self) if data else None", "def get_assets(self):\n \"\"\"\n :calls: `GET /repos/:owner/:repo/releases/:release_id/assets <https://developer.github.com/v3/repos/releases/#list-assets-for-a-release>`_\n :rtype: :class:`github.PaginatedList.PaginatedList`\n \"\"\"\n return github.PaginatedList.PaginatedList(\n github.GitReleaseAsset.GitReleaseAsset,\n self._requester,\n self.url + \"/assets\",\n None\n )", "def iter_assets(self, number=-1, etag=None):\n \"\"\"Iterate over the assets available for this release.\n\n :param int number: (optional), Number of assets to return\n :param str etag: (optional), last ETag header sent\n :returns: generator of :class:`Asset <Asset>` objects\n \"\"\"\n url = self._build_url('assets', base_url=self._api)\n return self._iter(number, url, Asset, etag=etag)", "function uploadToSentry(projectSlug, orgSlug, releaseVersion, assets) {\n var releaseEndpoint = makeUrl(projectSlug, orgSlug);\n var uploadEndpoint = releaseEndpoint + releaseVersion + '/files/';\n\n createRelease(releaseEndpoint, releaseVersion)\n .then(function() {\n return Promise.all(assets.map(uploadFile.bind(null,\n uploadEndpoint,\n releaseVersion\n )));\n })\n .catch(function(e) {\n console.log('Release failed with error: ', e);\n });\n}", "def by_release(cls, session, package_name, version):\n \"\"\"\n Get release files for a given package\n name and for a given version.\n\n :param session: SQLAlchemy session\n :type session: :class:`sqlalchemy.Session`\n\n :param package_name: package name\n :type package_name: unicode\n\n :param version: version\n :type version: unicode\n\n :return: release files\n :rtype: generator of :class:`pyshop.models.ReleaseFile`\n \"\"\"\n return cls.find(session,\n join=(Release, Package),\n where=(Package.name == package_name,\n Release.version == version,\n ))", "public function getListAssets($user, $repo, $releaseId, $page = 0, $limit = 0)\n\t{\n\t\t// Build the request path.\n\t\t$path = '/repos/' . $user . '/' . $repo . '/releases/' . (int) $releaseId . 
'/assets';\n\n\t\t// Send the request.\n\t\treturn $this->processResponse($this->client->get($this->fetchUrl($path, $page, $limit)));\n\t}", "def upload_asset(self, path, label=\"\", content_type=\"\"):\n \"\"\"\n :calls: `POST https://<upload_url>/repos/:owner/:repo/releases/:release_id/assets?name=foo.zip <https://developer.github.com/v3/repos/releases/#upload-a-release-asset>`_\n :rtype: :class:`github.GitReleaseAsset.GitReleaseAsset`\n \"\"\"\n assert isinstance(path, (str, unicode)), path\n assert isinstance(label, (str, unicode)), label\n\n post_parameters = {\n \"name\": basename(path),\n \"label\": label\n }\n headers = {}\n if len(content_type) > 0:\n headers[\"Content-Type\"] = content_type\n resp_headers, data = self._requester.requestBlobAndCheck(\n \"POST\",\n self.upload_url.split(\"{?\")[0],\n parameters=post_parameters,\n headers=headers,\n input=path\n )\n return github.GitReleaseAsset.GitReleaseAsset(self._requester, resp_headers, data, completed=True)", "def release(self, release_id):\n \"\"\"\n A list of :class:`yarg.release.Release` objects for each file in a\n release.\n\n :param release_id: A pypi release id.\n\n >>> package = yarg.get('yarg')\n >>> last_release = yarg.releases[-1]\n >>> package.release(last_release)\n [<Release 0.1.0>, <Release 0.1.0>]\n \"\"\"\n if release_id not in self.release_ids:\n return None\n return [Release(release_id, r) for r in self._releases[release_id]]", "def asset(url=None):\n \"\"\"\n Asset helper\n Generates path to a static asset based on configuration base path and\n support for versioning. Will easily allow you to move your assets away to\n a CDN without changing templates. Versioning allows you to cache your asset\n changes forever by the webserver.\n\n :param url: string - relative path to asset\n :return: string - full versioned url\n \"\"\"\n\n # fallback to url_for('static') if assets path not configured\n url = url.lstrip('/')\n assets_path = app.config.get('ASSETS_PATH')\n if not assets_path:\n url_for = app.jinja_env.globals.get('url_for')\n url = url_for('static', filename=url)\n else:\n assets_path = assets_path.rstrip('/')\n url = assets_path + '/' + url\n\n version = app.config.get('ASSETS_VERSION')\n if not version:\n return url\n\n sign = '?'\n if sign in url:\n sign = '&'\n\n pattern = '{url}{sign}v{version}'\n return pattern.format(url=url, sign=sign, version=version)", "def show_release_file(root, request):\n \"\"\"\n Download a release file.\n Must be used with :func:`pyshop.helpers.download.renderer_factory`\n to download the release file.\n\n :return: download informations\n :rtype: dict\n \"\"\"\n settings = request.registry.settings\n whlify = asbool(settings.get('pyshop.mirror.wheelify', '0'))\n session = DBSession()\n\n f = ReleaseFile.by_id(session, int(request.matchdict['file_id']))\n whlify = whlify and f.package_type == 'sdist'\n\n filename = f.filename_whlified if whlify else f.filename\n url = f.url\n if url and url.startswith('http://pypi.python.org'):\n url = 'https' + url[4:]\n\n rv = {'url': url,\n 'filename': filename,\n 'original': f.filename,\n 'whlify': whlify\n }\n f.downloads += 1\n f.release.downloads += 1\n f.release.package.downloads += 1\n session.add(f.release.package)\n session.add(f.release)\n session.add(f)\n request.response.etag = f.md5_digest\n request.response.cache_control = 'max-age=31557600, public'\n request.response.date = datetime.datetime.utcnow()\n return rv", "function genAsset(name, type, compress, ct) {\n\t\t\t\tvar a = path.resolve(options.distAssetDir, 
name);\n\t\t\t\tcmd('git archive -o \"' + a + '\" --format=' + type\n\t\t\t\t\t\t+ (compress ? ' -' + options.distAssetCompressRatio : '') + ' HEAD:' + options.distDir);\n\t\t\t\tif (rbot.log.verboseEnabled) {\n\t\t\t\t\trbot.log.verbose('Created ' + a + ' (size: ' + fs.statSync(a).size + ')');\n\t\t\t\t}\n\t\t\t\tdistAssets.push({\n\t\t\t\t\tpath : a,\n\t\t\t\t\tname : name,\n\t\t\t\t\tcontentType : ct\n\t\t\t\t});\n\t\t\t\treturn a;\n\t\t\t}", "public function getReleaseAssets(string $id): array\n {\n return $this->getApi()->request($this->getApi()->sprintf('/repos/:owner/:repo/releases/:id/assets',\n $this->getRepositories()->getOwner(), $this->getRepositories()->getRepo(), $id));\n }" ]
[ 0.7340524792671204, 0.7329351902008057, 0.7163358926773071, 0.6988852024078369, 0.6982651948928833, 0.6958836317062378, 0.6937362551689148, 0.6861414313316345, 0.6811293959617615, 0.6800277829170227, 0.6789299249649048, 0.6773079633712769 ]
Upload a file to a release :param filename: filename to upload :param content_type: optional content type :return: json object from github
def upload(self, release, filename, content_type=None):
        """Upload a file to a release

        :param filename: filename to upload
        :param content_type: optional content type
        :return: json object from github
        """
        release = self.as_id(release)
        name = os.path.basename(filename)
        if not content_type:
            content_type, _ = mimetypes.guess_type(name)
        if not content_type:
            raise ValueError('content_type not known')
        inputs = {'name': name}
        url = '%s%s/%s/assets' % (self.uploads_url,
                                  urlsplit(self.api_url).path,
                                  release)
        info = os.stat(filename)
        size = info[stat.ST_SIZE]
        response = self.http.post(
            url, data=stream_upload(filename), auth=self.auth,
            params=inputs,
            headers={'content-type': content_type,
                     'content-length': str(size)})
        response.raise_for_status()
        return response.json()
[ "def upload_asset(self, content_type, name, asset):\n \"\"\"Upload an asset to this release.\n\n All parameters are required.\n\n :param str content_type: The content type of the asset. Wikipedia has\n a list of common media types\n :param str name: The name of the file\n :param asset: The file or bytes object to upload.\n :returns: :class:`Asset <Asset>`\n \"\"\"\n headers = Release.CUSTOM_HEADERS.copy()\n headers.update({'Content-Type': content_type})\n url = self.upload_urlt.expand({'name': name})\n r = self._post(url, data=asset, json=False, headers=headers,\n verify=False)\n if r.status_code in (201, 202):\n return Asset(r.json(), self)\n raise GitHubError(r)", "def upload(self, payload=None, content_type=None):\n \"\"\"\n Upload the archive at `path` with content type `content_type`\n returns (int): upload status code\n \"\"\"\n # platform - prefer the value passed in to func over config\n payload = payload or self.config.payload\n content_type = content_type or self.config.content_type\n\n if payload is None:\n raise ValueError('Specify a file to upload.')\n\n if not os.path.exists(payload):\n raise IOError('Cannot upload %s: File does not exist.' % payload)\n\n upload_results = client.upload(\n self.config, self.connection, payload, content_type)\n\n # return api response\n return upload_results", "def upload_asset(self, path, label=\"\", content_type=\"\"):\n \"\"\"\n :calls: `POST https://<upload_url>/repos/:owner/:repo/releases/:release_id/assets?name=foo.zip <https://developer.github.com/v3/repos/releases/#upload-a-release-asset>`_\n :rtype: :class:`github.GitReleaseAsset.GitReleaseAsset`\n \"\"\"\n assert isinstance(path, (str, unicode)), path\n assert isinstance(label, (str, unicode)), label\n\n post_parameters = {\n \"name\": basename(path),\n \"label\": label\n }\n headers = {}\n if len(content_type) > 0:\n headers[\"Content-Type\"] = content_type\n resp_headers, data = self._requester.requestBlobAndCheck(\n \"POST\",\n self.upload_url.split(\"{?\")[0],\n parameters=post_parameters,\n headers=headers,\n input=path\n )\n return github.GitReleaseAsset.GitReleaseAsset(self._requester, resp_headers, data, completed=True)", "def upload_file(self, metadata, filename, signer=None, sign_password=None,\n filetype='sdist', pyversion='source', keystore=None):\n \"\"\"\n Upload a release file to the index.\n\n :param metadata: A :class:`Metadata` instance defining at least a name\n and version number for the file to be uploaded.\n :param filename: The pathname of the file to be uploaded.\n :param signer: The identifier of the signer of the file.\n :param sign_password: The passphrase for the signer's\n private key used for signing.\n :param filetype: The type of the file being uploaded. This is the\n distutils command which produced that file, e.g.\n ``sdist`` or ``bdist_wheel``.\n :param pyversion: The version of Python which the release relates\n to. For code compatible with any Python, this would\n be ``source``, otherwise it would be e.g. ``3.2``.\n :param keystore: The path to a directory which contains the keys\n used in signing. 
If not specified, the instance's\n ``gpg_home`` attribute is used instead.\n :return: The HTTP response received from PyPI upon submission of the\n request.\n \"\"\"\n self.check_credentials()\n if not os.path.exists(filename):\n raise DistlibException('not found: %s' % filename)\n metadata.validate()\n d = metadata.todict()\n sig_file = None\n if signer:\n if not self.gpg:\n logger.warning('no signing program available - not signed')\n else:\n sig_file = self.sign_file(filename, signer, sign_password,\n keystore)\n with open(filename, 'rb') as f:\n file_data = f.read()\n md5_digest = hashlib.md5(file_data).hexdigest()\n sha256_digest = hashlib.sha256(file_data).hexdigest()\n d.update({\n ':action': 'file_upload',\n 'protcol_version': '1',\n 'filetype': filetype,\n 'pyversion': pyversion,\n 'md5_digest': md5_digest,\n 'sha256_digest': sha256_digest,\n })\n files = [('content', os.path.basename(filename), file_data)]\n if sig_file:\n with open(sig_file, 'rb') as f:\n sig_data = f.read()\n files.append(('gpg_signature', os.path.basename(sig_file),\n sig_data))\n shutil.rmtree(os.path.dirname(sig_file))\n request = self.encode_request(d.items(), files)\n return self.send_request(request)", "def send_file(request, filename, content_type='image/jpeg'):\n \"\"\" \n Send a file through Django without loading the whole file into \n memory at once. The FileWrapper will turn the file object into an \n iterator for chunks of 8KB. \n \"\"\"\n wrapper = FixedFileWrapper(file(filename, 'rb'))\n response = HttpResponse(wrapper, content_type=content_type)\n response['Content-Length'] = os.path.getsize(filename)\n return response", "def upload(self, content, content_type, filename=None):\n \"\"\" Upload content to the home server and recieve a MXC url.\n\n Args:\n content (bytes): The data of the content.\n content_type (str): The mimetype of the content.\n filename (str): Optional. Filename of the content.\n\n Raises:\n MatrixUnexpectedResponse: If the homeserver gave a strange response\n MatrixRequestError: If the upload failed for some reason.\n \"\"\"\n try:\n response = self.api.media_upload(content, content_type, filename)\n if \"content_uri\" in response:\n return response[\"content_uri\"]\n else:\n raise MatrixUnexpectedResponse(\n \"The upload was successful, but content_uri wasn't found.\"\n )\n except MatrixRequestError as e:\n raise MatrixRequestError(\n code=e.code,\n content=\"Upload failed: %s\" % e\n )", "def upload(self, filepath, filename=None):\n \"\"\"Upload content.\n\n :param filepath: path to the file that should be chunked and uploaded\n :param filename: name of the file on the server, defaults to the\n last part of the ``filepath`` if not set\n :returns: The server's response, with all JSON decoded.\n :raises: ``requests.exceptions.HTTPError`` If the server responds with\n an HTTP 4XX or 5XX message.\n :raises nailgun.entities.APIResponseError: If the response has a status\n other than \"success\".\n\n .. _POST a Multipart-Encoded File:\n http://docs.python-requests.org/en/latest/user/quickstart/#post-a-multipart-encoded-file\n .. 
_POST Multiple Multipart-Encoded Files:\n http://docs.python-requests.org/en/latest/user/advanced/#post-multiple-multipart-encoded-files\n\n \"\"\"\n if not filename:\n filename = os.path.basename(filepath)\n\n content_upload = self.create()\n\n try:\n offset = 0\n content_chunk_size = 2 * 1024 * 1024\n\n with open(filepath, 'rb') as contentfile:\n chunk = contentfile.read(content_chunk_size)\n while len(chunk) > 0:\n data = {'offset': offset,\n 'content': chunk}\n content_upload.update(data)\n\n offset += len(chunk)\n chunk = contentfile.read(content_chunk_size)\n\n size = 0\n checksum = hashlib.sha256()\n with open(filepath, 'rb') as contentfile:\n contents = contentfile.read()\n size = len(contents)\n checksum.update(contents)\n\n uploads = [{'id': content_upload.upload_id, 'name': filename,\n 'size': size, 'checksum': checksum.hexdigest()}]\n # pylint:disable=no-member\n json = self.repository.import_uploads(uploads)\n finally:\n content_upload.delete()\n\n return json", "def upload_file(self, filename, file_type=FILE_TYPE_FREESURFER_DIRECTORY):\n \"\"\"Create an anatomy object on local disk from the given file.\n Currently, only Freesurfer anatomy directories are supported. Expects a\n tar file.\n\n Parameters\n ----------\n filename : string\n Name of the (uploaded) file\n file_type : string\n File type (currently expects FILE_TYPE_FREESURFER_DIRECTORY)\n\n Returns\n -------\n SubjectHandle\n Handle for created subject in database\n \"\"\"\n # We currently only support one file type (i.e., FREESURFER_DIRECTORY).\n if file_type != FILE_TYPE_FREESURFER_DIRECTORY:\n raise ValueError('Unsupported file type: ' + file_type)\n return self.upload_freesurfer_archive(filename)", "def upload(self, login, package_name, release, basename, fd, distribution_type,\n description='', md5=None, size=None, dependencies=None, attrs=None, channels=('main',), callback=None):\n '''\n Upload a new distribution to a package release.\n\n :param login: the login of the package owner\n :param package_name: the name of the package\n :param version: the version string of the release\n :param basename: the basename of the distribution to download\n :param fd: a file like object to upload\n :param distribution_type: pypi or conda or ipynb, etc\n :param description: (optional) a short description about the file\n :param attrs: any extra attributes about the file (eg. 
build=1, pyversion='2.7', os='osx')\n\n '''\n url = '%s/stage/%s/%s/%s/%s' % (self.domain, login, package_name, release, quote(basename))\n if attrs is None:\n attrs = {}\n if not isinstance(attrs, dict):\n raise TypeError('argument attrs must be a dictionary')\n\n payload = dict(distribution_type=distribution_type, description=description, attrs=attrs,\n dependencies=dependencies, channels=channels)\n\n data, headers = jencode(payload)\n res = self.session.post(url, data=data, headers=headers)\n self._check_response(res)\n obj = res.json()\n\n s3url = obj['post_url']\n s3data = obj['form_data']\n\n if md5 is None:\n _hexmd5, b64md5, size = compute_hash(fd, size=size)\n elif size is None:\n spos = fd.tell()\n fd.seek(0, os.SEEK_END)\n size = fd.tell() - spos\n fd.seek(spos)\n\n s3data['Content-Length'] = size\n s3data['Content-MD5'] = b64md5\n\n data_stream, headers = stream_multipart(s3data, files={'file':(basename, fd)},\n callback=callback)\n\n request_method = self.session if s3url.startswith(self.domain) else requests\n s3res = request_method.post(\n s3url, data=data_stream,\n verify=self.session.verify, timeout=10 * 60 * 60,\n headers=headers\n )\n\n if s3res.status_code != 201:\n logger.info(s3res.text)\n logger.info('')\n logger.info('')\n raise errors.BinstarError('Error uploading package', s3res.status_code)\n\n url = '%s/commit/%s/%s/%s/%s' % (self.domain, login, package_name, release, quote(basename))\n payload = dict(dist_id=obj['dist_id'])\n data, headers = jencode(payload)\n res = self.session.post(url, data=data, headers=headers)\n self._check_response(res)\n\n return res.json()", "def upload_file(self, container, src_file_path, dst_name=None, put=True,\n content_type=None):\n \"\"\"Upload a single file.\"\"\"\n if not os.path.exists(src_file_path):\n raise RuntimeError('file not found: ' + src_file_path)\n if not dst_name:\n dst_name = os.path.basename(src_file_path)\n if not content_type:\n content_type = \"application/octet.stream\"\n headers = dict(self._base_headers)\n if content_type:\n headers[\"content-length\"] = content_type\n else:\n headers[\"content-length\"] = \"application/octet.stream\"\n headers[\"content-length\"] = str(os.path.getsize(src_file_path))\n headers['content-disposition'] = 'attachment; filename=' + dst_name\n if put:\n method = 'PUT'\n url = self.make_url(container, dst_name, None)\n else:\n method = 'POST'\n url = self.make_url(container, None, None)\n with open(src_file_path, 'rb') as up_file:\n try:\n rsp = requests.request(method, url, headers=headers,\n data=up_file, timeout=self._timeout)\n except requests.exceptions.ConnectionError as e:\n RestHttp._raise_conn_error(e)\n\n return self._handle_response(rsp)", "def upload(self, data, callback=None, content_type=None,\n size=None):\n '''\n Upload a multi-part file for content to ingest. 
Returns a\n temporary upload id that can be used as a datstream location.\n\n :param data: content string, file-like object, or iterable with\n content to be uploaded\n :param callback: optional callback method to monitor the upload;\n see :mod:`requests-toolbelt` documentation for more\n details: https://toolbelt.readthedocs.org/en/latest/user.html#uploading-data\n :param content_type: optional content type of the data\n :param size: optional size of the data; required when using an\n iterable for the data\n\n :returns: upload id on success\n '''\n url = 'upload'\n # fedora only expects content uploaded as multipart file;\n # make string content into a file-like object so requests.post\n # sends it the way Fedora expects.\n # NOTE: checking for both python 2.x next method and\n # python 3.x __next__ to test if data is iteraable\n if not hasattr(data, 'read') and \\\n not (hasattr(data, '__next__') or hasattr(data, 'next')):\n data = six.BytesIO(force_bytes(data))\n\n # if data is an iterable, wrap in a readable iterator that\n # requests-toolbelt can read data from\n elif not hasattr(data, 'read') and \\\n (hasattr(data, '__next__') or hasattr(data, 'next')):\n if size is None:\n raise Exception('Cannot upload iterable with unknown size')\n data = ReadableIterator(data, size)\n\n # use requests-toolbelt multipart encoder to avoid reading\n # the full content of large files into memory\n menc = MultipartEncoder(fields={'file': ('file', data, content_type)})\n\n if callback is not None:\n menc = MultipartEncoderMonitor(menc, callback)\n\n headers = {'Content-Type': menc.content_type}\n\n if size:\n # latest version of requests requires str or bytes, not int\n if not isinstance(size, six.string_types):\n size = str(size)\n headers['Content-Length'] = size\n\n try:\n response = self.post(url, data=menc, headers=headers)\n except OverflowError:\n # Python __len__ uses integer so it is limited to system maxint,\n # and requests and requests-toolbelt use len() throughout.\n # This results in an overflow error when trying to upload a file\n # larger than system maxint (2GB on 32-bit OSes).\n # See http://bugs.python.org/issue12159\n msg = 'upload content larger than system maxint (32-bit OS limitation)'\n logger.error('OverflowError: %s', msg)\n raise OverflowError(msg)\n\n if response.status_code == requests.codes.accepted:\n return response.text.strip()", "def upload_from_filename(\n self, filename, content_type=None, client=None, predefined_acl=None\n ):\n \"\"\"Upload this blob's contents from the content of a named file.\n\n The content type of the upload will be determined in order\n of precedence:\n\n - The value passed in to this method (if not :data:`None`)\n - The value stored on the current blob\n - The value given by ``mimetypes.guess_type``\n - The default value ('application/octet-stream')\n\n .. note::\n The effect of uploading to an existing blob depends on the\n \"versioning\" and \"lifecycle\" policies defined on the blob's\n bucket. 
In the absence of those policies, upload will\n overwrite any existing contents.\n\n See the `object versioning\n <https://cloud.google.com/storage/docs/object-versioning>`_ and\n `lifecycle <https://cloud.google.com/storage/docs/lifecycle>`_\n API documents for details.\n\n If :attr:`user_project` is set on the bucket, bills the API request\n to that project.\n\n :type filename: str\n :param filename: The path to the file.\n\n :type content_type: str\n :param content_type: Optional type of content being uploaded.\n\n :type client: :class:`~google.cloud.storage.client.Client`\n :param client: (Optional) The client to use. If not passed, falls back\n to the ``client`` stored on the blob's bucket.\n\n :type predefined_acl: str\n :param predefined_acl: (Optional) predefined access control list\n \"\"\"\n content_type = self._get_content_type(content_type, filename=filename)\n\n with open(filename, \"rb\") as file_obj:\n total_bytes = os.fstat(file_obj.fileno()).st_size\n self.upload_from_file(\n file_obj,\n content_type=content_type,\n client=client,\n size=total_bytes,\n predefined_acl=predefined_acl,\n )" ]
[ 0.802365243434906, 0.7317085266113281, 0.7304291129112244, 0.729613184928894, 0.7226841449737549, 0.7197074890136719, 0.7160930037498474, 0.7031177282333374, 0.7029310464859009, 0.6901597380638123, 0.6828121542930603, 0.6824163794517517 ]
Validate ``tag_name`` with the latest tag from github If ``tag_name`` is a valid candidate, return the latest tag from github
def validate_tag(self, tag_name, prefix=None):
        """Validate ``tag_name`` with the latest tag from github

        If ``tag_name`` is a valid candidate, return the latest tag
        from github
        """
        new_version = semantic_version(tag_name)
        current = self.latest()
        if current:
            tag_name = current['tag_name']
            if prefix:
                tag_name = tag_name[len(prefix):]
            tag_name = semantic_version(tag_name)
            if tag_name >= new_version:
                what = 'equal to' if tag_name == new_version else 'older than'
                raise GithubException(
                    'Your local version "%s" is %s '
                    'the current github version "%s".\n'
                    'Bump the local version to '
                    'continue.' % (
                        str(new_version), what, str(tag_name)
                    )
                )
        return current
[ "def get_version_from_tag(tag_name: str) -> Optional[str]:\n \"\"\"Get git hash from tag\n\n :param tag_name: Name of the git tag (i.e. 'v1.0.0')\n :return: sha1 hash of the commit\n \"\"\"\n\n debug('get_version_from_tag({})'.format(tag_name))\n check_repo()\n for i in repo.tags:\n if i.name == tag_name:\n return i.commit.hexsha\n return None", "def get_tag_hash(self, tag_name):\n \"\"\"Fetch the commit hash that was tagged with ``tag_name``.\n\n Args:\n tag_name (str): the name of the tag\n\n Returns:\n str: the commit hash linked by the tag\n\n \"\"\"\n tag_object = get_single_item_from_sequence(\n sequence=self._github_repository.tags(),\n condition=lambda tag: tag.name == tag_name,\n no_item_error_message='No tag \"{}\" exist'.format(tag_name),\n too_many_item_error_message='Too many tags \"{}\" found'.format(tag_name),\n )\n\n return tag_object.commit.sha", "def find_tag_by_name(repo, tag_name, safe=True):\n \"\"\"Find tag by name in a github Repository\n\n Parameters\n ----------\n repo: :class:`github.Repository` instance\n\n tag_name: str\n Short name of tag (not a fully qualified ref).\n\n safe: bool, optional\n Defaults to `True`. When `True`, `None` is returned on failure. When\n `False`, an exception will be raised upon failure.\n\n Returns\n -------\n gh : :class:`github.GitRef` instance or `None`\n\n Raises\n ------\n github.UnknownObjectException\n If git tag name does not exist in repo.\n \"\"\"\n tagfmt = 'tags/{ref}'.format(ref=tag_name)\n\n try:\n ref = repo.get_git_ref(tagfmt)\n if ref and ref.ref:\n return ref\n except github.UnknownObjectException:\n if not safe:\n raise\n\n return None", "def validatetag(context):\n \"Check to make sure that a tag exists for the current HEAD and it looks like a valid version number\"\n # Validate that a Git tag exists for the current commit HEAD\n result = context.run(\"git describe --exact-match --tags $(git log -n1 --pretty='%h')\")\n tag = result.stdout.rstrip()\n\n # Validate that the Git tag appears to be a valid version number\n ver_regex = re.compile('(\\d+)\\.(\\d+)\\.(\\d+)')\n match = ver_regex.fullmatch(tag)\n if match is None:\n print('Tag {!r} does not appear to be a valid version number'.format(tag))\n sys.exit(-1)\n else:\n print('Tag {!r} appears to be a valid version number'.format(tag))", "def tag_(name, repository, tag='latest', force=False):\n '''\n .. versionchanged:: 2018.3.0\n The repository and tag must now be passed separately using the\n ``repository`` and ``tag`` arguments, rather than together in the (now\n deprecated) ``image`` argument.\n\n Tag an image into a repository and return ``True``. If the tag was\n unsuccessful, an error will be raised.\n\n name\n ID of image\n\n repository\n Repository name for the image to be built\n\n .. versionadded:: 2018.3.0\n\n tag : latest\n Tag name for the image to be built\n\n .. versionadded:: 2018.3.0\n\n image\n .. deprecated:: 2018.3.0\n Use both ``repository`` and ``tag`` instead\n\n force : False\n Force apply tag\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt myminion docker.tag 0123456789ab myrepo/mycontainer mytag\n '''\n if not isinstance(repository, six.string_types):\n repository = six.text_type(repository)\n if not isinstance(tag, six.string_types):\n tag = six.text_type(tag)\n\n image_id = inspect_image(name)['Id']\n response = _client_wrapper('tag',\n image_id,\n repository=repository,\n tag=tag,\n force=force)\n _clear_context()\n # Only non-error return case is a True return, so just return the response\n return response", "async function verifyTagName(tagName, execaOpts) {\n try {\n return (await execa('git', ['check-ref-format', `refs/tags/${tagName}`], execaOpts)).code === 0;\n } catch (error) {\n debug(error);\n }\n}", "def get_git_tag(hash_, git_path='git'):\n \"\"\"Returns the name of the current git tag\n \"\"\"\n tag, status = call((git_path, 'describe', '--exact-match',\n '--tags', hash_), returncode=True)\n if status == 0:\n return tag\n else:\n return None", "def tag(self):\n '''\n :param tag:\n Checks out specified tag. If set to ``None`` the latest\n tag will be checked out\n :returns:\n A list of all tags, sorted as version numbers, ascending\n '''\n\n tag = self.m(\n 'getting git tags',\n cmdd=dict(\n cmd='git tag -l --sort=\"version:refname\"',\n cwd=self.local\n ),\n verbose=False,\n )\n if tag.get('returncode') == 0:\n return tag.get('stdout')", "def github_tags_newer(github_repo, versions_file, update_majors):\n \"\"\"\n Get new tags from a github repository. Cannot use github API because it\n doesn't support chronological ordering of tags.\n @param github_repo: the github repository, e.g. 'drupal/drupal/'.\n @param versions_file: the file path where the versions database can be found.\n @param update_majors: major versions to update. If you want to update\n the 6.x and 7.x branch, you would supply a list which would look like\n ['6', '7']\n @return: a boolean value indicating whether an update is needed\n @raise MissingMajorException: A new version from a newer major branch is\n exists, but will not be downloaded due to it not being in majors.\n \"\"\"\n github_repo = _github_normalize(github_repo)\n vf = VersionsFile(versions_file)\n current_highest = vf.highest_version_major(update_majors)\n\n tags_url = '%s%stags' % (GH, github_repo)\n resp = requests.get(tags_url)\n bs = BeautifulSoup(resp.text, 'lxml')\n\n gh_versions = []\n for header in bs.find_all('h4'):\n tag = header.findChild('a')\n if not tag:\n continue # Ignore learn more header.\n gh_versions.append(tag.text.strip())\n\n newer = _newer_tags_get(current_highest, gh_versions)\n\n return len(newer) > 0", "def check_existing_git_tag(repo, t_tag, **kwargs):\n \"\"\"\n Check for a pre-existng tag in the github repo.\n\n Parameters\n ----------\n repo : github.Repository.Repository\n repo to inspect for an existing tagsdf\n t_tag: codekit.pygithub.TargetTag\n dict repesenting a target git tag\n\n Returns\n -------\n insync : `bool`\n True if tag exists and is in sync. 
False if tag does not exist.\n\n Raises\n ------\n GitTagExistsError\n If tag exists but is not in sync.\n \"\"\"\n\n assert isinstance(repo, github.Repository.Repository), type(repo)\n assert isinstance(t_tag, codekit.pygithub.TargetTag), type(t_tag)\n\n debug(\"looking for existing tag: {tag} in repo: {repo}\".format(\n repo=repo.full_name,\n tag=t_tag.name,\n ))\n\n # find ref/tag by name\n e_ref = pygithub.find_tag_by_name(repo, t_tag.name)\n if not e_ref:\n debug(\" not found: {tag}\".format(tag=t_tag.name))\n return False\n\n # find tag object pointed to by the ref\n try:\n e_tag = repo.get_git_tag(e_ref.object.sha)\n except github.RateLimitExceededException:\n raise\n except github.GithubException as e:\n msg = \"error getting tag: {tag} [{sha}]\".format(\n tag=e_tag.tag,\n sha=e_tag.sha,\n )\n raise pygithub.CaughtRepositoryError(repo, e, msg) from None\n\n debug(\" found existing: {tag} [{sha}]\".format(\n tag=e_tag.tag,\n sha=e_tag.sha,\n ))\n\n if cmp_existing_git_tag(t_tag, e_tag, **kwargs):\n return True\n\n yikes = GitTagExistsError(textwrap.dedent(\"\"\"\\\n tag: {tag} already exists in repo: {repo}\n with conflicting values:\n existing:\n sha: {e_sha}\n message: {e_message}\n tagger: {e_tagger}\n target:\n sha: {t_sha}\n message: {t_message}\n tagger: {t_tagger}\\\n \"\"\").format(\n tag=t_tag.name,\n repo=repo.full_name,\n e_sha=e_tag.object.sha,\n e_message=e_tag.message,\n e_tagger=e_tag.tagger,\n t_sha=t_tag.sha,\n t_message=t_tag.message,\n t_tagger=t_tag.tagger,\n ))\n\n raise yikes", "def git_tag(tag_name, push=False):\n \"\"\"\n Tag the repo using an annotated tag.\n \"\"\"\n with chdir(get_root()):\n result = run_command('git tag -a {} -m \"{}\"'.format(tag_name, tag_name), capture=True)\n\n if push:\n if result.code != 0:\n return result\n return run_command('git push origin {}'.format(tag_name), capture=True)\n\n return result", "def get_tag(self, tag_name, **kwargs):\n \"\"\"get a tag by name\n\n Args:\n tag_name (string): name of tag to get\n\n Returns:\n dictionary of the response\n\n \"\"\"\n return self._get_object_by_name(self._TAG_ENDPOINT_SUFFIX,\n tag_name,\n **kwargs)" ]
[ 0.724653959274292, 0.7182732820510864, 0.7106755375862122, 0.7020395398139954, 0.6872637867927551, 0.680760383605957, 0.6798170208930969, 0.6793562769889832, 0.6791114211082458, 0.6761107444763184, 0.6759423613548279, 0.6710719466209412 ]
Return the full reddit URL associated with the usernote. Arguments: subreddit: the subreddit name for the note (PRAW Subreddit object)
def full_url(self):
        """Return the full reddit URL associated with the usernote.

        Arguments:
            subreddit: the subreddit name for the note (PRAW Subreddit object)
        """
        if self.link == '':
            return None
        else:
            return Note._expand_url(self.link, self.subreddit)
[ "def _expand_url(short_link, subreddit=None):\n \"\"\"Convert a usernote's URL short-hand into a full reddit URL.\n\n Arguments:\n subreddit: the subreddit the URL is for (PRAW Subreddit object or str)\n short_link: the compressed link from a usernote (str)\n\n Returns a String of the full URL.\n \"\"\"\n # Some URL structures for notes\n message_scheme = 'https://reddit.com/message/messages/{}'\n comment_scheme = 'https://reddit.com/r/{}/comments/{}/-/{}'\n post_scheme = 'https://reddit.com/r/{}/comments/{}/'\n\n if short_link == '':\n return None\n else:\n parts = short_link.split(',')\n\n if parts[0] == 'm':\n return message_scheme.format(parts[1])\n if parts[0] == 'l' and subreddit:\n if len(parts) > 2:\n return comment_scheme.format(subreddit, parts[1], parts[2])\n else:\n return post_scheme.format(subreddit, parts[1])\n elif not subreddit:\n raise ValueError('Subreddit name must be provided')\n else:\n return None", "def get_subreddit(self, subreddit_name, *args, **kwargs):\n \"\"\"Return a Subreddit object for the subreddit_name specified.\n\n The additional parameters are passed directly into the\n :class:`.Subreddit` constructor.\n\n \"\"\"\n sr_name_lower = subreddit_name.lower()\n if sr_name_lower == 'random':\n return self.get_random_subreddit()\n elif sr_name_lower == 'randnsfw':\n return self.get_random_subreddit(nsfw=True)\n return objects.Subreddit(self, subreddit_name, *args, **kwargs)", "def add_subreddit(self, subreddit, _delete=False, *args, **kwargs):\n \"\"\"Add a subreddit to the multireddit.\n\n :param subreddit: The subreddit name or Subreddit object to add\n\n The additional parameters are passed directly into\n :meth:`~praw.__init__.BaseReddit.request_json`.\n\n \"\"\"\n subreddit = six.text_type(subreddit)\n url = self.reddit_session.config['multireddit_add'].format(\n user=self._author, multi=self.name, subreddit=subreddit)\n method = 'DELETE' if _delete else 'PUT'\n # The modhash isn't necessary for OAuth requests\n if not self.reddit_session._use_oauth:\n self.reddit_session.http.headers['x-modhash'] = \\\n self.reddit_session.modhash\n data = {'model': dumps({'name': subreddit})}\n try:\n self.reddit_session.request(url, data=data, method=method,\n *args, **kwargs)\n finally:\n # The modhash isn't necessary for OAuth requests\n if not self.reddit_session._use_oauth:\n del self.reddit_session.http.headers['x-modhash']", "def get_my_subreddits(self, *args, **kwargs):\n \"\"\"Return a get_content generator of subreddits.\n\n The subreddits generated are those that hat the session's user is\n subscribed to.\n\n The additional parameters are passed directly into\n :meth:`.get_content`. 
Note: the `url` parameter cannot be altered.\n\n \"\"\"\n return self.get_content(self.config['my_subreddits'], *args, **kwargs)", "def _netid_subscription_url(netid, subscription_codes):\n \"\"\"\n Return UWNetId resource for provided netid and subscription\n code or code list\n \"\"\"\n return \"{0}/{1}/subscription/{2}\".format(\n url_base(), netid,\n (','.join([str(n) for n in subscription_codes])\n if isinstance(subscription_codes, (list, tuple))\n else subscription_codes))", "def open_subreddit_page(self, name):\n \"\"\"\n Open an instance of the subreddit page for the given subreddit name.\n \"\"\"\n from .subreddit_page import SubredditPage\n\n with self.term.loader('Loading subreddit'):\n page = SubredditPage(self.reddit, self.term, self.config,\n self.oauth, name)\n if not self.term.loader.exception:\n return page", "def get_traffic(self, subreddit):\n \"\"\"Return the json dictionary containing traffic stats for a subreddit.\n\n :param subreddit: The subreddit whose /about/traffic page we will\n collect.\n\n \"\"\"\n url = self.config['subreddit_traffic'].format(\n subreddit=six.text_type(subreddit))\n return self.request_json(url)", "def remove_subreddit(self, subreddit, *args, **kwargs):\n \"\"\"Remove a subreddit from the user's multireddit.\"\"\"\n return self.add_subreddit(subreddit, True, *args, **kwargs)", "def quick_url(comment):\n \"\"\"Return the URL for the comment without fetching its submission.\"\"\"\n def to_id(fullname):\n return fullname.split('_', 1)[1]\n return ('http://www.reddit.com/r/{}/comments/{}/_/{}?context=3'\n .format(comment.subreddit.display_name, to_id(comment.link_id),\n comment.id))", "def get_comments(self, subreddit, gilded_only=False, *args, **kwargs):\n \"\"\"Return a get_content generator for comments in the given subreddit.\n\n :param gilded_only: If True only return gilded comments.\n\n The additional parameters are passed directly into\n :meth:`.get_content`. Note: the `url` parameter cannot be altered.\n\n \"\"\"\n key = 'sub_comments_gilded' if gilded_only else 'subreddit_comments'\n url = self.config[key].format(subreddit=six.text_type(subreddit))\n return self.get_content(url, *args, **kwargs)", "def submit(self, subreddit, title, text=None, url=None, captcha=None,\n save=None, send_replies=None, resubmit=None, **kwargs):\n \"\"\"Submit a new link to the given subreddit.\n\n Accepts either a Subreddit object or a str containing the subreddit's\n display name.\n\n :param resubmit: If True, submit the link even if it has already been\n submitted.\n :param save: If True the new Submission will be saved after creation.\n :param send_replies: If True, inbox replies will be received when\n people comment on the submission. If set to None, the default of\n True for text posts and False for link posts will be used.\n\n :returns: The newly created Submission object if the reddit instance\n can access it. Otherwise, return the url to the submission.\n\n This function may result in a captcha challenge. PRAW will\n automatically prompt you for a response. 
See :ref:`handling-captchas`\n if you want to manually handle captchas.\n\n \"\"\"\n if isinstance(text, six.string_types) == bool(url):\n raise TypeError('One (and only one) of text or url is required!')\n data = {'sr': six.text_type(subreddit),\n 'title': title}\n if text or text == '':\n data['kind'] = 'self'\n data['text'] = text\n else:\n data['kind'] = 'link'\n data['url'] = url\n if captcha:\n data.update(captcha)\n if resubmit is not None:\n data['resubmit'] = resubmit\n if save is not None:\n data['save'] = save\n if send_replies is not None:\n data['sendreplies'] = send_replies\n result = self.request_json(self.config['submit'], data=data,\n retry_on_error=False)\n url = result['data']['url']\n # Clear the OAuth setting when attempting to fetch the submission\n if self._use_oauth:\n self._use_oauth = False\n if url.startswith(self.config.oauth_url):\n url = self.config.api_url + url[len(self.config.oauth_url):]\n try:\n return self.get_submission(url)\n except errors.Forbidden:\n # While the user may be able to submit to a subreddit,\n # that does not guarantee they have read access.\n return url", "def create_subreddit(self, name, title, description='', language='en',\n subreddit_type='public', content_options='any',\n over_18=False, default_set=True, show_media=False,\n domain='', wikimode='disabled', captcha=None,\n **kwargs):\n \"\"\"Create a new subreddit.\n\n :returns: The json response from the server.\n\n This function may result in a captcha challenge. PRAW will\n automatically prompt you for a response. See :ref:`handling-captchas`\n if you want to manually handle captchas.\n\n \"\"\"\n data = {'name': name,\n 'title': title,\n 'description': description,\n 'lang': language,\n 'type': subreddit_type,\n 'link_type': content_options,\n 'over_18': 'on' if over_18 else 'off',\n 'allow_top': 'on' if default_set else 'off',\n 'show_media': 'on' if show_media else 'off',\n 'wikimode': wikimode,\n 'domain': domain}\n if captcha:\n data.update(captcha)\n return self.request_json(self.config['site_admin'], data=data)" ]
[ 0.7570649981498718, 0.730120837688446, 0.690046489238739, 0.6770223379135132, 0.6761775612831116, 0.6745516061782837, 0.669142484664917, 0.6685365438461304, 0.6644787788391113, 0.6626833081245422, 0.6609413623809814, 0.6605386734008789 ]
Convert a reddit URL into the short-hand used by usernotes. Arguments: link: a link to a comment, submission, or message (str) Returns a String of the shorthand URL
def _compress_url(link):
    """Convert a reddit URL into the short-hand used by usernotes.

    Arguments:
        link: a link to a comment, submission, or message (str)

    Returns a String of the shorthand URL
    """
    comment_re = re.compile(r'/comments/([A-Za-z\d]{2,})(?:/[^\s]+/([A-Za-z\d]+))?')
    message_re = re.compile(r'/message/messages/([A-Za-z\d]+)')
    matches = re.findall(comment_re, link)

    if len(matches) == 0:
        matches = re.findall(message_re, link)

        if len(matches) == 0:
            return None
        else:
            return 'm,' + matches[0]
    else:
        if matches[0][1] == '':
            return 'l,' + matches[0][0]
        else:
            return 'l,' + matches[0][0] + ',' + matches[0][1]
[ "def _expand_url(short_link, subreddit=None):\n \"\"\"Convert a usernote's URL short-hand into a full reddit URL.\n\n Arguments:\n subreddit: the subreddit the URL is for (PRAW Subreddit object or str)\n short_link: the compressed link from a usernote (str)\n\n Returns a String of the full URL.\n \"\"\"\n # Some URL structures for notes\n message_scheme = 'https://reddit.com/message/messages/{}'\n comment_scheme = 'https://reddit.com/r/{}/comments/{}/-/{}'\n post_scheme = 'https://reddit.com/r/{}/comments/{}/'\n\n if short_link == '':\n return None\n else:\n parts = short_link.split(',')\n\n if parts[0] == 'm':\n return message_scheme.format(parts[1])\n if parts[0] == 'l' and subreddit:\n if len(parts) > 2:\n return comment_scheme.format(subreddit, parts[1], parts[2])\n else:\n return post_scheme.format(subreddit, parts[1])\n elif not subreddit:\n raise ValueError('Subreddit name must be provided')\n else:\n return None", "def quick_url(comment):\n \"\"\"Return the URL for the comment without fetching its submission.\"\"\"\n def to_id(fullname):\n return fullname.split('_', 1)[1]\n return ('http://www.reddit.com/r/{}/comments/{}/_/{}?context=3'\n .format(comment.subreddit.display_name, to_id(comment.link_id),\n comment.id))", "def _fast_permalink(self):\n \"\"\"Return the short permalink to the comment.\"\"\"\n if hasattr(self, 'link_id'): # from /r or /u comments page\n sid = self.link_id.split('_')[1]\n else: # from user's /message page\n sid = self.context.split('/')[4]\n return urljoin(self.reddit_session.config['comments'], '{0}/_/{1}'\n .format(sid, self.id))", "def full_url(self):\n \"\"\"Return the full reddit URL associated with the usernote.\n\n Arguments:\n subreddit: the subreddit name for the note (PRAW Subreddit object)\n \"\"\"\n if self.link == '':\n return None\n else:\n return Note._expand_url(self.link, self.subreddit)", "def lsst_doc_shortlink_role(name, rawtext, text, lineno, inliner,\n options=None, content=None):\n \"\"\"Link to LSST documents given their handle using LSST's ls.st link\n shortener.\n\n Example::\n\n :ldm:`151`\n \"\"\"\n options = options or {}\n content = content or []\n node = nodes.reference(\n text='{0}-{1}'.format(name.upper(), text),\n refuri='https://ls.st/{0}-{1}'.format(name, text),\n **options)\n return [node], []", "def _replace_link_brackets(msg_body):\n \"\"\"\n Normalize links i.e. replace '<', '>' wrapping the link with some symbols\n so that '>' closing the link couldn't be mistakenly taken for quotation\n marker.\n\n Converts msg_body into a unicode\n \"\"\"\n if isinstance(msg_body, bytes):\n msg_body = msg_body.decode('utf8')\n\n def link_wrapper(link):\n newline_index = msg_body[:link.start()].rfind(\"\\n\")\n if msg_body[newline_index + 1] == \">\":\n return link.group()\n else:\n return \"@@%s@@\" % link.group(1)\n\n msg_body = re.sub(RE_LINK, link_wrapper, msg_body)\n return msg_body", "def submit_link(self, title, url):\n \"\"\"Submit link to this subreddit (POST). Calls :meth:`narwal.Reddit.submit_link`.\n \n :param title: title of submission\n :param url: url submission links to\n \"\"\"\n return self._reddit.submit_link(self.display_name, title, url)", "def submit_link(self, sr, title, url, follow=True):\n \"\"\"Login required. POSTs a link submission. Returns :class:`things.Link` object if ``follow=True`` (default), or the string permalink of the new submission otherwise.\n \n Argument ``follow`` exists because reddit only returns the permalink after POSTing a submission. 
In order to get detailed info on the new submission, we need to make another request. If you don't want to make that additional request, just set ``follow=False``.\n \n See https://github.com/reddit/reddit/wiki/API%3A-submit.\n \n URL: ``http://www.reddit.com/api/submit/``\n \n :param sr: name of subreddit to submit to\n :param title: title of submission\n :param url: submission link\n :param follow: set to ``True`` to follow retrieved permalink to return detailed :class:`things.Link` object. ``False`` to just return permalink.\n :type follow: bool\n \"\"\"\n return self._submit(sr, title, 'link', url=url, follow=follow)", "def autolink(self, link, is_email=False):\n \"\"\"Rendering a given link or email address.\n\n :param link: link content or email address.\n :param is_email: whether this is an email or not.\n \"\"\"\n text = link = escape_link(link)\n if is_email:\n link = 'mailto:%s' % link\n return '<a href=\"%s\">%s</a>' % (link, text)", "def linkify(\n text: Union[str, bytes],\n shorten: bool = False,\n extra_params: Union[str, Callable[[str], str]] = \"\",\n require_protocol: bool = False,\n permitted_protocols: List[str] = [\"http\", \"https\"],\n) -> str:\n \"\"\"Converts plain text into HTML with links.\n\n For example: ``linkify(\"Hello http://tornadoweb.org!\")`` would return\n ``Hello <a href=\"http://tornadoweb.org\">http://tornadoweb.org</a>!``\n\n Parameters:\n\n * ``shorten``: Long urls will be shortened for display.\n\n * ``extra_params``: Extra text to include in the link tag, or a callable\n taking the link as an argument and returning the extra text\n e.g. ``linkify(text, extra_params='rel=\"nofollow\" class=\"external\"')``,\n or::\n\n def extra_params_cb(url):\n if url.startswith(\"http://example.com\"):\n return 'class=\"internal\"'\n else:\n return 'class=\"external\" rel=\"nofollow\"'\n linkify(text, extra_params=extra_params_cb)\n\n * ``require_protocol``: Only linkify urls which include a protocol. If\n this is False, urls such as www.facebook.com will also be linkified.\n\n * ``permitted_protocols``: List (or set) of protocols which should be\n linkified, e.g. ``linkify(text, permitted_protocols=[\"http\", \"ftp\",\n \"mailto\"])``. It is very unsafe to include protocols such as\n ``javascript``.\n \"\"\"\n if extra_params and not callable(extra_params):\n extra_params = \" \" + extra_params.strip()\n\n def make_link(m: typing.Match) -> str:\n url = m.group(1)\n proto = m.group(2)\n if require_protocol and not proto:\n return url # not protocol, no linkify\n\n if proto and proto not in permitted_protocols:\n return url # bad protocol, no linkify\n\n href = m.group(1)\n if not proto:\n href = \"http://\" + href # no proto specified, use http\n\n if callable(extra_params):\n params = \" \" + extra_params(href).strip()\n else:\n params = extra_params\n\n # clip long urls. 
max_len is just an approximation\n max_len = 30\n if shorten and len(url) > max_len:\n before_clip = url\n if proto:\n proto_len = len(proto) + 1 + len(m.group(3) or \"\") # +1 for :\n else:\n proto_len = 0\n\n parts = url[proto_len:].split(\"/\")\n if len(parts) > 1:\n # Grab the whole host part plus the first bit of the path\n # The path is usually not that interesting once shortened\n # (no more slug, etc), so it really just provides a little\n # extra indication of shortening.\n url = (\n url[:proto_len]\n + parts[0]\n + \"/\"\n + parts[1][:8].split(\"?\")[0].split(\".\")[0]\n )\n\n if len(url) > max_len * 1.5: # still too long\n url = url[:max_len]\n\n if url != before_clip:\n amp = url.rfind(\"&\")\n # avoid splitting html char entities\n if amp > max_len - 5:\n url = url[:amp]\n url += \"...\"\n\n if len(url) >= len(before_clip):\n url = before_clip\n else:\n # full url is visible on mouse-over (for those who don't\n # have a status bar, such as Safari by default)\n params += ' title=\"%s\"' % href\n\n return u'<a href=\"%s\"%s>%s</a>' % (href, params, url)\n\n # First HTML-escape so that our strings are all safe.\n # The regex is modified to avoid character entites other than &amp; so\n # that we won't pick up &quot;, etc.\n text = _unicode(xhtml_escape(text))\n return _URL_RE.sub(make_link, text)", "function linkToSrc(shortPath, lineNumber) {\n var splitPath = shortPath.split(\"/\");\n return '<a href=\"{{srcroot}}' + shortPath+'\">' + splitPath[splitPath.length - 1] + '</a>, <a href=\"{{srcroot}}' + shortPath+'#L'+lineNumber+'\">' + lineNumber + '</a>';\n \n}", "def link(self, text, as_html=True) -> str:\n \"\"\"\n Generate URL for using in text messages with HTML or MD parse mode\n\n :param text: link label\n :param as_html: generate as HTML\n :return: str\n \"\"\"\n try:\n url = self.url\n except TypeError: # URL is not accessible\n if as_html:\n return md.quote_html(text)\n return md.escape_md(text)\n\n if as_html:\n return md.hlink(text, url)\n return md.link(text, url)" ]
[ 0.7899641990661621, 0.7254191040992737, 0.7252077460289001, 0.710456371307373, 0.6831409335136414, 0.680858314037323, 0.6777580976486206, 0.6740667819976807, 0.6739614605903625, 0.6738519072532654, 0.672898530960083, 0.6724253296852112 ]
Convert a usernote's URL short-hand into a full reddit URL. Arguments: subreddit: the subreddit the URL is for (PRAW Subreddit object or str) short_link: the compressed link from a usernote (str) Returns a String of the full URL.
def _expand_url(short_link, subreddit=None):
    """Convert a usernote's URL short-hand into a full reddit URL.

    Arguments:
        subreddit: the subreddit the URL is for (PRAW Subreddit object or str)
        short_link: the compressed link from a usernote (str)

    Returns a String of the full URL.
    """
    # Some URL structures for notes
    message_scheme = 'https://reddit.com/message/messages/{}'
    comment_scheme = 'https://reddit.com/r/{}/comments/{}/-/{}'
    post_scheme = 'https://reddit.com/r/{}/comments/{}/'

    if short_link == '':
        return None
    else:
        parts = short_link.split(',')

        if parts[0] == 'm':
            return message_scheme.format(parts[1])
        if parts[0] == 'l' and subreddit:
            if len(parts) > 2:
                return comment_scheme.format(subreddit, parts[1], parts[2])
            else:
                return post_scheme.format(subreddit, parts[1])
        elif not subreddit:
            raise ValueError('Subreddit name must be provided')
        else:
            return None
[ "def _compress_url(link):\n \"\"\"Convert a reddit URL into the short-hand used by usernotes.\n\n Arguments:\n link: a link to a comment, submission, or message (str)\n\n Returns a String of the shorthand URL\n \"\"\"\n comment_re = re.compile(r'/comments/([A-Za-z\\d]{2,})(?:/[^\\s]+/([A-Za-z\\d]+))?')\n message_re = re.compile(r'/message/messages/([A-Za-z\\d]+)')\n matches = re.findall(comment_re, link)\n\n if len(matches) == 0:\n matches = re.findall(message_re, link)\n\n if len(matches) == 0:\n return None\n else:\n return 'm,' + matches[0]\n else:\n if matches[0][1] == '':\n return 'l,' + matches[0][0]\n else:\n return 'l,' + matches[0][0] + ',' + matches[0][1]", "def full_url(self):\n \"\"\"Return the full reddit URL associated with the usernote.\n\n Arguments:\n subreddit: the subreddit name for the note (PRAW Subreddit object)\n \"\"\"\n if self.link == '':\n return None\n else:\n return Note._expand_url(self.link, self.subreddit)", "def _fast_permalink(self):\n \"\"\"Return the short permalink to the comment.\"\"\"\n if hasattr(self, 'link_id'): # from /r or /u comments page\n sid = self.link_id.split('_')[1]\n else: # from user's /message page\n sid = self.context.split('/')[4]\n return urljoin(self.reddit_session.config['comments'], '{0}/_/{1}'\n .format(sid, self.id))", "def quick_url(comment):\n \"\"\"Return the URL for the comment without fetching its submission.\"\"\"\n def to_id(fullname):\n return fullname.split('_', 1)[1]\n return ('http://www.reddit.com/r/{}/comments/{}/_/{}?context=3'\n .format(comment.subreddit.display_name, to_id(comment.link_id),\n comment.id))", "def shortened_url(url)\n raise \"Only String accepted: #{url}\" unless url.class == String\n short_url = MongoidShortener::ShortenedUrl.generate(url)\n short_url\n end", "def shorten(self, long_url, custom_ending=None, is_secret=False):\n \"\"\"\n Creates a short url if valid\n\n :param str long_url: The url to shorten.\n :param custom_ending: The custom url to create if available.\n :type custom_ending: str or None\n :param bool is_secret: if not public, it's secret\n :return: a short link\n :rtype: str\n \"\"\"\n params = {\n 'url': long_url,\n 'is_secret': 'true' if is_secret else 'false',\n 'custom_ending': custom_ending\n }\n data, r = self._make_request(self.api_shorten_endpoint, params)\n if r.status_code == 400:\n if custom_ending is not None:\n raise exceptions.CustomEndingUnavailable(custom_ending)\n raise exceptions.BadApiRequest\n elif r.status_code == 403:\n raise exceptions.QuotaExceededError\n action = data.get('action')\n short_url = data.get('result')\n if action == 'shorten' and short_url is not None:\n return short_url\n raise exceptions.DebugTempWarning", "def random_post(subreddit, apikey):\n \"\"\"Gets a random post from a subreddit and returns a title and shortlink to it.\"\"\"\n subreddit = '/r/random' if subreddit is None else '/r/%s' % subreddit\n urlstr = 'http://reddit.com%s/random?%s' % (subreddit, time.time())\n url = get(urlstr, headers={'User-Agent': 'CslBot/1.0'}).url\n return '** %s - %s' % (get_title(url, apikey), get_short(url, apikey))", "def get_short_url(self, entry):\n \"\"\"\n Return the short url in HTML.\n \"\"\"\n try:\n short_url = entry.short_url\n except NoReverseMatch:\n short_url = entry.get_absolute_url()\n return format_html('<a href=\"{url}\" target=\"blank\">{url}</a>',\n url=short_url)", "def short_url(self, long_url):\n \"\"\"\n 长链接转短链接\n\n :param long_url: 长链接\n :return: 返回的结果数据\n \"\"\"\n data = {\n 'appid': self.appid,\n 'long_url': long_url,\n }\n 
return self._post('tools/shorturl', data=data)", "def extract_links_from_reference(self, short_id):\n \"\"\"\n Return a dictionary with supplement files (pdf, csv, zip, ipynb, html\n and so on) extracted from supplement page.\n\n @return: @see CourseraOnDemand._extract_links_from_text\n \"\"\"\n logging.debug('Gathering resource URLs for short_id <%s>.', short_id)\n\n try:\n dom = get_page(self._session, OPENCOURSE_REFERENCE_ITEM_URL,\n json=True,\n course_id=self._course_id,\n short_id=short_id)\n\n resource_content = {}\n\n # Supplement content has structure as follows:\n # 'linked' {\n # 'openCourseAssets.v1' [ {\n # 'definition' {\n # 'value'\n\n for asset in dom['linked']['openCourseAssets.v1']:\n value = asset['definition']['value']\n # Supplement lecture types are known to contain both <asset> tags\n # and <a href> tags (depending on the course), so we extract\n # both of them.\n extend_supplement_links(\n resource_content, self._extract_links_from_text(value))\n\n instructions = (IN_MEMORY_MARKER + self._markup_to_html(value),\n 'resources')\n extend_supplement_links(\n resource_content, {IN_MEMORY_EXTENSION: [instructions]})\n\n return resource_content\n except requests.exceptions.HTTPError as exception:\n logging.error('Could not download supplement %s: %s',\n short_id, exception)\n if is_debug_run():\n logging.exception('Could not download supplement %s: %s',\n short_id, exception)\n return None", "function linkToSrc(shortPath, lineNumber) {\n var splitPath = shortPath.split(\"/\");\n return '<a href=\"{{srcroot}}' + shortPath+'\">' + splitPath[splitPath.length - 1] + '</a>, <a href=\"{{srcroot}}' + shortPath+'#L'+lineNumber+'\">' + lineNumber + '</a>';\n \n}", "def get_short_url(self):\r\n \"\"\" Returns short version of topic url (without page number) \"\"\"\r\n return reverse('post_short_url', args=(self.forum.slug, self.slug, self.id))" ]
[ 0.7953359484672546, 0.7889899015426636, 0.7214460968971252, 0.7140364646911621, 0.6838879585266113, 0.6834974884986877, 0.6769272685050964, 0.6733359098434448, 0.670393168926239, 0.6687145233154297, 0.6640609502792358, 0.6619897484779358 ]
Get the JSON stored on the usernotes wiki page. Returns a dict representation of the usernotes (with the notes BLOB decoded). Raises: RuntimeError if the usernotes version is incompatible with this version of puni.
def get_json(self):
    """Get the JSON stored on the usernotes wiki page.

    Returns a dict representation of the usernotes (with the notes BLOB
    decoded).

    Raises:
        RuntimeError if the usernotes version is incompatible with this
        version of puni.
    """
    try:
        usernotes = self.subreddit.wiki[self.page_name].content_md
        notes = json.loads(usernotes)
    except NotFound:
        self._init_notes()
    else:
        if notes['ver'] != self.schema:
            raise RuntimeError(
                'Usernotes schema is v{0}, puni requires v{1}'.
                format(notes['ver'], self.schema)
            )

        self.cached_json = self._expand_json(notes)

    return self.cached_json
[ "def _expand_json(self, j):\n \"\"\"Decompress the BLOB portion of the usernotes.\n\n Arguments:\n j: the JSON returned from the wiki page (dict)\n\n Returns a Dict with the 'blob' key removed and a 'users' key added\n \"\"\"\n decompressed_json = copy.copy(j)\n decompressed_json.pop('blob', None) # Remove BLOB portion of JSON\n\n # Decode and decompress JSON\n compressed_data = base64.b64decode(j['blob'])\n original_json = zlib.decompress(compressed_data).decode('utf-8')\n\n decompressed_json['users'] = json.loads(original_json) # Insert users\n\n return decompressed_json", "def _init_notes(self):\n \"\"\"Set up the UserNotes page with the initial JSON schema.\"\"\"\n self.cached_json = {\n 'ver': self.schema,\n 'users': {},\n 'constants': {\n 'users': [x.name for x in self.subreddit.moderator()],\n 'warnings': Note.warnings\n }\n }\n\n self.set_json('Initializing JSON via puni', True)", "def set_json(self, reason='', new_page=False):\n \"\"\"Send the JSON from the cache to the usernotes wiki page.\n\n Arguments:\n reason: the change reason that will be posted to the wiki changelog\n (str)\n Raises:\n OverflowError if the new JSON data is greater than max_page_size\n \"\"\"\n compressed_json = json.dumps(self._compress_json(self.cached_json))\n\n if len(compressed_json) > self.max_page_size:\n raise OverflowError(\n 'Usernotes page is too large (>{0} characters)'.\n format(self.max_page_size)\n )\n\n if new_page:\n self.subreddit.wiki.create(\n self.page_name,\n compressed_json,\n reason\n )\n # Set the page as hidden and available to moderators only\n self.subreddit.wiki[self.page_name].mod.update(False, permlevel=2)\n else:\n self.subreddit.wiki[self.page_name].edit(\n compressed_json,\n reason\n )", "def get_notes(self, user):\n \"\"\"Return a list of Note objects for the given user.\n\n Return an empty list if no notes are found.\n\n Arguments:\n user: the user to search for in the usernotes (str)\n \"\"\"\n # Try to search for all notes on a user, return an empty list if none\n # are found.\n try:\n users_notes = []\n\n for note in self.cached_json['users'][user]['ns']:\n users_notes.append(Note(\n user=user,\n note=note['n'],\n subreddit=self.subreddit,\n mod=self._mod_from_index(note['m']),\n link=note['l'],\n warning=self._warning_from_index(note['w']),\n note_time=note['t']\n ))\n\n return users_notes\n except KeyError:\n # User not found\n return []", "def _compress_json(self, j):\n \"\"\"Compress the BLOB data portion of the usernotes.\n\n Arguments:\n j: the JSON in Schema v5 format (dict)\n\n Returns a dict with the 'users' key removed and 'blob' key added\n \"\"\"\n compressed_json = copy.copy(j)\n compressed_json.pop('users', None)\n\n compressed_data = zlib.compress(\n json.dumps(j['users']).encode('utf-8'),\n self.zlib_compression_strength\n )\n b64_data = base64.b64encode(compressed_data).decode('utf-8')\n\n compressed_json['blob'] = b64_data\n\n return compressed_json", "def add_note(self, note):\n \"\"\"Add a note to the usernotes wiki page.\n\n Arguments:\n note: the note to be added (Note)\n\n Returns the update message for the usernotes wiki\n\n Raises:\n ValueError when the warning type of the note can not be found in the\n stored list of warnings.\n \"\"\"\n notes = self.cached_json\n\n if not note.moderator:\n note.moderator = self.r.user.me().name\n\n # Get index of moderator in mod list from usernotes\n # Add moderator to list if not already there\n try:\n mod_index = notes['constants']['users'].index(note.moderator)\n except ValueError:\n 
notes['constants']['users'].append(note.moderator)\n mod_index = notes['constants']['users'].index(note.moderator)\n\n # Get index of warning type from warnings list\n # Add warning type to list if not already there\n try:\n warn_index = notes['constants']['warnings'].index(note.warning)\n except ValueError:\n if note.warning in Note.warnings:\n notes['constants']['warnings'].append(note.warning)\n warn_index = notes['constants']['warnings'].index(note.warning)\n else:\n raise ValueError('Warning type not valid: ' + note.warning)\n\n new_note = {\n 'n': note.note,\n 't': note.time,\n 'm': mod_index,\n 'l': note.link,\n 'w': warn_index\n }\n\n try:\n notes['users'][note.username]['ns'].insert(0, new_note)\n except KeyError:\n notes['users'][note.username] = {'ns': [new_note]}\n\n return '\"create new note on user {}\" via puni'.format(note.username)", "def get_notes(self):\n \"\"\"Return a list of all of the project's notes.\n\n :return: A list of notes.\n :rtype: list of :class:`pytodoist.todoist.Note`\n\n >>> from pytodoist import todoist\n >>> user = todoist.login('john.doe@gmail.com', 'password')\n >>> project = user.get_project('PyTodoist')\n >>> notes = project.get_notes()\n \"\"\"\n self.owner.sync()\n notes = self.owner.notes.values()\n return [n for n in notes if n.project_id == self.id]", "def get_notes(self, folderid=\"\", offset=0, limit=10):\n\n \"\"\"Fetch notes\n\n :param folderid: The UUID of the folder to fetch notes from\n :param offset: the pagination offset\n :param limit: the pagination limit\n \"\"\"\n\n if self.standard_grant_type is not \"authorization_code\":\n raise DeviantartError(\"Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.\")\n\n response = self._req('/notes', {\n 'folderid' : folderid,\n 'offset' : offset,\n 'limit' : limit\n })\n\n notes = []\n\n for item in response['results']:\n n = {}\n\n n['noteid'] = item['noteid']\n n['ts'] = item['ts']\n n['unread'] = item['unread']\n n['starred'] = item['starred']\n n['sent'] = item['sent']\n n['subject'] = item['subject']\n n['preview'] = item['preview']\n n['body'] = item['body']\n n['user'] = User()\n n['user'].from_dict(item['user'])\n n['recipients'] = []\n\n for recipient_item in item['recipients']:\n u = User()\n u.from_dict(recipient_item)\n n['recipients'].append(u)\n\n notes.append(n)\n\n return {\n \"results\" : notes,\n \"has_more\" : response['has_more'],\n \"next_offset\" : response['next_offset']\n }", "def note_versions(self, updater_id=None, post_id=None, note_id=None):\n \"\"\"Get list of note versions.\n\n Parameters:\n updater_id (int):\n post_id (int):\n note_id (int):\n \"\"\"\n params = {\n 'search[updater_id]': updater_id,\n 'search[post_id]': post_id,\n 'search[note_id]': note_id\n }\n return self._get('note_versions.json', params)", "def notes_for_version(self, version):\n \"\"\"Given the parsed Version of pants, return its release notes.\n\n TODO: This method should parse out the specific version from the resulting file:\n see https://github.com/pantsbuild/pants/issues/1708\n \"\"\"\n branch_name = self._branch_name(version)\n branch_notes_file = self._branch_notes.get(branch_name, None)\n if branch_notes_file is None:\n raise ValueError(\n 'Version {} lives in branch {}, which is not configured in {}.'.format(\n version, branch_name, self._branch_notes))\n return _read_contents(branch_notes_file)", "def note_list(self, body_matches=None, post_id=None, post_tags_match=None,\n creator_name=None, creator_id=None, is_active=None):\n 
\"\"\"Return list of notes.\n\n Parameters:\n body_matches (str): The note's body matches the given terms.\n post_id (int): A specific post.\n post_tags_match (str): The note's post's tags match the given terms.\n creator_name (str): The creator's name. Exact match.\n creator_id (int): The creator's user id.\n is_active (bool): Can be: True, False.\n \"\"\"\n params = {\n 'search[body_matches]': body_matches,\n 'search[post_id]': post_id,\n 'search[post_tags_match]': post_tags_match,\n 'search[creator_name]': creator_name,\n 'search[creator_id]': creator_id,\n 'search[is_active]': is_active\n }\n return self._get('notes.json', params)", "def notes(self):\n \"\"\"*list of the notes assoicated with this object*\n\n **Usage:**\n\n The document, project and task objects can all contain notes.\n\n .. code-block:: python\n\n docNotes = doc.notes\n projectNotes = aProject.notes\n taskNotes = aTask.notes\n \"\"\"\n return self._get_object(\n regex=re.compile(\n r'((?<=\\n)|(?<=^))(?P<title>\\S(?<!-)((?!(: +@|: *\\n|: *$)).)*)\\s*?(\\n|$)(?P<tagString>&&&)?(?P<content>&&&)?', re.UNICODE),\n objectType=\"note\",\n content=None\n )" ]
[ 0.7596529722213745, 0.7476683855056763, 0.7053613066673279, 0.7012403607368469, 0.7008568048477173, 0.6941919326782227, 0.6872028708457947, 0.6803351640701294, 0.6761101484298706, 0.6668883562088013, 0.6591576933860779, 0.6588183641433716 ]
Set up the UserNotes page with the initial JSON schema.
def _init_notes(self):
    """Set up the UserNotes page with the initial JSON schema."""
    self.cached_json = {
        'ver': self.schema,
        'users': {},
        'constants': {
            'users': [x.name for x in self.subreddit.moderator()],
            'warnings': Note.warnings
        }
    }

    self.set_json('Initializing JSON via puni', True)
[ "public function up()\n {\n Schema::table('user_settings', function (Blueprint $table) {\n $table->longText('notes')->nullable()->default(null)->comment('User notes')->change();\n });\n }", "def set_json(self, reason='', new_page=False):\n \"\"\"Send the JSON from the cache to the usernotes wiki page.\n\n Arguments:\n reason: the change reason that will be posted to the wiki changelog\n (str)\n Raises:\n OverflowError if the new JSON data is greater than max_page_size\n \"\"\"\n compressed_json = json.dumps(self._compress_json(self.cached_json))\n\n if len(compressed_json) > self.max_page_size:\n raise OverflowError(\n 'Usernotes page is too large (>{0} characters)'.\n format(self.max_page_size)\n )\n\n if new_page:\n self.subreddit.wiki.create(\n self.page_name,\n compressed_json,\n reason\n )\n # Set the page as hidden and available to moderators only\n self.subreddit.wiki[self.page_name].mod.update(False, permlevel=2)\n else:\n self.subreddit.wiki[self.page_name].edit(\n compressed_json,\n reason\n )", "def get_json(self):\n \"\"\"Get the JSON stored on the usernotes wiki page.\n\n Returns a dict representation of the usernotes (with the notes BLOB\n decoded).\n\n Raises:\n RuntimeError if the usernotes version is incompatible with this\n version of puni.\n \"\"\"\n try:\n usernotes = self.subreddit.wiki[self.page_name].content_md\n notes = json.loads(usernotes)\n except NotFound:\n self._init_notes()\n else:\n if notes['ver'] != self.schema:\n raise RuntimeError(\n 'Usernotes schema is v{0}, puni requires v{1}'.\n format(notes['ver'], self.schema)\n )\n\n self.cached_json = self._expand_json(notes)\n\n return self.cached_json", "def _sync_notes(self, notes_json):\n \"\"\"\"Populate the user's notes from a JSON encoded list.\"\"\"\n for note_json in notes_json:\n note_id = note_json['id']\n task_id = note_json['item_id']\n if task_id not in self.tasks:\n # ignore orphan notes\n continue\n task = self.tasks[task_id]\n self.notes[note_id] = Note(note_json, task)", "function UserStory (params) {\n var self = this\n if (!(self instanceof UserStory)) return new UserStory(params)\n\n var extras = params['story_extras']\n var storyNotes = params['story_notes']\n\n self.screenshotCount = extras['screenshot_count'] | 0\n self.viewCount = extras['view_count'] | 0\n\n self.notes = (storyNotes || [ ]).map(function (note) {\n return new StoryNote(note)\n })\n}", "function(json){\n \n // Protect against forgetting the new keyword when calling the constructor\n if(!(this instanceof Note)){\n return new Note(json);\n }\n \n // If the given object is already an instance then just return it. 
DON'T copy it.\n if(Note.isInstance(json)){\n return json;\n }\n \n this.init(json);\n}", "def setup_users_page(self, ):\n \"\"\"Create and set the model on the users page\n\n :returns: None\n :rtype: None\n :raises: None\n \"\"\"\n self.users_tablev.horizontalHeader().setResizeMode(QtGui.QHeaderView.ResizeToContents)\n log.debug(\"Loading users for users page.\")\n rootdata = treemodel.ListItemData(['Username', 'First', 'Last', 'Email'])\n rootitem = treemodel.TreeItem(rootdata)\n users = djadapter.users.all()\n for usr in users:\n usrdata = djitemdata.UserItemData(usr)\n treemodel.TreeItem(usrdata, rootitem)\n self.users_model = treemodel.TreeModel(rootitem)\n self.users_tablev.setModel(self.users_model)", "public function jobPopulateNotesTeamUser()\n {\n $aclManager = $this->getInjection('container')->get('aclManager');\n\n $sql = $this->getEntityManager()->getQuery()->createSelectQuery('Note', [\n 'whereClause' => [\n 'parentId!=' => null,\n 'type=' => ['Relate', 'CreateRelated', 'EmailReceived', 'EmailSent', 'Assign', 'Create'],\n ],\n 'limit' => 100000,\n 'orderBy' => [['number', 'DESC']]\n ]);\n $sth = $this->getEntityManager()->getPdo()->prepare($sql);\n $sth->execute();\n\n $i = 0;\n while ($dataRow = $sth->fetch(\\PDO::FETCH_ASSOC)) {\n $i++;\n $note = $this->getEntityManager()->getEntityFactory()->create('Note');\n $note->set($dataRow);\n $note->setAsFetched();\n\n if ($note->get('relatedId') && $note->get('relatedType')) {\n $targetType = $note->get('relatedType');\n $targetId = $note->get('relatedId');\n } else if ($note->get('parentId') && $note->get('parentType')) {\n $targetType = $note->get('parentType');\n $targetId = $note->get('parentId');\n } else {\n continue;\n }\n\n if (!$this->getEntityManager()->hasRepository($targetType)) continue;\n\n try {\n $entity = $this->getEntityManager()->getEntity($targetType, $targetId);\n if (!$entity) continue;\n $ownerUserIdAttribute = $aclManager->getImplementation($targetType)->getOwnerUserIdAttribute($entity);\n $toSave = false;\n if ($ownerUserIdAttribute) {\n if ($entity->getAttributeParam($ownerUserIdAttribute, 'isLinkMultipleIdList')) {\n $link = $entity->getAttributeParam($ownerUserIdAttribute, 'relation');\n $userIdList = $entity->getLinkMultipleIdList($link);\n } else {\n $userId = $entity->get($ownerUserIdAttribute);\n if ($userId) {\n $userIdList = [$userId];\n } else {\n $userIdList = [];\n }\n }\n if (!empty($userIdList)) {\n $note->set('usersIds', $userIdList);\n $toSave = true;\n }\n }\n if ($entity->hasLinkMultipleField('teams')) {\n $teamIdList = $entity->getLinkMultipleIdList('teams');\n if (!empty($teamIdList)) {\n $note->set('teamsIds', $teamIdList);\n $toSave = true;\n }\n }\n if ($toSave) {\n $this->getEntityManager()->saveEntity($note);\n }\n } catch (\\Exception $e) {}\n }\n }", "def add_note(self, note):\n \"\"\"Add a note to the usernotes wiki page.\n\n Arguments:\n note: the note to be added (Note)\n\n Returns the update message for the usernotes wiki\n\n Raises:\n ValueError when the warning type of the note can not be found in the\n stored list of warnings.\n \"\"\"\n notes = self.cached_json\n\n if not note.moderator:\n note.moderator = self.r.user.me().name\n\n # Get index of moderator in mod list from usernotes\n # Add moderator to list if not already there\n try:\n mod_index = notes['constants']['users'].index(note.moderator)\n except ValueError:\n notes['constants']['users'].append(note.moderator)\n mod_index = notes['constants']['users'].index(note.moderator)\n\n # Get index of warning type from 
warnings list\n # Add warning type to list if not already there\n try:\n warn_index = notes['constants']['warnings'].index(note.warning)\n except ValueError:\n if note.warning in Note.warnings:\n notes['constants']['warnings'].append(note.warning)\n warn_index = notes['constants']['warnings'].index(note.warning)\n else:\n raise ValueError('Warning type not valid: ' + note.warning)\n\n new_note = {\n 'n': note.note,\n 't': note.time,\n 'm': mod_index,\n 'l': note.link,\n 'w': warn_index\n }\n\n try:\n notes['users'][note.username]['ns'].insert(0, new_note)\n except KeyError:\n notes['users'][note.username] = {'ns': [new_note]}\n\n return '\"create new note on user {}\" via puni'.format(note.username)", "function StoryNote (params) {\n var self = this\n if (!(self instanceof StoryNote)) return new StoryNote(params)\n\n // who viewed the story\n self.viewer = params['viewer']\n\n // when the story was viewed by the viewer\n self.viewDate = new Date(+params['timestamp'])\n\n // whether or not the viewer took a screenshot of the story\n self.screenshot = !!params['screenshotted']\n\n // obscure data. not sure what is's for but it has the following format:\n // {\n // mField : \"123456.023Z\"\n // mId : \"username~unixtime\"\n // mKey : \"story:{username}:YYYYMMDD\"\n // }\n self.storyPointer = params['storypointer']\n}", "public function up()\r\n {\r\n\r\n // --------------------------------\r\n // Table: notes\r\n // --------------------------------\r\n Schema::create(config('combine-core.tablePrefix', 'combine') . '_notes', function (Blueprint $table) {\r\n $table->uuid('id');\r\n $table->json('activity')->nullable();\r\n $table->string('content', 1024);\r\n $table->uuid('noteable_id')->index();\r\n $table->char('noteable_type', 36);\r\n // --------------------------------\r\n // Timestamps\r\n // --------------------------------\r\n $table->timestamps();\r\n $table->softDeletes();\r\n // --------------------------------\r\n // Indexes\r\n // --------------------------------\r\n $table->primary('id');\r\n });\r\n }", "def notes=(new_notes)\n raise ArgumentError, \"The new notes cannot be nil\" unless new_notes\n\n edit_template = {\n \"notes\" => new_notes\n }\n\n self.service.editObject(edit_template)\n self.refresh_details()\n end" ]
[ 0.7966107130050659, 0.7526082992553711, 0.7520560026168823, 0.7177902460098267, 0.7156883478164673, 0.7048381567001343, 0.7048107981681824, 0.700613796710968, 0.700387179851532, 0.689842939376831, 0.6890769600868225, 0.6837928295135498 ]
Send the JSON from the cache to the usernotes wiki page. Arguments: reason: the change reason that will be posted to the wiki changelog (str) Raises: OverflowError if the new JSON data is greater than max_page_size
def set_json(self, reason='', new_page=False):
    """Send the JSON from the cache to the usernotes wiki page.

    Arguments:
        reason: the change reason that will be posted to the wiki changelog
            (str)
    Raises:
        OverflowError if the new JSON data is greater than max_page_size
    """
    compressed_json = json.dumps(self._compress_json(self.cached_json))

    if len(compressed_json) > self.max_page_size:
        raise OverflowError(
            'Usernotes page is too large (>{0} characters)'.
            format(self.max_page_size)
        )

    if new_page:
        self.subreddit.wiki.create(
            self.page_name,
            compressed_json,
            reason
        )
        # Set the page as hidden and available to moderators only
        self.subreddit.wiki[self.page_name].mod.update(False, permlevel=2)
    else:
        self.subreddit.wiki[self.page_name].edit(
            compressed_json,
            reason
        )
[ "def get_json(self):\n \"\"\"Get the JSON stored on the usernotes wiki page.\n\n Returns a dict representation of the usernotes (with the notes BLOB\n decoded).\n\n Raises:\n RuntimeError if the usernotes version is incompatible with this\n version of puni.\n \"\"\"\n try:\n usernotes = self.subreddit.wiki[self.page_name].content_md\n notes = json.loads(usernotes)\n except NotFound:\n self._init_notes()\n else:\n if notes['ver'] != self.schema:\n raise RuntimeError(\n 'Usernotes schema is v{0}, puni requires v{1}'.\n format(notes['ver'], self.schema)\n )\n\n self.cached_json = self._expand_json(notes)\n\n return self.cached_json", "def report(self, reason=None):\n \"\"\"Report this object to the moderators.\n\n :param reason: The user-supplied reason for reporting a comment\n or submission. Default: None (blank reason)\n :returns: The json response from the server.\n\n \"\"\"\n url = self.reddit_session.config['report']\n data = {'id': self.fullname}\n if reason:\n data['reason'] = reason\n response = self.reddit_session.request_json(url, data=data)\n # Reported objects are automatically hidden as well\n # pylint: disable=W0212\n self.reddit_session.evict(\n [self.reddit_session.config['user'],\n urljoin(self.reddit_session.user._url, 'hidden')])\n # pylint: enable=W0212\n return response", "def write(self):\n \"\"\"\n Create or update a Wiki Page on Assembla\n \"\"\"\n if not hasattr(self, 'space'):\n raise AttributeError(\"A WikiPage must have a 'space' attribute before you can write it to Assembla.\")\n\n self.api = self.space.api\n\n if self.get('id'): # We are modifying an existing wiki page\n return self.api._put_json(\n self,\n space=self.space,\n rel_path=self.space._build_rel_path('wiki_pages'),\n id_field='id'\n )\n else: # Creating a new wiki page\n return self.api._post_json(\n self,\n space=self.space,\n rel_path=self.space._build_rel_path('wiki_pages'),\n )", "def add_note(self, note):\n \"\"\"Add a note to the usernotes wiki page.\n\n Arguments:\n note: the note to be added (Note)\n\n Returns the update message for the usernotes wiki\n\n Raises:\n ValueError when the warning type of the note can not be found in the\n stored list of warnings.\n \"\"\"\n notes = self.cached_json\n\n if not note.moderator:\n note.moderator = self.r.user.me().name\n\n # Get index of moderator in mod list from usernotes\n # Add moderator to list if not already there\n try:\n mod_index = notes['constants']['users'].index(note.moderator)\n except ValueError:\n notes['constants']['users'].append(note.moderator)\n mod_index = notes['constants']['users'].index(note.moderator)\n\n # Get index of warning type from warnings list\n # Add warning type to list if not already there\n try:\n warn_index = notes['constants']['warnings'].index(note.warning)\n except ValueError:\n if note.warning in Note.warnings:\n notes['constants']['warnings'].append(note.warning)\n warn_index = notes['constants']['warnings'].index(note.warning)\n else:\n raise ValueError('Warning type not valid: ' + note.warning)\n\n new_note = {\n 'n': note.note,\n 't': note.time,\n 'm': mod_index,\n 'l': note.link,\n 'w': warn_index\n }\n\n try:\n notes['users'][note.username]['ns'].insert(0, new_note)\n except KeyError:\n notes['users'][note.username] = {'ns': [new_note]}\n\n return '\"create new note on user {}\" via puni'.format(note.username)", "def _expand_json(self, j):\n \"\"\"Decompress the BLOB portion of the usernotes.\n\n Arguments:\n j: the JSON returned from the wiki page (dict)\n\n Returns a Dict with the 'blob' key removed 
and a 'users' key added\n \"\"\"\n decompressed_json = copy.copy(j)\n decompressed_json.pop('blob', None) # Remove BLOB portion of JSON\n\n # Decode and decompress JSON\n compressed_data = base64.b64decode(j['blob'])\n original_json = zlib.decompress(compressed_data).decode('utf-8')\n\n decompressed_json['users'] = json.loads(original_json) # Insert users\n\n return decompressed_json", "def _init_notes(self):\n \"\"\"Set up the UserNotes page with the initial JSON schema.\"\"\"\n self.cached_json = {\n 'ver': self.schema,\n 'users': {},\n 'constants': {\n 'users': [x.name for x in self.subreddit.moderator()],\n 'warnings': Note.warnings\n }\n }\n\n self.set_json('Initializing JSON via puni', True)", "def edit_wiki_page(self, subreddit, page, content, reason=''):\n \"\"\"Create or edit a wiki page with title `page` for `subreddit`.\n\n :returns: The json response from the server.\n\n \"\"\"\n data = {'content': content,\n 'page': page,\n 'r': six.text_type(subreddit),\n 'reason': reason}\n evict = self.config['wiki_page'].format(\n subreddit=six.text_type(subreddit), page=page.lower())\n self.evict(evict)\n return self.request_json(self.config['wiki_edit'], data=data)", "def _log_action(self):\n \"\"\"\n Adds a log entry for this action to the object history in the Django admin.\n \"\"\"\n if self.publish_version == self.UNPUBLISH_CHOICE:\n message = 'Unpublished page (scheduled)'\n else:\n message = 'Published version {} (scheduled)'.format(self.publish_version)\n\n LogEntry.objects.log_action(\n user_id=self.user.pk,\n content_type_id=self.content_type.pk,\n object_id=self.object_id,\n object_repr=force_text(self.content_object),\n action_flag=CHANGE,\n change_message=message\n )", "def create_and_update_from_json_data(d, user):\n \"\"\"\n Create or update page based on python dict d loaded from JSON data.\n This applies all data except for redirect_to, which is done in a\n second pass after all pages have been imported,\n\n user is the User instance that will be used if the author can't\n be found in the DB.\n\n returns (page object, created, messages).\n\n created is True if this was a new page or False if an existing page\n was updated.\n\n messages is a list of strings warnings/messages about this import\n \"\"\"\n page = None\n parent = None\n parent_required = True\n created = False\n messages = []\n\n page_languages = set(lang[0] for lang in settings.PAGE_LANGUAGES)\n\n for lang, s in list(d['complete_slug'].items()):\n if lang not in page_languages:\n messages.append(_(\"Language '%s' not imported\") % (lang,))\n continue\n\n page = Page.objects.from_path(s, lang, exclude_drafts=False)\n if page and page.get_complete_slug(lang) == s:\n break\n if parent_required and parent is None:\n if '/' in s:\n parent = Page.objects.from_path(s.rsplit('/', 1)[0], lang,\n exclude_drafts=False)\n else:\n parent_required = False\n else:\n # can't find an existing match, need to create a new Page\n page = Page(parent=parent)\n created = True\n\n user_model = get_user_model()\n\n def custom_get_user_by_email(email):\n \"\"\"\n Simplified version\n \"\"\"\n return user_model.objects.get(email=email)\n\n try:\n page.author = custom_get_user_by_email(d['author_email'])\n except (user_model.DoesNotExist, user_model.MultipleObjectsReturned):\n page.author = user\n messages.append(_(\"Original author '%s' not found\")\n % (d['author_email'],))\n\n page.creation_date = datetime.strptime(d['creation_date'],\n ISODATE_FORMAT)\n page.publication_date = datetime.strptime(d['publication_date'],\n 
ISODATE_FORMAT) if d['publication_date'] else None\n page.publication_end_date = datetime.strptime(d['publication_end_date'],\n ISODATE_FORMAT) if d['publication_end_date'] else None\n page.last_modification_date = datetime.strptime(\n d['last_modification_date'], ISODATE_FORMAT)\n page.status = {\n 'published': Page.PUBLISHED,\n 'hidden': Page.HIDDEN,\n 'draft': Page.DRAFT,\n }[d['status']]\n page.template = d['template']\n page.redirect_to_url = d['redirect_to_url']\n\n page.save()\n\n # Add tags\n if settings.PAGE_TAGGING:\n from taggit.models import Tag\n tags = d.get('tags', [])\n page.tags.clear()\n if tags:\n for tag in tags:\n Tag.objects.get_or_create(name=tag)\n page.tags.add(tag)\n page.save()\n\n if settings.PAGE_USE_SITE_ID:\n if d['sites']:\n for site in d['sites']:\n try:\n page.sites.add(Site.objects.get(domain=site))\n except Site.DoesNotExist:\n messages.append(_(\"Could not add site '%s' to page\")\n % (site,))\n if not settings.PAGE_HIDE_SITES and not page.sites.count():\n # need at least one site\n page.sites.add(Site.objects.get(pk=global_settings.SITE_ID))\n\n def create_content(lang, ctype, body):\n Content.objects.create_content_if_changed(page, lang, ctype, body)\n\n for lang in d['content_language_updated_order']:\n if lang not in page_languages:\n continue\n create_content(lang, 'slug',\n d['complete_slug'][lang].rsplit('/', 1)[-1])\n create_content(lang, 'title', d['title'][lang])\n for ctype, langs_bodies in list(d['content'].items()):\n create_content(lang, ctype, langs_bodies[lang])\n\n return page, created, messages", "def report reason, thing_id\n Module.nesting[1].logger.warn \"reporting '#{thing_id}'\"\n json :post, \"/api/report\",\n reason: \"other\",\n other_reason: reason,\n thing_id: thing_id\n end", "def save_note(self, note_text, patient_id,\n document_type,\n document_status='Unsigned', wrapped_in_rtf='N'):\n \"\"\"\n invokes TouchWorksMagicConstants.ACTION_SAVE_NOTE action\n :return: JSON response\n \"\"\"\n allowed_document_status = ['Unsigned', 'Final']\n if document_status not in ['Unsigned', 'Final']:\n raise ValueError('document_status was invalid. allowed values are %s' %\n allowed_document_status)\n magic = self._magic_json(action=TouchWorksMagicConstants.ACTION_SAVE_NOTE,\n patient_id=patient_id,\n parameter1=note_text,\n parameter2=document_type,\n parameter3=document_status,\n parameter4=wrapped_in_rtf)\n response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)\n result = self._get_results_or_raise_if_magic_invalid(\n magic,\n response,\n TouchWorksMagicConstants.RESULT_SAVE_NOTE)\n return result", "def wiki_update(self, page_id, title=None, body=None,\n other_names=None, is_locked=None, is_deleted=None):\n \"\"\"Action to lets you update a wiki page (Requires login) (UNTESTED).\n\n Parameters:\n page_id (int): Whre page_id is the wiki page id.\n title (str): Page title.\n body (str): Page content.\n other_names (str): Other names.\n is_locked (int): Can be: 0, 1 (Builder+).\n is_deleted (int): Can be: 0, 1 (Builder+).\n \"\"\"\n params = {\n 'wiki_page[title]': title,\n 'wiki_page[body]': body,\n 'wiki_page[other_names]': other_names\n }\n return self._get('wiki_pages/{0}.json'.format(page_id), params,\n method='PUT', auth=True)" ]
[ 0.7193440198898315, 0.6912767291069031, 0.6646159291267395, 0.6640122532844543, 0.6605554223060608, 0.6513100862503052, 0.6420649290084839, 0.6415596604347229, 0.6386433243751526, 0.6373923420906067, 0.6370277404785156, 0.6363284587860107 ]
Return a list of Note objects for the given user. Return an empty list if no notes are found. Arguments: user: the user to search for in the usernotes (str)
def get_notes(self, user):
    """Return a list of Note objects for the given user.

    Return an empty list if no notes are found.

    Arguments:
        user: the user to search for in the usernotes (str)
    """
    # Try to search for all notes on a user, return an empty list if none
    # are found.
    try:
        users_notes = []

        for note in self.cached_json['users'][user]['ns']:
            users_notes.append(Note(
                user=user,
                note=note['n'],
                subreddit=self.subreddit,
                mod=self._mod_from_index(note['m']),
                link=note['l'],
                warning=self._warning_from_index(note['w']),
                note_time=note['t']
            ))

        return users_notes
    except KeyError:
        # User not found
        return []
[ "def get_notes(self):\n \"\"\"Return a list of all of the project's notes.\n\n :return: A list of notes.\n :rtype: list of :class:`pytodoist.todoist.Note`\n\n >>> from pytodoist import todoist\n >>> user = todoist.login('john.doe@gmail.com', 'password')\n >>> project = user.get_project('PyTodoist')\n >>> notes = project.get_notes()\n \"\"\"\n self.owner.sync()\n notes = self.owner.notes.values()\n return [n for n in notes if n.project_id == self.id]", "def get_notifications(user):\n '''List notification for a given user'''\n notifications = []\n\n for name, func in _providers.items():\n notifications.extend([{\n 'type': name,\n 'created_on': dt,\n 'details': details\n } for dt, details in func(user)])\n\n return notifications", "def get_notes(self, folderid=\"\", offset=0, limit=10):\n\n \"\"\"Fetch notes\n\n :param folderid: The UUID of the folder to fetch notes from\n :param offset: the pagination offset\n :param limit: the pagination limit\n \"\"\"\n\n if self.standard_grant_type is not \"authorization_code\":\n raise DeviantartError(\"Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.\")\n\n response = self._req('/notes', {\n 'folderid' : folderid,\n 'offset' : offset,\n 'limit' : limit\n })\n\n notes = []\n\n for item in response['results']:\n n = {}\n\n n['noteid'] = item['noteid']\n n['ts'] = item['ts']\n n['unread'] = item['unread']\n n['starred'] = item['starred']\n n['sent'] = item['sent']\n n['subject'] = item['subject']\n n['preview'] = item['preview']\n n['body'] = item['body']\n n['user'] = User()\n n['user'].from_dict(item['user'])\n n['recipients'] = []\n\n for recipient_item in item['recipients']:\n u = User()\n u.from_dict(recipient_item)\n n['recipients'].append(u)\n\n notes.append(n)\n\n return {\n \"results\" : notes,\n \"has_more\" : response['has_more'],\n \"next_offset\" : response['next_offset']\n }", "def note_list(self, body_matches=None, post_id=None, post_tags_match=None,\n creator_name=None, creator_id=None, is_active=None):\n \"\"\"Return list of notes.\n\n Parameters:\n body_matches (str): The note's body matches the given terms.\n post_id (int): A specific post.\n post_tags_match (str): The note's post's tags match the given terms.\n creator_name (str): The creator's name. 
Exact match.\n creator_id (int): The creator's user id.\n is_active (bool): Can be: True, False.\n \"\"\"\n params = {\n 'search[body_matches]': body_matches,\n 'search[post_id]': post_id,\n 'search[post_tags_match]': post_tags_match,\n 'search[creator_name]': creator_name,\n 'search[creator_id]': creator_id,\n 'search[is_active]': is_active\n }\n return self._get('notes.json', params)", "def remove_note(self, username, index):\n \"\"\"Remove a single usernote from the usernotes.\n\n Arguments:\n username: the user that for whom you're removing a note (str)\n index: the index of the note which is to be removed (int)\n\n Returns the update message for the usernotes wiki\n \"\"\"\n self.cached_json['users'][username]['ns'].pop(index)\n\n # Go ahead and remove the user's entry if they have no more notes left\n if len(self.cached_json['users'][username]['ns']) == 0:\n del self.cached_json['users'][username]\n\n return '\"delete note #{} on user {}\" via puni'.format(index, username)", "def get_notes(self):\n \"\"\"Return all notes attached to this Task.\n\n :return: A list of all notes attached to this Task.\n :rtype: list of :class:`pytodoist.todoist.Note`\n\n >>> from pytodoist import todoist\n >>> user = todoist.login('john.doe@gmail.com', 'password')\n >>> project = user.get_project('PyTodoist')\n >>> task = project.add_task('Install PyTodoist.')\n >>> task.add_note('https://pypi.python.org/pypi')\n >>> notes = task.get_notes()\n >>> print(len(notes))\n 1\n \"\"\"\n owner = self.project.owner\n owner.sync()\n return [n for n in owner.notes.values() if n.item_id == self.id]", "def get_contact_notes(\n self,\n obj_id,\n search='',\n start=0,\n limit=0,\n order_by='',\n order_by_dir='ASC'\n ):\n \"\"\"\n Get a list of a contact's notes\n\n :param obj_id: int Contact ID\n :param search: str\n :param start: int\n :param limit: int\n :param order_by: str\n :param order_by_dir: str\n :return: dict|str\n \"\"\"\n\n parameters = {\n 'search': search,\n 'start': start,\n 'limit': limit,\n 'orderBy': order_by,\n 'orderByDir': order_by_dir,\n }\n response = self._client.session.get(\n '{url}/{id}/notes'.format(\n url=self.endpoint_url, id=obj_id\n ),\n params=parameters\n )\n return self.process_response(response)", "def notes(self):\n \"\"\"*list of the notes assoicated with this object*\n\n **Usage:**\n\n The document, project and task objects can all contain notes.\n\n .. 
code-block:: python\n\n docNotes = doc.notes\n projectNotes = aProject.notes\n taskNotes = aTask.notes\n \"\"\"\n return self._get_object(\n regex=re.compile(\n r'((?<=\\n)|(?<=^))(?P<title>\\S(?<!-)((?!(: +@|: *\\n|: *$)).)*)\\s*?(\\n|$)(?P<tagString>&&&)?(?P<content>&&&)?', re.UNICODE),\n objectType=\"note\",\n content=None\n )", "def ReadUserNotifications(self,\n username,\n state=None,\n timerange=None,\n cursor=None):\n \"\"\"Reads notifications scheduled for a user within a given timerange.\"\"\"\n\n query = (\"SELECT UNIX_TIMESTAMP(timestamp), \"\n \" notification_state, notification \"\n \"FROM user_notification \"\n \"WHERE username_hash = %s \")\n args = [mysql_utils.Hash(username)]\n\n if state is not None:\n query += \"AND notification_state = %s \"\n args.append(int(state))\n\n if timerange is not None:\n time_from, time_to = timerange # pylint: disable=unpacking-non-sequence\n\n if time_from is not None:\n query += \"AND timestamp >= FROM_UNIXTIME(%s) \"\n args.append(mysql_utils.RDFDatetimeToTimestamp(time_from))\n\n if time_to is not None:\n query += \"AND timestamp <= FROM_UNIXTIME(%s) \"\n args.append(mysql_utils.RDFDatetimeToTimestamp(time_to))\n\n query += \"ORDER BY timestamp DESC \"\n\n ret = []\n cursor.execute(query, args)\n\n for timestamp, state, notification_ser in cursor.fetchall():\n n = rdf_objects.UserNotification.FromSerializedString(notification_ser)\n n.timestamp = mysql_utils.TimestampToRDFDatetime(timestamp)\n n.state = state\n ret.append(n)\n\n return ret", "def full_author_notes(soup, fntype_filter=None):\n \"\"\"\n Find the fn tags included in author-notes\n \"\"\"\n notes = []\n\n author_notes_section = raw_parser.author_notes(soup)\n if author_notes_section:\n fn_nodes = raw_parser.fn(author_notes_section)\n notes = footnotes(fn_nodes, fntype_filter)\n\n return notes", "def find_recent(self, nrecent=4):\n '''Find recent non-trashed notes'''\n try:\n rows = self.cur.execute(\"SELECT noteId FROM note WHERE book > 0 ORDER BY date DESC LIMIT %d;\"%nrecent).fetchall()\n except:\n self.error(\"nota.find_recent() cannot look up note list\")\n # Possibly save time by finding IDs first.\n noteIds = []\n for r in rows:\n noteIds.append(r[0],)\n self.fyi(\"noteIds: %s\" % noteIds)\n rval = []\n for n in noteIds:\n note = None\n try:\n note = self.cur.execute(\"SELECT noteId, date, title, content, hash, book FROM note WHERE noteId = ?;\", [n]).fetchone()\n except:\n self.warning(\"Problem extracting note %s from database for recent-list\" % n)\n next\n if note:\n keywordIds = []\n keywordIds.extend(self.con.execute(\"SELECT keywordid FROM notekeyword WHERE notekeyword.noteid=?;\", [n]))\n keywords = []\n for k in keywordIds:\n keywords.append(self.cur.execute(\"SELECT keyword FROM keyword WHERE keywordId=?;\", k).fetchone()[0])\n rval.append({\"noteId\":note[0], \"date\":note[1], \"title\":note[2], \"keywords\":keywords,\n \"content\":note[3], \"hash\":note[4], \"book\":note[5]})\n return rval", "def GetNotifications(user=None, token=None):\n \"\"\"Show pending notifications for a user.\"\"\"\n if not user:\n user = getpass.getuser()\n user_obj = aff4.FACTORY.Open(\n aff4.ROOT_URN.Add(\"users\").Add(user), token=token)\n return list(user_obj.Get(user_obj.Schema.PENDING_NOTIFICATIONS))" ]
[ 0.7208589911460876, 0.7078884840011597, 0.7056156992912292, 0.7021738290786743, 0.6830140948295593, 0.6799189448356628, 0.6790181994438171, 0.6784870028495789, 0.6749463081359863, 0.6740153431892395, 0.6733935475349426, 0.6706024408340454 ]
Decompress the BLOB portion of the usernotes. Arguments: j: the JSON returned from the wiki page (dict) Returns a Dict with the 'blob' key removed and a 'users' key added
def _expand_json(self, j):
    """Decompress the BLOB portion of the usernotes.

    Arguments:
        j: the JSON returned from the wiki page (dict)

    Returns a Dict with the 'blob' key removed and a 'users' key added
    """
    decompressed_json = copy.copy(j)
    decompressed_json.pop('blob', None)  # Remove BLOB portion of JSON

    # Decode and decompress JSON
    compressed_data = base64.b64decode(j['blob'])
    original_json = zlib.decompress(compressed_data).decode('utf-8')

    decompressed_json['users'] = json.loads(original_json)  # Insert users

    return decompressed_json
[ "def _compress_json(self, j):\n \"\"\"Compress the BLOB data portion of the usernotes.\n\n Arguments:\n j: the JSON in Schema v5 format (dict)\n\n Returns a dict with the 'users' key removed and 'blob' key added\n \"\"\"\n compressed_json = copy.copy(j)\n compressed_json.pop('users', None)\n\n compressed_data = zlib.compress(\n json.dumps(j['users']).encode('utf-8'),\n self.zlib_compression_strength\n )\n b64_data = base64.b64encode(compressed_data).decode('utf-8')\n\n compressed_json['blob'] = b64_data\n\n return compressed_json", "def get_json(self):\n \"\"\"Get the JSON stored on the usernotes wiki page.\n\n Returns a dict representation of the usernotes (with the notes BLOB\n decoded).\n\n Raises:\n RuntimeError if the usernotes version is incompatible with this\n version of puni.\n \"\"\"\n try:\n usernotes = self.subreddit.wiki[self.page_name].content_md\n notes = json.loads(usernotes)\n except NotFound:\n self._init_notes()\n else:\n if notes['ver'] != self.schema:\n raise RuntimeError(\n 'Usernotes schema is v{0}, puni requires v{1}'.\n format(notes['ver'], self.schema)\n )\n\n self.cached_json = self._expand_json(notes)\n\n return self.cached_json", "def reducejson(j):\n \"\"\" \n Not sure if there's a better way to walk the ... interesting result\n \"\"\"\n\n authors = []\n\n for key in j[\"data\"][\"repository\"][\"commitComments\"][\"edges\"]:\n authors.append(key[\"node\"][\"author\"])\n\n for key in j[\"data\"][\"repository\"][\"issues\"][\"nodes\"]:\n authors.append(key[\"author\"])\n for c in key[\"comments\"][\"nodes\"]:\n authors.append(c[\"author\"])\n \n for key in j[\"data\"][\"repository\"][\"pullRequests\"][\"edges\"]:\n authors.append(key[\"node\"][\"author\"])\n for c in key[\"node\"][\"comments\"][\"nodes\"]:\n authors.append(c[\"author\"])\n\n unique = list({v['login']:v for v in authors if v is not None}.values())\n return unique", "def normalize_jr(jr, url=None):\n \"\"\" normalize JSON reference, also fix\n implicit reference of JSON pointer.\n input:\n - #/definitions/User\n - http://test.com/swagger.json#/definitions/User\n output:\n - http://test.com/swagger.json#/definitions/User\n\n input:\n - some_folder/User.json\n output:\n - http://test.com/some_folder/User.json\n \"\"\"\n\n if jr == None:\n return jr\n\n idx = jr.find('#')\n path, jp = (jr[:idx], jr[idx+1:]) if idx != -1 else (jr, None)\n\n if len(path) > 0:\n p = six.moves.urllib.parse.urlparse(path)\n if p.scheme == '' and url:\n p = six.moves.urllib.parse.urlparse(url)\n # it's the path of relative file\n path = six.moves.urllib.parse.urlunparse(p[:2]+('/'.join([os.path.dirname(p.path), path]),)+p[3:])\n path = derelativise_url(path)\n else:\n path = url\n\n if path:\n return ''.join([path, '#', jp]) if jp else path\n else:\n return '#' + jp", "def set_json(self, reason='', new_page=False):\n \"\"\"Send the JSON from the cache to the usernotes wiki page.\n\n Arguments:\n reason: the change reason that will be posted to the wiki changelog\n (str)\n Raises:\n OverflowError if the new JSON data is greater than max_page_size\n \"\"\"\n compressed_json = json.dumps(self._compress_json(self.cached_json))\n\n if len(compressed_json) > self.max_page_size:\n raise OverflowError(\n 'Usernotes page is too large (>{0} characters)'.\n format(self.max_page_size)\n )\n\n if new_page:\n self.subreddit.wiki.create(\n self.page_name,\n compressed_json,\n reason\n )\n # Set the page as hidden and available to moderators only\n self.subreddit.wiki[self.page_name].mod.update(False, permlevel=2)\n else:\n 
self.subreddit.wiki[self.page_name].edit(\n compressed_json,\n reason\n )", "def parse_journal(journal):\n \"\"\"Parses the USN Journal content removing duplicates\n and corrupted records.\n\n \"\"\"\n events = [e for e in journal if not isinstance(e, CorruptedUsnRecord)]\n keyfunc = lambda e: str(e.file_reference_number) + e.file_name + e.timestamp\n event_groups = (tuple(g) for k, g in groupby(events, key=keyfunc))\n\n if len(events) < len(list(journal)):\n LOGGER.debug(\n \"Corrupted records in UsnJrnl, some events might be missing.\")\n\n return [journal_event(g) for g in event_groups]", "def collapseuser(path):\n \"\"\"If path begins with the home directory, replaces the start of the path with \"~/\". Essentially the reverse of os.path.expanduser()\"\"\"\n home = os.path.join(os.path.expanduser(\"~\"), \"\")\n if path.startswith(home):\n path = os.path.join(\"~\", path[len(home):])\n return path", "def expanduser(path):\n # type: (str) -> str\n \"\"\"\n Expand ~ and ~user constructions.\n\n Includes a workaround for https://bugs.python.org/issue14768\n \"\"\"\n expanded = os.path.expanduser(path)\n if path.startswith('~/') and expanded.startswith('//'):\n expanded = expanded[1:]\n return expanded", "def _unpack_user(v):\n \"\"\"Internal helper to unpack a User value from a protocol buffer.\"\"\"\n uv = v.uservalue()\n email = unicode(uv.email().decode('utf-8'))\n auth_domain = unicode(uv.auth_domain().decode('utf-8'))\n obfuscated_gaiaid = uv.obfuscated_gaiaid().decode('utf-8')\n obfuscated_gaiaid = unicode(obfuscated_gaiaid)\n\n federated_identity = None\n if uv.has_federated_identity():\n federated_identity = unicode(\n uv.federated_identity().decode('utf-8'))\n\n value = users.User(email=email,\n _auth_domain=auth_domain,\n _user_id=obfuscated_gaiaid,\n federated_identity=federated_identity)\n return value", "def split_pubnote(pubnote_str):\n \"\"\"Split pubnote into journal information.\"\"\"\n pubnote = {}\n parts = pubnote_str.split(',')\n\n if len(parts) > 2:\n pubnote['journal_title'] = parts[0]\n pubnote['journal_volume'] = parts[1]\n pubnote['page_start'], pubnote['page_end'], pubnote['artid'] = split_page_artid(parts[2])\n\n return {key: val for (key, val) in six.iteritems(pubnote) if val is not None}", "def _getDict(j9Page):\n \"\"\"Parses a Journal Title Abbreviations page\n\n Note the pages are not well formatted html as the <DT> tags are not closes so html parses (Beautiful Soup) do not work. 
This is a simple parser that only works on the webpages and may fail if they are changed\n\n For Backend\n \"\"\"\n slines = j9Page.read().decode('utf-8').split('\\n')\n while slines.pop(0) != \"<DL>\":\n pass\n currentName = slines.pop(0).split('\"></A><DT>')[1]\n currentTag = slines.pop(0).split(\"<B><DD>\\t\")[1]\n j9Dict = {}\n while True:\n try:\n j9Dict[currentTag].append(currentName)\n except KeyError:\n j9Dict[currentTag] = [currentName]\n try:\n currentName = slines.pop(0).split('</B><DT>')[1]\n currentTag = slines.pop(0).split(\"<B><DD>\\t\")[1]\n except IndexError:\n break\n return j9Dict", "def decompress(self, value):\n \"\"\"\n Takes the sequence of ``AssignedKeyword`` instances and splits\n them into lists of keyword IDs and titles each mapping to one\n of the form field widgets.\n \"\"\"\n if hasattr(value, \"select_related\"):\n keywords = [a.keyword for a in value.select_related(\"keyword\")]\n if keywords:\n keywords = [(str(k.id), k.title) for k in keywords]\n self._ids, words = list(zip(*keywords))\n return (\",\".join(self._ids), \", \".join(words))\n return (\"\", \"\")" ]
[ 0.8413234949111938, 0.6811477541923523, 0.676547646522522, 0.638260006904602, 0.6341983079910278, 0.6279118061065674, 0.6271971464157104, 0.6196523904800415, 0.6192101240158081, 0.6188784837722778, 0.6174898743629456, 0.6166394352912903 ]
Compress the BLOB data portion of the usernotes. Arguments: j: the JSON in Schema v5 format (dict) Returns a dict with the 'users' key removed and 'blob' key added
def _compress_json(self, j): """Compress the BLOB data portion of the usernotes. Arguments: j: the JSON in Schema v5 format (dict) Returns a dict with the 'users' key removed and 'blob' key added """ compressed_json = copy.copy(j) compressed_json.pop('users', None) compressed_data = zlib.compress( json.dumps(j['users']).encode('utf-8'), self.zlib_compression_strength ) b64_data = base64.b64encode(compressed_data).decode('utf-8') compressed_json['blob'] = b64_data return compressed_json
[ "def _expand_json(self, j):\n \"\"\"Decompress the BLOB portion of the usernotes.\n\n Arguments:\n j: the JSON returned from the wiki page (dict)\n\n Returns a Dict with the 'blob' key removed and a 'users' key added\n \"\"\"\n decompressed_json = copy.copy(j)\n decompressed_json.pop('blob', None) # Remove BLOB portion of JSON\n\n # Decode and decompress JSON\n compressed_data = base64.b64decode(j['blob'])\n original_json = zlib.decompress(compressed_data).decode('utf-8')\n\n decompressed_json['users'] = json.loads(original_json) # Insert users\n\n return decompressed_json", "def reducejson(j):\n \"\"\" \n Not sure if there's a better way to walk the ... interesting result\n \"\"\"\n\n authors = []\n\n for key in j[\"data\"][\"repository\"][\"commitComments\"][\"edges\"]:\n authors.append(key[\"node\"][\"author\"])\n\n for key in j[\"data\"][\"repository\"][\"issues\"][\"nodes\"]:\n authors.append(key[\"author\"])\n for c in key[\"comments\"][\"nodes\"]:\n authors.append(c[\"author\"])\n \n for key in j[\"data\"][\"repository\"][\"pullRequests\"][\"edges\"]:\n authors.append(key[\"node\"][\"author\"])\n for c in key[\"node\"][\"comments\"][\"nodes\"]:\n authors.append(c[\"author\"])\n\n unique = list({v['login']:v for v in authors if v is not None}.values())\n return unique", "def get_json(self):\n \"\"\"Get the JSON stored on the usernotes wiki page.\n\n Returns a dict representation of the usernotes (with the notes BLOB\n decoded).\n\n Raises:\n RuntimeError if the usernotes version is incompatible with this\n version of puni.\n \"\"\"\n try:\n usernotes = self.subreddit.wiki[self.page_name].content_md\n notes = json.loads(usernotes)\n except NotFound:\n self._init_notes()\n else:\n if notes['ver'] != self.schema:\n raise RuntimeError(\n 'Usernotes schema is v{0}, puni requires v{1}'.\n format(notes['ver'], self.schema)\n )\n\n self.cached_json = self._expand_json(notes)\n\n return self.cached_json", "def compress(obj):\n \"\"\"Outputs json without whitespace.\"\"\"\n return json.dumps(obj, sort_keys=True, separators=(',', ':'),\n cls=CustomEncoder)", "def pack_dict(obj):\n \"\"\"\n Write dictionary object as a singular structure array\n :param obj: dictionary object to serialize. 
The fields must be simple scalar or an array.\n \"\"\"\n obj = OrderedDict(obj)\n blob = b'S'\n blob += np.array((1, 1), dtype=np.uint64).tostring()\n blob += np.array(len(obj), dtype=np.uint32).tostring()\n\n # write out field names\n for k in obj:\n blob += pack_string(k)\n\n for k, v in obj.items():\n blob_part = pack_obj(v)\n blob += np.array(len(blob_part), dtype=np.uint64).tostring()\n blob += blob_part\n\n return blob", "def shrink_blob(record, deletion):\n \"\"\"\n Makes a shrunken blob to be sent to SNS/SQS (due to the 256KB size limitations of SNS/SQS messages).\n This will essentially remove the \"configuration\" field such that the size of the SNS/SQS message remains under\n 256KB.\n :param record:\n :return:\n \"\"\"\n item = {\n \"eventName\": record[\"eventName\"],\n EVENT_TOO_BIG_FLAG: (not deletion)\n }\n\n # To handle TTLs (if they happen)\n if record.get(\"userIdentity\"):\n item[\"userIdentity\"] = record[\"userIdentity\"]\n\n # Remove the 'configuration' and 'requestParameters' fields from new and old images if applicable:\n if not deletion:\n # Only remove it from non-deletions:\n if record['dynamodb'].get('NewImage'):\n record['dynamodb']['NewImage'].pop('configuration', None)\n record['dynamodb']['NewImage'].pop('requestParameters', None)\n\n if record['dynamodb'].get('OldImage'):\n record['dynamodb']['OldImage'].pop('configuration', None)\n record['dynamodb']['OldImage'].pop('requestParameters', None)\n\n item['dynamodb'] = record['dynamodb']\n\n return item", "def blob(self):\n \"\"\"A compact text representation of the graph\"\"\"\n atom_str = \",\".join(str(number) for number in self.numbers)\n edge_str = \",\".join(\"%i_%i_%i\" % (i, j, o) for (i, j), o in zip(self.edges, self.orders))\n return \"%s %s\" % (atom_str, edge_str)", "def set_json(self, reason='', new_page=False):\n \"\"\"Send the JSON from the cache to the usernotes wiki page.\n\n Arguments:\n reason: the change reason that will be posted to the wiki changelog\n (str)\n Raises:\n OverflowError if the new JSON data is greater than max_page_size\n \"\"\"\n compressed_json = json.dumps(self._compress_json(self.cached_json))\n\n if len(compressed_json) > self.max_page_size:\n raise OverflowError(\n 'Usernotes page is too large (>{0} characters)'.\n format(self.max_page_size)\n )\n\n if new_page:\n self.subreddit.wiki.create(\n self.page_name,\n compressed_json,\n reason\n )\n # Set the page as hidden and available to moderators only\n self.subreddit.wiki[self.page_name].mod.update(False, permlevel=2)\n else:\n self.subreddit.wiki[self.page_name].edit(\n compressed_json,\n reason\n )", "def dumps(obj, **kwargs):\n \"\"\"Serialize ``obj`` to a JSON5-formatted ``str``.\"\"\"\n\n t = type(obj)\n if obj is True:\n return u'true'\n elif obj is False:\n return u'false'\n elif obj == None:\n return u'null'\n elif t == type('') or t == type(u''):\n single = \"'\" in obj\n double = '\"' in obj\n if single and double:\n return json.dumps(obj)\n elif single:\n return '\"' + obj + '\"'\n else:\n return \"'\" + obj + \"'\"\n elif t is float or t is int:\n return str(obj)\n elif t is dict:\n return u'{' + u','.join([\n _dumpkey(k) + u':' + dumps(v) for k, v in obj.items()\n ]) + '}'\n elif t is list:\n return u'[' + ','.join([dumps(el) for el in obj]) + u']'\n else: # pragma: no cover\n return u''", "def encode(self, obj):\n \"\"\"\n Take a Python object and return it as a tuple (value, value_type), a\n blob and a one-char code for what type it is\n \"\"\"\n if self._is_valid_mysql_bigint(obj):\n return obj, 'i'\n\n 
value = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)\n value_type = 'p'\n if (\n self._compress_min_length\n and len(value) >= self._compress_min_length\n ):\n value = zlib.compress(value, self._compress_level)\n value_type = 'z'\n return value, value_type", "def dumps(self, blob):\n \"\"\"\n Call json.dumps with the attributes of this instance as\n arguments.\n \"\"\"\n\n return json.dumps(\n blob, indent=self.indent, sort_keys=True,\n separators=self.separators,\n )", "def normalize_jr(jr, url=None):\n \"\"\" normalize JSON reference, also fix\n implicit reference of JSON pointer.\n input:\n - #/definitions/User\n - http://test.com/swagger.json#/definitions/User\n output:\n - http://test.com/swagger.json#/definitions/User\n\n input:\n - some_folder/User.json\n output:\n - http://test.com/some_folder/User.json\n \"\"\"\n\n if jr == None:\n return jr\n\n idx = jr.find('#')\n path, jp = (jr[:idx], jr[idx+1:]) if idx != -1 else (jr, None)\n\n if len(path) > 0:\n p = six.moves.urllib.parse.urlparse(path)\n if p.scheme == '' and url:\n p = six.moves.urllib.parse.urlparse(url)\n # it's the path of relative file\n path = six.moves.urllib.parse.urlunparse(p[:2]+('/'.join([os.path.dirname(p.path), path]),)+p[3:])\n path = derelativise_url(path)\n else:\n path = url\n\n if path:\n return ''.join([path, '#', jp]) if jp else path\n else:\n return '#' + jp" ]
[ 0.8370488286018372, 0.6714560389518738, 0.6385548710823059, 0.6376365423202515, 0.6277602314949036, 0.6251710653305054, 0.6193103194236755, 0.6173309683799744, 0.6137800812721252, 0.6126267313957214, 0.6081791520118713, 0.6060118675231934 ]
Add a note to the usernotes wiki page. Arguments: note: the note to be added (Note) Returns the update message for the usernotes wiki Raises: ValueError when the warning type of the note can not be found in the stored list of warnings.
def add_note(self, note): """Add a note to the usernotes wiki page. Arguments: note: the note to be added (Note) Returns the update message for the usernotes wiki Raises: ValueError when the warning type of the note can not be found in the stored list of warnings. """ notes = self.cached_json if not note.moderator: note.moderator = self.r.user.me().name # Get index of moderator in mod list from usernotes # Add moderator to list if not already there try: mod_index = notes['constants']['users'].index(note.moderator) except ValueError: notes['constants']['users'].append(note.moderator) mod_index = notes['constants']['users'].index(note.moderator) # Get index of warning type from warnings list # Add warning type to list if not already there try: warn_index = notes['constants']['warnings'].index(note.warning) except ValueError: if note.warning in Note.warnings: notes['constants']['warnings'].append(note.warning) warn_index = notes['constants']['warnings'].index(note.warning) else: raise ValueError('Warning type not valid: ' + note.warning) new_note = { 'n': note.note, 't': note.time, 'm': mod_index, 'l': note.link, 'w': warn_index } try: notes['users'][note.username]['ns'].insert(0, new_note) except KeyError: notes['users'][note.username] = {'ns': [new_note]} return '"create new note on user {}" via puni'.format(note.username)
[ "def add_note(\n self,\n note):\n \"\"\"*Add a note to this taskpaper object*\n\n **Key Arguments:**\n - ``note`` -- the note (string)\n\n **Return:**\n - None\n\n **Usage:**\n\n To add a note to a document, project or task object:\n\n .. code-block:: python\n\n newNote = doc.add_note(And another note with a link http://www.thespacedoctor.co.uk\")\n \"\"\"\n self.refresh\n note = note.strip()\n\n newNote = self._get_object(\n regex=re.compile(\n r'((?<=\\n)|(?<=^))(?P<title>\\S(?<!-)((?!(: +@|: *\\n|: *$)).)*)\\s*?(\\n|$)(?P<tagString>&&&)?(?P<content>&&&)?', re.UNICODE),\n objectType=\"note\",\n content=note\n )\n\n # ADD DIRECTLY TO CONTENT IF THE PROJECT IS BEING ADDED SPECIFICALLY TO\n # THIS OBJECT\n oldContent = self.to_string(indentLevel=1)\n newContent = self.to_string(\n indentLevel=1)\n\n if self.parent:\n self.parent._update_document_tree(\n oldContent=oldContent,\n newContent=newContent\n )\n\n self.notes = newNote + self.notes\n self.content = self.content.replace(self.to_string(indentLevel=0, title=False), self.to_string(\n indentLevel=0, title=False))\n\n doc = self\n while doc.parent:\n doc = doc.parent\n doc.refresh\n\n return newNote[0]", "def add_note(note, **kwargs):\n \"\"\"\n Add a new note\n \"\"\"\n note_i = Note()\n note_i.ref_key = note.ref_key\n\n note_i.set_ref(note.ref_key, note.ref_id)\n\n note_i.value = note.value\n\n note_i.created_by = kwargs.get('user_id')\n\n db.DBSession.add(note_i)\n db.DBSession.flush()\n\n return note_i", "def add_note(self, content):\n \"\"\"Add a note to the project.\n\n .. warning:: Requires Todoist premium.\n\n :param content: The note content.\n :type content: str\n\n >>> from pytodoist import todoist\n >>> user = todoist.login('john.doe@gmail.com', 'password')\n >>> project = user.get_project('PyTodoist')\n >>> project.add_note('Remember to update to the latest version.')\n \"\"\"\n args = {\n 'project_id': self.id,\n 'content': content\n }\n _perform_command(self.owner, 'note_add', args)", "def add_notes(self, notes):\n \"\"\"Feed notes to self.add_note.\n\n The notes can either be an other NoteContainer, a list of Note\n objects or strings or a list of lists formatted like this:\n >>> notes = [['C', 5], ['E', 5], ['G', 6]]\n\n or even:\n >>> notes = [['C', 5, {'volume': 20}], ['E', 6, {'volume': 20}]]\n \"\"\"\n if hasattr(notes, 'notes'):\n for x in notes.notes:\n self.add_note(x)\n return self.notes\n elif hasattr(notes, 'name'):\n self.add_note(notes)\n return self.notes\n elif type(notes) == str:\n self.add_note(notes)\n return self.notes\n for x in notes:\n if type(x) == list and len(x) != 1:\n if len(x) == 2:\n self.add_note(x[0], x[1])\n else:\n self.add_note(x[0], x[1], x[2])\n else:\n self.add_note(x)\n return self.notes", "def update_note(self, note_id, revision, content):\n ''' Updates the note with the given ID to have the given content '''\n return notes_endpoint.update_note(self, note_id, revision, content)", "def add_note(self, body):\n \"\"\"\n Create a Note to current object\n\n :param body: the body of the note\n :type body: str\n :return: newly created Note\n :rtype: Tag\n \"\"\"\n from highton.models.note import Note\n created_id = self._post_request(\n endpoint=self.ENDPOINT + '/' + str(self.id) + '/' + Note.ENDPOINT,\n data=self.element_to_string(\n Note(body=body).encode()\n )\n ).headers.get('Location').replace('.xml', '').split('/')[-1]\n return Note.get(created_id)", "def add_note(self, note):\n \"\"\" Wrapper method to add a note\n\n The method can be passed the note as a dict with the `content`\n 
property set, which is then directly send to the web service for\n creation. Alternatively, only the body as string can also be passed. In\n this case the parameter is used as `content` for the new note.\n\n Arguments:\n - note (dict or string): the note to add\n\n Returns:\n A tuple `(note, status)`\n\n - note (dict): the newly created note\n - status (int): 0 on success and -1 otherwise\n\n \"\"\"\n\n if type(note) == str:\n return self.update_note({\"content\": note})\n elif (type(note) == dict) and \"content\" in note:\n return self.update_note(note)\n else:\n return \"No string or valid note.\", -1", "def update_note(self, note):\n \"\"\" Method to update a specific note object, if the note object does not\n have a \"key\" field, a new note is created\n\n Arguments\n - note (dict): note object to update\n\n Returns:\n A tuple `(note, status)`\n - note (dict): note object\n - status (int): 0 on success and -1 otherwise\n\n \"\"\"\n # determine whether to create a new note or update an existing one\n # Also need to add/remove key field to keep simplenote.py consistency\n if \"key\" in note:\n # Then already have a noteid we need to remove before passing to Simperium API\n noteid = note.pop(\"key\", None)\n else:\n # Adding a new note\n noteid = uuid.uuid4().hex\n\n\n # TODO: Set a ccid?\n # ccid = uuid.uuid4().hex\n if \"version\" in note:\n version = note.pop(\"version\", None)\n url = '%s/i/%s/v/%s?response=1' % (DATA_URL, noteid, version)\n else:\n url = '%s/i/%s?response=1' % (DATA_URL, noteid)\n\n # TODO: Could do with being consistent here. Everywhere else is Request(DATA_URL+params)\n note = self.__remove_simplenote_api_fields(note)\n request = Request(url, data=json.dumps(note).encode('utf-8'))\n request.add_header(self.header, self.get_token())\n request.add_header('Content-Type', 'application/json')\n\n response = \"\"\n try:\n response = urllib2.urlopen(request)\n except HTTPError as e:\n if e.code == 401:\n raise SimplenoteLoginFailed('Login to Simplenote API failed! Check Token.')\n else:\n return e, -1\n except IOError as e:\n return e, -1\n note = json.loads(response.read().decode('utf-8'))\n note = self.__add_simplenote_api_fields(note, noteid, int(response.info().get(\"X-Simperium-Version\")))\n return note, 0", "def add_note(path, filename=\"note.txt\"):\n \"\"\"Opens a txt file at the given path where user can add and save notes.\n\n Args:\n path (str): Directory where note will be saved.\n filename (str): Name of note. Defaults to \"note.txt\"\n \"\"\"\n path = os.path.expanduser(path)\n assert os.path.isdir(path), \"{} is not a valid directory.\".format(path)\n\n filepath = os.path.join(path, filename)\n exists = os.path.isfile(filepath)\n\n try:\n subprocess.call([EDITOR, filepath])\n except Exception as exc:\n logger.error(\"Editing note failed!\")\n raise exc\n if exists:\n print(\"Note updated at:\", filepath)\n else:\n print(\"Note created at:\", filepath)", "def note(self, note):\n \"\"\"\n Sets the note of this OrderFulfillmentPickupDetails.\n A general note about the pickup fulfillment. 
Notes are useful for providing additional instructions and are displayed in Square apps.\n\n :param note: The note of this OrderFulfillmentPickupDetails.\n :type: str\n \"\"\"\n\n if note is None:\n raise ValueError(\"Invalid value for `note`, must not be `None`\")\n if len(note) > 500:\n raise ValueError(\"Invalid value for `note`, length must be less than `500`\")\n\n self._note = note", "def update_note(note, **kwargs):\n \"\"\"\n Update a note\n \"\"\"\n note_i = _get_note(note.id)\n\n if note.ref_key != note_i.ref_key:\n raise HydraError(\"Cannot convert a %s note to a %s note. Please create a new note instead.\"%(note_i.ref_key, note.ref_key))\n\n note_i.set_ref(note.ref_key, note.ref_id)\n\n note_i.value = note.value\n\n db.DBSession.flush()\n\n return note_i", "def add_note(self, word_id, note,\n url='https://api.shanbay.com/bdc/note/'):\n \"\"\"创建笔记\"\"\"\n data = {\n 'vocabulary': word_id,\n 'note': note\n }\n return self._request(url, method='post', data=data).json()" ]
[ 0.7897244691848755, 0.7732542753219604, 0.7531960606575012, 0.7384588122367859, 0.7375184893608093, 0.7352802753448486, 0.7301070690155029, 0.7237897515296936, 0.7206209897994995, 0.7201989889144897, 0.7145036458969116, 0.7100257277488708 ]
Remove a single usernote from the usernotes. Arguments: username: the user that for whom you're removing a note (str) index: the index of the note which is to be removed (int) Returns the update message for the usernotes wiki
def remove_note(self, username, index):
        """Remove a single usernote from the usernotes.

        Arguments:
            username: the user that for whom you're removing a note (str)
            index: the index of the note which is to be removed (int)

        Returns the update message for the usernotes wiki
        """
        self.cached_json['users'][username]['ns'].pop(index)

        # Go ahead and remove the user's entry if they have no more notes left
        if len(self.cached_json['users'][username]['ns']) == 0:
            del self.cached_json['users'][username]

        return '"delete note #{} on user {}" via puni'.format(index, username)
[ "def add_note(self, note):\n \"\"\"Add a note to the usernotes wiki page.\n\n Arguments:\n note: the note to be added (Note)\n\n Returns the update message for the usernotes wiki\n\n Raises:\n ValueError when the warning type of the note can not be found in the\n stored list of warnings.\n \"\"\"\n notes = self.cached_json\n\n if not note.moderator:\n note.moderator = self.r.user.me().name\n\n # Get index of moderator in mod list from usernotes\n # Add moderator to list if not already there\n try:\n mod_index = notes['constants']['users'].index(note.moderator)\n except ValueError:\n notes['constants']['users'].append(note.moderator)\n mod_index = notes['constants']['users'].index(note.moderator)\n\n # Get index of warning type from warnings list\n # Add warning type to list if not already there\n try:\n warn_index = notes['constants']['warnings'].index(note.warning)\n except ValueError:\n if note.warning in Note.warnings:\n notes['constants']['warnings'].append(note.warning)\n warn_index = notes['constants']['warnings'].index(note.warning)\n else:\n raise ValueError('Warning type not valid: ' + note.warning)\n\n new_note = {\n 'n': note.note,\n 't': note.time,\n 'm': mod_index,\n 'l': note.link,\n 'w': warn_index\n }\n\n try:\n notes['users'][note.username]['ns'].insert(0, new_note)\n except KeyError:\n notes['users'][note.username] = {'ns': [new_note]}\n\n return '\"create new note on user {}\" via puni'.format(note.username)", "def remove_editor(self, username, *args, **kwargs):\n \"\"\"Remove an editor from this wiki page.\n\n :param username: The name or Redditor object of the user to remove.\n\n This method points to :meth:`add_editor` with _delete=True.\n\n Additional parameters are are passed to :meth:`add_editor` and\n subsequently into :meth:`~praw.__init__.BaseReddit.request_json`.\n \"\"\"\n return self.add_editor(username=username, _delete=True, *args,\n **kwargs)", "def delete_note(self, note_id):\n \"\"\" Method to permanently delete a note\n\n Arguments:\n - note_id (string): key of the note to trash\n\n Returns:\n A tuple `(note, status)`\n\n - note (dict): an empty dict or an error message\n - status (int): 0 on success and -1 otherwise\n\n \"\"\"\n # notes have to be trashed before deletion\n note, status = self.trash_note(note_id)\n if (status == -1):\n return note, status\n\n params = '/i/%s' % (str(note_id))\n request = Request(url=DATA_URL+params, method='DELETE')\n request.add_header(self.header, self.get_token())\n try:\n response = urllib2.urlopen(request)\n except IOError as e:\n return e, -1\n except HTTPError as e:\n if e.code == 401:\n raise SimplenoteLoginFailed('Login to Simplenote API failed! Check Token.')\n else:\n return e, -1\n return {}, 0", "def note_delete(self, note_id):\n \"\"\"delete a specific note (Requires login) (UNTESTED).\n\n Parameters:\n note_id (int): Where note_id is the note id.\n \"\"\"\n return self._get('notes/{0}.json'.format(note_id), method='DELETE',\n auth=True)", "def trash_note(self, note_id):\n \"\"\" Method to move a note to the trash\n\n Arguments:\n - note_id (string): key of the note to trash\n\n Returns:\n A tuple `(note, status)`\n\n - note (dict): the newly created note or an error message\n - status (int): 0 on success and -1 otherwise\n\n \"\"\"\n # get note\n note, status = self.get_note(note_id)\n if (status == -1):\n return note, status\n # set deleted property, but only if not already trashed\n # TODO: A 412 is ok, that's unmodified. 
Should handle this in update_note and\n # then not worry about checking here\n if not note[\"deleted\"]:\n note[\"deleted\"] = True\n note[\"modificationDate\"] = time.time()\n # update note\n return self.update_note(note)\n else:\n return note, 0", "def remove_user(self, username):\n \"\"\" Remove user from the SQLite database.\n\n * `username` [string]\n Username of user to remove.\n \"\"\"\n\n sql = '''DELETE FROM user WHERE username = ?'''\n try:\n self._db_curs.execute(sql, (username, ))\n self._db_conn.commit()\n except (sqlite3.OperationalError, sqlite3.IntegrityError) as error:\n raise AuthError(error)\n return self._db_curs.rowcount", "def remove(self, username=None):\n \"\"\"Remove User instance based on supplied user name.\"\"\"\n self._user_list = [user for user in self._user_list if user.name != username]", "def note_revert(self, note_id, version):\n \"\"\"Function to revert a specific note (Requires login) (UNTESTED).\n\n Parameters:\n note_id (int): The note id to update.\n version (int): The version to revert to.\n \"\"\"\n params = {'id': note_id, 'version': version}\n return self._get('note/revert', params, method='PUT')", "async def remove_user(self, username):\n \"\"\"Remove a user from this controller.\n \"\"\"\n client_facade = client.UserManagerFacade.from_connection(\n self.connection())\n user = tag.user(username)\n await client_facade.RemoveUser([client.Entity(user)])", "def update_note(self, note_id, revision, content):\n ''' Updates the note with the given ID to have the given content '''\n return notes_endpoint.update_note(self, note_id, revision, content)", "def delete_user(self, username):\n \"\"\"\n Deletes a user from the server.\n\n :param string username: Name of the user to delete from the server.\n \"\"\"\n path = Client.urls['users_by_name'] % username\n return self._call(path, 'DELETE')", "def del_user(self, username):\n \"\"\" Delete a user from the backend\n\n :param username: 'key' attribute of the user\n :type username: string\n\n \"\"\"\n self._check_fix_users(username)\n try:\n del self.users[username]\n except Exception as e:\n raise UserDoesntExist(username, self.backend_name)" ]
[ 0.6919499635696411, 0.6796944737434387, 0.6752998232841492, 0.6715428233146667, 0.6680077314376831, 0.666473388671875, 0.6651005148887634, 0.6631077527999878, 0.6554504632949829, 0.6528965830802917, 0.6489867568016052, 0.647257924079895 ]
Function load Get the list of all objects @return RETURN: A ForemanItem list
def load(self): """ Function load Get the list of all objects @return RETURN: A ForemanItem list """ cl_tmp = self.api.list(self.objName, limit=self.searchLimit).values() cl = [] for i in cl_tmp: cl.extend(i) return {x[self.index]: ItemPuppetClass(self.api, x['id'], self.objName, self.payloadObj, x) for x in cl}
[ "def load(self):\n \"\"\" Function load\n Get the list of all objects\n\n @return RETURN: A ForemanItem list\n \"\"\"\n return {x[self.index]: self.itemType(self.api, x['id'],\n self.objName, self.payloadObj,\n x)\n for x in self.api.list(self.objName,\n limit=self.searchLimit)}", "def load(self, limit=9999):\n \"\"\" Function list\n Get the list of all interfaces\n\n @param key: The targeted object\n @param limit: The limit of items to return\n @return RETURN: A ForemanItem list\n \"\"\"\n subItemList = self.api.list('{}/{}/{}'.format(self.parentObjName,\n self.parentKey,\n self.objName,\n ),\n limit=limit)\n if self.objName == 'puppetclass_ids':\n subItemList = list(map(lambda x: {'id': x}, subItemList))\n if self.objName == 'puppetclasses':\n sil_tmp = subItemList.values()\n subItemList = []\n for i in sil_tmp:\n subItemList.extend(i)\n return {x[self.index]: self.objType(self.api, x['id'],\n self.parentObjName,\n self.parentPayloadObj,\n self.parentKey,\n x)\n for x in subItemList}", "def get_items(self):\n \"\"\"This is out of spec, but required for adaptive assessment parts?\"\"\"\n ils = get_item_lookup_session(runtime=self._runtime, proxy=self._proxy)\n ils.use_federated_bank_view()\n items = []\n if self.has_items():\n for idstr in self._my_map['itemIds']:\n items.append(ils.get_item(Id(idstr)))\n return ItemList(items, runtime=self._runtime, proxy=self._proxy)", "def get_all(self) -> List[Commodity]:\n \"\"\" Loads all non-currency commodities, assuming they are stocks. \"\"\"\n query = (\n self.query\n .order_by(Commodity.namespace, Commodity.mnemonic)\n )\n return query.all()", "def items(self):\n '''gets the property value for items'''\n self.__init()\n items = []\n for item in self._items:\n items.append(\n UserItem(url=\"%s/items/%s\" % (self.location, item['id']),\n securityHandler=self._securityHandler,\n proxy_url=self._proxy_url,\n proxy_port=self._proxy_port,\n initalize=True)\n )\n return items", "def load_all(self):\n \"\"\"\n Force all reports to be loaded and parsed instead of lazy loading on demand.\n \n :returns: ``self`` or ``None`` if load fails\n \"\"\"\n try:\n self.toi.load_all()\n self.rosters.load_all()\n #self.summary.load_all()\n self.play_by_play.load_all()\n self.face_off_comp.load_all()\n return self\n except Exception as e:\n print(e)\n return None", "def items(self):\n \"\"\"\n Loads the items this Installation refers to.\n \"\"\"\n for id in self._items:\n yield self.store.getItemByID(int(id))", "def load_item_for_objective(self):\n \"\"\"if this is the first time for this magic part, find an LO linked item\"\"\"\n mgr = self.my_osid_object._get_provider_manager('ASSESSMENT', local=True)\n if self.my_osid_object._my_map['itemBankId']:\n item_query_session = mgr.get_item_query_session_for_bank(Id(self.my_osid_object._my_map['itemBankId']),\n proxy=self.my_osid_object._proxy)\n else:\n item_query_session = mgr.get_item_query_session(proxy=self.my_osid_object._proxy)\n item_query_session.use_federated_bank_view()\n item_query = item_query_session.get_item_query()\n for objective_id_str in self.my_osid_object._my_map['learningObjectiveIds']:\n item_query.match_learning_objective_id(Id(objective_id_str), True)\n item_list = list(item_query_session.get_items_by_query(item_query))\n # Let's query all takens and their children sections for questions, to\n # remove seen ones\n taking_agent_id = self._assessment_section._assessment_taken.taking_agent_id\n atqs = mgr.get_assessment_taken_query_session(proxy=self.my_osid_object._proxy)\n 
atqs.use_federated_bank_view()\n querier = atqs.get_assessment_taken_query()\n querier.match_taking_agent_id(taking_agent_id, match=True)\n # let's seed this with the current section's questions\n seen_items = [item_id for item_id in self._assessment_section._item_id_list]\n taken_ids = [str(t.ident)\n for t in atqs.get_assessments_taken_by_query(querier)]\n # Try to find the questions directly via Mongo query -- don't do\n # for section in taken._get_assessment_sections():\n # seen_items += [question['itemId'] for question in section._my_map['questions']]\n # because standing up all the sections is wasteful\n collection = JSONClientValidated('assessment',\n collection='AssessmentSection',\n runtime=self.my_osid_object._runtime)\n results = collection.find({\"assessmentTakenId\": {\"$in\": taken_ids}})\n for section in results:\n if 'questions' in section:\n seen_items += [question['itemId'] for question in section['questions']]\n unseen_item_id = None\n # need to randomly shuffle this item_list\n shuffle(item_list)\n for item in item_list:\n if str(item.ident) not in seen_items:\n unseen_item_id = item.get_id()\n break\n if unseen_item_id is not None:\n self.my_osid_object._my_map['itemIds'] = [str(unseen_item_id)]\n elif self.my_osid_object._my_map['allowRepeatItems']:\n if len(item_list) > 0:\n self.my_osid_object._my_map['itemIds'] = [str(item_list[0].ident)]\n else:\n self.my_osid_object._my_map['itemIds'] = [] # don't put '' here, it will break when it tries to find an item with id ''\n else:\n self.my_osid_object._my_map['itemIds'] = []", "def get_items(self):\n \"\"\"\n Return the item models associated with this Publish group.\n \"\"\"\n from .layers import Layer\n\n # no expansion support, just URLs\n results = []\n for url in self.items:\n if '/layers/' in url:\n r = self._client.request('GET', url)\n results.append(self._client.get_manager(Layer).create_from_result(r.json()))\n else:\n raise NotImplementedError(\"No support for %s\" % url)\n return results", "def GetItems(self):\n \"\"\"Updates self.name and self.items and returns (self.name, self.items)\"\"\"\n url = 'http://min.us/api/GetItems/' + 'm' + self.reader_id\n response = _doget(url)\n\n self.name = response[\"GALLERY_TITLE\"]\n\n # To get the item id, we have to take the file name from the URL\n # We also need to get rid of any file extension if there is any\n self.items = [a[16:].split('.')[0] for a in response[\"ITEMS_GALLERY\"]]\n\n return (self.name, self.items)", "def get_all(self):\n \"\"\"Gets all items in file.\"\"\"\n logger.debug('Fetching items. Path: {data_file}'.format(\n data_file=self.data_file\n ))\n\n return load_file(self.data_file)", "def get_all(self, force_download=False):\n \"\"\" Retrieve the metadata for all items in this list from the server,\n as Item objects\n\n :rtype: List\n :returns: a List of the corresponding Item objects\n :type force_download: Boolean\n :param force_download: True to download from the server\n regardless of the cache's contents\n\n :raises: APIError if the API request is not successful\n\n\n \"\"\"\n cl = self.client\n return [cl.get_item(item, force_download) for item in self.item_urls]" ]
[ 0.9007964730262756, 0.8018741607666016, 0.7024894952774048, 0.6753305196762085, 0.6598423719406128, 0.6597557663917542, 0.655421257019043, 0.653819739818573, 0.6532294154167175, 0.6501277089118958, 0.648154616355896, 0.6473885774612427 ]
Return True if the item relates to the given app_id (and app_ver, if passed).
def is_related_to(item, app_id, app_ver=None):
    """Return True if the item relates to the given app_id (and app_ver, if passed)."""
    versionRange = item.get('versionRange')
    if not versionRange:
        return True
    for vR in versionRange:
        if not vR.get('targetApplication'):
            return True
        if get_related_targetApplication(vR, app_id, app_ver) is not None:
            return True
    return False
[ "def get_app_relations(app_id, num=20, kind='1'):\n '''\n The the related infors.\n '''\n info_tag = MInfor2Catalog.get_first_category(app_id)\n if info_tag:\n return TabPost2Tag.select(\n TabPost2Tag,\n TabPost.title.alias('post_title'),\n TabPost.valid.alias('post_valid')\n ).join(\n TabPost, on=(TabPost2Tag.post_id == TabPost.uid)\n ).where(\n (TabPost2Tag.tag_id == info_tag.tag_id) &\n (TabPost.kind == kind)\n ).order_by(\n peewee.fn.Random()\n ).limit(num)\n return TabPost2Tag.select(\n TabPost2Tag,\n TabPost.title.alias('post_title'),\n TabPost.valid.alias('post_valid')\n ).join(\n TabPost, on=(TabPost2Tag.post_id == TabPost.uid)\n ).where(\n TabPost.kind == kind\n ).order_by(peewee.fn.Random()).limit(num)", "def application_id(self, app_id):\n \"\"\"Validate request application id matches true application id.\n\n Verifying the Application ID matches: https://goo.gl/qAdqe4.\n\n Args:\n app_id: str. Request application_id.\n\n Returns:\n bool: True if valid, False otherwise.\n \"\"\"\n if self.app_id != app_id:\n warnings.warn('Application ID is invalid.')\n return False\n return True", "def has_app(app_name):\n \"\"\"\n Determines whether an app is listed in INSTALLED_APPS or the app registry.\n :param app_name: string\n :return: bool\n \"\"\"\n if DJANGO_VERSION >= (1, 7):\n from django.apps import apps\n return apps.is_installed(app_name)\n else:\n from django.conf import settings\n\n return app_name in settings.INSTALLED_APPS", "def get_related_targetApplication(vR, app_id, app_ver):\n \"\"\"Return the first matching target application in this version range.\n Returns None if there are no target applications or no matching ones.\"\"\"\n targetApplication = vR.get('targetApplication')\n if not targetApplication:\n return None\n\n for tA in targetApplication:\n guid = tA.get('guid')\n if not guid or guid == app_id:\n if not app_ver:\n return tA\n # We purposefully use maxVersion only, so that the blocklist contains items\n # whose minimum version is ahead of the version we get passed. 
This means\n # the blocklist we serve is \"future-proof\" for app upgrades.\n if between(version_int(app_ver), '0', tA.get('maxVersion', '*')):\n return tA\n\n return None", "def get_version(self, app_id, version):\n \"\"\"Get the configuration of an app at a specific version.\n\n :param str app_id: application ID\n :param str version: application version\n\n :return: application configuration\n :rtype: :class:`marathon.models.app.MarathonApp`\n \"\"\"\n response = self._do_request('GET', '/v2/apps/{app_id}/versions/{version}'\n .format(app_id=app_id, version=version))\n return MarathonApp.from_json(response.json())", "def list_versions(self, app_id):\n \"\"\"List the versions of an app.\n\n :param str app_id: application ID\n\n :returns: list of versions\n :rtype: list[str]\n \"\"\"\n response = self._do_request(\n 'GET', '/v2/apps/{app_id}/versions'.format(app_id=app_id))\n return [version for version in response.json()['versions']]", "def _get_app_version(self, app_config):\n \"\"\"\n Some plugins ship multiple applications and extensions.\n However all of them have the same version, because they are released together.\n That's why only-top level module is used to fetch version information.\n \"\"\"\n\n base_name = app_config.__module__.split('.')[0]\n module = __import__(base_name)\n return getattr(module, '__version__', 'N/A')", "def match_app_version(self, app=None, label=None, version=None, default=False):\n \"\"\"Match app version against a semantic version string.\n\n Checks if a KE-chain app matches a version comparison. Uses the `semver` matcher to check.\n\n `match(\"2.0.0\", \">=1.0.0\")` => `True`\n `match(\"1.0.0\", \">1.0.0\")` => `False`\n\n Examples\n --------\n >>> client.match_app_version(label='wim', version=\">=1.99\")\n >>> True\n\n >>> client.match_app_version(app='kechain2.core.pim', version=\">=1.0.0\")\n >>> True\n\n :param app: (optional) appname eg. 
'kechain.core.wim'\n :type app: basestring or None\n :param label: (optional) app label (last part of the app name) eb 'wim'\n :type label: basestring or None\n :param version: semantic version string to match appname version against eg '2.0.0' or '>=2.0.0'\n :type version: basestring\n :param default: (optional) boolean to return if the version of the app is not set but the app found.\n Set to None to return a NotFoundError when a version if not found in the app.\n :type default: bool or None\n :return: True if the version of the app matches against the match_version, otherwise False\n :raises IllegalArgumentError: if no app nor a label is provided\n :raises NotFoundError: if the app is not found\n :raises ValueError: if the version provided is not parseable by semver,\n should contain (<operand><major>.<minor>.<patch) where <operand> is '>,<,>=,<=,=='\n\n \"\"\"\n if not app or not label and not (app and label):\n target_app = [a for a in self.app_versions if a.get('app') == app or a.get('label') == label]\n if not target_app and not isinstance(default, bool):\n raise NotFoundError(\"Could not find the app or label provided\")\n elif not target_app and isinstance(default, bool):\n return default\n else:\n raise IllegalArgumentError(\"Please provide either app or label\")\n\n if not version:\n raise IllegalArgumentError(\"Please provide semantic version string including operand eg: `>=1.0.0`\")\n\n app_version = target_app[0].get('version')\n\n if target_app and app_version and version:\n import semver\n return semver.match(app_version, version)\n elif not app_version:\n if isinstance(default, bool):\n return default\n else:\n raise NotFoundError(\"No version found on the app '{}'\".format(target_app[0].get('app')))", "def locate_item(ident, stateless=False, cache_id=None):\n '''Locate a dash application, given either the\n slug of an instance or the name for a stateless app'''\n if stateless:\n dash_app = find_stateless_by_name(ident)\n else:\n dash_app = get_object_or_404(DashApp, slug=ident)\n\n app = dash_app.as_dash_instance(cache_id=cache_id)\n return dash_app, app", "def set_app_id(self, id, version, icon):\n '''Sets some meta-information about the application.\n See also L{set_user_agent}().\n @param id: Java-style application identifier, e.g. \"com.acme.foobar\".\n @param version: application version numbers, e.g. \"1.2.3\".\n @param icon: application icon name, e.g. 
\"foobar\".\n @version: LibVLC 2.1.0 or later.\n '''\n return libvlc_set_app_id(self, str_to_bytes(id), str_to_bytes(version), str_to_bytes(icon))", "def rollback_app(self, app_id, version, force=False):\n \"\"\"Roll an app back to a previous version.\n\n :param str app_id: application ID\n :param str version: application version\n :param bool force: apply even if a deployment is in progress\n\n :returns: a dict containing the deployment id and version\n :rtype: dict\n \"\"\"\n params = {'force': force}\n data = json.dumps({'version': version})\n response = self._do_request(\n 'PUT', '/v2/apps/{app_id}'.format(app_id=app_id), params=params, data=data)\n return response.json()", "def update_app(self, app_id, app, force=False, minimal=True):\n \"\"\"Update an app.\n\n Applies writable settings in `app` to `app_id`\n Note: this method can not be used to rename apps.\n\n :param str app_id: target application ID\n :param app: application settings\n :type app: :class:`marathon.models.app.MarathonApp`\n :param bool force: apply even if a deployment is in progress\n :param bool minimal: ignore nulls and empty collections\n\n :returns: a dict containing the deployment id and version\n :rtype: dict\n \"\"\"\n # Changes won't take if version is set - blank it for convenience\n app.version = None\n\n params = {'force': force}\n data = app.to_json(minimal=minimal)\n\n response = self._do_request(\n 'PUT', '/v2/apps/{app_id}'.format(app_id=app_id), params=params, data=data)\n return response.json()" ]
[ 0.7099787592887878, 0.6942530870437622, 0.6856756806373596, 0.6784685850143433, 0.6681079268455505, 0.6609764695167542, 0.6578934788703918, 0.6472867131233215, 0.6428728103637695, 0.6427755951881409, 0.6412286162376404, 0.637650191783905 ]
Return the first matching target application in this version range. Returns None if there are no target applications or no matching ones.
def get_related_targetApplication(vR, app_id, app_ver):
    """Return the first matching target application in this version range.
    Returns None if there are no target applications or no matching ones."""
    targetApplication = vR.get('targetApplication')
    if not targetApplication:
        return None

    for tA in targetApplication:
        guid = tA.get('guid')
        if not guid or guid == app_id:
            if not app_ver:
                return tA
            # We purposefully use maxVersion only, so that the blocklist contains items
            # whose minimum version is ahead of the version we get passed. This means
            # the blocklist we serve is "future-proof" for app upgrades.
            if between(version_int(app_ver), '0', tA.get('maxVersion', '*')):
                return tA

    return None
[ "def target_app(self):\n \"\"\"Defer target app retrieval until requested\"\"\"\n if self.__target_app is None:\n self.__target_app = self._swimlane.apps.get(id=self.__target_app_id)\n\n return self.__target_app", "def get_app(self, reference_app=None):\n \"\"\"Helper method that implements the logic to look up an application.\"\"\"\n\n if reference_app is not None:\n return reference_app\n\n if self.app is not None:\n return self.app\n\n ctx = stack.top\n\n if ctx is not None:\n return ctx.app\n\n raise RuntimeError('Application not registered on Bouncer'\n ' instance and no application bound'\n ' to current context')", "def _get_target(self, target):\n \"\"\"\n Get the Package or Module related to given target.\n\n Args:\n target (str): target to find.\n\n Returns:\n Package/Module: package containing target or corresponding module.\n \"\"\"\n depth = target.count('.') + 1\n parts = target.split('.', 1)\n for m in self.modules:\n if parts[0] == m.name:\n if depth < 3:\n return m\n for p in self.packages:\n if parts[0] == p.name:\n if depth == 1:\n return p\n # pylama:ignore=W0212\n target = p._get_target(parts[1])\n if target:\n return target\n # FIXME: can lead to internal dep instead of external\n # see example with django.contrib.auth.forms\n # importing forms from django\n # Idea: when parsing files with ast, record what objects\n # are defined in the module. Then check here if the given\n # part is one of these objects.\n if depth < 3:\n return p\n return None", "def current_app(self):\n \"\"\"Return the current app.\"\"\"\n current_focus = self.adb_shell(CURRENT_APP_CMD)\n if current_focus is None:\n return None\n\n current_focus = current_focus.replace(\"\\r\", \"\")\n matches = WINDOW_REGEX.search(current_focus)\n\n # case 1: current app was successfully found\n if matches:\n (pkg, activity) = matches.group(\"package\", \"activity\")\n return {\"package\": pkg, \"activity\": activity}\n\n # case 2: current app could not be found\n logging.warning(\"Couldn't get current app, reply was %s\", current_focus)\n return None", "def get_applications(self):\n \"\"\"Return the list of supported applications.\"\"\"\n applications = []\n\n # Isolate all of the bnodes referring to target applications\n for target_app in self.get_objects(None,\n self.uri('targetApplication')):\n applications.append({\n 'guid': self.get_object(target_app, self.uri('id')),\n 'min_version': self.get_object(target_app,\n self.uri('minVersion')),\n 'max_version': self.get_object(target_app,\n self.uri('maxVersion'))})\n return applications", "def find_first_version(self):\n \"\"\"Finds the first version of igraph that exists in the nightly build\n repo from the version numbers provided in ``self.versions_to_try``.\"\"\"\n for version in self.versions_to_try:\n remote_url = self.get_download_url(version=version)\n if http_url_exists(remote_url):\n return version, remote_url\n return None, None", "def find_segment_first(self, *args, **kwargs):\n \"\"\"Finds the first matching segment.\n\n Same parameters as find_segments(), but only returns the first match, or None if no match is found.\"\"\"\n\n for m in self.find_segments(*args, **kwargs):\n return m\n\n return None", "def first_plugin_context(self):\n \"\"\"Returns the context is associated with the first app this plugin was\n registered on\"\"\"\n # Note, because registrations are stored in a set, its not _really_\n # the first one, but whichever one it sees first in the set.\n first_spf_reg = next(iter(self.registrations))\n return 
self.get_context_from_spf(first_spf_reg)", "def get_static_app_matching(apps):\n \"\"\" Returning a matching containing applications to serve static files\n correspond to each passed applications.\n \"\"\"\n return reduce(lambda a, b: a + b,\n [generate_static_matching(app)\n for app in apps if app is not None])", "def get_target(self, permitted_group, cmdline, alias_cmdline):\n '''\n When we are permitted to run a command on a target, look to see\n what the default targeting is for that group, and for that specific\n command (if provided).\n\n It's possible for None or False to be the result of either, which means\n that it's expected that the caller provide a specific target.\n\n If no configured target is provided, the command line will be parsed\n for target=foo and tgt_type=bar\n\n Test for this::\n\n h = {'aliases': {}, 'commands': {'cmd.run', 'pillar.get'},\n 'default_target': {'target': '*', 'tgt_type': 'glob'},\n 'targets': {'pillar.get': {'target': 'you_momma', 'tgt_type': 'list'}},\n 'users': {'dmangot', 'jmickle', 'pcn'}}\n f = {'aliases': {}, 'commands': {'cmd.run', 'pillar.get'},\n 'default_target': {}, 'targets': {},'users': {'dmangot', 'jmickle', 'pcn'}}\n\n g = {'aliases': {}, 'commands': {'cmd.run', 'pillar.get'},\n 'default_target': {'target': '*', 'tgt_type': 'glob'},\n 'targets': {}, 'users': {'dmangot', 'jmickle', 'pcn'}}\n\n Run each of them through ``get_configured_target(('foo', f), 'pillar.get')`` and confirm a valid target\n\n '''\n # Default to targeting all minions with a type of glob\n null_target = {'target': '*', 'tgt_type': 'glob'}\n\n def check_cmd_against_group(cmd):\n '''\n Validate cmd against the group to return the target, or a null target\n '''\n name, group_config = permitted_group\n target = group_config.get('default_target')\n if not target: # Empty, None, or False\n target = null_target\n if group_config.get('targets'):\n if group_config['targets'].get(cmd):\n target = group_config['targets'][cmd]\n if not target.get('target'):\n log.debug('Group %s is not configured to have a target for cmd %s.', name, cmd)\n return target\n\n for this_cl in cmdline, alias_cmdline:\n _, kwargs = self.parse_args_and_kwargs(this_cl)\n if 'target' in kwargs:\n log.debug('target is in kwargs %s.', kwargs)\n if 'tgt_type' in kwargs:\n log.debug('tgt_type is in kwargs %s.', kwargs)\n return {'target': kwargs['target'], 'tgt_type': kwargs['tgt_type']}\n return {'target': kwargs['target'], 'tgt_type': 'glob'}\n\n for this_cl in cmdline, alias_cmdline:\n checked = check_cmd_against_group(this_cl[0])\n log.debug('this cmdline has target %s.', this_cl)\n if checked.get('target'):\n return checked\n return null_target", "def get_app(self, reference_app=None):\n \"\"\"Helper method that implements the logic to look up an\n application.\"\"\"\n\n if reference_app is not None:\n return reference_app\n\n if current_app:\n return current_app._get_current_object()\n\n if self.app is not None:\n return self.app\n\n raise RuntimeError(\n 'No application found. Either work inside a view function or push'\n ' an application context. See'\n ' http://flask-sqlalchemy.pocoo.org/contexts/.'\n )", "def load_app(target):\n \"\"\" Load a bottle application from a module and make sure that the import\n does not affect the current default application, but returns a separate\n application object. See :func:`load` for the target parameter. 
\"\"\"\n global NORUN\n NORUN, nr_old = True, NORUN\n tmp = default_app.push() # Create a new \"default application\"\n try:\n rv = load(target) # Import the target module\n return rv if callable(rv) else tmp\n finally:\n default_app.remove(tmp) # Remove the temporary added default application\n NORUN = nr_old" ]
[ 0.7042108774185181, 0.6782410740852356, 0.6733211874961853, 0.6708325743675232, 0.6686870455741882, 0.6660854816436768, 0.6612887978553772, 0.6568704843521118, 0.6525574326515198, 0.6496841907501221, 0.6484965085983276, 0.6481257677078247 ]
Generate the addons blocklists. <emItem blockID="i372" id="5nc3QHFgcb@r06Ws9gvNNVRfH.com"> <versionRange minVersion="0" maxVersion="*" severity="3"> <targetApplication id="{ec8030f7-c20a-464f-9b0e-13a3a9e97384}"> <versionRange minVersion="39.0a1" maxVersion="*"/> </targetApplication> </versionRange> <prefs> <pref>browser.startup.homepage</pref> <pref>browser.search.defaultenginename</pref> </prefs> </emItem>
def write_addons_items(xml_tree, records, app_id, api_ver=3, app_ver=None):
    """Generate the addons blocklists.

    <emItem blockID="i372" id="5nc3QHFgcb@r06Ws9gvNNVRfH.com">
      <versionRange minVersion="0" maxVersion="*" severity="3">
        <targetApplication id="{ec8030f7-c20a-464f-9b0e-13a3a9e97384}">
          <versionRange minVersion="39.0a1" maxVersion="*"/>
        </targetApplication>
      </versionRange>
      <prefs>
        <pref>browser.startup.homepage</pref>
        <pref>browser.search.defaultenginename</pref>
      </prefs>
    </emItem>
    """
    if not records:
        return

    emItems = etree.SubElement(xml_tree, 'emItems')
    groupby = {}
    for item in records:
        if is_related_to(item, app_id, app_ver):
            if item['guid'] in groupby:
                emItem = groupby[item['guid']]
                # When creating new records from the Kinto Admin we don't have proper blockID.
                if 'blockID' in item:
                    # Remove the first caracter which is the letter i to
                    # compare the numeric value i45 < i356.
                    current_blockID = int(item['blockID'][1:])
                    previous_blockID = int(emItem.attrib['blockID'][1:])
                    # Group by and keep the biggest blockID in the XML file.
                    if current_blockID > previous_blockID:
                        emItem.attrib['blockID'] = item['blockID']
                else:
                    # If the latest entry does not have any blockID attribute, its
                    # ID should be used. (the list of records is sorted by ascending
                    # last_modified).
                    # See https://bugzilla.mozilla.org/show_bug.cgi?id=1473194
                    emItem.attrib['blockID'] = item['id']
            else:
                emItem = etree.SubElement(emItems, 'emItem',
                                          blockID=item.get('blockID', item['id']))
                groupby[item['guid']] = emItem
                prefs = etree.SubElement(emItem, 'prefs')
                for p in item['prefs']:
                    pref = etree.SubElement(prefs, 'pref')
                    pref.text = p
            # Set the add-on ID
            emItem.set('id', item['guid'])
            for field in ['name', 'os']:
                if field in item:
                    emItem.set(field, item[field])
            build_version_range(emItem, item, app_id)
[ "def write_plugin_items(xml_tree, records, app_id, api_ver=3, app_ver=None):\n \"\"\"Generate the plugin blocklists.\n\n <pluginItem blockID=\"p422\">\n <match name=\"filename\" exp=\"JavaAppletPlugin\\\\.plugin\"/>\n <versionRange minVersion=\"Java 7 Update 16\"\n maxVersion=\"Java 7 Update 24\"\n severity=\"0\" vulnerabilitystatus=\"1\">\n <targetApplication id=\"{ec8030f7-c20a-464f-9b0e-13a3a9e97384}\">\n <versionRange minVersion=\"17.0\" maxVersion=\"*\"/>\n </targetApplication>\n </versionRange>\n </pluginItem>\n \"\"\"\n\n if not records:\n return\n\n pluginItems = etree.SubElement(xml_tree, 'pluginItems')\n for item in records:\n for versionRange in item.get('versionRange', []):\n if not versionRange.get('targetApplication'):\n add_plugin_item(pluginItems, item, versionRange,\n app_id=app_id, api_ver=api_ver,\n app_ver=app_ver)\n else:\n targetApplication = get_related_targetApplication(versionRange, app_id, app_ver)\n if targetApplication is not None:\n add_plugin_item(pluginItems, item, versionRange, targetApplication,\n app_id=app_id, api_ver=api_ver,\n app_ver=app_ver)", "function createListBlock(block, listItems) {\n var listItemContent = listItems.map(function(listItemNode) {\n var content = listItemNode.innerHTML.substr(2);\n return {content: content};\n });\n var listData = {\n format: 'html',\n listItems: listItemContent.reverse()\n };\n block.mediator.trigger(\"block:create\", 'List', listData, block.el, { autoFocus: true });\n}", "function parseBlock(tokens) {\n if (tokens.type === 'blockquote') {\n return [getBlockquoteToken(tokens)];\n } else if (tokens.type === 'bullet_list' && tokens.content.type === 'list_item' ||\n tokens.type === 'ordered-list' && tokens.content.type === 'list_item') {\n return [getListToken(tokens)];\n }\n return tokens;\n}", "@SuppressWarnings(\"fallthrough\")\n protected List<DCTree> blockContent() {\n ListBuffer<DCTree> trees = new ListBuffer<>();\n textStart = -1;\n\n loop:\n while (bp < buflen) {\n switch (ch) {\n case '\\n': case '\\r': case '\\f':\n newline = true;\n // fallthrough\n\n case ' ': case '\\t':\n nextChar();\n break;\n\n case '&':\n entity(trees);\n break;\n\n case '<':\n newline = false;\n addPendingText(trees, bp - 1);\n trees.add(html());\n if (textStart == -1) {\n textStart = bp;\n lastNonWhite = -1;\n }\n break;\n\n case '>':\n newline = false;\n addPendingText(trees, bp - 1);\n trees.add(m.at(bp).newErroneousTree(newString(bp, bp + 1), diagSource, \"dc.bad.gt\"));\n nextChar();\n if (textStart == -1) {\n textStart = bp;\n lastNonWhite = -1;\n }\n break;\n\n case '{':\n inlineTag(trees);\n break;\n\n case '@':\n if (newline) {\n addPendingText(trees, lastNonWhite);\n break loop;\n }\n // fallthrough\n\n default:\n newline = false;\n if (textStart == -1)\n textStart = bp;\n lastNonWhite = bp;\n nextChar();\n }\n }\n\n if (lastNonWhite != -1)\n addPendingText(trees, lastNonWhite);\n\n return trees.toList();\n }", "def listBlocks(self, dataset=\"\", block_name=\"\", data_tier_name=\"\", origin_site_name=\"\",\n logical_file_name=\"\", run_num=-1, min_cdate=0, max_cdate=0,\n min_ldate=0, max_ldate=0, cdate=0, ldate=0, open_for_writing=-1, detail=False):\n \"\"\"\n dataset, block_name, data_tier_name or logical_file_name must be passed.\n \"\"\"\n if (not dataset) or re.search(\"['%','*']\", dataset):\n if (not block_name) or re.search(\"['%','*']\", block_name):\n if (not logical_file_name) or re.search(\"['%','*']\", logical_file_name):\n if not data_tier_name or re.search(\"['%','*']\", data_tier_name):\n msg = 
\"DBSBlock/listBlock. You must specify at least one parameter(dataset, block_name,\\\n\t\t\t \tdata_tier_name, logical_file_name) with listBlocks api\"\n dbsExceptionHandler('dbsException-invalid-input2', msg, self.logger.exception, msg)\n\n if data_tier_name:\n if not (min_cdate and max_cdate) or (max_cdate-min_cdate)>32*24*3600:\n msg = \"min_cdate and max_cdate are mandatory parameters. If data_tier_name parameter is used \\\n the maximal time range allowed is 31 days\"\n dbsExceptionHandler('dbsException-invalid-input2', msg, self.logger.exception, msg)\n if detail:\n msg = \"DBSBlock/listBlock. Detail parameter not allowed togther with data_tier_name\"\n dbsExceptionHandler('dbsException-invalid-input2', msg, self.logger.exception, msg)\n\n with self.dbi.connection() as conn:\n dao = (self.blockbrieflist, self.blocklist)[detail]\n for item in dao.execute(conn, dataset, block_name, data_tier_name, origin_site_name, logical_file_name, run_num,\n min_cdate, max_cdate, min_ldate, max_ldate, cdate, ldate):\n yield item", "function addonsManager_getElements(aSpec) {\n var spec = aSpec || { };\n var type = spec.type;\n var subtype = spec.subtype;\n var value = spec.value;\n var parent = spec.parent;\n\n var root = parent ? parent.getNode() : this._controller.tabs.activeTab;\n var nodeCollector = new domUtils.nodeCollector(root);\n\n switch (type) {\n // Add-ons\n case \"addons\":\n nodeCollector.queryNodes(\".addon\").filterByDOMProperty(subtype, value);\n break;\n case \"addonsList\":\n nodeCollector.queryNodes(\"#addon-list\");\n break;\n // Categories\n case \"categoriesList\":\n nodeCollector.queryNodes(\"#categories\");\n break;\n case \"categories\":\n nodeCollector.queryNodes(\".category\").filterByDOMProperty(subtype, value);\n break;\n // Detail view\n case \"detailView_element\":\n nodeCollector.queryNodes(value);\n break;\n case \"detailView_disableButton\":\n nodeCollector.queryNodes(\"#detail-disable\");\n break;\n case \"detailView_enableButton\":\n nodeCollector.queryNodes(\"#detail-enable\");\n break;\n case \"detailView_installButton\":\n nodeCollector.queryNodes(\"#detail-install\");\n break;\n case \"detailView_preferencesButton\":\n nodeCollector.queryNodes(\"#detail-prefs\");\n break;\n case \"detailView_removeButton\":\n nodeCollector.queryNodes(\"#detail-uninstall\");\n break;\n case \"detailView_findUpdatesLink\":\n nodeCollector.queryNodes(\"#detail-findUpdates\");\n break;\n // Bug 599771 - button-link's are missing id or anonid\n //case \"detailView_restartLink\":\n // nodeCollector.queryNodes(\"#detail-restart\");\n // break;\n case \"detailView_undoLink\":\n nodeCollector.queryNodes(\"#detail-undo\");\n break;\n case \"detailView_findUpdatesRadiogroup\":\n nodeCollector.queryNodes(\"#detail-findUpdates\");\n break;\n // List view\n case \"listView_element\":\n nodeCollector.queryAnonymousNodes(subtype, value);\n break;\n case \"listView_disableButton\":\n nodeCollector.queryAnonymousNodes(\"anonid\", \"disable-btn\");\n break;\n case \"listView_enableButton\":\n nodeCollector.queryAnonymousNodes(\"anonid\", \"enable-btn\");\n break;\n case \"listView_installButton\":\n // There is another binding we will have to skip\n nodeCollector.queryAnonymousNodes(\"anonid\", \"install-status\");\n nodeCollector.root = nodeCollector.nodes[0];\n nodeCollector.queryAnonymousNodes(\"anonid\", \"install-remote\");\n break;\n case \"listView_preferencesButton\":\n nodeCollector.queryAnonymousNodes(\"anonid\", \"preferences-btn\");\n break;\n case \"listView_removeButton\":\n 
nodeCollector.queryAnonymousNodes(\"anonid\", \"remove-btn\");\n break;\n case \"listView_moreLink\":\n // Bug 599771 - button-link's are missing id or anonid\n nodeCollector.queryAnonymousNodes(\"class\", \"details button-link\");\n break;\n // Bug 599771 - button-link's are missing id or anonid\n //case \"listView_restartLink\":\n // nodeCollector.queryAnonymousNodes(\"anonid\", \"restart\");\n // break;\n case \"listView_undoLink\":\n nodeCollector.queryAnonymousNodes(\"anonid\", \"undo\");\n break;\n case \"listView_cancelDownload\":\n // There is another binding we will have to skip\n nodeCollector.queryAnonymousNodes(\"anonid\", \"install-status\");\n nodeCollector.root = nodeCollector.nodes[0];\n nodeCollector.queryAnonymousNodes(\"anonid\", \"cancel\");\n break;\n case \"listView_pauseDownload\":\n // There is another binding we will have to skip\n nodeCollector.queryAnonymousNodes(\"anonid\", \"install-status\");\n nodeCollector.root = nodeCollector.nodes[0];\n nodeCollector.queryAnonymousNodes(\"anonid\", \"pause\");\n break;\n case \"listView_progressDownload\":\n // There is another binding we will have to skip\n nodeCollector.queryAnonymousNodes(\"anonid\", \"install-status\");\n nodeCollector.root = nodeCollector.nodes[0];\n nodeCollector.queryAnonymousNodes(\"anonid\", \"progress\");\n break;\n // Search\n // Bug 599775 - Controller needs to handle radio groups correctly\n // Means for now we have to use the radio buttons\n case \"search_filterRadioButtons\":\n nodeCollector.queryNodes(\".search-filter-radio\").filterByDOMProperty(subtype, value);\n break;\n case \"search_filterRadioGroup\":\n nodeCollector.queryNodes(\"#search-filter-radiogroup\");\n break;\n case \"search_textbox\":\n nodeCollector.queryNodes(\"#header-search\");\n break;\n case \"search_throbber\":\n nodeCollector.queryNodes(\"#header-searching\");\n break;\n // Utils\n case \"utilsButton\":\n nodeCollector.queryNodes(\"#header-utils-btn\");\n break;\n case \"utilsButton_menu\":\n nodeCollector.queryNodes(\"#utils-menu\");\n break;\n case \"utilsButton_menuItem\":\n nodeCollector.queryNodes(value);\n break;\n // Views\n case \"viewDeck\":\n nodeCollector.queryNodes(\"#view-port\");\n break;\n case \"views\":\n nodeCollector.queryNodes(\".view-pane\").filterByDOMProperty(subtype, value);\n break;\n default:\n throw new Error(arguments.callee.name + \": Unknown element type - \" + spec.type);\n }\n\n return nodeCollector.elements;\n }", "private Block generateTree(List<HeaderBlock> headers, int start, int depth, boolean numbered,\n String documentReference)\n {\n Block tocBlock = null;\n\n int currentLevel = start - 1;\n Block currentBlock = null;\n for (HeaderBlock headerBlock : headers) {\n int headerLevel = headerBlock.getLevel().getAsInt();\n\n if (headerLevel >= start && headerLevel <= depth) {\n // Move to next header in toc tree\n\n if (currentLevel < headerLevel) {\n while (currentLevel < headerLevel) {\n if (currentBlock instanceof ListBLock) {\n currentBlock = addItemBlock(currentBlock, null, documentReference);\n }\n\n currentBlock = createChildListBlock(numbered, currentBlock);\n ++currentLevel;\n }\n } else {\n while (currentLevel > headerLevel) {\n currentBlock = currentBlock.getParent().getParent();\n --currentLevel;\n }\n currentBlock = currentBlock.getParent();\n }\n\n currentBlock = addItemBlock(currentBlock, headerBlock, documentReference);\n }\n }\n\n if (currentBlock != null) {\n tocBlock = currentBlock.getRoot();\n }\n\n return tocBlock;\n }", "public void initWorklist(boolean addAll) 
{\n if (addAll) {\n Block last = null;\n for (Block b = blocklistHead; b != null; b = b.nextBlock) {\n b.nextInWorklist = b.nextBlock;\n last = b;\n }\n worklistHead = blocklistHead;\n worklistTail = last;\n } else {\n Block largest = blocklistHead;\n if (largest == null) {\n return;\n }\n int largestSize = largest.size();\n for (Block b = largest.nextBlock; b != null; b = b.nextBlock) {\n int size = b.size();\n if (size > largestSize) {\n addToWorklist(largest);\n largest = b;\n largestSize = size;\n } else {\n addToWorklist(b);\n }\n }\n }\n }", "@Override\n public Map<String, Object> generate(String encoding, String locale, String productExt) {\n\n Map<String, Object> returnMap;\n try {\n //keeps list of commands\n final ArrayList<String> commandList = new ArrayList<String>();\n\n //Get java home\n final String javaHome = AccessController.doPrivileged(\n new PrivilegedAction<String>() {\n @Override\n public String run() {\n return System.getProperty(\"java.home\");\n }\n });\n\n //Build path to ws-featurelist.jar\n final String featureListGenPath = getWsLocationAdmin().resolveString(WsLocationConstants.SYMBOL_INSTALL_DIR + \"bin/tools/ws-featurelist.jar\");\n\n //First command is to invoke the featureList jar\n commandList.add(javaHome + \"/bin/java\");\n commandList.add(\"-jar\");\n commandList.add(featureListGenPath);\n\n //Encoding\n if (encoding != null && !encoding.trim().isEmpty()) {\n commandList.add(\"--encoding=\" + encoding);\n }\n\n //Locale\n if (locale != null && !locale.trim().isEmpty()) {\n commandList.add(\"--locale=\" + locale);\n }\n\n //Product Extension\n if (productExt != null && !productExt.trim().isEmpty()) {\n commandList.add(\"--productExtension=\" + productExt);\n }\n\n //Create empty file that will be populated\n final File targetDirectory = new File(getWsLocationAdmin().resolveString(WsLocationConstants.SYMBOL_SERVER_STATE_DIR));\n if (!FileUtils.fileExists(targetDirectory)) {\n FileUtils.fileMkDirs(targetDirectory);\n }\n\n final File generatedFile = File.createTempFile(FEAT_LIST_PREFIX, FEAT_LIST_SUFFIX, targetDirectory);\n\n //Filename goes last in the script's invoke\n commandList.add(generatedFile.getAbsolutePath());\n\n //Debug command list\n if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {\n StringBuilder sb = new StringBuilder();\n for (String command : commandList) {\n sb.append(command);\n sb.append(\"\\n\");\n }\n Tr.debug(tc, \"List of commands:\\n\" + sb.toString());\n }\n\n //Run the command\n ProcessBuilder builder = new ProcessBuilder(commandList);\n builder.redirectErrorStream(true); //merge error and output together\n\n Process featureListGenProc = builder.start();\n String output = getOutput(featureListGenProc);\n int exitVal = featureListGenProc.waitFor();\n\n if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {\n Tr.debug(tc, \"ExitVal: \" + exitVal);\n }\n\n if (exitVal != 0) {\n return retError(output);\n }\n\n returnMap = new HashMap<String, Object>();\n returnMap.put(FeatureListMBean.KEY_FILE_PATH, generatedFile.getAbsolutePath());\n returnMap.put(FeatureListMBean.KEY_OUTPUT, output);\n returnMap.put(FeatureListMBean.KEY_RETURN_CODE, FeatureListMBean.RETURN_CODE_OK);\n\n } catch (IOException ioe) {\n return retError(ioe.getMessage());\n } catch (InterruptedException ie) {\n return retError(ie.getMessage());\n }\n\n return returnMap;\n }", "function VirtualizeContentsView_createChunkWithBlocks(groups, count, blockSize, chunkSize) {\n var that = this;\n 
this._listView._writeProfilerMark(\"createChunk,StartTM\");\n\n function addToGroup(itemsContainer, toAdd) {\n var indexOfNextGroupItem;\n var lastExistingBlock = itemsContainer.itemsBlocks.length ? itemsContainer.itemsBlocks[itemsContainer.itemsBlocks.length - 1] : null;\n\n toAdd = Math.min(toAdd, chunkSize);\n\n // 1) Add missing containers to the latest itemsblock if it was only partially filled during the previous pass.\n if (lastExistingBlock && lastExistingBlock.items.length < blockSize) {\n var emptySpotsToFill = Math.min(toAdd, blockSize - lastExistingBlock.items.length),\n sizeOfOldLastBlock = lastExistingBlock.items.length,\n\n indexOfNextGroupItem = (itemsContainer.itemsBlocks.length - 1) * blockSize + sizeOfOldLastBlock;\n var containersMarkup = _Helpers._stripedContainers(emptySpotsToFill, indexOfNextGroupItem);\n\n _SafeHtml.insertAdjacentHTMLUnsafe(lastExistingBlock.element, \"beforeend\", containersMarkup);\n children = lastExistingBlock.element.children;\n\n for (var j = 0; j < emptySpotsToFill; j++) {\n var child = children[sizeOfOldLastBlock + j];\n lastExistingBlock.items.push(child);\n that.containers.push(child);\n }\n\n toAdd -= emptySpotsToFill;\n }\n indexOfNextGroupItem = itemsContainer.itemsBlocks.length * blockSize;\n\n // 2) Generate as many full itemblocks of containers as we can.\n var newBlocksCount = Math.floor(toAdd / blockSize),\n markup = \"\",\n firstBlockFirstItemIndex = indexOfNextGroupItem,\n secondBlockFirstItemIndex = indexOfNextGroupItem + blockSize;\n\n if (newBlocksCount > 0) {\n var pairOfItemBlocks = [\n // Use pairs to ensure that the container striping pattern is maintained regardless if blockSize is even or odd.\n \"<div class='win-itemsblock'>\" + _Helpers._stripedContainers(blockSize, firstBlockFirstItemIndex) + \"</div>\",\n \"<div class='win-itemsblock'>\" + _Helpers._stripedContainers(blockSize, secondBlockFirstItemIndex) + \"</div>\"\n ];\n markup = _Helpers._repeat(pairOfItemBlocks, newBlocksCount);\n indexOfNextGroupItem += (newBlocksCount * blockSize);\n }\n\n // 3) Generate and partially fill, one last itemblock if there are any remaining containers to add.\n var sizeOfNewLastBlock = toAdd % blockSize;\n if (sizeOfNewLastBlock > 0) {\n markup += \"<div class='win-itemsblock'>\" + _Helpers._stripedContainers(sizeOfNewLastBlock, indexOfNextGroupItem) + \"</div>\";\n indexOfNextGroupItem += sizeOfNewLastBlock;\n newBlocksCount++;\n }\n\n var blocksTemp = _Global.document.createElement(\"div\");\n _SafeHtml.setInnerHTMLUnsafe(blocksTemp, markup);\n var children = blocksTemp.children;\n\n for (var i = 0; i < newBlocksCount; i++) {\n var block = children[i],\n blockNode = {\n element: block,\n items: _Helpers._nodeListToArray(block.children)\n };\n itemsContainer.itemsBlocks.push(blockNode);\n for (var n = 0; n < blockNode.items.length; n++) {\n that.containers.push(blockNode.items[n]);\n }\n }\n }\n\n function newGroup(group) {\n var node = {\n header: that._listView._groupDataSource ? 
that._createHeaderContainer() : null,\n itemsContainer: {\n element: that._createItemsContainer(),\n itemsBlocks: []\n }\n };\n\n that.tree.push(node);\n that.keyToGroupIndex[group.key] = that.tree.length - 1;\n\n addToGroup(node.itemsContainer, group.size);\n }\n\n if (this.tree.length && this.tree.length <= groups.length) {\n var lastContainer = this.tree[this.tree.length - 1].itemsContainer,\n finalSize = groups[this.tree.length - 1].size,\n currentSize = 0;\n\n if (lastContainer.itemsBlocks.length) {\n currentSize = (lastContainer.itemsBlocks.length - 1) * blockSize + lastContainer.itemsBlocks[lastContainer.itemsBlocks.length - 1].items.length;\n }\n\n if (currentSize < finalSize) {\n addToGroup(lastContainer, finalSize - currentSize);\n this._listView._writeProfilerMark(\"createChunk,StopTM\");\n return;\n }\n }\n\n if (this.tree.length < groups.length) {\n newGroup(groups[this.tree.length]);\n }\n\n this._listView._writeProfilerMark(\"createChunk,StopTM\");\n }", "private void renderListItemContents(final StringBuilder builder) {\n final Multimedia multimedia = multimediaRenderer.getGedObject();\n builder.append(\"<span class=\\\"label\\\">\");\n builder.append(GedRenderer.escapeString(multimedia));\n builder.append(\":</span> \");\n\n final MultimediaVisitor visitor = new MultimediaVisitor();\n multimedia.accept(visitor);\n if (visitor.isImage()) {\n builder.append(visitor.getTitle());\n builder.append(\"<br/>\\n<a href=\\\"\");\n builder.append(visitor.getFilePath());\n builder.append(\"\\\"><img height=\\\"300px\\\" src=\\\"\");\n builder.append(visitor.getFilePath());\n builder.append(\"\\\" title=\\\"\");\n builder.append(visitor.getTitle());\n builder.append(\"\\\"/></a>\");\n } else {\n builder.append(\"<a href=\\\"\");\n builder.append(visitor.getFilePath());\n builder.append(\"\\\">\");\n builder.append(visitor.getTitle());\n builder.append(\"</a>\");\n }\n }", "private Set<Addon> getAddonsToScan()\n {\n AddonFilter filter = new AddonFilter()\n {\n @Override\n public boolean accept(Addon addon)\n {\n // make sure to include ourselves as well (even though we don't technically depend on ourselves)\n return addonDependsOnReporting(addon) || addon.equals(CssJsResourceRenderingRuleProvider.this.addon);\n }\n };\n\n return furnace.getAddonRegistry().getAddons(filter);\n }" ]
[ 0.6949023604393005, 0.6635684370994568, 0.6583225131034851, 0.6570259928703308, 0.6558802723884583, 0.6533246636390686, 0.6520910263061523, 0.6436138153076172, 0.6408869028091431, 0.6392215490341187, 0.6388567686080933, 0.6378030180931091 ]
Generate the plugin blocklists. <pluginItem blockID="p422"> <match name="filename" exp="JavaAppletPlugin\\.plugin"/> <versionRange minVersion="Java 7 Update 16" maxVersion="Java 7 Update 24" severity="0" vulnerabilitystatus="1"> <targetApplication id="{ec8030f7-c20a-464f-9b0e-13a3a9e97384}"> <versionRange minVersion="17.0" maxVersion="*"/> </targetApplication> </versionRange> </pluginItem>
def write_plugin_items(xml_tree, records, app_id, api_ver=3, app_ver=None): """Generate the plugin blocklists. <pluginItem blockID="p422"> <match name="filename" exp="JavaAppletPlugin\\.plugin"/> <versionRange minVersion="Java 7 Update 16" maxVersion="Java 7 Update 24" severity="0" vulnerabilitystatus="1"> <targetApplication id="{ec8030f7-c20a-464f-9b0e-13a3a9e97384}"> <versionRange minVersion="17.0" maxVersion="*"/> </targetApplication> </versionRange> </pluginItem> """ if not records: return pluginItems = etree.SubElement(xml_tree, 'pluginItems') for item in records: for versionRange in item.get('versionRange', []): if not versionRange.get('targetApplication'): add_plugin_item(pluginItems, item, versionRange, app_id=app_id, api_ver=api_ver, app_ver=app_ver) else: targetApplication = get_related_targetApplication(versionRange, app_id, app_ver) if targetApplication is not None: add_plugin_item(pluginItems, item, versionRange, targetApplication, app_id=app_id, api_ver=api_ver, app_ver=app_ver)
[ "def write_addons_items(xml_tree, records, app_id, api_ver=3, app_ver=None):\n \"\"\"Generate the addons blocklists.\n\n <emItem blockID=\"i372\" id=\"5nc3QHFgcb@r06Ws9gvNNVRfH.com\">\n <versionRange minVersion=\"0\" maxVersion=\"*\" severity=\"3\">\n <targetApplication id=\"{ec8030f7-c20a-464f-9b0e-13a3a9e97384}\">\n <versionRange minVersion=\"39.0a1\" maxVersion=\"*\"/>\n </targetApplication>\n </versionRange>\n <prefs>\n <pref>browser.startup.homepage</pref>\n <pref>browser.search.defaultenginename</pref>\n </prefs>\n </emItem>\n \"\"\"\n if not records:\n return\n\n emItems = etree.SubElement(xml_tree, 'emItems')\n groupby = {}\n for item in records:\n if is_related_to(item, app_id, app_ver):\n if item['guid'] in groupby:\n emItem = groupby[item['guid']]\n # When creating new records from the Kinto Admin we don't have proper blockID.\n if 'blockID' in item:\n # Remove the first caracter which is the letter i to\n # compare the numeric value i45 < i356.\n current_blockID = int(item['blockID'][1:])\n previous_blockID = int(emItem.attrib['blockID'][1:])\n # Group by and keep the biggest blockID in the XML file.\n if current_blockID > previous_blockID:\n emItem.attrib['blockID'] = item['blockID']\n else:\n # If the latest entry does not have any blockID attribute, its\n # ID should be used. (the list of records is sorted by ascending\n # last_modified).\n # See https://bugzilla.mozilla.org/show_bug.cgi?id=1473194\n emItem.attrib['blockID'] = item['id']\n else:\n emItem = etree.SubElement(emItems, 'emItem',\n blockID=item.get('blockID', item['id']))\n groupby[item['guid']] = emItem\n prefs = etree.SubElement(emItem, 'prefs')\n for p in item['prefs']:\n pref = etree.SubElement(prefs, 'pref')\n pref.text = p\n\n # Set the add-on ID\n emItem.set('id', item['guid'])\n\n for field in ['name', 'os']:\n if field in item:\n emItem.set(field, item[field])\n\n build_version_range(emItem, item, app_id)", "private List<String> createPluginStrings() {\n final List<String> formatList = new ArrayList<String>();\n for (final Plugin plugin : overriddenParameters.getPlugins()) {\n formatList.add(plugin.asPluginString(fileCounter));\n }\n return formatList;\n }", "def plugins(self, plugin_type='all', sort='id', direction='asc',\n size=1000, offset=0, all=True, loops=0, since=None, **filterset):\n \"\"\"plugins\n Returns a list of of the plugins and their associated families. For\n simplicity purposes, the plugin family names will be injected into the\n plugin data so that only 1 list is returned back with all of the\n information.\n \"\"\"\n plugins = []\n\n # First we need to generate the basic payload that we will be augmenting\n # to build the\n payload = {\n 'size': size,\n 'offset': offset,\n 'type': plugin_type,\n 'sortField': sort,\n 'sortDirection': direction.upper(),\n }\n\n # If there was a filter given, we will need to populate that.\n if len(filterset) > 0:\n fname = list(filterset.keys())[0]\n if fname in self._xrefs:\n fname = 'xrefs:%s' % fname.replace('_', '-')\n payload['filterField'] = fname\n payload['filterString'] = filterset[list(filterset.keys())[0]]\n\n # We also need to check if there was a datetime object sent to us and\n # parse that down if given.\n if since is not None and isinstance(since, date):\n payload['since'] = calendar.timegm(since.utctimetuple())\n\n # And now we run through the loop needed to pull all of the data. This\n # may take some time even though we are pulling large data sets. 
At the\n # time of development of this module, there were over 55k active plugins\n # and over 7k passive ones.\n while all or loops > 0:\n # First things first, we need to query the data.\n data = self.raw_query('plugin', 'init', data=payload)\n if not data:\n return []\n\n # This no longer works in 4.4 as the family name is already\n # referenced. Will re-activate this code when I can get a SC4.2\n # Instance up and running to test...\n # ---\n # Next we convert the family dictionary list into a flat dictionary.\n #fams = {}\n #for famitem in data['families']:\n # fams[famitem['id']] = famitem['name']\n\n # Then we parse thtrough the data set, adding in the family name\n # into the plugin definition before adding it into the plugins list.\n for plugin in data['plugins']:\n # plugin['familyName'] = fams[plugin['familyID']]\n plugins.append(plugin)\n # ---\n\n # Next its time to increment the offset so that we get a new data\n # set. We will also check here to see if the length really is the\n # same as whats specified in the size variable. If it isnt, then\n # we have reached the end of the dataset and might as well set\n # the continue variable to False.\n if len(data['plugins']) < size:\n all = False\n loops = 0\n else:\n loops -= 1\n payload['offset'] += len(data['plugins'])\n return plugins", "public static void generate(Configuration configuration) {\n PackageListWriter packgen;\n try {\n packgen = new PackageListWriter(configuration);\n packgen.generatePackageListFile(configuration.root);\n packgen.close();\n } catch (IOException exc) {\n configuration.message.error(\"doclet.exception_encountered\",\n exc.toString(), DocPaths.PACKAGE_LIST);\n throw new DocletAbortException(exc);\n }\n }", "private void initListBlock(final Block root)\n {\n Line line = root.lines;\n line = line.next;\n while (line != null)\n {\n final LineType t = line.getLineType(this.config);\n if ((t == LineType.OLIST || t == LineType.ULIST)\n || (!line.isEmpty && (line.prevEmpty && line.leading == 0 && !(t == LineType.OLIST || t == LineType.ULIST))))\n {\n root.split(line.previous).type = BlockType.LIST_ITEM;\n }\n line = line.next;\n }\n root.split(root.lineTail).type = BlockType.LIST_ITEM;\n }", "def listBlocks(self, dataset=\"\", block_name=\"\", data_tier_name=\"\", origin_site_name=\"\",\n logical_file_name=\"\", run_num=-1, min_cdate=0, max_cdate=0,\n min_ldate=0, max_ldate=0, cdate=0, ldate=0, open_for_writing=-1, detail=False):\n \"\"\"\n dataset, block_name, data_tier_name or logical_file_name must be passed.\n \"\"\"\n if (not dataset) or re.search(\"['%','*']\", dataset):\n if (not block_name) or re.search(\"['%','*']\", block_name):\n if (not logical_file_name) or re.search(\"['%','*']\", logical_file_name):\n if not data_tier_name or re.search(\"['%','*']\", data_tier_name):\n msg = \"DBSBlock/listBlock. You must specify at least one parameter(dataset, block_name,\\\n\t\t\t \tdata_tier_name, logical_file_name) with listBlocks api\"\n dbsExceptionHandler('dbsException-invalid-input2', msg, self.logger.exception, msg)\n\n if data_tier_name:\n if not (min_cdate and max_cdate) or (max_cdate-min_cdate)>32*24*3600:\n msg = \"min_cdate and max_cdate are mandatory parameters. If data_tier_name parameter is used \\\n the maximal time range allowed is 31 days\"\n dbsExceptionHandler('dbsException-invalid-input2', msg, self.logger.exception, msg)\n if detail:\n msg = \"DBSBlock/listBlock. 
Detail parameter not allowed togther with data_tier_name\"\n dbsExceptionHandler('dbsException-invalid-input2', msg, self.logger.exception, msg)\n\n with self.dbi.connection() as conn:\n dao = (self.blockbrieflist, self.blocklist)[detail]\n for item in dao.execute(conn, dataset, block_name, data_tier_name, origin_site_name, logical_file_name, run_num,\n min_cdate, max_cdate, min_ldate, max_ldate, cdate, ldate):\n yield item", "function listBlocks(plugins) {\n return plugins\n .reverse()\n .reduce(function(result, plugin) {\n var blocks = plugin.getBlocks();\n return result.merge(blocks);\n }, Immutable.Map());\n}", "@CheckForNull\n /*package*/ JSONArray getPlatformPluginList() {\n Jenkins.get().checkPermission(Jenkins.ADMINISTER);\n JSONArray initialPluginList = null;\n updateSiteList: for (UpdateSite updateSite : Jenkins.get().getUpdateCenter().getSiteList()) {\n String updateCenterJsonUrl = updateSite.getUrl();\n String suggestedPluginUrl = updateCenterJsonUrl.replace(\"/update-center.json\", \"/platform-plugins.json\");\n try {\n URLConnection connection = ProxyConfiguration.open(new URL(suggestedPluginUrl));\n \n try {\n if(connection instanceof HttpURLConnection) {\n int responseCode = ((HttpURLConnection)connection).getResponseCode();\n if(HttpURLConnection.HTTP_OK != responseCode) {\n throw new HttpRetryException(\"Invalid response code (\" + responseCode + \") from URL: \" + suggestedPluginUrl, responseCode);\n }\n }\n \n String initialPluginJson = IOUtils.toString(connection.getInputStream(), \"utf-8\");\n initialPluginList = JSONArray.fromObject(initialPluginJson);\n break updateSiteList;\n } catch(Exception e) {\n // not found or otherwise unavailable\n LOGGER.log(Level.FINE, e.getMessage(), e);\n continue updateSiteList;\n }\n } catch(Exception e) {\n LOGGER.log(Level.FINE, e.getMessage(), e);\n }\n }\n if (initialPluginList == null) {\n // fall back to local file\n try {\n ClassLoader cl = getClass().getClassLoader();\n URL localPluginData = cl.getResource(\"jenkins/install/platform-plugins.json\");\n String initialPluginJson = IOUtils.toString(localPluginData.openStream(), \"utf-8\");\n initialPluginList = JSONArray.fromObject(initialPluginJson);\n } catch (Exception e) {\n LOGGER.log(Level.SEVERE, e.getMessage(), e);\n }\n }\n return initialPluginList;\n }", "@SuppressWarnings(\"fallthrough\")\n protected List<DCTree> blockContent() {\n ListBuffer<DCTree> trees = new ListBuffer<>();\n textStart = -1;\n\n loop:\n while (bp < buflen) {\n switch (ch) {\n case '\\n': case '\\r': case '\\f':\n newline = true;\n // fallthrough\n\n case ' ': case '\\t':\n nextChar();\n break;\n\n case '&':\n entity(trees);\n break;\n\n case '<':\n newline = false;\n addPendingText(trees, bp - 1);\n trees.add(html());\n if (textStart == -1) {\n textStart = bp;\n lastNonWhite = -1;\n }\n break;\n\n case '>':\n newline = false;\n addPendingText(trees, bp - 1);\n trees.add(m.at(bp).newErroneousTree(newString(bp, bp + 1), diagSource, \"dc.bad.gt\"));\n nextChar();\n if (textStart == -1) {\n textStart = bp;\n lastNonWhite = -1;\n }\n break;\n\n case '{':\n inlineTag(trees);\n break;\n\n case '@':\n if (newline) {\n addPendingText(trees, lastNonWhite);\n break loop;\n }\n // fallthrough\n\n default:\n newline = false;\n if (textStart == -1)\n textStart = bp;\n lastNonWhite = bp;\n nextChar();\n }\n }\n\n if (lastNonWhite != -1)\n addPendingText(trees, lastNonWhite);\n\n return trees.toList();\n }", "public void saveBlacklist(ListConfigurationBean configBean) {\n\n if (m_dialogWindow != null) {\n 
m_dialogWindow.close();\n m_dialogWindow = null;\n }\n CmsObject cms = A_CmsUI.getCmsObject();\n\n try {\n m_lockAction = CmsLockUtil.ensureLock(cms, m_currentResource);\n } catch (CmsException e) {\n CmsErrorDialog.showErrorDialog(e);\n return;\n }\n\n try {\n CmsFile configFile = cms.readFile(m_currentResource);\n CmsXmlContent content = CmsXmlContentFactory.unmarshal(cms, configFile);\n // list configurations are single locale contents\n Locale locale = CmsLocaleManager.MASTER_LOCALE;\n int count = 0;\n while (content.hasValue(N_BLACKLIST, locale)) {\n content.removeValue(N_BLACKLIST, locale, 0);\n }\n for (CmsUUID hiddenId : configBean.getBlacklist()) {\n CmsXmlVfsFileValue contentVal;\n contentVal = (CmsXmlVfsFileValue)content.addValue(cms, N_BLACKLIST, locale, count);\n contentVal.setIdValue(cms, hiddenId);\n count++;\n }\n configFile.setContents(content.marshal());\n cms.writeFile(configFile);\n if (m_lockAction.getChange().equals(LockChange.locked)) {\n CmsLockUtil.tryUnlock(cms, configFile);\n }\n } catch (CmsException e) {\n e.printStackTrace();\n }\n m_currentConfig = configBean;\n }", "private ListBLock createChildListBlock(boolean numbered, Block parentBlock)\n {\n ListBLock childListBlock =\n numbered ? new NumberedListBlock(Collections.emptyList()) : new BulletedListBlock(Collections.emptyList());\n\n if (parentBlock != null) {\n parentBlock.addChild(childListBlock);\n }\n\n return childListBlock;\n }", "private int parseListItem (int start) {\n start = skipSpacesToNewline(start, wikiLength); // skip spaces\n int end=parseItem(start, null, ContextType.LIST_ITEM);\n if ((listLevels[listLevel]=='>' || listLevels[listLevel]==':') &&\n wikiText.substring(start, end).trim().length()==0) { // empty line within blockquote/div\n if (!blockquoteBR) {\n sb.append(\"<br/><br/>\");\n blockquoteBR=true;\n }\n }\n else {\n blockquoteBR=false;\n }\n return end;\n }" ]
[ 0.6678083539009094, 0.6628898978233337, 0.660322904586792, 0.659954845905304, 0.6570162773132324, 0.6534080505371094, 0.6514649391174316, 0.6495108008384705, 0.6472249627113342, 0.6460555791854858, 0.6452942490577698, 0.6452105641365051 ]
Generate the gfxBlacklistEntry. <gfxBlacklistEntry blockID="g35"> <os>WINNT 6.1</os> <vendor>0x10de</vendor> <devices> <device>0x0a6c</device> </devices> <feature>DIRECT2D</feature> <featureStatus>BLOCKED_DRIVER_VERSION</featureStatus> <driverVersion>8.17.12.5896</driverVersion> <driverVersionComparator>LESS_THAN_OR_EQUAL</driverVersionComparator> <versionRange minVersion="3.2" maxVersion="3.4" /> </gfxBlacklistEntry>
def write_gfx_items(xml_tree, records, app_id, api_ver=3): """Generate the gfxBlacklistEntry. <gfxBlacklistEntry blockID="g35"> <os>WINNT 6.1</os> <vendor>0x10de</vendor> <devices> <device>0x0a6c</device> </devices> <feature>DIRECT2D</feature> <featureStatus>BLOCKED_DRIVER_VERSION</featureStatus> <driverVersion>8.17.12.5896</driverVersion> <driverVersionComparator>LESS_THAN_OR_EQUAL</driverVersionComparator> <versionRange minVersion="3.2" maxVersion="3.4" /> </gfxBlacklistEntry> """ if not records: return gfxItems = etree.SubElement(xml_tree, 'gfxItems') for item in records: is_record_related = ('guid' not in item or item['guid'] == app_id) if is_record_related: entry = etree.SubElement(gfxItems, 'gfxBlacklistEntry', blockID=item.get('blockID', item['id'])) fields = ['os', 'vendor', 'feature', 'featureStatus', 'driverVersion', 'driverVersionComparator'] for field in fields: if field in item: node = etree.SubElement(entry, field) node.text = item[field] # Devices if item['devices']: devices = etree.SubElement(entry, 'devices') for d in item['devices']: device = etree.SubElement(devices, 'device') device.text = d if 'versionRange' in item: version = item['versionRange'] versionRange = etree.SubElement(entry, 'versionRange') for field in ['minVersion', 'maxVersion']: value = version.get(field) if value: versionRange.set(field, str(value))
[ "public Blacklist generateBlacklist(Model model)\n\t{\n\t\tChemicalNameNormalizer normalizer = new ChemicalNameNormalizer(model);\n\t\tSIFSearcher searcher = new SIFSearcher(new Fetcher(normalizer), SIFEnum.USED_TO_PRODUCE);\n\n\t\tSet<SIFInteraction> sifs = searcher.searchSIF(model);\n\n\t\t// read interactions into maps\n\n\t\tMap<String, Set<String>> upstrMap = new HashMap<String, Set<String>>();\n\t\tMap<String, Set<String>> dwstrMap = new HashMap<String, Set<String>>();\n\t\tMap<String, Set<String>> neighMap = new HashMap<String, Set<String>>();\n\n\t\tfor (SIFInteraction sif : sifs)\n\t\t{\n\t\t\tString source = sif.sourceID;\n\t\t\tString target = sif.targetID;\n\n\t\t\tif (!neighMap.containsKey(source)) neighMap.put(source, new HashSet<String>());\n\t\t\tif (!neighMap.containsKey(target)) neighMap.put(target, new HashSet<String>());\n\t\t\tif (!dwstrMap.containsKey(source)) dwstrMap.put(source, new HashSet<String>());\n\t\t\tif (!dwstrMap.containsKey(target)) dwstrMap.put(target, new HashSet<String>());\n\t\t\tif (!upstrMap.containsKey(source)) upstrMap.put(source, new HashSet<String>());\n\t\t\tif (!upstrMap.containsKey(target)) upstrMap.put(target, new HashSet<String>());\n\n\t\t\tneighMap.get(source).add(target);\n\t\t\tneighMap.get(target).add(source);\n\t\t\tdwstrMap.get(source).add(target);\n\t\t\tupstrMap.get(target).add(source);\n\t\t}\n\n\t\t// remove intersection of upstream and downstream\n\n\t\tfor (String name : neighMap.keySet())\n\t\t{\n\t\t\tif (!upstrMap.containsKey(name) || !dwstrMap.containsKey(name)) continue;\n\n\t\t\tSet<String> upstr = upstrMap.get(name);\n\t\t\tSet<String> dwstr = dwstrMap.get(name);\n\n\t\t\tSet<String> temp = new HashSet<String>(upstr);\n\t\t\tupstr.removeAll(dwstr);\n\t\t\tdwstr.removeAll(temp);\n\t\t}\n\n\n\t\tBlacklist blacklist = new Blacklist();\n\n\t\t// populate the blacklist\n\n\t\tfor (SmallMoleculeReference smr : model.getObjects(SmallMoleculeReference.class))\n\t\t{\n\t\t\tString name = normalizer.getName(smr);\n\n\t\t\tint neighSize = neighMap.containsKey(name) ? neighMap.get(name).size() : 0;\n\t\t\tint upstrOnly = upstrMap.containsKey(name) ? upstrMap.get(name).size() : 0;\n\t\t\tint dwstrOnly = dwstrMap.containsKey(name) ? dwstrMap.get(name).size() : 0;\n\n//\t\t\tif (neighSize > 30) System.out.println(name + \"\\t\" + neighSize + \"\\t\" + upstrOnly + \"\\t\" + dwstrOnly);\n\n\t\t\tif (decider.isUbique(neighSize, upstrOnly, dwstrOnly))\n\t\t\t{\n\t\t\t\tblacklist.addEntry(smr.getUri(),\n\t\t\t\t\tdecider.getScore(neighSize, upstrOnly, dwstrOnly),\n\t\t\t\t\tdecider.getContext(neighSize, upstrOnly, dwstrOnly));\n\t\t\t}\n\t\t}\n\n\t\treturn blacklist;\n\t}", "public Blacklist generateBlacklist(Model model) throws IOException\n\t{\n\t\tMap<String, String> nameMapping = readNameMapping();\n\t\tif (nameMapping == null)\n\t\t{\n\t\t\tgenerateNameMappingFileToCurate(model);\n\t\t\tthrow new RuntimeException(\"Small molecule name mapping file not found. 
Generated a \" +\n\t\t\t\t\"mapping file, but it needs manual curation before use.\\nPlease go over some top \" +\n\t\t\t\t\"portion of this file and delete invalid lines and any uncurated bottom part.\\n\" +\n\t\t\t\t\"After that, you can rerun this method.\");\n\t\t}\n\n\t\tSIFSearcher searcher = new SIFSearcher(new Fetcher(nameMapping), SIFEnum.USED_TO_PRODUCE);\n\n\t\tSet<SIFInteraction> sifs = searcher.searchSIF(model);\n\n\t\t// read interactions into maps\n\n\t\tMap<String, Set<String>> upstrMap = new HashMap<String, Set<String>>();\n\t\tMap<String, Set<String>> dwstrMap = new HashMap<String, Set<String>>();\n\t\tfinal Map<String, Set<String>> neighMap = new HashMap<String, Set<String>>();\n\n\t\tfor (SIFInteraction sif : sifs)\n\t\t{\n\t\t\tString source = sif.sourceID;\n\t\t\tString target = sif.targetID;\n\n\t\t\tif (!neighMap.containsKey(source)) neighMap.put(source, new HashSet<String>());\n\t\t\tif (!neighMap.containsKey(target)) neighMap.put(target, new HashSet<String>());\n\t\t\tif (!dwstrMap.containsKey(source)) dwstrMap.put(source, new HashSet<String>());\n\t\t\tif (!dwstrMap.containsKey(target)) dwstrMap.put(target, new HashSet<String>());\n\t\t\tif (!upstrMap.containsKey(source)) upstrMap.put(source, new HashSet<String>());\n\t\t\tif (!upstrMap.containsKey(target)) upstrMap.put(target, new HashSet<String>());\n\n\t\t\tneighMap.get(source).add(target);\n\t\t\tneighMap.get(target).add(source);\n\t\t\tdwstrMap.get(source).add(target);\n\t\t\tupstrMap.get(target).add(source);\n\t\t}\n\n\t\t// remove intersection of upstream and downstream\n\n\t\tfor (String name : neighMap.keySet())\n\t\t{\n\t\t\tif (!upstrMap.containsKey(name) || !dwstrMap.containsKey(name)) continue;\n\n\t\t\tSet<String> upstr = upstrMap.get(name);\n\t\t\tSet<String> dwstr = dwstrMap.get(name);\n\n\t\t\tSet<String> temp = new HashSet<String>(upstr);\n\t\t\tupstr.removeAll(dwstr);\n\t\t\tdwstr.removeAll(temp);\n\t\t}\n\n//\t\twriteTheGuideRankingToTuneTheDecider(model, nameMapping, upstrMap, dwstrMap, neighMap);\n//\t\tif (true) return null;\n\n\t\tSet<String> white = readWhitelist();\n\n\t\tBlacklist blacklist = new Blacklist();\n\n\t\t// populate the blacklist\n\n\t\tFetcher nameFetcher = new Fetcher(nameMapping);\n\t\tfor (SmallMoleculeReference smr : model.getObjects(SmallMoleculeReference.class))\n\t\t{\n\t\t\tSet<String> names = nameFetcher.fetchID(smr);\n\t\t\tif (names.isEmpty()) continue;\n\t\t\tString name = names.iterator().next();\n\n\t\t\tif (white != null && white.contains(name)) continue;\n\n\t\t\tint neighSize = neighMap.containsKey(name) ? neighMap.get(name).size() : 0;\n\t\t\tint upstrOnly = upstrMap.containsKey(name) ? upstrMap.get(name).size() : 0;\n\t\t\tint dwstrOnly = dwstrMap.containsKey(name) ? 
dwstrMap.get(name).size() : 0;\n\n\t\t\tif (decider.isUbique(neighSize, upstrOnly, dwstrOnly))\n\t\t\t{\n\t\t\t\tblacklist.addEntry(smr.getUri(),\n\t\t\t\t\tdecider.getScore(neighSize, upstrOnly, dwstrOnly),\n\t\t\t\t\tdecider.getContext(neighSize, upstrOnly, dwstrOnly));\n\t\t\t}\n\t\t}\n\n\t\treturn blacklist;\n\t}", "public static void enterSafeBlock() \r\n\t{\r\n\t\tif (inSafe) {\r\n\t\t\treturn;\r\n\t\t}\r\n\t\t\r\n\t\tRenderer.get().flush();\r\n\t\tlastUsed = TextureImpl.getLastBind();\r\n\t\tTextureImpl.bindNone();\r\n\t\tGL11.glPushAttrib(GL11.GL_ALL_ATTRIB_BITS);\r\n\t\tGL11.glPushClientAttrib(GL11.GL_ALL_CLIENT_ATTRIB_BITS);\r\n\t\tGL11.glMatrixMode(GL11.GL_MODELVIEW);\r\n\t\tGL11.glPushMatrix();\r\n\t\tGL11.glMatrixMode(GL11.GL_PROJECTION);\r\n\t\tGL11.glPushMatrix();\r\n\t\tGL11.glMatrixMode(GL11.GL_MODELVIEW);\r\n\t\t\r\n\t\tinSafe = true;\r\n\t}", "def create_blacklist_entry(self, \n value, \n reason=\"unwanted\", \n context=\"allFields\", \n exact_match = False, \n enabled = True\n ):\n \"\"\"Creates a new blacklist entry.\n \n Keyword arguments:\n value -- The string value to blacklist.\n reason -- The reason for why the value is blacklisted. Can be: \"spam\", \"unwanted\".\n context -- The context where the entry's value may match. Can be:\n allFields -- Match can be made in any field.\n author -- Match can be made in any author related field.\n authorName -- Match can be made in the author name of the content.\n authorMail -- Match can be made in the author email of the content.\n authorIp -- Match can be made in the author ip address of the content.\n authorId -- Match can be made in the author id of the content.\n links -- Match can be made in any of the links of the content.\n postTitle -- Match can be made in the post title of the content.\n post -- Match can be made in the post title or the post body of the content.\n exact_match -- Whether there has to be an exact word match. Can be: \"exact\", \"contains\".\n e.g. for a value of \"call\", \"caller\" would be a contains match, but not an exact match.\n enabled -- Whether or not this blacklist entry is enabled.\n \n Returns:\n blacklist_entry_id -- The unique identifier of the blacklist entry created.\n \"\"\"\n create_blacklist_endpoint = Template(\"${rest_root}/blacklist/${public_key}\")\n url = create_blacklist_endpoint.substitute(rest_root=self._rest_root, public_key=self._public_key)\n \n data = {\"value\": value,\n \"reason\": reason,\n \"context\": context,\n \"match\": \"exact\" if exact_match else \"contains\",\n \"status\": 1 if enabled else 0\n }\n \n response = self.__post_request(url, data)\n return response[\"entry\"][\"id\"]", "def add_entry(self, src, dst, duration=3600, src_port1=None,\n src_port2=None, src_proto='predefined_tcp',\n dst_port1=None, dst_port2=None,\n dst_proto='predefined_tcp'):\n \"\"\" \n Create a blacklist entry.\n \n A blacklist can be added directly from the engine node, or from\n the system context. If submitting from the system context, it becomes\n a global blacklist. This will return the properly formatted json\n to submit.\n \n :param src: source address, with cidr, i.e. 10.10.10.10/32 or 'any'\n :param dst: destination address with cidr, i.e. 1.1.1.1/32 or 'any'\n :param int duration: length of time to blacklist\n \n Both the system and engine context blacklist allow kw to be passed\n to provide additional functionality such as adding source and destination\n ports or port ranges and specifying the protocol. 
The following parameters\n define the ``kw`` that can be passed.\n \n The following example shows creating an engine context blacklist\n using additional kw::\n \n engine.blacklist('1.1.1.1/32', '2.2.2.2/32', duration=3600,\n src_port1=1000, src_port2=1500, src_proto='predefined_udp',\n dst_port1=3, dst_port2=3000, dst_proto='predefined_udp')\n \n :param int src_port1: start source port to limit blacklist\n :param int src_port2: end source port to limit blacklist\n :param str src_proto: source protocol. Either 'predefined_tcp'\n or 'predefined_udp'. (default: 'predefined_tcp')\n :param int dst_port1: start dst port to limit blacklist\n :param int dst_port2: end dst port to limit blacklist\n :param str dst_proto: dst protocol. Either 'predefined_tcp'\n or 'predefined_udp'. (default: 'predefined_tcp')\n \n .. note:: if blocking a range of ports, use both src_port1 and\n src_port2, otherwise providing only src_port1 is adequate. The\n same applies to dst_port1 / dst_port2. In addition, if you provide\n src_portX but not dst_portX (or vice versa), the undefined port\n side definition will default to all ports.\n \"\"\"\n self.entries.setdefault('entries', []).append(prepare_blacklist(\n src, dst, duration, src_port1, src_port2, src_proto, dst_port1,\n dst_port2, dst_proto))", "public Blacklist generateBlacklist(Model model)\n\t{\n\t\tBlacklist blacklist = new Blacklist();\n\n\t\t// populate the blacklist\n\n\t\tfor (SmallMoleculeReference smr : model.getObjects(SmallMoleculeReference.class))\n\t\t{\n\t\t\tString name = smr.getDisplayName();\n\t\t\tif (name == null) continue;\n\t\t\tname = name.toLowerCase();\n\n\t\t\tif (knownNames.containsKey(name))\n\t\t\t{\n\t\t\t\tblacklist.addEntry(smr.getUri(), 1, knownNames.get(name));\n\n\t\t\t\tfor (SimplePhysicalEntity spe : smr.getEntityReferenceOf())\n\t\t\t\t{\n\t\t\t\t\tblacklist.addEntry(spe.getUri(), 1, knownNames.get(name));\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn blacklist;\n\t}", "protected ValidationPlanResult validateEntry(Object entry) throws ValidationEngineException\n\t{\n\n\t\tValidationPlanResult planResult = new ValidationPlanResult();\n\n\t\tString id = entry.toString();\n\t\tif (id == null)\n\t\t{\n\t\t\tid = \"*\";\n\t\t}\n\n\t\tprintMessage(\tid + \", \",LOG_LEVEL_ALL);\n\t\tswitch(fileType)\n\t\t{\n\t\tcase EMBL:\n\t\tcase GENBANK:\n\t\tcase FASTA:\n\t\t\tplanResult = emblValidator.execute(entry); \n\t\t\tbreak;\n\t\tcase GFF3:\n\t\t\tplanResult = gff3Validator.execute(entry);\n\t\t\tbreak;\n\t\tdefault:\n\t\t\tbreak;\n\t\t\t\t\t\t\n\t\t}\n\t\treturn planResult;\n\t}", "public java.util.Map<String, java.util.List<BlacklistEntry>> getBlacklistReport() {\n return blacklistReport;\n }", "public void blockManagement() {\n Vector dwgObjectsWithoutBlocks = new Vector();\n boolean addingToBlock = false;\n for( int i = 0; i < dwgObjects.size(); i++ ) {\n try {\n DwgObject entity = (DwgObject) dwgObjects.get(i);\n if (entity instanceof DwgArc && !addingToBlock) {\n dwgObjectsWithoutBlocks.add(entity);\n } else if (entity instanceof DwgEllipse && !addingToBlock) {\n dwgObjectsWithoutBlocks.add(entity);\n } else if (entity instanceof DwgCircle && !addingToBlock) {\n dwgObjectsWithoutBlocks.add(entity);\n } else if (entity instanceof DwgPolyline2D && !addingToBlock) {\n dwgObjectsWithoutBlocks.add(entity);\n } else if (entity instanceof DwgPolyline3D && !addingToBlock) {\n dwgObjectsWithoutBlocks.add(entity);\n } else if (entity instanceof DwgLwPolyline && !addingToBlock) {\n dwgObjectsWithoutBlocks.add(entity);\n } else if (entity 
instanceof DwgSolid && !addingToBlock) {\n dwgObjectsWithoutBlocks.add(entity);\n } else if (entity instanceof DwgLine && !addingToBlock) {\n dwgObjectsWithoutBlocks.add(entity);\n } else if (entity instanceof DwgPoint && !addingToBlock) {\n dwgObjectsWithoutBlocks.add(entity);\n } else if (entity instanceof DwgMText && !addingToBlock) {\n dwgObjectsWithoutBlocks.add(entity);\n } else if (entity instanceof DwgText && !addingToBlock) {\n dwgObjectsWithoutBlocks.add(entity);\n } else if (entity instanceof DwgAttrib && !addingToBlock) {\n dwgObjectsWithoutBlocks.add(entity);\n } else if (entity instanceof DwgAttdef && !addingToBlock) {\n dwgObjectsWithoutBlocks.add(entity);\n } else if (entity instanceof DwgBlock) {\n addingToBlock = true;\n } else if (entity instanceof DwgEndblk) {\n addingToBlock = false;\n } else if (entity instanceof DwgBlockHeader) {\n addingToBlock = true;\n } else if (entity instanceof DwgInsert && !addingToBlock) {\n /* double[] p = ((DwgInsert) entity).getInsertionPoint();\n Point2D point = new Point2D.Double(p[0], p[1]);\n double[] scale = ((DwgInsert) entity).getScale();\n double rot = ((DwgInsert) entity).getRotation();\n int blockHandle = ((DwgInsert) entity).getBlockHeaderHandle();\n manageInsert(point, scale, rot, blockHandle, i,\n dwgObjectsWithoutBlocks);*/\n } else {\n // System.out.println(\"Detectado dwgObject pendiente de implementar\");\n }\n } catch (StackOverflowError e) {\n e.printStackTrace();\n System.out.println(\"Overflowerror at object: \" + i);\n }\n }\n dwgObjects = dwgObjectsWithoutBlocks;\n }", "public static void logVersionInfo() {\n Log.i(TAG, \"vendor : \" + GLES20.glGetString(GLES20.GL_VENDOR));\n Log.i(TAG, \"renderer: \" + GLES20.glGetString(GLES20.GL_RENDERER));\n Log.i(TAG, \"version : \" + GLES20.glGetString(GLES20.GL_VERSION));\n\n if (false) {\n int[] values = new int[1];\n GLES30.glGetIntegerv(GLES30.GL_MAJOR_VERSION, values, 0);\n int majorVersion = values[0];\n GLES30.glGetIntegerv(GLES30.GL_MINOR_VERSION, values, 0);\n int minorVersion = values[0];\n if (GLES30.glGetError() == GLES30.GL_NO_ERROR) {\n Log.i(TAG, \"iversion: \" + majorVersion + \".\" + minorVersion);\n }\n }\n }", "def get_blacklist_entry(self, blacklist_entry_id):\n \"\"\"Get a single blacklist entry\n\n Keyword arguments:\n blacklist_entry_id -- The unique identifier of the blacklist entry to get.\n \"\"\"\n get_blacklist_entries_endpoint = Template(\"${rest_root}/blacklist/${public_key}/${blacklist_entry_id}\")\n url = get_blacklist_entries_endpoint.substitute(rest_root=self._rest_root, public_key=self._public_key, blacklist_entry_id=blacklist_entry_id)\n\n response = self.__get_request(url)\n return response[\"entry\"]", "def blacklist_bulk(self, blacklist):\n \"\"\"\n Add blacklist entries to the engine node in bulk. For blacklist to work,\n you must also create a rule with action \"Apply Blacklist\".\n First create your blacklist entries using :class:`smc.elements.other.Blacklist`\n then provide the blacklist to this method.\n \n :param blacklist Blacklist: pre-configured blacklist entries\n \n .. note:: This method requires SMC version >= 6.4\n \"\"\"\n self.make_request(\n EngineCommandFailed,\n method='create',\n resource='blacklist',\n json=blacklist.entries)" ]
[ 0.6555547118186951, 0.6552377343177795, 0.6530686616897583, 0.6528235077857971, 0.6493707895278931, 0.6475033164024353, 0.644248366355896, 0.6430034041404724, 0.638952374458313, 0.6351384520530701, 0.6331576108932495, 0.6322445869445801 ]
Generate the certificate blocklists. <certItem issuerName="MIGQMQswCQYD...IENB"> <serialNumber>UoRGnb96CUDTxIqVry6LBg==</serialNumber> </certItem> or <certItem subject='MCIxIDAeBgNVBAMMF0Fub3RoZXIgVGVzdCBFbmQtZW50aXR5' pubKeyHash='VCIlmPM9NkgFQtrs4Oa5TeFcDu6MWRTKSNdePEhOgD8='> </certItem>
def write_cert_items(xml_tree, records, api_ver=3, app_id=None, app_ver=None): """Generate the certificate blocklists. <certItem issuerName="MIGQMQswCQYD...IENB"> <serialNumber>UoRGnb96CUDTxIqVry6LBg==</serialNumber> </certItem> or <certItem subject='MCIxIDAeBgNVBAMMF0Fub3RoZXIgVGVzdCBFbmQtZW50aXR5' pubKeyHash='VCIlmPM9NkgFQtrs4Oa5TeFcDu6MWRTKSNdePEhOgD8='> </certItem> """ if not records or not should_include_certs(app_id, app_ver): return certItems = etree.SubElement(xml_tree, 'certItems') for item in records: if item.get('subject') and item.get('pubKeyHash'): cert = etree.SubElement(certItems, 'certItem', subject=item['subject'], pubKeyHash=item['pubKeyHash']) else: cert = etree.SubElement(certItems, 'certItem', issuerName=item['issuerName']) serialNumber = etree.SubElement(cert, 'serialNumber') serialNumber.text = item['serialNumber']
[ "def _possible_issuers(self, cert):\n \"\"\"\n Returns a generator that will list all possible issuers for the cert\n\n :param cert:\n An asn1crypto.x509.Certificate object to find the issuer of\n \"\"\"\n\n issuer_hashable = cert.issuer.hashable\n if issuer_hashable not in self._subject_map:\n return\n\n for issuer in self._subject_map[issuer_hashable]:\n # Info from the authority key identifier extension can be used to\n # eliminate possible options when multiple keys with the same\n # subject exist, such as during a transition, or with cross-signing.\n if cert.authority_key_identifier and issuer.key_identifier:\n if cert.authority_key_identifier != issuer.key_identifier:\n continue\n elif cert.authority_issuer_serial:\n if cert.authority_issuer_serial != issuer.issuer_serial:\n continue\n\n yield issuer", "public ArrayList<X509Certificate> getCertificateChain(PKCS7 block)\n throws IOException\n {\n X509Certificate userCert;\n userCert = block.getCertificate(certificateSerialNumber, issuerName);\n if (userCert == null)\n return null;\n\n ArrayList<X509Certificate> certList = new ArrayList<X509Certificate>();\n certList.add(userCert);\n\n X509Certificate[] pkcsCerts = block.getCertificates();\n if (pkcsCerts == null\n || userCert.getSubjectDN().equals(userCert.getIssuerDN())) {\n return certList;\n }\n\n Principal issuer = userCert.getIssuerDN();\n int start = 0;\n while (true) {\n boolean match = false;\n int i = start;\n while (i < pkcsCerts.length) {\n if (issuer.equals(pkcsCerts[i].getSubjectDN())) {\n // next cert in chain found\n certList.add(pkcsCerts[i]);\n // if selected cert is self-signed, we're done\n // constructing the chain\n if (pkcsCerts[i].getSubjectDN().equals(\n pkcsCerts[i].getIssuerDN())) {\n start = pkcsCerts.length;\n } else {\n issuer = pkcsCerts[i].getIssuerDN();\n X509Certificate tmpCert = pkcsCerts[start];\n pkcsCerts[start] = pkcsCerts[i];\n pkcsCerts[i] = tmpCert;\n start++;\n }\n match = true;\n break;\n } else {\n i++;\n }\n }\n if (!match)\n break;\n }\n\n return certList;\n }", "public CertificateListDescriptionInner listByIotHub(String resourceGroupName, String resourceName) {\n return listByIotHubWithServiceResponseAsync(resourceGroupName, resourceName).toBlocking().single().body();\n }", "def gen_subject\n subject_name = \"/C=#{EasyRSA::Config.country}\"\n subject_name += \"/ST=#{EasyRSA::Config.state}\" unless !EasyRSA::Config.state || EasyRSA::Config.state.empty?\n subject_name += \"/L=#{EasyRSA::Config.city}\"\n subject_name += \"/O=#{EasyRSA::Config.company}\"\n subject_name += \"/OU=#{EasyRSA::Config.orgunit}\"\n subject_name += \"/CN=#{@id}\"\n subject_name += \"/name=#{EasyRSA::Config.name}\" unless !EasyRSA::Config.name || EasyRSA::Config.name.empty?\n subject_name += \"/emailAddress=#{@email}\"\n\n @cert.subject = OpenSSL::X509::Name.parse(subject_name)\n end", "def gen_issuer\n name = \"/C=#{EasyRSA::Config.country}\"\n name += \"/ST=#{EasyRSA::Config.state}\" unless !EasyRSA::Config.state || EasyRSA::Config.state.empty?\n name += \"/L=#{EasyRSA::Config.city}\"\n name += \"/O=#{EasyRSA::Config.company}\"\n name += \"/OU=#{EasyRSA::Config.orgunit}\"\n name += \"/CN=#{EasyRSA::Config.server}\"\n name += \"/name=#{EasyRSA::Config.name}\" unless !EasyRSA::Config.name || EasyRSA::Config.name.empty?\n name += \"/name=#{EasyRSA::Config.orgunit}\" if !EasyRSA::Config.name || EasyRSA::Config.name.empty?\n name += \"/emailAddress=#{EasyRSA::Config.email}\"\n\n @ca_cert.issuer = OpenSSL::X509::Name.parse(name)\n end", "public PagedList<Certificate> 
list(final CertificateListOptions certificateListOptions) {\n ServiceResponseWithHeaders<Page<Certificate>, CertificateListHeaders> response = listSinglePageAsync(certificateListOptions).toBlocking().single();\n return new PagedList<Certificate>(response.body()) {\n @Override\n public Page<Certificate> nextPage(String nextPageLink) {\n CertificateListNextOptions certificateListNextOptions = null;\n if (certificateListOptions != null) {\n certificateListNextOptions = new CertificateListNextOptions();\n certificateListNextOptions.withClientRequestId(certificateListOptions.clientRequestId());\n certificateListNextOptions.withReturnClientRequestId(certificateListOptions.returnClientRequestId());\n certificateListNextOptions.withOcpDate(certificateListOptions.ocpDate());\n }\n return listNextSinglePageAsync(nextPageLink, certificateListNextOptions).toBlocking().single().body();\n }\n };\n }", "public TBSCertificate buildTBSCertificate(PrincipalIndentifier subjectName,\n PublicKeyParameters subject, X509CertificateParameters parameters) throws IOException\n {\n PrincipalIndentifier issuerName;\n CertifiedPublicKey issuer = null;\n\n if (this.signer instanceof CertifyingSigner) {\n issuer = ((CertifyingSigner) this.signer).getCertifier();\n issuerName = issuer.getSubject();\n } else {\n issuerName = subjectName;\n }\n\n BcX509TBSCertificateBuilder builder = getTBSCertificateBuilder();\n\n builder.setSerialNumber(new BigInteger(128, this.random)).setIssuer(issuerName);\n\n addValidityDates(builder);\n\n extendsTBSCertificate(builder, issuer, subjectName, subject, parameters);\n\n return builder.setSubject(subjectName).setSubjectPublicKeyInfo(subject).setSignature(this.signer).build();\n }", "private SSLContext buildSSLContext(String keystorePath, String keystoreType, String keystorePassword, String keystoreKeyPassword, String keystoreCertAlias,\n String truststorePath, String truststoreType, String truststorePassword,\n boolean validateCerts, String crlPath,\n String secureRandomAlgorithm,\n boolean validatePeerCerts, boolean enableCRLDP,\n boolean enableOCSP, String ocspResponderURL,\n String sslKeystoreProvider,\n String sslTruststoreProvider,\n String sslProvider) {\n try {\n URL keyStoreURL = loadResource(keystorePath);\n KeyStore keyStore = getKeyStore(keyStoreURL,\n keystoreType != null ? keystoreType : \"JKS\",\n keystorePassword, sslKeystoreProvider);\n\n // key managers\n String _keyManagerFactoryAlgorithm = Security.getProperty(\"ssl.KeyManagerFactory.algorithm\") == null\n ? KeyManagerFactory.getDefaultAlgorithm()\n : Security.getProperty(\"ssl.KeyManagerFactory.algorithm\");\n KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance(_keyManagerFactoryAlgorithm);\n keyManagerFactory.init(keyStore, keystoreKeyPassword == null ? null : keystoreKeyPassword.toCharArray());\n KeyManager[] keyManagers = keyManagerFactory.getKeyManagers();\n\n // trust managers - possibly with OCSP\n TrustManager[] trustManagers = null;\n SecureRandom random = (secureRandomAlgorithm == null) ? null : SecureRandom.getInstance(secureRandomAlgorithm);\n if (truststorePath != null) {\n URL trustStoreURL = loadResource(truststorePath);\n KeyStore trustStore = getKeyStore(trustStoreURL,\n truststoreType != null ? truststoreType : \"JKS\",\n truststorePassword, sslTruststoreProvider);\n\n String _trustManagerFactoryAlgorithm = Security.getProperty(\"ssl.TrustManagerFactory.algorithm\") == null\n ? 
TrustManagerFactory.getDefaultAlgorithm()\n : Security.getProperty(\"ssl.TrustManagerFactory.algorithm\");\n\n Collection<? extends CRL> crls = crlPath == null ? null : loadCRL(crlPath);\n\n if (validateCerts && keyStore != null) {\n if (keystoreCertAlias == null) {\n List<String> aliases = Collections.list(keyStore.aliases());\n keystoreCertAlias = aliases.size() == 1 ? aliases.get(0) : null;\n }\n\n Certificate cert = keystoreCertAlias == null ? null : keyStore.getCertificate(keystoreCertAlias);\n if (cert == null) {\n throw new IllegalArgumentException(\"No certificate found in the keystore\" + (keystoreCertAlias == null ? \"\" : \" for alias \\\"\" + keystoreCertAlias + \"\\\"\"));\n }\n\n CertificateValidator validator = new CertificateValidator(trustStore, crls);\n validator.setEnableCRLDP(enableCRLDP);\n validator.setEnableOCSP(enableOCSP);\n validator.setOcspResponderURL(ocspResponderURL);\n validator.validate(keyStore, cert);\n }\n\n // Revocation checking is only supported for PKIX algorithm\n // see org.eclipse.jetty.util.ssl.SslContextFactory.getTrustManagers()\n if (validatePeerCerts && _trustManagerFactoryAlgorithm.equalsIgnoreCase(\"PKIX\")) {\n PKIXBuilderParameters pbParams = new PKIXBuilderParameters(trustStore, new X509CertSelector());\n\n // Make sure revocation checking is enabled\n pbParams.setRevocationEnabled(true);\n\n if (crls != null && !crls.isEmpty()) {\n pbParams.addCertStore(CertStore.getInstance(\"Collection\", new CollectionCertStoreParameters(crls)));\n }\n\n if (enableCRLDP) {\n // Enable Certificate Revocation List Distribution Points (CRLDP) support\n System.setProperty(\"com.sun.security.enableCRLDP\", \"true\");\n }\n\n if (enableOCSP) {\n // Enable On-Line Certificate Status Protocol (OCSP) support\n Security.setProperty(\"ocsp.enable\", \"true\");\n\n if (ocspResponderURL != null) {\n // Override location of OCSP Responder\n Security.setProperty(\"ocsp.responderURL\", ocspResponderURL);\n }\n }\n\n TrustManagerFactory trustManagerFactory = TrustManagerFactory.getInstance(_trustManagerFactoryAlgorithm);\n trustManagerFactory.init(new CertPathTrustManagerParameters(pbParams));\n\n trustManagers = trustManagerFactory.getTrustManagers();\n } else {\n TrustManagerFactory trustManagerFactory = TrustManagerFactory.getInstance(_trustManagerFactoryAlgorithm);\n trustManagerFactory.init(trustStore);\n\n trustManagers = trustManagerFactory.getTrustManagers();\n }\n }\n\n SSLContext context;\n if (null == sslProvider || sslProvider.isEmpty()) {\n context = SSLContext.getInstance(\"TLS\");\n } else {\n context = SSLContext.getInstance(\"TLS\", sslProvider);\n }\n\n context.init(keyManagers, trustManagers, random);\n\n return context;\n } catch (Exception e) {\n throw new IllegalArgumentException(\"Unable to build SSL context\", e);\n }\n }", "def chain(self, certlist):\n \"\"\"\n Construct the chain of certificates leading from 'self' to the\n self signed root using the certificates in 'certlist'. If the\n list does not provide all the required certs to go to the root\n the function returns a incomplete chain starting with the\n certificate. 
This fact can be tested by tchecking if the last\n certificate of the returned chain is self signed (if c is the\n result, c[-1].isSelfSigned())\n \"\"\"\n d = {}\n for c in certlist:\n # XXX we should check if we have duplicate\n d[c.subject] = c\n res = [self]\n cur = self\n while not cur.isSelfSigned():\n if cur.issuer in d:\n possible_issuer = d[cur.issuer]\n if cur.isIssuerCert(possible_issuer):\n res.append(possible_issuer)\n cur = possible_issuer\n else:\n break\n return res", "public ServiceFuture<List<CertificateItem>> getCertificatesAsync(final String vaultBaseUrl, final Integer maxresults, final Boolean includePending, final ListOperationCallback<CertificateItem> serviceCallback) {\n return AzureServiceFuture.fromPageResponse(\n getCertificatesSinglePageAsync(vaultBaseUrl, maxresults, includePending),\n new Func1<String, Observable<ServiceResponse<Page<CertificateItem>>>>() {\n @Override\n public Observable<ServiceResponse<Page<CertificateItem>>> call(String nextPageLink) {\n return getCertificatesNextSinglePageAsync(nextPageLink);\n }\n },\n serviceCallback);\n }", "CertificatePinner provideCertificatePinnerFor(Environment environment, CertificateBlacklist certificateBlacklist) {\n CertificatePinner.Builder certificatePinnerBuilder = new CertificatePinner.Builder();\n Map<String, List<String>> certificatesPins = provideCertificatesPinsFor(environment);\n certificatesPins = removeBlacklistedPins(certificatesPins, certificateBlacklist);\n addCertificatesPins(certificatesPins, certificatePinnerBuilder);\n return certificatePinnerBuilder.build();\n }", "public byte[] getTBSCertList() throws CRLException {\n if (tbsCertList == null)\n throw new CRLException(\"Uninitialized CRL\");\n byte[] dup = new byte[tbsCertList.length];\n System.arraycopy(tbsCertList, 0, dup, 0, dup.length);\n return dup;\n }" ]
[ 0.6695551872253418, 0.6681156158447266, 0.6666167378425598, 0.664018988609314, 0.6620565056800842, 0.6599345207214355, 0.6576005816459656, 0.6559449434280396, 0.6530895233154297, 0.6477333307266235, 0.6466966271400452, 0.6456843614578247 ]
Create or update a label
def label(self, name, color, update=True): """Create or update a label """ url = '%s/labels' % self data = dict(name=name, color=color) response = self.http.post( url, json=data, auth=self.auth, headers=self.headers ) if response.status_code == 201: return True elif response.status_code == 422 and update: url = '%s/%s' % (url, name) response = self.http.patch( url, json=data, auth=self.auth, headers=self.headers ) response.raise_for_status() return False
[ "def create_label(self, label, doc=None, callback=dummy_progress_cb):\n \"\"\"\n Create a new label\n\n Arguments:\n doc --- first document on which the label must be added (required\n for now)\n \"\"\"\n if doc:\n clone = doc.clone() # make sure it's serializable\n r = self.index.create_label(label, doc=clone)\n return r", "def create_label(self, label, doc=None):\n \"\"\"\n Create a new label\n\n Arguments:\n doc --- first document on which the label must be added (required\n for now)\n \"\"\"\n label = copy.copy(label)\n assert(label not in self.labels.values())\n self.labels[label.name] = label\n self.label_guesser.load(label.name)\n # TODO(Jflesch): Should train with previous documents\n if doc:\n doc.add_label(label)\n self.upd_doc(doc)\n self.commit()", "def make_label(id_, lineno):\n \"\"\" Creates a label entry. Returns None on error.\n \"\"\"\n entry = SYMBOL_TABLE.declare_label(id_, lineno)\n if entry:\n gl.DATA_LABELS[id_] = gl.DATA_PTR_CURRENT # This label points to the current DATA block index\n return entry", "def label(self, label):\n \"\"\"\n set the label\n \"\"\"\n if self.direction in ['i'] and label is not None:\n raise ValueError(\"label not accepted for indep dimension\")\n\n\n if label is None:\n self._label = label\n return\n\n if not isinstance(label, str):\n try:\n label = str(label)\n except:\n raise TypeError(\"label must be of type str\")\n\n self._label = label", "def add_label(self, name, color):\n \"\"\"Add a new label. It's id will automatically be calculated.\"\"\"\n color_upper = color.upper()\n if not self._color_re.match(color_upper):\n raise ValueError('Invalid color: {}'.format(color))\n\n labels_tag = self.root[0]\n last_id = int(labels_tag[-1].get('id'))\n new_id = str(last_id + 1)\n\n new_label = etree.Element('label', id=new_id, color=color_upper)\n new_label.text = name\n\n labels_tag.append(new_label)", "def make_label(self, path):\n \"\"\"\n this borrows too much from the internals of ofs\n maybe expose different parts of the api?\n \"\"\"\n from datetime import datetime\n from StringIO import StringIO\n path = path.lstrip(\"/\")\n bucket, label = path.split(\"/\", 1)\n\n bucket = self.ofs._require_bucket(bucket)\n key = self.ofs._get_key(bucket, label)\n if key is None:\n key = bucket.new_key(label)\n self.ofs._update_key_metadata(key, { '_creation_time': str(datetime.utcnow()) })\n key.set_contents_from_file(StringIO(''))\n key.close()", "def declare_label(self, label, lineno, value=None, local=False, namespace=None):\n \"\"\" Sets a label with the given value or with the current address (org)\n if no value is passed.\n\n Exits with error if label already set,\n otherwise return the label object\n \"\"\"\n ex_label, namespace = Memory.id_name(label, namespace)\n\n is_address = value is None\n if value is None:\n value = self.org\n\n if ex_label in self.local_labels[-1].keys():\n self.local_labels[-1][ex_label].define(value, lineno)\n self.local_labels[-1][ex_label].is_address = is_address\n else:\n self.local_labels[-1][ex_label] = Label(ex_label, lineno, value, local, namespace, is_address)\n\n self.set_memory_slot()\n\n return self.local_labels[-1][ex_label]", "def label_present(name, value, node=None, apiserver_url=None):\n '''\n .. versionadded:: 2016.3.0\n\n Set label to the current node\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' k8s.label_present hw/disktype ssd\n\n salt '*' k8s.label_present hw/disktype ssd kube-node.cluster.local http://kube-master.cluster.local\n\n '''\n ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}\n\n # Get salt minion ID\n node = _guess_node_id(node)\n # Try to get kubernetes master\n apiserver_url = _guess_apiserver(apiserver_url)\n if apiserver_url is None:\n return False\n\n # Get all labels\n labels = _get_labels(node, apiserver_url)\n\n if name not in labels:\n # This is a new label\n ret['changes'] = {name: value}\n labels[name] = str(value)\n res = _set_labels(node, apiserver_url, labels)\n if res.get('status') == 409:\n # there is an update during operation, need to retry\n log.debug(\"Got 409, will try later\")\n ret['changes'] = {}\n ret['comment'] = \"Could not create label {0}, please retry\".format(name)\n else:\n ret['comment'] = \"Label {0} created\".format(name)\n elif labels.get(name) != str(value):\n # This is a old label and we are going to edit it\n ret['changes'] = {name: str(value)}\n labels[name] = str(value)\n res = _set_labels(node, apiserver_url, labels)\n if res.get('status') == 409:\n # there is an update during operation, need to retry\n log.debug(\"Got 409, will try later\")\n ret['changes'] = {}\n ret['comment'] = \"Could not update label {0}, please retry\".format(name)\n else:\n ret['comment'] = \"Label {0} updated\".format(name)\n else:\n # This is a old label and it has already the wanted value\n ret['comment'] = \"Label {0} already set\".format(name)\n\n return ret", "def update_label(self, old_label, new_label):\n \"\"\"\n Update a label\n\n Replace 'old_label' by 'new_label'\n \"\"\"\n logger.info(\"%s : Updating label ([%s] -> [%s])\"\n % (str(self), old_label.name, new_label.name))\n labels = self.labels\n try:\n labels.remove(old_label)\n except ValueError:\n # this document doesn't have this label\n return\n\n logger.info(\"%s : Updating label ([%s] -> [%s])\"\n % (str(self), old_label.name, new_label.name))\n labels.append(new_label)\n with self.fs.open(self.fs.join(self.path, self.LABEL_FILE), 'w') \\\n as file_desc:\n for label in labels:\n file_desc.write(\"%s,%s\\n\" % (label.name,\n label.get_color_str()))", "private Label getOrCreateLabel(final String name) {\n\n\t\tLabel label = labelCache.get(name);\n\t\tif (label == null) {\n\n\t\t\tlabel = new LabelImpl(name);\n\t\t\tlabelCache.put(name, label);\n\t\t}\n\n\t\treturn label;\n\t}", "def createLabel(self, name):\n \"\"\"Create a new label.\n\n Args:\n name (str): Label name.\n\n Returns:\n gkeepapi.node.Label: The new label.\n\n Raises:\n LabelException: If the label exists.\n \"\"\"\n if self.findLabel(name):\n raise exception.LabelException('Label exists')\n node = _node.Label()\n node.name = name\n self._labels[node.id] = node # pylint: disable=protected-access\n return node", "def add(self, label):\n \"\"\"Add a label.\n\n Args:\n label (gkeepapi.node.Label): The Label object.\n \"\"\"\n self._labels[label.id] = label\n self._dirty = True" ]
[ 0.7641974091529846, 0.7632727026939392, 0.7598841786384583, 0.7563969492912292, 0.7557106018066406, 0.7523928880691528, 0.7517892122268677, 0.7472151517868042, 0.7379534244537354, 0.7373034954071045, 0.7370362877845764, 0.735865592956543 ]
A dictionary with a one-to-one translation of keywords is used to provide the transformation.
def translate(source, dictionary): '''A dictionary with a one-to-one translation of keywords is used to provide the transformation. ''' toks = tokenize.generate_tokens(StringIO(source).readline) result = [] for toktype, tokvalue, _, _, _ in toks: if toktype == tokenize.NAME and tokvalue in dictionary: result.append((toktype, dictionary[tokvalue])) else: result.append((toktype, tokvalue)) return tokenize.untokenize(result)
[ "def transform(matches, framework, namespace, static_endpoint):\n \"\"\"\n The actual transformation occurs here.\n\n flask example: images/staticfy.jpg', ==>\n \"{{ url_for('static', filename='images/staticfy.jpg') }}\"\n \"\"\"\n transformed = []\n namespace = namespace + '/' if namespace else ''\n\n for attribute, elements in matches:\n for element in elements:\n asset_location = get_asset_location(element, attribute)\n\n # string substitution\n sub_dict = {\n 'static_endpoint': static_endpoint, 'namespace': namespace,\n 'asset_location': asset_location\n }\n transformed_string = frameworks[framework] % sub_dict\n\n res = (attribute, element[attribute], transformed_string)\n transformed.append(res)\n\n return transformed", "def tt(self, key, locale=None, locale2=None, default=I18n.DFT):\n \"\"\"\n |tt| means text transform.\n \n key: tt key.\n locale: main locale key into |self.tt_dd|. Default to |self.locale|\n locale2: fallback locale key into |self.tt_dd|. Default to |self.locale2|\n default: a default value in case tt value is not found. Default to raise KeyError.\n \"\"\"\n \n #/\n locale = locale or self.locale\n \n locale2 = locale2 or self.locale2\n \n #/ get tt dict of the locale\n tt_d = self.tt_dd.get(locale, None)\n \n if tt_d is not None:\n #/\n val = tt_d.get(key, I18n.DFT)\n \n #/ if tt value is found\n if val is not I18n.DFT:\n return val\n \n #/ tt value is not found\n \n #/ if has locale2\n ## Y\n if locale2 and locale2 != locale:\n #/ fall back to locale2\n return self.tt(key, locale=locale2, default=default)\n ## N\n else:\n #/ if default is specified\n ## N\n if default is I18n.DFT:\n raise KeyError(key)\n ## Y\n else:\n return default", "def translate_keyword_tokens( token )\n keywords = [\"__LINE__\", \"__ENCODING__\", \"__FILE__\", \"BEGIN\", \n \"END\", \"alias\", \"and\", \"begin\", \"break\", \"case\",\n \"class\", \"def\", \"defined?\", \"do\", \"else\", \"elsif\",\n \"end\", \"ensure\", \"false\", \"for\", \"if\", \"in\",\n \"module\", \"next\", \"nil\", \"not\", \"or\", \"redo\",\n \"rescue\", \"retry\", \"return\", \"self\", \"super\",\n \"then\", \"true\", \"undef\", \"unless\", \"until\", \n \"when\", \"while\", \"yield\"]\n if keywords.include?( token.content )\n token.type = token.content.downcase.to_sym\n # Change the state if we match a keyword\n @expr_state = :beg\n end\n \n # A couple of exceptions \n if token.content == \"BEGIN\"\n token.type = :begin_global \n @expr_state = :beg\n elsif token.content == \"END\"\n token.type = :end_global \n @expr_state = :beg\n end\n \n token\n end", "def _kw(keywords):\n \"\"\"Turn list of keywords into dictionary.\"\"\"\n r = {}\n for k, v in keywords:\n r[k] = v\n return r", "def compile_keywords(keywords):\n \"\"\"\n Translate `keywords` to full keyword records as they are used in Aleph.\n\n Returns tuple with three lists, each of which is later used in different\n part of the MRC/MARC record.\n\n Args:\n keywords (list): List of keyword strings.\n\n Returns:\n tuple: (mdt_list, cz_keyword_list, en_keyword_list)\n \"\"\"\n mdt = []\n cz_keywords = []\n en_keywords = []\n for keyword in keywords:\n keyword = keyword_to_info(keyword.encode(\"utf-8\"))\n\n if not keyword:\n continue\n\n cz_keywords.append({\n \"uid\": keyword[\"uid\"],\n \"zahlavi\": keyword[\"zahlavi\"],\n \"zdroj\": \"czenas\",\n })\n\n if keyword.get(\"mdt\"):\n mdt.append({\n \"mdt\": keyword[\"mdt\"],\n \"mrf\": keyword[\"mrf\"],\n })\n\n angl_ekvivalent = keyword.get(\"angl_ekvivalent\")\n if angl_ekvivalent:\n en_keywords.append({\n 
\"zahlavi\": angl_ekvivalent,\n \"zdroj\": keyword.get(\"zdroj_angl_ekvivalentu\") or \"eczenas\",\n })\n\n return mdt, cz_keywords, en_keywords", "def transform_item(self, item):\n \"\"\"\n Transforms JSON object\n \"\"\"\n obj = {\n 'id': item['primaryId'],\n 'label': item['symbol'],\n 'full_name': item['name'],\n 'type': item['soTermId'],\n 'taxon': {'id': item['taxonId']},\n }\n if 'synonyms' in item:\n obj['synonyms'] = item['synonyms']\n if 'crossReferenceIds' in item:\n obj['xrefs'] = [self._normalize_id(x) for x in item['crossReferenceIds']]\n\n # TODO: synonyms\n # TODO: genomeLocations\n # TODO: geneLiteratureUrl\n return obj", "def _transform_incoming(self, son, collection, skip=0):\n \"\"\"Recursively replace all keys that need transforming.\"\"\"\n skip = 0 if skip < 0 else skip\n if isinstance(son, dict):\n for (key, value) in son.items():\n if key.startswith('$'):\n if isinstance(value, dict):\n skip = 2\n else:\n pass # allow mongo to complain\n if self.replace in key:\n k = key if skip else self.transform_key(key)\n son[k] = self._transform_incoming(\n son.pop(key), collection, skip=skip - 1)\n elif isinstance(value, dict): # recurse into sub-docs\n son[key] = self._transform_incoming(value, collection,\n skip=skip - 1)\n elif isinstance(value, list):\n son[key] = [\n self._transform_incoming(k, collection, skip=skip - 1)\n for k in value\n ]\n return son\n elif isinstance(son, list):\n return [self._transform_incoming(item, collection, skip=skip - 1)\n for item in son]\n else:\n return son", "function normalize(word) {\n var result = '';\n for (var i = 0; i < word.length; i++) {\n var kana = word[i];\n var target = transform[kana];\n if (target === false) {\n continue;\n }\n if (target) {\n kana = target;\n }\n result += kana;\n }\n return result;\n }", "def resolve_dict_keywords(keywords):\n \"\"\"Replace dictionary content with html.\n\n :param keywords: The keywords.\n :type keywords: dict\n\n :return: New keywords with updated content.\n :rtype: dict\n \"\"\"\n\n for keyword in ['value_map', 'inasafe_fields', 'inasafe_default_values']:\n value = keywords.get(keyword)\n if value:\n value = value.get('content')\n value = KeywordIO._dict_to_row(value).to_html()\n keywords[keyword]['content'] = value\n\n value_maps = keywords.get('value_maps')\n thresholds = keywords.get('thresholds')\n if value_maps:\n value_maps = value_maps.get('content')\n value_maps = KeywordIO._value_maps_row(value_maps).to_html()\n keywords['value_maps']['content'] = value_maps\n if thresholds:\n thresholds = thresholds.get('content')\n thresholds = KeywordIO._threshold_to_row(thresholds).to_html()\n keywords['thresholds']['content'] = thresholds\n\n return keywords", "def kwargs_from_keyword(from_kwargs,to_kwargs,keyword,clean_origin=True):\n\t\"\"\"\n\tLooks for keys of the format keyword_value. 
\n\tAnd return a dictionary with {keyword:value} format\n\n\tParameters:\n\t-----------\n\t\tfrom_kwargs : dict\n\t\t\tOriginal dictionary\n\t\tto_kwargs : dict\n\t\t\tDictionary where the items will be appended\n\t\tkeyword : string\n\t\t\tKeyword to look for in the orginal dictionary\n\t\tclean_origin : bool\n\t\t\tIf True then the k,v pairs from the original \n\t\t\tdictionary are deleted\n\t\"\"\"\n\tfor k in list(from_kwargs.keys()):\n\t\tif '{0}_'.format(keyword) in k:\n\t\t\tto_kwargs[k.replace('{0}_'.format(keyword),'')]=from_kwargs[k]\n\t\t\tif clean_origin:\n\t\t\t\tdel from_kwargs[k]\n\treturn to_kwargs", "def _translate(self, options):\n \"\"\"\n Perform translation of feed options passed in as keyword\n arguments to CouchDB/Cloudant equivalent.\n \"\"\"\n translation = dict()\n for key, val in iteritems_(options):\n self._validate(key, val, feed_arg_types(self._source))\n try:\n if isinstance(val, STRTYPE):\n translation[key] = val\n elif not isinstance(val, NONETYPE):\n arg_converter = TYPE_CONVERTERS.get(type(val), json.dumps)\n translation[key] = arg_converter(val)\n except Exception as ex:\n raise CloudantArgumentError(115, key, ex)\n return translation", "def transform_outgoing(self, son, collection):\n \"\"\"Recursively restore all transformed keys.\"\"\"\n if isinstance(son, dict):\n for (key, value) in son.items():\n if self.replacement in key:\n k = self.revert_key(key)\n son[k] = self.transform_outgoing(son.pop(key), collection)\n elif isinstance(value, dict): # recurse into sub-docs\n son[key] = self.transform_outgoing(value, collection)\n elif isinstance(value, list):\n son[key] = [self.transform_outgoing(item, collection)\n for item in value]\n return son\n elif isinstance(son, list):\n return [self.transform_outgoing(item, collection)\n for item in son]\n else:\n return son" ]
[ 0.717033326625824, 0.7130061984062195, 0.7078253030776978, 0.707695484161377, 0.7022884488105774, 0.6966860294342041, 0.6933697462081909, 0.6905336380004883, 0.6890057921409607, 0.6862103939056396, 0.6845335364341736, 0.6836443543434143 ]
Function enhance: Enhance the object with new item or enhanced items
def enhance(self): """ Function enhance Enhance the object with new item or enhanced items """ self.update({'images': SubDict(self.api, self.objName, self.payloadObj, self.key, SubItemImages)})
[ "def enhance(self):\n \"\"\" Function enhance\n Enhance the object with new item or enhanced items\n \"\"\"\n self.update({'os_default_templates':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemOsDefaultTemplate)})\n self.update({'config_templates':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemConfigTemplate)})\n self.update({'ptables':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemPTable)})\n self.update({'media':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemMedia)})\n self.update({'architectures':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemArchitecture)})", "def enhance(self):\n \"\"\" Function enhance\n Enhance the object with new item or enhanced items\n \"\"\"\n self.update({'parameters':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemParameter)})\n self.update({'interfaces':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemInterface)})\n self.update({'subnets':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemSubnet)})", "def enhance(self):\n \"\"\" Function enhance\n Enhance the object with new item or enhanced items\n \"\"\"\n self.update({'os_default_templates':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemOsDefaultTemplate)})\n self.update({'operatingsystems':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemOperatingSystem)})", "def enhance(self):\n \"\"\" Function enhance\n Enhance the object with new item or enhanced items\n \"\"\"\n self.update({'puppetclasses':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemPuppetClasses)})\n self.update({'parameters':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemParameter)})\n self.update({'interfaces':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemInterface)})\n self.update({'smart_class_parameters':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemSmartClassParameter)})", "def enhance(self):\n \"\"\" Function enhance\n Enhance the object with new item or enhanced items\n \"\"\"\n self.update({'puppetclasses':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemPuppetClasses)})\n self.update({'parameters':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemParameter)})\n self.update({'smart_class_parameters':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n ItemSmartClassParameter)})", "def enhance(self):\n \"\"\" Function enhance\n Enhance the object with new item or enhanced items\n \"\"\"\n if self.objName in ['hosts', 'hostgroups',\n 'puppet_classes']:\n from foreman.itemSmartClassParameter\\\n import ItemSmartClassParameter\n self.update({'smart_class_parameters':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n ItemSmartClassParameter)})", "public T enhance(T t) {\n if (!needsEnhancement(t)) {\n return t;\n }\n\n try {\n return getEnhancedClass().getConstructor(baseClass).newInstance(t);\n } catch (Exception e) {\n throw new RuntimeException(String.format(\"Could not enhance object %s (%s)\", t, t.getClass()), e);\n }\n }", "private static void doEnhancement(CtClass cc, Version modelVersion) throws CannotCompileException,\n NotFoundException, ClassNotFoundException {\n CtClass inter = cp.get(OpenEngSBModel.class.getName());\n cc.addInterface(inter);\n addFields(cc);\n addGetOpenEngSBModelTail(cc);\n 
addSetOpenEngSBModelTail(cc);\n addRetrieveModelName(cc);\n addRetrieveModelVersion(cc, modelVersion);\n addOpenEngSBModelEntryMethod(cc);\n addRemoveOpenEngSBModelEntryMethod(cc);\n addRetrieveInternalModelId(cc);\n addRetrieveInternalModelTimestamp(cc);\n addRetrieveInternalModelVersion(cc);\n addToOpenEngSBModelValues(cc);\n addToOpenEngSBModelEntries(cc);\n cc.setModifiers(cc.getModifiers() & ~Modifier.ABSTRACT);\n }", "def enhance(self, inverse=False, gamma=1.0, stretch=\"no\",\n stretch_parameters=None, **kwargs):\n \"\"\"Image enhancement function. It applies **in this order** inversion,\n gamma correction, and stretching to the current image, with parameters\n *inverse* (see :meth:`Image.invert`), *gamma* (see\n :meth:`Image.gamma`), and *stretch* (see :meth:`Image.stretch`).\n \"\"\"\n self.invert(inverse)\n if stretch_parameters is None:\n stretch_parameters = {}\n\n stretch_parameters.update(kwargs)\n self.stretch(stretch, **stretch_parameters)\n self.gamma(gamma)", "def load(self, data):\n \"\"\" Function load\n Store the object data\n \"\"\"\n self.clear()\n self.update(data)\n self.enhance()", "def enhance2dataset(dset):\n \"\"\"Apply enhancements to dataset *dset* and return the resulting data\n array of the image.\"\"\"\n attrs = dset.attrs\n img = get_enhanced_image(dset)\n # Clip image data to interval [0.0, 1.0]\n data = img.data.clip(0.0, 1.0)\n data.attrs = attrs\n\n return data", "function enhancedEcommerceProductAction(track, action, data) {\n enhancedEcommerceTrackProduct(track);\n window.ga('ec:setAction', action, data || {});\n}" ]
[ 0.8752948045730591, 0.8729672431945801, 0.8695272207260132, 0.8679497838020325, 0.8630505800247192, 0.8407313823699951, 0.7447202205657959, 0.7079142928123474, 0.707258939743042, 0.6839107275009155, 0.6810131072998047, 0.6799886226654053 ]
Async component of _run
async def _run_payloads(self): """Async component of _run""" delay = 0.0 try: while self.running.is_set(): await self._start_payloads() await self._reap_payloads() await asyncio.sleep(delay) delay = min(delay + 0.1, 1.0) except Exception: await self._cancel_payloads() raise
[ "function doRun() {\n if (Module['calledRun']) return; // run may have just been called while the async setStatus time below was happening\n Module['calledRun'] = true;\n\n if (ABORT) return;\n\n ensureInitRuntime();\n\n preMain();\n\n\n if (Module['onRuntimeInitialized']) Module['onRuntimeInitialized']();\n\n if (Module['_main'] && shouldRunNow) Module['callMain'](args);\n\n postRun();\n }", "def run_module_async(kwargs, job_id, timeout_secs, started_sender, econtext):\n \"\"\"\n Execute a module with its run status and result written to a file,\n terminating on the process on completion. This function must run in a child\n forked using :func:`create_fork_child`.\n\n @param mitogen.core.Sender started_sender:\n A sender that will receive :data:`True` once the job has reached a\n point where its initial job file has been written. This is required to\n avoid a race where an overly eager controller can check for a task\n before it has reached that point in execution, which is possible at\n least on Python 2.4, where forking is not available for async tasks.\n \"\"\"\n arunner = AsyncRunner(\n job_id,\n timeout_secs,\n started_sender,\n econtext,\n kwargs\n )\n arunner.run()", "function runInAsync(options, res) {\n setTimeout(() => {\n if (res.errMsg.indexOf(':ok') >= 0 && typeof options.success === 'function') options.success(res)\n if (res.errMsg.indexOf(':fail') >= 0 && typeof options.fail === 'function') options.fail(res)\n if (typeof options.complete === 'function') options.complete(res)\n }, 0)\n}", "def run(self, conn, tmp, module_name, module_args, inject):\n ''' transfer the given module name, plus the async module, then run it '''\n\n # shell and command module are the same\n if module_name == 'shell':\n module_name = 'command'\n module_args += \" #USE_SHELL\"\n\n (module_path, is_new_style, shebang) = self.runner._copy_module(conn, tmp, module_name, module_args, inject)\n self.runner._low_level_exec_command(conn, \"chmod a+rx %s\" % module_path, tmp)\n\n return self.runner._execute_module(conn, tmp, 'async_wrapper', module_args,\n async_module=module_path,\n async_jid=self.runner.generated_jid,\n async_limit=self.runner.background,\n inject=inject\n )", "function run(options, ast, cb) {\n var report = new Report(options, ast);\n report.run(cb);\n}", "function AsyncRunner(times, fn) {\n Runner.apply(this, arguments);\n\n this.pause = 0;\n this.end = noop;\n this.times = times;\n}", "async function run(task, action, ...args) {\n const command = process.argv[2]\n const taskName =\n command && !command.startsWith('-') ? `${task} ${command}` : task\n const start = new Date()\n process.stdout.write(`Starting '${taskName}'...\\n`)\n try {\n await action(...args)\n process.stdout.write(\n `Finished '${taskName}' after ${new Date().getTime() -\n start.getTime()}ms\\n`\n )\n } catch (error) {\n process.stderr.write(`${error.stack}\\n`)\n }\n}", "def runner(coro):\n \"\"\"Function execution decorator.\"\"\"\n\n @wraps(coro)\n def inner(self, *args, **kwargs):\n if self.mode == 'async':\n return coro(self, *args, **kwargs)\n return self._loop.run_until_complete(coro(self, *args, **kwargs))\n\n return inner", "def run_async(**kwargs):\n '''\n Runs an Ansible Runner task in the background which will start immediately. 
Returns the thread object and a Runner object.\n\n This uses the same parameters as :py:func:`ansible_runner.interface.run`\n\n :returns: A tuple containing a :py:class:`threading.Thread` object and a :py:class:`ansible_runner.runner.Runner` object\n '''\n r = init_runner(**kwargs)\n runner_thread = threading.Thread(target=r.run)\n runner_thread.start()\n return runner_thread, r", "def schedule_run_async(resource_group_name, registry_name, run_request, custom_headers:nil)\n # Send request\n promise = begin_schedule_run_async(resource_group_name, registry_name, run_request, custom_headers:custom_headers)\n\n promise = promise.then do |response|\n # Defining deserialization method.\n deserialize_method = lambda do |parsed_response|\n result_mapper = Azure::ContainerRegistry::Mgmt::V2018_09_01::Models::Run.mapper()\n parsed_response = @client.deserialize(result_mapper, parsed_response)\n end\n\n # Waiting for response.\n @client.get_long_running_operation_result(response, deserialize_method)\n end\n\n promise\n end", "def run_async(self, time_limit):\n ''' Run this module asynchronously and return a poller. '''\n\n self.background = time_limit\n results = self.run()\n return results, poller.AsyncPoller(results, self)", "function() {\n self.tasksCount = 0;\n var list = [], i = 0;\n for(var t in self) {\n if( typeof(self[t]) == 'function') {\n list[i] = {\n func: self[t],\n cb: hasCallback(self[t])\n }\n ++i\n }\n }\n run(list)\n }" ]
[ 0.7492655515670776, 0.7317105531692505, 0.7314708232879639, 0.7230328917503357, 0.7164127826690674, 0.7092439532279968, 0.708595871925354, 0.7068096399307251, 0.7048815488815308, 0.7021708488464355, 0.701999306678772, 0.7010310292243958 ]
Start all queued payloads
async def _start_payloads(self): """Start all queued payloads""" with self._lock: for coroutine in self._payloads: task = self.event_loop.create_task(coroutine()) self._tasks.add(task) self._payloads.clear() await asyncio.sleep(0)
[ "async def _start_payloads(self, nursery):\n \"\"\"Start all queued payloads\"\"\"\n with self._lock:\n for coroutine in self._payloads:\n nursery.start_soon(coroutine)\n self._payloads.clear()\n await trio.sleep(0)", "def _start_payloads(self):\n \"\"\"Start all queued payloads\"\"\"\n with self._lock:\n payloads = self._payloads.copy()\n self._payloads.clear()\n for subroutine in payloads:\n thread = CapturingThread(target=subroutine)\n thread.start()\n self._threads.add(thread)\n self._logger.debug('booted thread %s', thread)\n time.sleep(0)", "async def _await_all(self):\n \"\"\"Async component of _run\"\"\"\n delay = 0.0\n # we run a top-level nursery that automatically reaps/cancels for us\n async with trio.open_nursery() as nursery:\n while self.running.is_set():\n await self._start_payloads(nursery=nursery)\n await trio.sleep(delay)\n delay = min(delay + 0.1, 1.0)\n # cancel the scope to cancel all payloads\n nursery.cancel_scope.cancel()", "function start(payload) {\n for (var id in this.callbacks) {\n this.isPending[id] = false;\n this.isHandled[id] = false;\n }\n this.isDispatching = true;\n this.pendingPayload = payload;\n}", "async def _run_payloads(self):\n \"\"\"Async component of _run\"\"\"\n delay = 0.0\n try:\n while self.running.is_set():\n await self._start_payloads()\n await self._reap_payloads()\n await asyncio.sleep(delay)\n delay = min(delay + 0.1, 1.0)\n except Exception:\n await self._cancel_payloads()\n raise", "def start(self, payload):\n \"\"\"Start the daemon and all processes or only specific processes.\"\"\"\n # Start specific processes, if `keys` is given in the payload\n if payload.get('keys'):\n succeeded = []\n failed = []\n for key in payload.get('keys'):\n success = self.process_handler.start_process(key)\n if success:\n succeeded.append(str(key))\n else:\n failed.append(str(key))\n\n message = ''\n if len(succeeded) > 0:\n message += 'Started processes: {}.'.format(', '.join(succeeded))\n status = 'success'\n if len(failed) > 0:\n message += '\\nNo paused, queued or stashed process for keys: {}'.format(', '.join(failed))\n status = 'error'\n\n answer = {'message': message.strip(), 'status': status}\n\n # Start a all processes and the daemon\n else:\n self.process_handler.start_all()\n if self.paused:\n self.paused = False\n answer = {'message': 'Daemon and all processes started.',\n 'status': 'success'}\n else:\n answer = {'message': 'Daemon already running, starting all processes.',\n 'status': 'success'}\n return answer", "def register_payload(self, *payloads, flavour: ModuleType):\n \"\"\"Queue one or more payload for execution after its runner is started\"\"\"\n for payload in payloads:\n self._logger.debug('registering payload %s (%s)', NameRepr(payload), NameRepr(flavour))\n self.runners[flavour].register_payload(payload)", "def payload_register(ptype, klass, pid):\n \"\"\" is used while a hook is running to let Juju know that a\n payload has been started.\"\"\"\n cmd = ['payload-register']\n for x in [ptype, klass, pid]:\n cmd.append(x)\n subprocess.check_call(cmd)", "def start(queue, profile=None, tag='salt/engine/sqs', owner_acct_id=None):\n '''\n Listen to sqs and fire message on event bus\n '''\n if __opts__.get('__role') == 'master':\n fire_master = salt.utils.event.get_master_event(\n __opts__,\n __opts__['sock_dir'],\n listen=False).fire_event\n else:\n fire_master = __salt__['event.send']\n\n message_format = __opts__.get('sqs.message_format', None)\n\n sqs = _get_sqs_conn(profile)\n q = None\n while True:\n if not q:\n q = 
sqs.get_queue(queue, owner_acct_id=owner_acct_id)\n q.set_message_class(boto.sqs.message.RawMessage)\n\n _process_queue(q, queue, fire_master, tag=tag, owner_acct_id=owner_acct_id, message_format=message_format)", "def run_payload(self, payload, *, flavour: ModuleType):\n \"\"\"Execute one payload after its runner is started and return its output\"\"\"\n return self.runners[flavour].run_payload(payload)", "def run(self):\n \"\"\"\n Execute all current and future payloads\n\n Blocks and executes payloads until :py:meth:`stop` is called.\n It is an error for any orphaned payload to return or raise.\n \"\"\"\n self._logger.info('runner started: %s', self)\n try:\n with self._lock:\n assert not self.running.is_set() and self._stopped.is_set(), 'cannot re-run: %s' % self\n self.running.set()\n self._stopped.clear()\n self._run()\n except Exception:\n self._logger.exception('runner aborted: %s', self)\n raise\n else:\n self._logger.info('runner stopped: %s', self)\n finally:\n with self._lock:\n self.running.clear()\n self._stopped.set()", "def _reap_payloads(self):\n \"\"\"Clean up all finished payloads\"\"\"\n for thread in self._threads.copy():\n # CapturingThread.join will throw\n if thread.join(timeout=0):\n self._threads.remove(thread)\n self._logger.debug('reaped thread %s', thread)" ]
[ 0.88545161485672, 0.8552003502845764, 0.7891626954078674, 0.7770432233810425, 0.7561456561088562, 0.7468627095222473, 0.7409204840660095, 0.7305784225463867, 0.7299909591674805, 0.7234981060028076, 0.7174237966537476, 0.70259028673172 ]
Clean up all finished payloads
async def _reap_payloads(self): """Clean up all finished payloads""" for task in self._tasks.copy(): if task.done(): self._tasks.remove(task) if task.exception() is not None: raise task.exception() await asyncio.sleep(0)
[ "def _reap_payloads(self):\n \"\"\"Clean up all finished payloads\"\"\"\n for thread in self._threads.copy():\n # CapturingThread.join will throw\n if thread.join(timeout=0):\n self._threads.remove(thread)\n self._logger.debug('reaped thread %s', thread)", "async def _cancel_payloads(self):\n \"\"\"Cancel all remaining payloads\"\"\"\n for task in self._tasks:\n task.cancel()\n await asyncio.sleep(0)\n for task in self._tasks:\n while not task.done():\n await asyncio.sleep(0.1)\n task.cancel()", "def clear(self, payload):\n \"\"\"Clear queue from any `done` or `failed` entries.\n\n The log will be rotated once. Otherwise we would loose all logs from\n thoes finished processes.\n \"\"\"\n self.logger.rotate(self.queue)\n self.queue.clear()\n self.logger.write(self.queue)\n\n answer = {'message': 'Finished entries have been removed.', 'status': 'success'}\n return answer", "def reset_everything(self, payload):\n \"\"\"Kill all processes, delete the queue and clean everything up.\"\"\"\n kill_signal = signals['9']\n self.process_handler.kill_all(kill_signal, True)\n self.process_handler.wait_for_finish()\n self.reset = True\n\n answer = {'message': 'Resetting current queue', 'status': 'success'}\n return answer", "def cleanup_payload(self, payload):\n \"\"\"\n Basically, turns payload that looks like ' \\\\n ' to ''. In the \n calling function, if this function returns '' no object is added \n for that payload.\n \"\"\"\n p = payload.replace('\\n', '')\n p = p.rstrip()\n p = p.lstrip()\n return p", "async def _start_payloads(self, nursery):\n \"\"\"Start all queued payloads\"\"\"\n with self._lock:\n for coroutine in self._payloads:\n nursery.start_soon(coroutine)\n self._payloads.clear()\n await trio.sleep(0)", "async def _await_all(self):\n \"\"\"Async component of _run\"\"\"\n delay = 0.0\n # we run a top-level nursery that automatically reaps/cancels for us\n async with trio.open_nursery() as nursery:\n while self.running.is_set():\n await self._start_payloads(nursery=nursery)\n await trio.sleep(delay)\n delay = min(delay + 0.1, 1.0)\n # cancel the scope to cancel all payloads\n nursery.cancel_scope.cancel()", "protected void cleanupFromFinish() {\n _response = null;\n _bufferSize = 0;\n _encoding = null;\n // LIBERTY _responseBuffer = null;\n // _outWriterEncoding = null;\n _gotOutputStream = false;\n _gotWriter = false;\n this._pwriter = null;\n }", "private function cleanUp()\n {\n if ($this->quiet) {\n $errors = explode(PHP_EOL, ob_get_clean());\n $shown = [];\n\n foreach ($errors as $error) {\n if ($error && !in_array($error, $shown)) {\n out($error, 'error');\n $shown[] = $error;\n }\n }\n }\n\n if (file_exists($this->tmpPharPath)) {\n @unlink($this->tmpPharPath);\n }\n if (file_exists($this->tmpPubkeyPath)) {\n @unlink($this->tmpPharPath);\n }\n }", "def _cleanup(self) -> None:\n \"\"\"Cleanup unused transports.\"\"\"\n if self._cleanup_handle:\n self._cleanup_handle.cancel()\n\n now = self._loop.time()\n timeout = self._keepalive_timeout\n\n if self._conns:\n connections = {}\n deadline = now - timeout\n for key, conns in self._conns.items():\n alive = []\n for proto, use_time in conns:\n if proto.is_connected():\n if use_time - deadline < 0:\n transport = proto.transport\n proto.close()\n if (key.is_ssl and\n not self._cleanup_closed_disabled):\n self._cleanup_closed_transports.append(\n transport)\n else:\n alive.append((proto, use_time))\n\n if alive:\n connections[key] = alive\n\n self._conns = connections\n\n if self._conns:\n self._cleanup_handle = 
helpers.weakref_handle(\n self, '_cleanup', timeout, self._loop)", "public void done() {\n\t\tfor(ByteBuffer bb : bbs) {\n\t\t\tringPut(bb);\n\t\t}\n\t\tbbs.clear();\n\t\tcurr = null;\n\t}", "function (aRequests) {\n\t\t\taRequests.forEach(function (oRequest) {\n\t\t\t\tif (Array.isArray(oRequest)) {\n\t\t\t\t\t_Requestor.cleanBatch(oRequest);\n\t\t\t\t} else {\n\t\t\t\t\toRequest.body = _Requestor.cleanPayload(oRequest.body);\n\t\t\t\t}\n\t\t\t});\n\t\t\treturn aRequests;\n\t\t}" ]
[ 0.8029473423957825, 0.7866622805595398, 0.7357620596885681, 0.7221269011497498, 0.7220130562782288, 0.7094706892967224, 0.7041050791740417, 0.7006880640983582, 0.6978424191474915, 0.6969825029373169, 0.6950753331184387, 0.6941037774085999 ]
Cancel all remaining payloads
async def _cancel_payloads(self): """Cancel all remaining payloads""" for task in self._tasks: task.cancel() await asyncio.sleep(0) for task in self._tasks: while not task.done(): await asyncio.sleep(0.1) task.cancel()
[ "private void cancelFutures() {\r\n cancelFuture();\r\n for (Future<PartETag> f : futures) {\r\n f.cancel(true);\r\n }\r\n multipartUploadCallable.getFutures().clear();\r\n futures.clear();\r\n }", "public void cancelAll() {\n if (DBG) Log.v(TAG, \"prepare to cancel all\");\n cancelSchedule(ARRIVE);\n schedule(CANCEL_ALL, 0, 0, null, 0);\n }", "def cancel(self, *args, **kwargs):\n \"\"\" Cancel all queue items - then attempt to cancel all in progress items \"\"\"\n self._cancel_called = True\n self.clear_waiting_coordinators(cancel=True)\n super(AsperaTransferCoordinatorController, self).cancel(*args, **kwargs)", "async def cancel_remaining(self):\n '''Cancel all remaining tasks.'''\n self._closed = True\n task_list = list(self._pending)\n for task in task_list:\n task.cancel()\n for task in task_list:\n with suppress(CancelledError):\n await task", "public synchronized void cancelAll() {\n\t\tif (activeRequests.size() > 0) {\n\t\t\tList<HttpRequest> temp = new ArrayList<>(activeRequests);\n\t\t\tfor (HttpRequest request : temp) {\n\t\t\t\trequest.cancel();\n\t\t\t}\n\t\t}\n\t\tnotifyCancelledAllRequests();\n\t}", "def cancel(self):\n '''Cancel any request.'''\n if self._body:\n self._body._cancel = True\n else:\n self._cancel = True", "def reset_everything(self, payload):\n \"\"\"Kill all processes, delete the queue and clean everything up.\"\"\"\n kill_signal = signals['9']\n self.process_handler.kill_all(kill_signal, True)\n self.process_handler.wait_for_finish()\n self.reset = True\n\n answer = {'message': 'Resetting current queue', 'status': 'success'}\n return answer", "function cancel() {\n timeouts.forEach(function (timeout) {\n clearTimeout(timeout);\n });\n Object.keys(callbacks).forEach(function (key) {\n callbacks[key]('cancel');\n delete callbacks[key];\n });\n timeouts = [];\n }", "private static <T> void cancelAll(ArrayList<Future<T>> futures, int j) {\n for (int size = futures.size(); j < size; j++)\n futures.get(j).cancel(true);\n }", "async def _reap_payloads(self):\n \"\"\"Clean up all finished payloads\"\"\"\n for task in self._tasks.copy():\n if task.done():\n self._tasks.remove(task)\n if task.exception() is not None:\n raise task.exception()\n await asyncio.sleep(0)", "@Override\n\tpublic void onFinishSending(PayloadSender sender, PayloadData payload, boolean cancelled, String errorMessage, int responseCode, JSONObject responseData) {\n\t\tApptentiveNotificationCenter.defaultCenter()\n\t\t\t.postNotification(NOTIFICATION_PAYLOAD_DID_FINISH_SEND,\n\t\t\t\tNOTIFICATION_KEY_PAYLOAD, payload,\n\t\t\t\tNOTIFICATION_KEY_SUCCESSFUL, errorMessage == null && !cancelled ? 
TRUE : FALSE,\n\t\t\t\tNOTIFICATION_KEY_RESPONSE_CODE, responseCode,\n\t\t\t\tNOTIFICATION_KEY_RESPONSE_DATA, responseData);\n\n\t\tif (cancelled) {\n\t\t\tApptentiveLog.v(PAYLOADS, \"Payload sending was cancelled: %s\", payload);\n\t\t\treturn; // don't remove cancelled payloads from the queue\n\t\t}\n\n\t\tif (errorMessage != null) {\n\t\t\tApptentiveLog.e(PAYLOADS, \"Payload sending failed: %s\\n%s\", payload, errorMessage);\n\t\t\tif (appInBackground) {\n\t\t\t\tApptentiveLog.v(PAYLOADS, \"The app went to the background so we won't remove the payload from the queue\");\n\t\t\t\tretrySending(5000);\n\t\t\t\treturn;\n\t\t\t} else if (responseCode == -1) {\n\t\t\t\tApptentiveLog.v(PAYLOADS, \"Payload failed to send due to a connection error.\");\n\t\t\t\tretrySending(5000);\n\t\t\t\treturn;\n\t\t\t} else if (responseCode >= 500) {\n\t\t\t\tApptentiveLog.v(PAYLOADS, \"Payload failed to send due to a server error.\");\n\t\t\t\tretrySending(5000);\n\t\t\t\treturn;\n\t\t\t}\n\t\t} else {\n\t\t\tApptentiveLog.v(PAYLOADS, \"Payload was successfully sent: %s\", payload);\n\t\t}\n\n\t\t// Only let the payload be deleted if it was successfully sent, or got an unrecoverable client error.\n\t\tdeletePayload(payload.getNonce());\n\t}", "def _cancel_send_messages(self, d):\n \"\"\"Cancel a `send_messages` request\n First check if the request is in a waiting batch, of so, great, remove\n it from the batch. If it's not found, we errback() the deferred and\n the downstream processing steps take care of aborting further\n processing.\n We check if there's a current _batch_send_d to determine where in the\n chain we were (getting partitions, or already sent request to Kafka)\n and errback differently.\n \"\"\"\n # Is the request in question in an unsent batch?\n for req in self._batch_reqs:\n if req.deferred == d:\n # Found the request, remove it and return.\n msgs = req.messages\n self._waitingMsgCount -= len(msgs)\n for m in (_m for _m in msgs if _m is not None):\n self._waitingByteCount -= len(m)\n # This _should_ be safe as we abort the iteration upon removal\n self._batch_reqs.remove(req)\n d.errback(CancelledError(request_sent=False))\n return\n\n # If it wasn't found in the unsent batch. We just rely on the\n # downstream processing of the request to check if the deferred\n # has been called and skip further processing for this request\n # Errback the deferred with whether or not we sent the request\n # to Kafka already\n d.errback(\n CancelledError(request_sent=(self._batch_send_d is not None)))\n return" ]
[ 0.7676242589950562, 0.7511706948280334, 0.7398670315742493, 0.734765350818634, 0.7277913689613342, 0.7266598343849182, 0.7244936227798462, 0.7222749590873718, 0.7204983830451965, 0.7172811627388, 0.7171241641044617, 0.7121745944023132 ]
Check a plaintext password against a hashed password.
def check_password(password: str, encrypted: str) -> bool: """ Check a plaintext password against a hashed password. """ # some old passwords have {crypt} in lower case, and passlib wants it to be # in upper case. if encrypted.startswith("{crypt}"): encrypted = "{CRYPT}" + encrypted[7:] return pwd_context.verify(password, encrypted)
[ "def check_password(self, hashed_password, plain_password):\n \"\"\"Encode the plain_password with the salt of the hashed_password.\n\n Return the comparison with the encrypted_password.\n \"\"\"\n salt, encrypted_password = hashed_password.split('$')\n re_encrypted_password = self.get_hash(salt, plain_password)\n return encrypted_password == re_encrypted_password", "def is_password_valid(plaintextpw: str, storedhash: str) -> bool:\n \"\"\"\n Checks if a plaintext password matches a stored hash.\n\n Uses ``bcrypt``. The stored hash includes its own incorporated salt.\n \"\"\"\n # Upon CamCOPS from MySQL 5.5.34 (Ubuntu) to 5.1.71 (CentOS 6.5), the\n # VARCHAR was retrieved as Unicode. We needed to convert that to a str.\n # For Python 3 compatibility, we just str-convert everything, avoiding the\n # unicode keyword, which no longer exists.\n if storedhash is None:\n storedhash = \"\"\n storedhash = str(storedhash)\n if plaintextpw is None:\n plaintextpw = \"\"\n plaintextpw = str(plaintextpw)\n try:\n h = bcrypt.hashpw(plaintextpw, storedhash)\n except ValueError: # e.g. ValueError: invalid salt\n return False\n return h == storedhash", "def _verify_password(self, raw_password, hashed_password):\n \"\"\"\n Verifies that a plaintext password matches the hashed version of that\n password using the stored passlib password context\n \"\"\"\n PraetorianError.require_condition(\n self.pwd_ctx is not None,\n \"Praetorian must be initialized before this method is available\",\n )\n return self.pwd_ctx.verify(raw_password, hashed_password)", "public static boolean checkpw(String plaintext, String hashed) {\n return (hashed.compareTo(hashpw(plaintext, hashed)) == 0);\n }", "public static boolean checkPassword(String plaintext, String storedHash) {\n boolean password_verified;\n if (null == storedHash || !storedHash.startsWith(\"$2a$\"))\n throw new IllegalArgumentException(\"Invalid hash provided for comparison\");\n password_verified = BCrypt.checkpw(plaintext, storedHash);\n return (password_verified);\n }", "def compare(crypted_string, plain_string)\n BCrypt::Password.new(crypted_string).is_password?(plain_string)\n rescue BCrypt::Errors::InvalidHash\n false\n end", "def verify_password(self, password, password_hash):\n \"\"\"Verify plaintext ``password`` against ``hashed password``.\n\n Args:\n password(str): Plaintext password that the user types in.\n password_hash(str): Password hash generated by a previous call to ``hash_password()``.\n Returns:\n | True when ``password`` matches ``password_hash``.\n | False otherwise.\n Example:\n\n ::\n\n if verify_password('mypassword', user.password):\n login_user(user)\n \"\"\"\n\n # Print deprecation warning if called with (password, user) instead of (password, user.password)\n if isinstance(password_hash, self.user_manager.db_manager.UserClass):\n print(\n 'Deprecation warning: verify_password(password, user) has been changed'\\\n ' to: verify_password(password, password_hash). 
The user param will be deprecated.'\\\n ' Please change your call with verify_password(password, user) into'\\\n ' a call with verify_password(password, user.password)'\n ' as soon as possible.')\n password_hash = password_hash.password # effectively user.password\n\n # Use passlib's CryptContext to verify a password\n return self.password_crypt_context.verify(password, password_hash)", "function hash_password(plain_text, salt, callback) {\n // Get the crypto library.\n const crypto = require(\"crypto\")\n\n // These should be a *slow* as possible, higher = slower.\n // Slow it down until you tweak a bounce change.\n const password_iterations = process.env.PW_GEN_PW_ITERS || 4096\n\n // Password length and algorithm.\n const password_length = process.env.PW_GEN_PW_LENGTH || 512\n const password_algorithm = process.env.PW_GEN_PW_ALG || \"sha256\"\n\n // Create a hash, we're going to encrypt the password.\n // I wish Node had native support for good KDF functions\n // like bcrypt or scrypt but PBKDF2 is good for now.\n crypto.pbkdf2(plain_text, salt, password_iterations, password_length, password_algorithm, (err, key) => {\n // Move on.\n callback(key.toString(\"hex\"), salt)\n })\n}", "def hash_password(password):\n \"\"\"Hash the specified plaintext password.\n\n It uses the configured hashing options.\n\n .. versionadded:: 2.0.2\n\n :param password: The plaintext password to hash\n \"\"\"\n if use_double_hash():\n password = get_hmac(password).decode('ascii')\n\n return _pwd_context.hash(\n password,\n **config_value('PASSWORD_HASH_OPTIONS', default={}).get(\n _security.password_hash, {})\n )", "def check(text):\n \"\"\"Check the text.\"\"\"\n err = \"security.password\"\n msg = u\"Don't put passwords in plain text.\"\n\n pwd_regex = \"[:]? [\\S]{6,30}\"\n\n password = [\n \"the password is{}\".format(pwd_regex),\n \"my password is{}\".format(pwd_regex),\n \"the password's{}\".format(pwd_regex),\n \"my password's{}\".format(pwd_regex),\n \"^[pP]assword{}\".format(pwd_regex),\n ]\n\n return existence_check(text, password, err, msg)", "def hash_password(plain_text):\n \"\"\"Hash a plain text password\"\"\"\n # NOTE: despite the name this is a one-way hash not a reversible cypher\n hashed = pbkdf2_sha256.encrypt(plain_text, rounds=8000, salt_size=10)\n return unicode(hashed)", "def hash_password(plaintextpw: str,\n log_rounds: int = BCRYPT_DEFAULT_LOG_ROUNDS) -> str:\n \"\"\"\n Makes a hashed password (using a new salt) using ``bcrypt``.\n\n The hashed password includes the salt at its start, so no need to store a\n separate salt.\n \"\"\"\n salt = bcrypt.gensalt(log_rounds) # optional parameter governs complexity\n hashedpw = bcrypt.hashpw(plaintextpw, salt)\n return hashedpw" ]
[ 0.8291040658950806, 0.8201819062232971, 0.8061956167221069, 0.7911700010299683, 0.7780253291130066, 0.771220326423645, 0.7709529995918274, 0.7700600028038025, 0.7697321772575378, 0.7653699517250061, 0.760883092880249, 0.7587486505508423 ]
Check if version of repository is semantic
def validate(ctx, sandbox): """Check if version of repository is semantic """ m = RepoManager(ctx.obj['agile']) if not sandbox or m.can_release('sandbox'): click.echo(m.validate_version())
[ "def isValidSemver(version):\n \"\"\"Semantic version number - determines whether the version is qualified. The format is MAJOR.Minor.PATCH, more with https://semver.org/\"\"\"\n if version and isinstance(version, string_types):\n try:\n semver.parse(version)\n except (TypeError,ValueError):\n return False\n else:\n return True\n return False", "def validate_version(self, prefix='v'):\n \"\"\"Validate version by checking if it is a valid semantic version\n and its value is higher than latest github tag\n \"\"\"\n version = self.software_version()\n repo = self.github_repo()\n repo.releases.validate_tag(version, prefix)\n return version", "def semantic_version(tag):\n \"\"\"Get a valid semantic version for tag\n \"\"\"\n try:\n version = list(map(int, tag.split('.')))\n assert len(version) == 3\n return tuple(version)\n except Exception as exc:\n raise CommandError(\n 'Could not parse \"%s\", please use '\n 'MAJOR.MINOR.PATCH' % tag\n ) from exc", "def semver_newer(v1, v2):\n \"\"\"\n Verify (as semantic versions) if v1 < v2\n Patch versions can be different\n \"\"\"\n v1_parts = v1.split('.')\n v2_parts = v2.split('.')\n if len(v1_parts) < 3 or len(v2_parts) < 3:\n # one isn't a semantic version\n return False\n\n v1_major, v1_minor, v1_patch = get_version_parts(v1_parts, int)\n v2_major, v2_minor, v2_patch = get_version_parts(v2_parts, int)\n\n if v1_major > v2_major:\n return False\n\n if v1_major == v2_major and v1_minor >= v2_minor:\n return False\n\n return True", "def check(self, version):\n \"\"\"Check that a version is inside this SemanticVersionRange\n\n Args:\n version (SemanticVersion): The version to check\n\n Returns:\n bool: True if the version is included in the range, False if not\n \"\"\"\n\n for disjunct in self._disjuncts:\n if self._check_insersection(version, disjunct):\n return True\n\n return False", "def validate_current_versions(self): # type: () -> bool\n \"\"\"\n Can a version be found? Are all versions currently the same? 
Are they valid sem ver?\n :return:\n \"\"\"\n versions = self.all_current_versions()\n for _, version in versions.items():\n if \"Invalid Semantic Version\" in version:\n logger.error(\n \"Invalid versions, can't compare them, can't determine if in sync\"\n )\n return False\n\n if not versions:\n logger.warning(\"Found no versions, will use default 0.1.0\")\n return True\n\n if not self.all_versions_equal(versions):\n if self.almost_the_same_version([x for x in versions.values()]):\n # TODO: disable with strict option\n logger.warning(\"Version very by a patch level, will use greater.\")\n return True\n logger.error(\"Found various versions, how can we rationally pick?\")\n logger.error(unicode(versions))\n return False\n\n for _ in versions:\n return True\n return False", "def validate_version(version)\n return if SemanticPuppet::Version.valid?(version)\n\n err = _(\"version string cannot be parsed as a valid Semantic Version\")\n raise ArgumentError, _(\"Invalid 'version' field in metadata.json: %{err}\") % { err: err }\n end", "def _suggest_semantic_version(s):\n \"\"\"\n Try to suggest a semantic form for a version for which\n _suggest_normalized_version couldn't come up with anything.\n \"\"\"\n result = s.strip().lower()\n for pat, repl in _REPLACEMENTS:\n result = pat.sub(repl, result)\n if not result:\n result = '0.0.0'\n\n # Now look for numeric prefix, and separate it out from\n # the rest.\n #import pdb; pdb.set_trace()\n m = _NUMERIC_PREFIX.match(result)\n if not m:\n prefix = '0.0.0'\n suffix = result\n else:\n prefix = m.groups()[0].split('.')\n prefix = [int(i) for i in prefix]\n while len(prefix) < 3:\n prefix.append(0)\n if len(prefix) == 3:\n suffix = result[m.end():]\n else:\n suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():]\n prefix = prefix[:3]\n prefix = '.'.join([str(i) for i in prefix])\n suffix = suffix.strip()\n if suffix:\n #import pdb; pdb.set_trace()\n # massage the suffix.\n for pat, repl in _SUFFIX_REPLACEMENTS:\n suffix = pat.sub(repl, suffix)\n\n if not suffix:\n result = prefix\n else:\n sep = '-' if 'dev' in suffix else '+'\n result = prefix + sep + suffix\n if not is_semver(result):\n result = None\n return result", "def version(**kwargs):\n \"\"\"\n Detects the new version according to git log and semver. 
Writes the new version\n number and commits it, unless the noop-option is True.\n \"\"\"\n retry = kwargs.get(\"retry\")\n if retry:\n click.echo('Retrying publication of the same version...')\n else:\n click.echo('Creating new version..')\n\n try:\n current_version = get_current_version()\n except GitError as e:\n click.echo(click.style(str(e), 'red'), err=True)\n return False\n\n click.echo('Current version: {0}'.format(current_version))\n level_bump = evaluate_version_bump(current_version, kwargs['force_level'])\n new_version = get_new_version(current_version, level_bump)\n\n if new_version == current_version and not retry:\n click.echo(click.style('No release will be made.', fg='yellow'))\n return False\n\n if kwargs['noop'] is True:\n click.echo('{0} Should have bumped from {1} to {2}.'.format(\n click.style('No operation mode.', fg='yellow'),\n current_version,\n new_version\n ))\n return False\n\n if config.getboolean('semantic_release', 'check_build_status'):\n click.echo('Checking build status..')\n owner, name = get_repository_owner_and_name()\n if not check_build_status(owner, name, get_current_head_hash()):\n click.echo(click.style('The build has failed', 'red'))\n return False\n click.echo(click.style('The build was a success, continuing the release', 'green'))\n\n if retry:\n # No need to make changes to the repo, we're just retrying.\n return True\n\n if config.get('semantic_release', 'version_source') == 'commit':\n set_new_version(new_version)\n commit_new_version(new_version)\n tag_new_version(new_version)\n click.echo('Bumping with a {0} version to {1}.'.format(level_bump, new_version))\n return True", "function isScm (version) {\n var scmPrefixes = ['git:', 'git+ssh:', 'https:', 'git+https:']\n var blacklisted = scmPrefixes.filter(function (prefix) {\n return version.indexOf(prefix) === 0\n })\n return !!blacklisted.length\n}", "def isvalid(self):\n \"\"\" Checks whether contents of repo are consistent with standard set. \"\"\"\n\n gcontents = [gf.rstrip('\\n') for gf in self.repo.bake('ls-files')()]\n fcontents = os.listdir(self.repopath)\n return all([sf in gcontents for sf in std_files]) and all([sf in fcontents for sf in std_files])", "function semanticVersionComparator (v1, v2) {\n const [major1, minor1, patch1] = v1.split('.').map(Number)\n const [major2, minor2, patch2] = v2.split('.').map(Number)\n if (major1 > major2) {\n return 1\n } else if (major1 === major2) {\n if (minor1 > minor2) {\n return 1\n } else if (minor1 === minor2) {\n if (patch1 > patch2) {\n return 1\n } else if (patch1 === patch2) {\n return 0\n } else {\n return -1\n }\n } else {\n return -1\n }\n } else {\n return -1\n }\n}" ]
[ 0.7900662422180176, 0.7560533881187439, 0.7444041967391968, 0.734630823135376, 0.7325699925422668, 0.7256373763084412, 0.7142989039421082, 0.7010870575904846, 0.700447142124176, 0.6989374756813049, 0.6981353759765625, 0.6979355812072754 ]
Reset transaction back to original state, discarding all uncompleted transactions.
def reset(self, force_flush_cache: bool = False) -> None: """ Reset transaction back to original state, discarding all uncompleted transactions. """ super(LDAPwrapper, self).reset() if len(self._transactions) == 0: raise RuntimeError("reset called outside a transaction.") self._transactions[-1] = []
[ "protected function resetTransactionNesting()\n {\n // Check whether to use a connector's built-in transaction methods\n if ($this->connector instanceof TransactionalDBConnector) {\n if ($this->transactionNesting > 0) {\n $this->connector->transactionRollback();\n }\n }\n $this->transactionNesting = 0;\n }", "def reset(self):\n \"\"\"\n Clear the values of all attributes of the transaction store.\n \"\"\"\n self.getsCounter = 0\n\n # dictionary of processed requests for each client. Value for each\n # client is a dictionary with request id as key and transaction id as\n # value\n self.processedRequests = {} # type: Dict[str, Dict[int, str]]\n\n # dictionary of responses to be sent for each client. Value for each\n # client is an asyncio Queue\n self.responses = {} # type: Dict[str, asyncio.Queue]\n\n # dictionary with key as transaction id and `Reply` as\n # value\n self.transactions = {}", "def rollback(self):\n \"\"\"Roll back the current transaction.\n\n Discard all statements executed since the transaction was begun.\n \"\"\"\n if hasattr(self.local, 'tx') and self.local.tx:\n tx = self.local.tx.pop()\n tx.rollback()\n self._flush_tables()", "def UndoTransaction(self):\n\t\t\"\"\"\tCancels any running transaction. \"\"\"\n\t\tfrom Ucs import ConfigMap\n\n\t\tself._transactionInProgress = False\n\t\tself._configMap = ConfigMap()", "private void resetAndCloseConnection() {\r\n if (conn == null) {\r\n return;\r\n }\r\n\r\n try {\r\n conn.setAutoCommit(originalAutoCommit);\r\n conn.setTransactionIsolation(originalIsolationLevel);\r\n } catch (SQLException e) {\r\n logger.error(\"Failed to reset connection in transaction(id=\" + id + \")\", e);\r\n } finally {\r\n JdbcUtil.closeQuietly(conn);\r\n conn = null;\r\n }\r\n }", "protected void transactionComplete() {\n\n if (state != STATE_TRAN_WRAPPER_INUSE) {\n IllegalStateException e = new IllegalStateException(\"transactionComplete: illegal state exception. State = \" + getStateString() + \" MCW = \"\n + mcWrapperObject_hexString);\n Object[] parms = new Object[] { \"transactionComplete\", e };\n Tr.error(tc, \"ILLEGAL_STATE_EXCEPTION_J2CA0079\", parms);\n throw e;\n }\n\n state = STATE_ACTIVE_INUSE;\n }", "public void finish() {\n\t\tif (mSuccessful) {\n\t\t\tmDb.setTransactionSuccessful();\n\t\t}\n\t\tmDb.endTransaction();\n\n\t\tif (mSuccessful) {\n\t\t\tfor (OnTransactionCommittedListener listener : mOnTransactionCommittedListeners) {\n\t\t\t\tlistener.onTransactionCommitted();\n\t\t\t}\n\t\t}\n\t}", "public Surface restoreTx () {\n int tsSize = transformStack.size();\n assert tsSize > 1 : \"Unbalanced save/restore\";\n transformStack.remove(--tsSize);\n lastTrans = transformStack.isEmpty() ? 
null : transformStack.get(tsSize-1);\n return this;\n }", "protected void reset()\n {\n this.active = false;\n synchronized (this.cachedChannelsNonTransactional)\n {\n for (ChannelProxy channel : this.cachedChannelsNonTransactional)\n {\n try\n {\n channel.getTargetChannel().close();\n }\n catch (Throwable ex)\n {\n this.logger.trace(\"Could not close cached Rabbit Channel\", ex);\n }\n }\n this.cachedChannelsNonTransactional.clear();\n }\n synchronized (this.cachedChannelsTransactional)\n {\n for (ChannelProxy channel : this.cachedChannelsTransactional)\n {\n try\n {\n channel.getTargetChannel().close();\n }\n catch (Throwable ex)\n {\n this.logger.trace(\"Could not close cached Rabbit Channel\", ex);\n }\n }\n this.cachedChannelsTransactional.clear();\n }\n this.active = true;\n }", "private void reset()\n {\n int count = 0;\n for (int i = 0; i < tableLength; i++)\n {\n long t = tableAt(i);\n count += Long.bitCount(t & ONE_MASK);\n tableAt(i, (t >>> 1) & RESET_MASK);\n }\n size = (size >>> 1) - (count >>> 2);\n }", "private void resolveIncompleteTransactions() throws SQLException {\n\n switch(transactionState) {\n case COMPLETED:\n //All we know for certain is that at least one commit/rollback was called. Do nothing.\n break;\n case STARTED:\n //At least one statement was created with auto-commit false & no commit/rollback.\n //Follow the default policy.\n if(conn != null && openStatements.size() > 0) {\n switch(incompleteTransactionPolicy) {\n case REPORT:\n throw new SQLException(\"Statement closed with incomplete transaction\", JDBConnection.SQLSTATE_INVALID_TRANSACTION_STATE);\n case COMMIT:\n if(!conn.isClosed()) {\n conn.commit();\n }\n break;\n case ROLLBACK:\n if(!conn.isClosed()) {\n conn.rollback();\n }\n }\n }\n break;\n }\n }", "public void rollback() {\n log.debug(\"Performing rollback\");\n while (!operationExecutors.isEmpty()) {\n CompensatingTransactionOperationExecutor rollbackOperation = operationExecutors.pop();\n try {\n rollbackOperation.rollback();\n } catch (Exception e) {\n throw new TransactionSystemException(\n \"Error occurred during rollback\", e);\n }\n }\n }" ]
[ 0.743079423904419, 0.7369043231010437, 0.7288176417350769, 0.7235282063484192, 0.717767059803009, 0.7141973376274109, 0.7109276056289673, 0.7049008011817932, 0.7026885747909546, 0.7019297480583191, 0.6986315846443176, 0.6958999633789062 ]
Object state is cached. When an update is required the update will be simulated on this cache, so that rollback information can be correct. This function retrieves the cached data.
def _cache_get_for_dn(self, dn: str) -> Dict[str, bytes]: """ Object state is cached. When an update is required the update will be simulated on this cache, so that rollback information can be correct. This function retrieves the cached data. """ # no cached item, retrieve from ldap self._do_with_retry( lambda obj: obj.search( dn, '(objectclass=*)', ldap3.BASE, attributes=['*', '+'])) results = self._obj.response if len(results) < 1: raise NoSuchObject("No results finding current value") if len(results) > 1: raise RuntimeError("Too many results finding current value") return results[0]['raw_attributes']
[ "def cache(self, bank, key, fun, loop_fun=None, **kwargs):\n '''\n Check cache for the data. If it is there, check to see if it needs to\n be refreshed.\n\n If the data is not there, or it needs to be refreshed, then call the\n callback function (``fun``) with any given ``**kwargs``.\n\n In some cases, the callback function returns a list of objects which\n need to be processed by a second function. If that is the case, then\n the second function is passed in as ``loop_fun``. Each item in the\n return list from the first function will be the only argument for the\n second function.\n '''\n expire_seconds = kwargs.get('expire', 86400) # 1 day\n\n updated = self.updated(bank, key)\n update_cache = False\n if updated is None:\n update_cache = True\n else:\n if int(time.time()) - updated > expire_seconds:\n update_cache = True\n\n data = self.fetch(bank, key)\n\n if not data or update_cache is True:\n if loop_fun is not None:\n data = []\n items = fun(**kwargs)\n for item in items:\n data.append(loop_fun(item))\n else:\n data = fun(**kwargs)\n self.store(bank, key, data)\n\n return data", "def _get_dataobject(self, name, multivalued):\n \"\"\"This function only gets called if the decorated property\n doesn't have a value in the cache.\"\"\"\n logger.debug(\"Querying server for uncached data object %s\", name)\n # This will retrieve the value and inject it into the cache\n self.update_view_data(properties=[name])\n return self._cache[name][0]", "def data(self):\n \"\"\"this property just calls ``get_data``\n but here you can serilalize your data or render as html\n these data will be saved to self.cached_content\n also will be accessable from template\n \"\"\"\n if self.is_obsolete():\n self.update_cache(self.get_data())\n return self.cache_data", "def _set_cache_(self, attr):\n \"\"\"Retrieve object information\"\"\"\n if attr == \"size\":\n oinfo = self.repo.odb.info(self.binsha)\n self.size = oinfo.size\n # assert oinfo.type == self.type, _assertion_msg_format % (self.binsha, oinfo.type, self.type)\n else:\n super(Object, self)._set_cache_(attr)", "def partial_update(self, request, *args, **kwargs):\n \"\"\" We do not include the mixin as we want only PATCH and no PUT \"\"\"\n instance = self.get_object()\n serializer = self.get_serializer(instance, data=request.data, partial=True, context=self.get_serializer_context())\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n if getattr(instance, '_prefetched_objects_cache', None): #pragma: no cover\n instance = self.get_object()\n serializer = self.get_serializer(instance)\n\n return response.Response(serializer.data)", "def get(self, cls, id_field, id_val):\n \"\"\"\n Retrieve an object which `id_field` matches `id_val`. If it exists in\n the cache, it will be fetched from Redis. If not, it will be fetched\n via the `fetch` method and cached in Redis (unless the cache flag got\n invalidated in the meantime).\n \"\"\"\n cache_key, flag_key = self.get_keys(cls, id_field, id_val)\n\n result = self.get_cached_or_set_flag(keys=(cache_key, flag_key))\n\n # in Lua, arrays cannot hold nil values, so e.g. if [1, nil] is returned,\n # we'll only get [1] here. 
That's why we need to append None ourselves.\n if len(result) == 1:\n result.append(None)\n\n previous_flag, cached_data = result\n\n # if cached data was found, deserialize and return it\n if cached_data is not None:\n deserialized = self.deserialize(cls, cached_data)\n\n # verify that the cached object matches our expectations\n # if not, return from the persistant storage instead.\n if self.verify(cls, id_field, id_val, deserialized):\n return deserialized\n else:\n # invalidate the cache if it didn't pass verification\n self.invalidate(cls, id_field, id_val)\n\n obj = self.fetch(cls, id_field, id_val)\n\n # If the flag wasn't previously set, then we set it and we're responsible\n # for putting the item in the cache. Do this unless the cache got\n # invalidated and the flag was removed.\n if not previous_flag:\n obj_serialized = self.serialize(obj)\n self.cache(keys=(cache_key, flag_key), args=(obj_serialized,))\n\n return obj", "function get(key) {\n var state = getInternalState(this);\n if (isObject(key)) {\n var data = getWeakData(key);\n if (data === true) return uncaughtFrozenStore(state).get(key);\n return data ? data[state.id] : undefined;\n }\n }", "def state(cls, obj, state=None):\n \"\"\"\n Method to capture and restore option state. When called\n without any state supplied, the current state is\n returned. Then if this state is supplied back in a later call\n using the same object, the original state is restored.\n \"\"\"\n if state is None:\n ids = cls.capture_ids(obj)\n original_custom_keys = set(Store.custom_options().keys())\n return (ids, original_custom_keys)\n else:\n (ids, original_custom_keys) = state\n current_custom_keys = set(Store.custom_options().keys())\n for key in current_custom_keys.difference(original_custom_keys):\n del Store.custom_options()[key]\n cls.restore_ids(obj, ids)", "def flush_cache(self):\n '''\n Use a cache to save state changes to avoid opening a session for every change.\n The cache will be flushed at the end of the simulation, and when history is accessed.\n '''\n logger.debug('Flushing cache {}'.format(self.db_path))\n with self.db:\n for rec in self._tups:\n self.db.execute(\"replace into history(agent_id, t_step, key, value) values (?, ?, ?, ?)\", (rec.agent_id, rec.t_step, rec.key, rec.value))\n self._tups = list()", "def _get_cached(self):\n \"\"\"Gets a list of statements that the operation will affect during the real\n time update.\"\"\"\n lines = self.context.cachedstr[self.icached[0]:self.icached[1]]\n return self._get_statements(lines, self.icached[0])", "async def update(self):\n '''\n reload all cached information\n\n |coro|\n\n Notes\n -----\n This is a slow process, and will remove the cache before updating.\n Thus it is recomended to use the `*_force` properties, which will\n only update the cache after data is retrived.\n '''\n keys = self.extras.keys()\n self.extras = {}\n for key in keys:\n try:\n func = getattr(self, key, None)\n if callable(func):\n func()\n except:\n pass", "function AbstractCache(state, name, options) {\n this.state = state\n this.name = name\n this.options = options\n this._update = this._update.bind(this)\n\n this._setLock = false\n this._setCallbacks = []\n\n this._needsRefresh = false\n this.state.on('refreshCache', function (needsRefresh) {\n this._needsRefresh = needsRefresh\n }.bind(this))\n\n\n // Build our own `run` instance-method to accurately reflect the fact that\n // `run` is always asynchronous and requires an argument to declare itself\n // as such to our API.\n var protoRun = 
this.run\n this.run = function(done) {\n if (this.options.run) {\n return this.options.run.apply(this, arguments)\n } else {\n return protoRun.apply(this, arguments)\n }\n }.bind(this)\n}" ]
[ 0.7165476679801941, 0.6867960691452026, 0.681489884853363, 0.6709518432617188, 0.670445442199707, 0.6700876951217651, 0.6654559969902039, 0.6644994020462036, 0.664233386516571, 0.6624510288238525, 0.6611480712890625, 0.6602258682250977 ]
Are there uncommitted changes?
def is_dirty(self) -> bool: """ Are there uncommitted changes? """ if len(self._transactions) == 0: raise RuntimeError("is_dirty called outside a transaction.") if len(self._transactions[-1]) > 0: return True return False
[ "def is_changed():\n \"\"\" Checks if current project has any noncommited changes. \"\"\"\n executed, changed_lines = execute_git('status --porcelain', output=False)\n merge_not_finished = mod_path.exists('.git/MERGE_HEAD')\n return changed_lines.strip() or merge_not_finished", "public function hasUnpersistedChanges()\n {\n $unitOfWork = $this->entityManager->getUnitOfWork();\n $unitOfWork->computeChangeSets();\n\n if ($unitOfWork->getScheduledEntityInsertions() !== []\n || $unitOfWork->getScheduledEntityUpdates() !== []\n || $unitOfWork->getScheduledEntityDeletions() !== []\n || $unitOfWork->getScheduledCollectionDeletions() !== []\n || $unitOfWork->getScheduledCollectionUpdates() !== []\n ) {\n return true;\n }\n\n return false;\n }", "def is_dirty(self, untracked=False) -> bool:\n \"\"\"\n Checks if the current repository contains uncommitted or untracked changes\n\n Returns: true if the repository is clean\n \"\"\"\n result = False\n if not self.index_is_empty():\n LOGGER.error('index is not empty')\n result = True\n changed_files = self.changed_files()\n if bool(changed_files):\n\n LOGGER.error(f'Repo has %s modified files: %s', len(changed_files), changed_files)\n result = True\n if untracked:\n result = result or bool(self.untracked_files())\n return result", "private function hasUncommittedEvents()\n {\n $reflector = new \\ReflectionClass(EventSourcedAggregateRoot::class);\n $property = $reflector->getProperty('uncommittedEvents');\n\n $property->setAccessible(true);\n $uncommittedEvents = $property->getValue($this);\n\n return !empty($uncommittedEvents);\n }", "async function changesToRelease () {\n const lastCommitWasRelease = new RegExp(`^Bump v[0-9.]*(-beta[0-9.]*)?(-nightly[0-9.]*)?$`, 'g')\n const lastCommit = await GitProcess.exec(['log', '-n', '1', `--pretty=format:'%s'`], gitDir)\n return !lastCommitWasRelease.test(lastCommit.stdout)\n}", "public function hasChanges() {\n\t\t$this->begin();\n\t\t$lastLine = exec('git status');\n\t\t$this->end();\n\t\treturn (strpos($lastLine, 'nothing to commit')) === false; // FALSE => changes\n\t}", "def get_uncommitted_changes():\n '''\n Retrieve a list of all uncommitted changes on the device.\n Requires PANOS version 8.0.0 or greater.\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' panos.get_uncommitted_changes\n\n '''\n _required_version = '8.0.0'\n if not __proxy__['panos.is_required_version'](_required_version):\n return False, 'The panos device requires version {0} or greater for this command.'.format(_required_version)\n\n query = {'type': 'op',\n 'cmd': '<show><config><list><changes></changes></list></config></show>'}\n\n return __proxy__['panos.call'](query)", "def is_dirty(using=None):\n \"\"\"\n Returns True if the current transaction requires a commit for changes to\n happen.\n \"\"\"\n if using is None:\n dirty = False\n for using in tldap.backend.connections:\n connection = tldap.backend.connections[using]\n if connection.is_dirty():\n dirty = True\n return dirty\n connection = tldap.backend.connections[using]\n return connection.is_dirty()", "public function isEmptyChanges()\n {\n foreach ($this->originals as $name => $orignial) {\n if ($this->hasChange($name)) {\n return false;\n }\n }\n\n return true;\n }", "private boolean executeGitHasUncommitted() throws MojoFailureException,\n CommandLineException {\n boolean uncommited = false;\n\n // 1 if there were differences and 0 means no differences\n\n // git diff --no-ext-diff --ignore-submodules --quiet --exit-code\n final CommandResult diffCommandResult = executeGitCommandExitCode(\n \"diff\", \"--no-ext-diff\", \"--ignore-submodules\", \"--quiet\",\n \"--exit-code\");\n\n String error = null;\n\n if (diffCommandResult.getExitCode() == SUCCESS_EXIT_CODE) {\n // git diff-index --cached --quiet --ignore-submodules HEAD --\n final CommandResult diffIndexCommandResult = executeGitCommandExitCode(\n \"diff-index\", \"--cached\", \"--quiet\", \"--ignore-submodules\",\n \"HEAD\", \"--\");\n if (diffIndexCommandResult.getExitCode() != SUCCESS_EXIT_CODE) {\n error = diffIndexCommandResult.getError();\n uncommited = true;\n }\n } else {\n error = diffCommandResult.getError();\n uncommited = true;\n }\n\n if (StringUtils.isNotBlank(error)) {\n throw new MojoFailureException(error);\n }\n\n return uncommited;\n }", "public boolean isCommitted() {\n // 182383\n if (_headersWritten) {\n\n if (com.ibm.ejs.ras.TraceComponent.isAnyTracingEnabled()&&logger.isLoggable (Level.FINE))\n logger.logp(Level.FINE, CLASS_NAME, \"isCommitted\" , \"headersWritten=true\",\"[\"+this+\"]\");\n\n return true;\n }\n // 182383\n\n // LIBERTY if (_gotWriter || _gotOutputStream) {\n if (_gotOutputStream)\n {\n // LIBERTY if (_responseBuffer == null)\n // LIBERTY return false;\n if (com.ibm.ejs.ras.TraceComponent.isAnyTracingEnabled()&&logger.isLoggable (Level.FINE))\n logger.logp(Level.FINE, CLASS_NAME, \"isCommitted\" , \"responseBuffer isCommitted=\"+_response.isCommitted(),\"[\"+this+\"]\");\n\n // LIBERTY return _responseBuffer.isCommitted();\n return _response.isCommitted();\n }\n if (_gotWriter && _bufferedWriter != null)\n {\n return _bufferedWriter.isCommitted();\n }\n else\n {\n if (com.ibm.ejs.ras.TraceComponent.isAnyTracingEnabled()&&logger.isLoggable (Level.FINE))\n logger.logp(Level.FINE, CLASS_NAME, \"isCommitted\" , \"false\",\"[\"+this+\"]\");\n return false;\n }\n }", "function isClean() {\n var args = [\n 'diff-index',\n '--quiet',\n 'HEAD',\n '.'\n ];\n\n return git(args)\n .then(function() {\n return true;\n })\n .catch(function(err) {\n if (err.code === 1) {\n return false;\n }\n return Promise.reject(err);\n });\n }" ]
[ 0.7411776781082153, 0.7345345616340637, 0.7306894063949585, 0.722993791103363, 0.7170324921607971, 0.7102890610694885, 0.7102832794189453, 0.7063295245170593, 0.7046818733215332, 0.7036717534065247, 0.7031397819519043, 0.7016614079475403 ]
End a transaction. Must not be dirty when doing so. ie. commit() or rollback() must be called if changes made. If dirty, changes will be discarded.
def leave_transaction_management(self) -> None: """ End a transaction. Must not be dirty when doing so. ie. commit() or rollback() must be called if changes made. If dirty, changes will be discarded. """ if len(self._transactions) == 0: raise RuntimeError("leave_transaction_management called outside transaction") elif len(self._transactions[-1]) > 0: raise RuntimeError("leave_transaction_management called with uncommited rollbacks") else: self._transactions.pop()
[ "def end_transaction\n case @transaction_stack.length\n when 0\n PEROBS.log.fatal 'No ongoing transaction to end'\n when 1\n # All transactions completed successfully. Write all modified objects\n # into the backend storage.\n @transaction_stack.pop.each { |id| @transaction_objects[id]._sync }\n @transaction_objects = ::Hash.new\n else\n # A nested transaction completed successfully. We add the list of\n # modified objects to the list of the enclosing transaction.\n transactions = @transaction_stack.pop\n # Merge the two lists\n @transaction_stack.push(@transaction_stack.pop + transactions)\n # Ensure that each object is only included once in the list.\n @transaction_stack.last.uniq!\n end\n end", "def end(self, sql=None):\n \"\"\"Commit the current transaction.\"\"\"\n self._transaction = False\n try:\n end = self._con.end\n except AttributeError:\n return self._con.query(sql or 'end')\n else:\n if sql:\n return end(sql=sql)\n else:\n return end()", "@Override\n public void end() {\n final EntityManager em = entityManager.get();\n\n // Let's not penalize users for calling end() multiple times.\n if (null == em) {\n return;\n }\n\n final EntityTransaction tx = em.getTransaction();\n\n if(tx.isActive()) {\n LOG.warn(\"There was an active transaction at the end of a request\");\n tx.commit();\n }\n\n em.close();\n entityManager.remove();\n }", "async def close(self):\n \"\"\"Close this transaction.\n\n If this transaction is the base transaction in a begin/commit\n nesting, the transaction will rollback(). Otherwise, the\n method returns.\n\n This is used to cancel a Transaction without affecting the scope of\n an enclosing transaction.\n \"\"\"\n if not self._parent._is_active:\n return\n if self._parent is self:\n await self.rollback()\n else:\n self._is_active = False", "def close(self):\n \"\"\"\n Close this transaction.\n\n If this transaction is the base transaction in a begin/commit\n nesting, the transaction will rollback(). Otherwise, the\n method returns.\n\n This is used to cancel a Transaction without affecting the scope of\n an enclosing transaction.\n \"\"\"\n if not self._connection or not self._parent:\n return\n if not self._parent._is_active:\n # pragma: no cover\n self._connection = None\n # self._parent = None\n return\n if self._parent is self:\n yield from self.rollback()\n else:\n self._is_active = False\n self._connection = None\n self._parent = None", "@Override\r\n public void end(Xid xid, int flags) throws XAException {\r\n if (logger.logDebug()) {\r\n debug(\"ending transaction xid = \" + xid);\r\n }\r\n\r\n // Check preconditions\r\n\r\n if (flags != XAResource.TMSUSPEND && flags != XAResource.TMFAIL\r\n && flags != XAResource.TMSUCCESS) {\r\n throw new CloudSpannerXAException(CloudSpannerXAException.INVALID_FLAGS,\r\n Code.INVALID_ARGUMENT, XAException.XAER_INVAL);\r\n }\r\n\r\n if (xid == null) {\r\n throw new CloudSpannerXAException(CloudSpannerXAException.XID_NOT_NULL, Code.INVALID_ARGUMENT,\r\n XAException.XAER_INVAL);\r\n }\r\n\r\n if (state != STATE_ACTIVE || !currentXid.equals(xid)) {\r\n throw new CloudSpannerXAException(CloudSpannerXAException.END_WITHOUT_START,\r\n Code.FAILED_PRECONDITION, XAException.XAER_PROTO);\r\n }\r\n\r\n // Check implementation deficiency preconditions\r\n if (flags == XAResource.TMSUSPEND) {\r\n throw new CloudSpannerXAException(CloudSpannerXAException.SUSPEND_NOT_IMPLEMENTED,\r\n Code.UNIMPLEMENTED, XAException.XAER_RMERR);\r\n }\r\n\r\n // We ignore TMFAIL. It's just a hint to the RM. 
We could roll back\r\n // immediately\r\n // if TMFAIL was given.\r\n\r\n // All clear. We don't have any real work to do.\r\n state = STATE_ENDED;\r\n }", "public static void endTransaction() {\n ifNotNullThen(localConnection.get(),\n () -> {\n Connection connection = localConnection.get();\n ifThen(connection.isRollbackOnClose(), connection::close);\n localConnection.remove();\n });\n }", "def end_transaction(result = nil)\n return nil unless (transaction = current_transaction)\n\n self.current_transaction = nil\n\n transaction.done result\n\n enqueue.call transaction\n\n transaction\n end", "public final void terminateTransaction() {\n if (trace) {\n log.tracef(\"Terminating transaction. Is read only? %s. Is commit? %s\", readOnly, committed);\n }\n long execTime = timeService.timeDuration(initTime, NANOSECONDS);\n if (readOnly) {\n if (committed) {\n incrementValue(NUM_COMMITTED_RO_TX);\n addValue(RO_TX_SUCCESSFUL_EXECUTION_TIME, execTime);\n } else {\n incrementValue(NUM_ABORTED_RO_TX);\n addValue(RO_TX_ABORTED_EXECUTION_TIME, execTime);\n }\n } else {\n if (committed) {\n incrementValue(NUM_COMMITTED_WR_TX);\n addValue(WR_TX_SUCCESSFUL_EXECUTION_TIME, execTime);\n } else {\n incrementValue(NUM_ABORTED_WR_TX);\n addValue(WR_TX_ABORTED_EXECUTION_TIME, execTime);\n }\n }\n\n terminate();\n }", "void endTransaction(Session session) {\n\n try {\n writeLock.lock();\n\n long timestamp = session.transactionTimestamp;\n\n synchronized (liveTransactionTimestamps) {\n session.isTransaction = false;\n\n int index = liveTransactionTimestamps.indexOf(timestamp);\n\n liveTransactionTimestamps.remove(index);\n }\n\n mergeExpiredTransactions(session);\n } finally {\n writeLock.unlock();\n }\n }", "public void doFinally() {\n if (conn != null) {\n try {\n if ((isolation != Connection.TRANSACTION_NONE)\n && (isolation != origIsolation)) {\n conn.setTransactionIsolation(origIsolation);\n }\n conn.setAutoCommit(true);\n conn.close();\n } catch (SQLException e) {\n // Not much we can do\n }\n }\n conn = null;\n isolation = Connection.TRANSACTION_NONE;\n }", "@Override\n public void end(Xid xid, int flags) throws XAException {\n if (LOGGER.isLoggable(Level.FINEST)) {\n debug(\"ending transaction xid = \" + xid);\n }\n\n // Check preconditions\n\n if (flags != XAResource.TMSUSPEND && flags != XAResource.TMFAIL\n && flags != XAResource.TMSUCCESS) {\n throw new PGXAException(GT.tr(\"Invalid flags {0}\", flags), XAException.XAER_INVAL);\n }\n\n if (xid == null) {\n throw new PGXAException(GT.tr(\"xid must not be null\"), XAException.XAER_INVAL);\n }\n\n if (state != State.ACTIVE || !currentXid.equals(xid)) {\n throw new PGXAException(GT.tr(\"tried to call end without corresponding start call. state={0}, start xid={1}, currentXid={2}, preparedXid={3}\", state, xid, currentXid, preparedXid),\n XAException.XAER_PROTO);\n }\n\n // Check implementation deficiency preconditions\n if (flags == XAResource.TMSUSPEND) {\n throw new PGXAException(GT.tr(\"suspend/resume not implemented\"), XAException.XAER_RMERR);\n }\n\n // We ignore TMFAIL. It's just a hint to the RM. We could roll back immediately\n // if TMFAIL was given.\n\n // All clear. We don't have any real work to do.\n state = State.ENDED;\n }" ]
[ 0.7613028883934021, 0.7468214631080627, 0.7365153431892395, 0.7276989221572876, 0.7214362621307373, 0.7120302319526672, 0.7110478281974792, 0.7089458107948303, 0.7019887566566467, 0.701962411403656, 0.6976029276847839, 0.6961627006530762 ]
Attempt to commit all changes to LDAP database. i.e. forget all rollbacks. However stay inside transaction management.
def commit(self) -> None: """ Attempt to commit all changes to LDAP database. i.e. forget all rollbacks. However stay inside transaction management. """ if len(self._transactions) == 0: raise RuntimeError("commit called outside transaction") # If we have nested transactions, we don't actually commit, but push # rollbacks up to previous transaction. if len(self._transactions) > 1: for on_rollback in reversed(self._transactions[-1]): self._transactions[-2].insert(0, on_rollback) _debug("commit") self.reset()
[ "def commit(using=None):\n \"\"\"\n Does the commit itself and resets the dirty flag.\n \"\"\"\n if using is None:\n for using in tldap.backend.connections:\n connection = tldap.backend.connections[using]\n connection.commit()\n return\n connection = tldap.backend.connections[using]\n connection.commit()", "protected void doCommit(DefaultTransactionStatus status) {\r\n\r\n ContextSourceAndDataSourceTransactionObject actualTransactionObject = (ContextSourceAndDataSourceTransactionObject) status\r\n .getTransaction();\r\n\r\n try {\r\n super.doCommit(new DefaultTransactionStatus(actualTransactionObject\r\n .getDataSourceTransactionObject(), status\r\n .isNewTransaction(), status.isNewSynchronization(), status\r\n .isReadOnly(), status.isDebug(), status\r\n .getSuspendedResources()));\r\n } catch (TransactionException ex) {\r\n if (isRollbackOnCommitFailure()) {\r\n logger.debug(\"Failed to commit db resource, rethrowing\", ex);\r\n // If we are to rollback on commit failure, just rethrow the\r\n // exception - this will cause a rollback to be performed on\r\n // both resources.\r\n throw ex;\r\n } else {\r\n logger\r\n .warn(\"Failed to commit and resource is rollbackOnCommit not set -\"\r\n + \" proceeding to commit ldap resource.\");\r\n }\r\n }\r\n ldapManagerDelegate.doCommit(new DefaultTransactionStatus(\r\n actualTransactionObject.getLdapTransactionObject(), status\r\n .isNewTransaction(), status.isNewSynchronization(),\r\n status.isReadOnly(), status.isDebug(), status\r\n .getSuspendedResources()));\r\n }", "def commit(self):\n \"\"\"Commits the current transaction.\"\"\"\n if self._transaction_nesting_level == 0:\n raise DBALConnectionError.no_active_transaction()\n if self._is_rollback_only:\n raise DBALConnectionError.commit_failed_rollback_only()\n\n self.ensure_connected()\n if self._transaction_nesting_level == 1:\n self._driver.commit()\n elif self._nest_transactions_with_savepoints:\n self.release_savepoint(self._get_nested_transaction_savepoint_name())\n\n self._transaction_nesting_level -= 1\n\n if not self._auto_commit and self._transaction_nesting_level == 0:\n self.begin_transaction()", "def commit(self):\n \"\"\"Commit the current transaction.\n\n Make all statements executed since the transaction was begun permanent.\n \"\"\"\n if hasattr(self.local, 'tx') and self.local.tx:\n tx = self.local.tx.pop()\n tx.commit()\n self._flush_tables()", "public void localCommit()\r\n {\r\n if (log.isDebugEnabled()) log.debug(\"commit was called\");\r\n if (!this.isInLocalTransaction)\r\n {\r\n throw new TransactionNotInProgressException(\"Not in transaction, call begin() before commit()\");\r\n }\r\n try\r\n {\r\n if(!broker.isManaged())\r\n {\r\n if (batchCon != null)\r\n {\r\n batchCon.commit();\r\n }\r\n else if (con != null)\r\n {\r\n con.commit();\r\n }\r\n }\r\n else\r\n {\r\n if(log.isDebugEnabled()) log.debug(\r\n \"Found managed environment setting in PB, will skip Connection.commit() call\");\r\n }\r\n }\r\n catch (SQLException e)\r\n {\r\n log.error(\"Commit on underlying connection failed, try to rollback connection\", e);\r\n this.localRollback();\r\n throw new TransactionAbortedException(\"Commit on connection failed\", e);\r\n }\r\n finally\r\n {\r\n this.isInLocalTransaction = false;\r\n restoreAutoCommitState();\r\n this.releaseConnection();\r\n }\r\n }", "def commit_to(self, db: BaseDB) -> None:\n \"\"\"\n Trying to commit changes when nothing has been written will raise a\n ValidationError\n \"\"\"\n self.logger.debug2('persist storage root to data store')\n if 
self._trie_nodes_batch is None:\n raise ValidationError(\n \"It is invalid to commit an account's storage if it has no pending changes. \"\n \"Always check storage_lookup.has_changed_root before attempting to commit.\"\n )\n self._trie_nodes_batch.commit_to(db, apply_deletes=False)\n self._clear_changed_root()", "public void commit() throws RollbackException,\n HeuristicMixedException,\n HeuristicRollbackException,\n SecurityException,\n IllegalStateException,\n SystemException\n {\n if (status == Status.STATUS_UNKNOWN)\n throw new IllegalStateException(\"Status unknown\");\n\n if (status == Status.STATUS_MARKED_ROLLBACK)\n throw new IllegalStateException(\"Status marked rollback\");\n\n finish(true);\n }", "def commit_transaction(self, log=None):\n \"\"\"Commit a transaction, with *log* as the log entry.\"\"\"\n self.current_transaction['rollback'].pop('log')\n self.current_transaction['rollback'].pop('rollforward')\n self.contents['log'].insert(\n 0, log and log or self.current_transaction['log'])\n self.contents['rollback'] = self.current_transaction['rollback']\n self.contents['rollforward'] = None # We can't roll forward anymore\n self.current_transaction = None\n self._sync_to_disk()", "def CommitAll(close=None):\n \"\"\"\n Commit all transactions according Local.conn\n \"\"\"\n if close:\n warnings.simplefilter('default')\n warnings.warn(\"close parameter will not need at all.\", DeprecationWarning)\n \n for k, v in engine_manager.items():\n session = v.session(create=False)\n if session:\n session.commit()", "public void commit(DBTransaction dbTran) {\r\n // For extreme logging\r\n if (m_logger.isTraceEnabled()) {\r\n dbTran.traceMutations(m_logger);\r\n }\r\n try {\r\n long timestamp = Utils.getTimeMicros();\r\n commitMutations(dbTran, timestamp);\r\n commitDeletes(dbTran, timestamp);\r\n } finally {\r\n dbTran.clear();\r\n }\r\n }", "private void commitTransaction() {\r\n Tenant tenant = Tenant.getTenant(m_tableDef);\r\n DBTransaction dbTran = DBService.instance(tenant).startTransaction();\r\n m_parentTran.applyUpdates(dbTran);\r\n DBService.instance(tenant).commit(dbTran);\r\n m_parentTran.clear();\r\n }", "def commit():\n \"\"\" Commit changes and release the write lock \"\"\"\n\n session_token = request.headers['session_token']\n repository = request.headers['repository']\n\n #===\n current_user = have_authenticated_user(request.environ['REMOTE_ADDR'], repository, session_token)\n if current_user is False: return fail(user_auth_fail_msg)\n\n #===\n repository_path = config['repositories'][repository]['path']\n\n def with_exclusive_lock():\n if not varify_user_lock(repository_path, session_token): return fail(lock_fail_msg)\n\n #===\n data_store = versioned_storage(repository_path)\n if not data_store.have_active_commit(): return fail(no_active_commit_msg)\n\n result = {}\n if request.headers['mode'] == 'commit':\n new_head = data_store.commit(request.headers['commit_message'], current_user['username'])\n result = {'head' : new_head}\n else:\n data_store.rollback()\n\n # Release the user lock\n update_user_lock(repository_path, None)\n return success(result)\n return lock_access(repository_path, with_exclusive_lock)" ]
[ 0.753044068813324, 0.7456525564193726, 0.7398972511291504, 0.7347856760025024, 0.7339320182800293, 0.7309517860412598, 0.7281948924064636, 0.7263627052307129, 0.726161777973175, 0.7256393432617188, 0.7246240973472595, 0.7228472232818604 ]
Roll back to previous database state. However stay inside transaction management.
def rollback(self) -> None: """ Roll back to previous database state. However stay inside transaction management. """ if len(self._transactions) == 0: raise RuntimeError("rollback called outside transaction") _debug("rollback:", self._transactions[-1]) # if something goes wrong here, nothing we can do about it, leave # database as is. try: # for every rollback action ... for on_rollback in self._transactions[-1]: # execute it _debug("--> rolling back", on_rollback) self._do_with_retry(on_rollback) except: # noqa: E722 _debug("--> rollback failed") exc_class, exc, tb = sys.exc_info() raise tldap.exceptions.RollbackError( "FATAL Unrecoverable rollback error: %r" % exc) finally: # reset everything to clean state _debug("--> rollback success") self.reset()
[ "def rollback(self):\n \"\"\"\n Rolls back changes to this database.\n \"\"\"\n with self.native(writeAccess=True) as conn:\n return self._rollback(conn)", "def rollback(self, sql=None):\n \"\"\"Rollback the current transaction.\"\"\"\n self._transaction = False\n try:\n rollback = self._con.rollback\n except AttributeError:\n return self._con.query(sql or 'rollback')\n else:\n if sql:\n return rollback(sql=sql)\n else:\n return rollback()", "def rollback(self):\n \"\"\"\n .. seealso:: :py:meth:`sqlite3.Connection.rollback`\n \"\"\"\n\n try:\n self.check_connection()\n except NullDatabaseConnectionError:\n return\n\n logger.debug(\"rollback: path='{}'\".format(self.database_path))\n\n self.connection.rollback()", "async def rollback(self):\n \"\"\"Roll back this transaction.\"\"\"\n if not self._parent._is_active:\n return\n await self._do_rollback()\n self._is_active = False", "def rollback(self):\n \"\"\"Roll back the current transaction.\n\n Discard all statements executed since the transaction was begun.\n \"\"\"\n if hasattr(self.local, 'tx') and self.local.tx:\n tx = self.local.tx.pop()\n tx.rollback()\n self._flush_tables()", "private function rollback()\n {\n // We only need to rollback if we are in a transaction. Otherwise the resulting\n // error would hide the real problem why rollback was called. We might not be\n // in a transaction when not using the transactional locking behavior or when\n // two callbacks (e.g. destroy and write) are invoked that both fail.\n if ($this->inTransaction) {\n if ('sqlite' === $this->driver) {\n $this->pdo->exec('ROLLBACK');\n } else {\n $this->pdo->rollBack();\n }\n $this->inTransaction = false;\n }\n }", "def rollback(self):\n \"\"\"Cancels any database changes done during the current transaction.\"\"\"\n if self._transaction_nesting_level == 0:\n raise DBALConnectionError.no_active_transaction()\n\n self.ensure_connected()\n if self._transaction_nesting_level == 1:\n self._transaction_nesting_level = 0\n self._driver.rollback()\n self._is_rollback_only = False\n if not self._auto_commit:\n self.begin_transaction()\n elif self._nest_transactions_with_savepoints:\n self.rollback_savepoint(self._get_nested_transaction_savepoint_name())\n self._transaction_nesting_level -= 1\n else:\n self._is_rollback_only = True\n self._transaction_nesting_level -= 1", "def rollback(self):\n \"\"\"Roll back a transaction on the database.\"\"\"\n self._check_state()\n database = self._session._database\n api = database.spanner_api\n metadata = _metadata_with_prefix(database.name)\n api.rollback(self._session.name, self._transaction_id, metadata=metadata)\n self._rolled_back = True\n del self._session._transaction", "def rollback(self):\n \"\"\"\n Roll back transaction which is currently in progress.\n \"\"\"\n try:\n if self._autocommit:\n return\n\n if not self._conn or not self._conn.is_connected():\n return\n\n if not self._conn.tds72_transaction:\n return\n\n self._main_cursor._rollback(cont=True,\n isolation_level=self._isolation_level)\n except socket.error as e:\n if e.errno in (errno.ENETRESET, errno.ECONNRESET, errno.EPIPE):\n return\n self._conn.close()\n raise\n except ClosedConnectionError:\n pass", "void rollback() {\n\t\tif (connection != null) {\n\t\t\ttry {\n\t\t\t\tconnection.rollback();\n\t\t\t} catch (SQLException e) {\n\t\t\t\tthrow new UroborosqlSQLException(e);\n\t\t\t}\n\t\t}\n\t\tclearState();\n\t}", "def rollback(self):\n \"\"\"Rolls back the current transaction.\n\n This method has necessary side-effects:\n\n - Sets the current 
transaction's ID to None.\n \"\"\"\n try:\n # No need to use the response it contains nothing.\n self._client._datastore_api.rollback(self.project, self._id)\n finally:\n super(Transaction, self).rollback()\n # Clear our own ID in case this gets accidentally reused.\n self._id = None", "public void rollback() throws SQLException {\n\n cmdPrologue();\n\n lock.lock();\n try {\n\n if (inTransaction()) {\n executeQuery(\"ROLLBACK\");\n }\n\n } catch (Exception e) {\n /* eat exception */\n } finally {\n lock.unlock();\n }\n }" ]
[ 0.8180977702140808, 0.7944473028182983, 0.7880658507347107, 0.7859667539596558, 0.7858035564422607, 0.7817387580871582, 0.7748211622238159, 0.7712699174880981, 0.7701363563537598, 0.7673389911651611, 0.7662378549575806, 0.7652589678764343 ]
Process action. oncommit is a callback to execute action, onrollback is a callback to execute if the oncommit() has been called and a rollback is required
def _process(self, on_commit: UpdateCallable, on_rollback: UpdateCallable) -> Any: """ Process action. oncommit is a callback to execute action, onrollback is a callback to execute if the oncommit() has been called and a rollback is required """ _debug("---> commiting", on_commit) result = self._do_with_retry(on_commit) if len(self._transactions) > 0: # add statement to rollback log in case something goes wrong self._transactions[-1].insert(0, on_rollback) return result
[ "def process_remove_action(processors, action, argument):\n \"\"\"Process action removals.\"\"\"\n for processor in processors:\n processor(action, argument)\n db.session.commit()", "def process_action(self):\n \"\"\"\n Process the action and update the related object, returns a boolean if a change is made.\n \"\"\"\n if self.publish_version == self.UNPUBLISH_CHOICE:\n actioned = self._unpublish()\n else:\n actioned = self._publish()\n\n # Only log if an action was actually taken\n if actioned:\n self._log_action()\n\n return actioned", "def process_allow_action(processors, action, argument):\n \"\"\"Process allow action.\"\"\"\n for processor in processors:\n processor(action, argument)\n db.session.commit()", "def after_action(action_type, action):\n '''\n If utilizing the :mod:`pecan.hooks` ``TransactionHook``, allows you\n to flag a controller method to perform a callable action after the\n action_type is successfully issued.\n\n :param action: The callable to call after the commit is successfully\n issued. '''\n\n if action_type not in ('commit', 'rollback'):\n raise Exception('action_type (%s) is not valid' % action_type)\n\n def deco(func):\n _cfg(func).setdefault('after_%s' % action_type, []).append(action)\n return func\n return deco", "public void runCommit(boolean forceRollback) throws HeuristicMixedException, HeuristicRollbackException, RollbackException {\n if (trace) {\n log.tracef(\"runCommit(forceRollback=%b) invoked in transaction with Xid=%s\", forceRollback, xid);\n }\n if (forceRollback) {\n markRollbackOnly(new RollbackException(FORCE_ROLLBACK_MESSAGE));\n }\n\n int notifyAfterStatus = 0;\n\n try {\n if (status == Status.STATUS_MARKED_ROLLBACK) {\n notifyAfterStatus = Status.STATUS_ROLLEDBACK;\n rollbackResources();\n } else {\n notifyAfterStatus = Status.STATUS_COMMITTED;\n commitResources();\n }\n } finally {\n notifyAfterCompletion(notifyAfterStatus);\n DummyBaseTransactionManager.setTransaction(null);\n }\n throwRollbackExceptionIfAny(forceRollback);\n }", "public void commit_one_phase() throws RollbackException, HeuristicMixedException, HeuristicHazardException, HeuristicRollbackException, SecurityException, IllegalStateException, SystemException {\n if (tc.isEntryEnabled())\n Tr.entry(tc, \"commit_one_phase\");\n\n // This call is only valid for a single subordinate - treat as a \"superior\"\n _subordinate = false;\n\n try {\n processCommit();\n } finally {\n if (tc.isEntryEnabled())\n Tr.exit(tc, \"commit_one_phase\");\n }\n }", "function processActionGroup(_ref2) {\n\t\t\tvar _ref2$updateSchemaNam = _ref2.updateSchemaName,\n\t\t\t updateSchemaName = _ref2$updateSchemaNam === undefined ? undefined : _ref2$updateSchemaNam,\n\t\t\t _ref2$store = _ref2.store,\n\t\t\t store = _ref2$store === undefined ? _store : _ref2$store,\n\t\t\t _ref2$error = _ref2.error,\n\t\t\t error = _ref2$error === undefined ? {} : _ref2$error,\n\t\t\t _ref2$res = _ref2.res,\n\t\t\t res = _ref2$res === undefined ? {} : _ref2$res,\n\t\t\t _ref2$actionGroup = _ref2.actionGroup,\n\t\t\t actionGroup = _ref2$actionGroup === undefined ? 
{} : _ref2$actionGroup;\n\n\t\t\tif (actionGroup == undefined) return;\n\n\t\t\tvar actionNames = Object.keys(actionGroup);\n\n\t\t\tactionNames.forEach(function (actionName) {\n\n\t\t\t\tvar action = actionGroup[actionName];\n\n\t\t\t\t// TODO: check for required fields: branch, location, operation, value || valueFunction, location || locationFunction\n\t\t\t\t// updateIn, update + updateIn, update\n\n\t\t\t\t// destructure action values used in processing\n\t\t\t\tvar valueFunction = action.valueFunction,\n\t\t\t\t value = action.value,\n\t\t\t\t shouldDispatch = action.shouldDispatch,\n\t\t\t\t uiEventFunction = action.uiEventFunction,\n\t\t\t\t updateFunction = action.updateFunction,\n\t\t\t\t location = action.location,\n\t\t\t\t locationFunction = action.locationFunction,\n\t\t\t\t operation = action.operation;\n\n\t\t\t\t// create action to be processed\n\n\t\t\t\tvar $action = {};\n\n\t\t\t\t// update value\n\t\t\t\t$action.value = valueFunction ? valueFunction({ error: error, res: res, store: store, value: value }) : value;\n\n\t\t\t\t// update location\n\t\t\t\t$action.location = locationFunction ? locationFunction({ error: error, res: res, store: store, value: value }) : location;\n\n\t\t\t\t// add type\n\t\t\t\t$action.type = $action.location[0];\n\n\t\t\t\t// trim first value from location\n\t\t\t\t$action.location = $action.location.slice(1);\n\n\t\t\t\t// add name\n\t\t\t\t$action.name = actionName;\n\n\t\t\t\t// add update function params\n\t\t\t\t$action.updateFunction = updateFunction ? updateFunction.bind(null, { res: res, error: error, store: store, fromJS: _immutable.fromJS, value: value }) : undefined;\n\n\t\t\t\t// add operation\n\t\t\t\tif ($action.updateFunction) {\n\t\t\t\t\t$action.operation = 'updateIn';\n\t\t\t\t} else if (!$action.value) {\n\t\t\t\t\t$action.operation = 'deleteIn';\n\t\t\t\t} else {\n\t\t\t\t\t$action.operation = 'setIn';\n\t\t\t\t}\n\n\t\t\t\t// TODO: add meta information about the updateSchemaCreator\n\n\t\t\t\t// dispatch action depending on fire\n\t\t\t\tif (shouldDispatch == undefined || shouldDispatch({ error: error, res: res, store: store, value: value })) {\n\n\t\t\t\t\t// dispatch the action here\n\t\t\t\t\tstore.dispatch($action);\n\n\t\t\t\t\t// fire ui event\n\t\t\t\t\tif (uiEventFunction) uiEventFunction({ action: action, value: value, res: res, error: error, store: store });\n\t\t\t\t}\n\t\t\t});\n\t\t}", "def process_deny_action(processors, action, argument):\n \"\"\"Process deny action.\"\"\"\n for processor in processors:\n processor(action, argument)\n db.session.commit()", "function(data) {\n\t\t\t\tvar item = data.items;\n\t\t\t\tvar progress = serviceRegistry.getService(\"orion.page.progress\"); //$NON-NLS-0$\n\t\t\t\tvar service = serviceRegistry.getService(\"orion.git.provider\"); //$NON-NLS-0$\n\t\t\t\tvar headLocation = item.Location.replace(item.Name, \"HEAD\"); //$NON-NLS-0$\n\t\t\t\tvar msg = i18nUtil.formatMessage(messages[\"RevertingCommit\"], item.Name);\n\t\t\t\tprogress.progress(service.doRevert(headLocation, item.Name), msg).then(function(jsonData) {\n\t\t\t\t\tvar display = [];\n\t\t\t\t\tif (jsonData.Result === \"OK\") { //$NON-NLS-0$\n\t\t\t\t\t\t// operation succeeded\n\t\t\t\t\t\tdisplay.Severity = \"Ok\"; //$NON-NLS-0$\n\t\t\t\t\t\tdisplay.HTML = false;\n\t\t\t\t\t\tdisplay.Message = jsonData.Result;\n\t\t\t\t\t}\n\t\t\t\t\t// handle special cases\n\t\t\t\t\telse if (jsonData.Result === \"FAILURE\") { //$NON-NLS-0$\n\t\t\t\t\t\tdisplay.Severity = \"Warning\"; //$NON-NLS-0$\n\t\t\t\t\t\tdisplay.HTML = 
true;\n\t\t\t\t\t\tdisplay.Message = \"<span>\" + jsonData.Result + messages[\". Could not revert into active branch\"] + \"</span>\"; //$NON-NLS-1$ //$NON-NLS-0$\n\t\t\t\t\t} \n\t\t\t\t\t// handle other cases\n\t\t\t\t\telse {\n\t\t\t\t\t\tdisplay.Severity = \"Warning\"; //$NON-NLS-0$\n\t\t\t\t\t\tdisplay.HTML = false;\n\t\t\t\t\t\tdisplay.Message = jsonData.Result;\n\t\t\t\t\t}\n\t\t\t\t\tserviceRegistry.getService(\"orion.page.message\").setProgressResult(display); //$NON-NLS-0$\n\t\t\t\t\tdispatchModelEventOn({type: \"modelChanged\", action: \"revert\"}); //$NON-NLS-1$ //$NON-NLS-0$\n\t\t\t\t}, displayErrorOnStatus);\n\n\t\t\t}", "private void stage3CommitProcessing(int state) throws SystemException, RollbackException, HeuristicMixedException, HeuristicHazardException {\n if (tc.isEntryEnabled())\n Tr.entry(tc, \"stage3CommitProcessing\", TransactionState.stateToString(state));\n\n switch (state) {\n case TransactionState.STATE_ROLLING_BACK: // could be retries\n case TransactionState.STATE_ROLLED_BACK:\n //\n // As we rolled back we need to throw a RollbackException\n // unless the rollback was triggered by a SystemException or heuristic\n // in which case we should throw a SystemException or heuristic\n //\n if (_systemExceptionOccured) {\n final SystemException se = new SystemException();\n if (tc.isEntryEnabled())\n Tr.exit(tc, \"stage3CommitProcessing\", se);\n throw se;\n } else if (_heuristicOnPrepare != null) {\n if (tc.isEntryEnabled())\n Tr.exit(tc, \"stage3CommitProcessing\", _heuristicOnPrepare);\n if (_heuristicOnPrepare instanceof HeuristicMixedException)\n throw (HeuristicMixedException) _heuristicOnPrepare;\n\n throw (HeuristicHazardException) _heuristicOnPrepare;\n }\n\n // PK19059 starts here\n final RollbackException rbe;\n if (getOriginalException() == null) {\n rbe = new RollbackException();\n } else {\n rbe = new RollbackException();\n rbe.initCause(getOriginalException());\n }\n // PK19059 ends here\n if (tc.isEntryEnabled())\n Tr.exit(tc, \"stage3CommitProcessing\", rbe);\n throw rbe;\n }\n\n if (tc.isEntryEnabled())\n Tr.exit(tc, \"stage3CommitProcessing\");\n }", "public void addCommitAction(final Runnable action) throws HibernateException\n\t{\n\t\tif (action == null)\n\t\t\treturn; // ignore null actions\n\n\t\taddAction(new BaseSessionEventListener()\n\t\t{\n\t\t\t@Override\n\t\t\tpublic void transactionCompletion(final boolean successful)\n\t\t\t{\n\t\t\t\tif (successful)\n\t\t\t\t\taction.run();\n\t\t\t}\n\t\t});\n\t}", "def run_process(self, process):\n \"\"\"Runs a single action.\"\"\"\n message = u'#{bright}'\n message += u'{} '.format(str(process)[:68]).ljust(69, '.')\n\n stashed = False\n if self.unstaged_changes and not self.include_unstaged_changes:\n out, err, code = self.git.stash(keep_index=True, quiet=True)\n stashed = code == 0\n\n try:\n result = process(files=self.files, cwd=self.cwd, fix=self.fix)\n\n # Check for modified files\n out, err, code = self.git.status(porcelain=True, untracked_files='no')\n for line in out.splitlines():\n file_status = Status(line)\n\n # Make sure the file is one of the files that was processed\n if file_status.path in self.files and file_status.is_modified:\n mtime = os.path.getmtime(file_status.path) if os.path.exists(file_status.path) else 0\n if mtime > self.file_mtimes.get(file_status.path, 0):\n self.file_mtimes[file_status.path] = mtime\n result.add_modified_file(file_status.path)\n if self.stage_modified_files:\n self.git.add(file_status.path)\n\n except: # noqa: E722\n raise\n finally:\n if stashed:\n 
self.git.reset(hard=True, quiet=True)\n self.git.stash.pop(index=True, quiet=True)\n\n if result.is_success:\n message += u' #{green}[SUCCESS]'\n elif result.is_failure:\n message += u' #{red}[FAILURE]'\n elif result.is_skip:\n message += u' #{cyan}[SKIPPED]'\n elif result.is_error:\n message += u' #{red}[ERROR!!]'\n\n return result, message" ]
[ 0.7157956957817078, 0.7037403583526611, 0.6952687501907349, 0.6922255754470825, 0.6873927712440491, 0.6869584321975708, 0.6860842704772949, 0.6845464110374451, 0.674855649471283, 0.674591064453125, 0.6743784546852112, 0.6739253401756287 ]
Add a DN to the LDAP database; See ldap module. Doesn't return a result if transactions enabled.
def add(self, dn: str, mod_list: dict) -> None: """ Add a DN to the LDAP database; See ldap module. Doesn't return a result if transactions enabled. """ _debug("add", self, dn, mod_list) # if rollback of add required, delete it def on_commit(obj): obj.add(dn, None, mod_list) def on_rollback(obj): obj.delete(dn) # process this action return self._process(on_commit, on_rollback)
[ "def add(self, dn: str, mod_list: dict) -> None:\n \"\"\"\n Add a DN to the LDAP database; See ldap module. Doesn't return a result\n if transactions enabled.\n \"\"\"\n\n return self._do_with_retry(lambda obj: obj.add_s(dn, mod_list))", "def modify(self, dn: str, mod_list: dict) -> None:\n \"\"\"\n Modify a DN in the LDAP database; See ldap module. Doesn't return a\n result if transactions enabled.\n \"\"\"\n\n return self._do_with_retry(lambda obj: obj.modify_s(dn, mod_list))", "def set_dn(self,dn):\n \"\"\"\n Use the domain column in the process table to store the DN\n\n dn = dn to be added\n \"\"\"\n try:\n domain_col = self.table['process']['orderedcol'].index('domain')\n for row_idx in range(len(self.table['process']['stream'])):\n row_list = list(self.table['process']['stream'][row_idx])\n row_list[domain_col] = dn\n self.table['process']['stream'][row_idx] = tuple(row_list)\n except ValueError:\n self.table['process']['column']['domain'] = 'lstring'\n self.table['process']['orderedcol'].append('domain')\n for row_idx in range(len(self.table['process']['stream'])):\n row_list = list(self.table['process']['stream'][row_idx])\n row_list.append(dn)\n self.table['process']['stream'][row_idx] = tuple(row_list)", "def modify_no_rollback(self, dn: str, mod_list: dict):\n \"\"\"\n Modify a DN in the LDAP database; See ldap module. Doesn't return a\n result if transactions enabled.\n \"\"\"\n\n _debug(\"modify_no_rollback\", self, dn, mod_list)\n result = self._do_with_retry(lambda obj: obj.modify_s(dn, mod_list))\n _debug(\"--\")\n\n return result", "def add(connect_spec, dn, attributes):\n '''Add an entry to an LDAP database.\n\n :param connect_spec:\n See the documentation for the ``connect_spec`` parameter for\n :py:func:`connect`.\n\n :param dn:\n Distinguished name of the entry.\n\n :param attributes:\n Non-empty dict mapping each of the new entry's attributes to a\n non-empty iterable of values.\n\n :returns:\n ``True`` if successful, raises an exception otherwise.\n\n CLI example:\n\n .. 
code-block:: bash\n\n salt '*' ldap3.add \"{\n 'url': 'ldaps://ldap.example.com/',\n 'bind': {\n 'method': 'simple',\n 'password': 'secret',\n },\n }\" \"dn='dc=example,dc=com'\" \"attributes={'example': 'values'}\"\n '''\n l = connect(connect_spec)\n # convert the \"iterable of values\" to lists in case that's what\n # addModlist() expects (also to ensure that the caller's objects\n # are not modified)\n attributes = dict(((attr, salt.utils.data.encode(list(vals)))\n for attr, vals in six.iteritems(attributes)))\n log.info('adding entry: dn: %s attributes: %s', repr(dn), repr(attributes))\n\n if 'unicodePwd' in attributes:\n attributes['unicodePwd'] = [_format_unicode_password(x) for x in attributes['unicodePwd']]\n\n modlist = ldap.modlist.addModlist(attributes),\n try:\n l.c.add_s(dn, modlist)\n except ldap.LDAPError as e:\n _convert_exception(e)\n return True", "def _request_add_dns_record(self, record):\n \"\"\"Sends Add_DNS_Record request\"\"\"\n return self._request_internal(\"Add_DNS_Record\",\n domain=self.domain,\n record=record)", "def add(self, distinguished_name, object_class, attributes):\n \"\"\"\n Add object to LDAP.\n\n Args:\n distinguished_name: the DN of the LDAP record to be added\n object_class: The objectClass of the record to be added.\n This is a list of length >= 1.\n attributes: a dictionary of LDAP attributes to add\n See ldap_tools.api.group.API#__ldap_attr\n\n \"\"\"\n self.conn.add(distinguished_name, object_class, attributes)", "def delete(self, dn: str) -> None:\n \"\"\"\n delete a dn in the ldap database; see ldap module. doesn't return a\n result if transactions enabled.\n \"\"\"\n\n return self._do_with_retry(lambda obj: obj.delete_s(dn))", "public void addNum(double d) {\n if( isUUID() ) { addNA(); return; }\n if(_id == null || d != 0) {\n if(_ls != null)switch_to_doubles();\n if( _ds == null || _sparseLen >= _ds.length ) {\n append2slowd();\n // call addNum again since append2slow might have flipped to sparse\n addNum(d);\n assert _sparseLen <= _len;\n return;\n }\n if(_id != null)_id[_sparseLen] = _len;\n _ds[_sparseLen++] = d;\n }\n _len++;\n assert _sparseLen <= _len;\n }", "def modify(self, dn: str, mod_list: dict) -> None:\n \"\"\"\n Modify a DN in the LDAP database; See ldap module. Doesn't return a\n result if transactions enabled.\n \"\"\"\n\n _debug(\"modify\", self, dn, mod_list)\n\n # need to work out how to reverse changes in mod_list; result in revlist\n revlist = {}\n\n # get the current cached attributes\n result = self._cache_get_for_dn(dn)\n\n # find the how to reverse mod_list (for rollback) and put result in\n # revlist. 
Also simulate actions on cache.\n for mod_type, l in six.iteritems(mod_list):\n for mod_op, mod_vals in l:\n\n _debug(\"attribute:\", mod_type)\n if mod_type in result:\n _debug(\"attribute cache:\", result[mod_type])\n else:\n _debug(\"attribute cache is empty\")\n _debug(\"attribute modify:\", (mod_op, mod_vals))\n\n if mod_vals is not None:\n if not isinstance(mod_vals, list):\n mod_vals = [mod_vals]\n\n if mod_op == ldap3.MODIFY_ADD:\n # reverse of MODIFY_ADD is MODIFY_DELETE\n reverse = (ldap3.MODIFY_DELETE, mod_vals)\n\n elif mod_op == ldap3.MODIFY_DELETE and len(mod_vals) > 0:\n # Reverse of MODIFY_DELETE is MODIFY_ADD, but only if value\n # is given if mod_vals is None, this means all values where\n # deleted.\n reverse = (ldap3.MODIFY_ADD, mod_vals)\n\n elif mod_op == ldap3.MODIFY_DELETE \\\n or mod_op == ldap3.MODIFY_REPLACE:\n if mod_type in result:\n # If MODIFY_DELETE with no values or MODIFY_REPLACE\n # then we have to replace all attributes with cached\n # state\n reverse = (\n ldap3.MODIFY_REPLACE,\n tldap.modlist.escape_list(result[mod_type])\n )\n else:\n # except if we have no cached state for this DN, in\n # which case we delete it.\n reverse = (ldap3.MODIFY_DELETE, [])\n\n else:\n raise RuntimeError(\"mod_op of %d not supported\" % mod_op)\n\n reverse = [reverse]\n _debug(\"attribute reverse:\", reverse)\n if mod_type in result:\n _debug(\"attribute cache:\", result[mod_type])\n else:\n _debug(\"attribute cache is empty\")\n\n revlist[mod_type] = reverse\n\n _debug(\"--\")\n _debug(\"mod_list:\", mod_list)\n _debug(\"revlist:\", revlist)\n _debug(\"--\")\n\n # now the hard stuff is over, we get to the easy stuff\n def on_commit(obj):\n obj.modify(dn, mod_list)\n\n def on_rollback(obj):\n obj.modify(dn, revlist)\n\n return self._process(on_commit, on_rollback)", "public void addNum(double d) {\n if( isUUID() || isString() ) { addNA(); return; }\n boolean predicate = _sparseNA ? !Double.isNaN(d) : isSparseZero()?d != 0:true;\n if(predicate) {\n if(_ms != null) {\n if((long)d == d){\n addNum((long)d,0);\n return;\n }\n switch_to_doubles();\n }\n //if ds not big enough\n if(_sparseLen == _ds.length ) {\n append2slowd();\n // call addNum again since append2slowd might have flipped to sparse\n addNum(d);\n assert _sparseLen <= _len;\n return;\n }\n if(_id != null)_id[_sparseLen] = _len;\n _ds[_sparseLen] = d;\n _sparseLen++;\n }\n _len++;\n assert _sparseLen <= _len;\n }", "def dyndns_add(nameserver, name, rdata, type=\"A\", ttl=10):\n \"\"\"Send a DNS add message to a nameserver for \"name\" to have a new \"rdata\"\ndyndns_add(nameserver, name, rdata, type=\"A\", ttl=10) -> result code (0=ok)\n\nexample: dyndns_add(\"ns1.toto.com\", \"dyn.toto.com\", \"127.0.0.1\")\nRFC2136\n\"\"\"\n zone = name[name.find(\".\") + 1:]\n r = sr1(IP(dst=nameserver) / UDP() / DNS(opcode=5,\n qd=[DNSQR(qname=zone, qtype=\"SOA\")], # noqa: E501\n ns=[DNSRR(rrname=name, type=\"A\",\n ttl=ttl, rdata=rdata)]),\n verbose=0, timeout=5)\n if r and r.haslayer(DNS):\n return r.getlayer(DNS).rcode\n else:\n return -1" ]
[ 0.8631446361541748, 0.7442304491996765, 0.7425011992454529, 0.7303652167320251, 0.7275208830833435, 0.7104989886283875, 0.7049483060836792, 0.7048552632331848, 0.704656183719635, 0.7016550898551941, 0.7011139392852783, 0.7001202702522278 ]
Modify a DN in the LDAP database; See ldap module. Doesn't return a result if transactions enabled.
def modify(self, dn: str, mod_list: dict) -> None:
        """
        Modify a DN in the LDAP database; See ldap module. Doesn't return a
        result if transactions enabled.
        """

        _debug("modify", self, dn, mod_list)

        # need to work out how to reverse changes in mod_list; result in revlist
        revlist = {}

        # get the current cached attributes
        result = self._cache_get_for_dn(dn)

        # find the how to reverse mod_list (for rollback) and put result in
        # revlist. Also simulate actions on cache.
        for mod_type, l in six.iteritems(mod_list):
            for mod_op, mod_vals in l:

                _debug("attribute:", mod_type)
                if mod_type in result:
                    _debug("attribute cache:", result[mod_type])
                else:
                    _debug("attribute cache is empty")
                _debug("attribute modify:", (mod_op, mod_vals))

                if mod_vals is not None:
                    if not isinstance(mod_vals, list):
                        mod_vals = [mod_vals]

                if mod_op == ldap3.MODIFY_ADD:
                    # reverse of MODIFY_ADD is MODIFY_DELETE
                    reverse = (ldap3.MODIFY_DELETE, mod_vals)

                elif mod_op == ldap3.MODIFY_DELETE and len(mod_vals) > 0:
                    # Reverse of MODIFY_DELETE is MODIFY_ADD, but only if value
                    # is given if mod_vals is None, this means all values where
                    # deleted.
                    reverse = (ldap3.MODIFY_ADD, mod_vals)

                elif mod_op == ldap3.MODIFY_DELETE \
                        or mod_op == ldap3.MODIFY_REPLACE:
                    if mod_type in result:
                        # If MODIFY_DELETE with no values or MODIFY_REPLACE
                        # then we have to replace all attributes with cached
                        # state
                        reverse = (
                            ldap3.MODIFY_REPLACE,
                            tldap.modlist.escape_list(result[mod_type])
                        )
                    else:
                        # except if we have no cached state for this DN, in
                        # which case we delete it.
                        reverse = (ldap3.MODIFY_DELETE, [])

                else:
                    raise RuntimeError("mod_op of %d not supported" % mod_op)

                reverse = [reverse]
                _debug("attribute reverse:", reverse)
                if mod_type in result:
                    _debug("attribute cache:", result[mod_type])
                else:
                    _debug("attribute cache is empty")

            revlist[mod_type] = reverse

        _debug("--")
        _debug("mod_list:", mod_list)
        _debug("revlist:", revlist)
        _debug("--")

        # now the hard stuff is over, we get to the easy stuff
        def on_commit(obj):
            obj.modify(dn, mod_list)

        def on_rollback(obj):
            obj.modify(dn, revlist)

        return self._process(on_commit, on_rollback)
[ "def modify(self, dn: str, mod_list: dict) -> None:\n \"\"\"\n Modify a DN in the LDAP database; See ldap module. Doesn't return a\n result if transactions enabled.\n \"\"\"\n\n return self._do_with_retry(lambda obj: obj.modify_s(dn, mod_list))", "def modify_no_rollback(self, dn: str, mod_list: dict):\n \"\"\"\n Modify a DN in the LDAP database; See ldap module. Doesn't return a\n result if transactions enabled.\n \"\"\"\n\n _debug(\"modify_no_rollback\", self, dn, mod_list)\n result = self._do_with_retry(lambda obj: obj.modify_s(dn, mod_list))\n _debug(\"--\")\n\n return result", "def modify(connect_spec, dn, directives):\n '''Modify an entry in an LDAP database.\n\n :param connect_spec:\n See the documentation for the ``connect_spec`` parameter for\n :py:func:`connect`.\n\n :param dn:\n Distinguished name of the entry.\n\n :param directives:\n Iterable of directives that indicate how to modify the entry.\n Each directive is a tuple of the form ``(op, attr, vals)``,\n where:\n\n * ``op`` identifies the modification operation to perform.\n One of:\n\n * ``'add'`` to add one or more values to the attribute\n\n * ``'delete'`` to delete some or all of the values from the\n attribute. If no values are specified with this\n operation, all of the attribute's values are deleted.\n Otherwise, only the named values are deleted.\n\n * ``'replace'`` to replace all of the attribute's values\n with zero or more new values\n\n * ``attr`` names the attribute to modify\n\n * ``vals`` is an iterable of values to add or delete\n\n :returns:\n ``True`` if successful, raises an exception otherwise.\n\n CLI example:\n\n .. code-block:: bash\n\n salt '*' ldap3.modify \"{\n 'url': 'ldaps://ldap.example.com/',\n 'bind': {\n 'method': 'simple',\n 'password': 'secret'}\n }\" dn='cn=admin,dc=example,dc=com'\n directives=\"('add', 'example', ['example_val'])\"\n '''\n l = connect(connect_spec)\n # convert the \"iterable of values\" to lists in case that's what\n # modify_s() expects (also to ensure that the caller's objects are\n # not modified)\n modlist = [(getattr(ldap, 'MOD_' + op.upper()), attr, list(vals))\n for op, attr, vals in directives]\n\n for idx, mod in enumerate(modlist):\n if mod[1] == 'unicodePwd':\n modlist[idx] = (mod[0], mod[1],\n [_format_unicode_password(x) for x in mod[2]])\n\n modlist = salt.utils.data.decode(modlist, to_str=True, preserve_tuples=True)\n try:\n l.c.modify_s(dn, modlist)\n except ldap.LDAPError as e:\n _convert_exception(e)\n return True", "def change(connect_spec, dn, before, after):\n '''Modify an entry in an LDAP database.\n\n This does the same thing as :py:func:`modify`, but with a simpler\n interface. Instead of taking a list of directives, it takes a\n before and after view of an entry, determines the differences\n between the two, computes the directives, and executes them.\n\n Any attribute value present in ``before`` but missing in ``after``\n is deleted. Any attribute value present in ``after`` but missing\n in ``before`` is added. Any attribute value in the database that\n is not mentioned in either ``before`` or ``after`` is not altered.\n Any attribute value that is present in both ``before`` and\n ``after`` is ignored, regardless of whether that attribute value\n exists in the database.\n\n :param connect_spec:\n See the documentation for the ``connect_spec`` parameter for\n :py:func:`connect`.\n\n :param dn:\n Distinguished name of the entry.\n\n :param before:\n The expected state of the entry before modification. 
This is\n a dict mapping each attribute name to an iterable of values.\n\n :param after:\n The desired state of the entry after modification. This is a\n dict mapping each attribute name to an iterable of values.\n\n :returns:\n ``True`` if successful, raises an exception otherwise.\n\n CLI example:\n\n .. code-block:: bash\n\n salt '*' ldap3.change \"{\n 'url': 'ldaps://ldap.example.com/',\n 'bind': {\n 'method': 'simple',\n 'password': 'secret'}\n }\" dn='cn=admin,dc=example,dc=com'\n before=\"{'example_value': 'before_val'}\"\n after=\"{'example_value': 'after_val'}\"\n '''\n l = connect(connect_spec)\n # convert the \"iterable of values\" to lists in case that's what\n # modifyModlist() expects (also to ensure that the caller's dicts\n # are not modified)\n before = dict(((attr, salt.utils.data.encode(list(vals)))\n for attr, vals in six.iteritems(before)))\n after = dict(((attr, salt.utils.data.encode(list(vals)))\n for attr, vals in six.iteritems(after)))\n\n if 'unicodePwd' in after:\n after['unicodePwd'] = [_format_unicode_password(x) for x in after['unicodePwd']]\n\n modlist = ldap.modlist.modifyModlist(before, after)\n\n try:\n l.c.modify_s(dn, modlist)\n except ldap.LDAPError as e:\n _convert_exception(e)\n return True", "def add(self, dn: str, mod_list: dict) -> None:\n \"\"\"\n Add a DN to the LDAP database; See ldap module. Doesn't return a result\n if transactions enabled.\n \"\"\"\n\n _debug(\"add\", self, dn, mod_list)\n\n # if rollback of add required, delete it\n def on_commit(obj):\n obj.add(dn, None, mod_list)\n\n def on_rollback(obj):\n obj.delete(dn)\n\n # process this action\n return self._process(on_commit, on_rollback)", "def add(self, dn: str, mod_list: dict) -> None:\n \"\"\"\n Add a DN to the LDAP database; See ldap module. Doesn't return a result\n if transactions enabled.\n \"\"\"\n\n return self._do_with_retry(lambda obj: obj.add_s(dn, mod_list))", "def rename(self, dn: str, new_rdn: str, new_base_dn: Optional[str] = None) -> None:\n \"\"\"\n rename a dn in the ldap database; see ldap module. doesn't return a\n result if transactions enabled.\n \"\"\"\n raise NotImplementedError()", "def rename(self, dn: str, new_rdn: str, new_base_dn: Optional[str] = None) -> None:\n \"\"\"\n rename a dn in the ldap database; see ldap module. doesn't return a\n result if transactions enabled.\n \"\"\"\n\n return self._do_with_retry(\n lambda obj: obj.rename_s(dn, new_rdn, new_base_dn))", "def _request_modify_dns_record(self, record):\n \"\"\"Sends Modify_DNS_Record request\"\"\"\n return self._request_internal(\"Modify_DNS_Record\",\n domain=self.domain,\n record=record)", "def delete(self, dn: str) -> None:\n \"\"\"\n delete a dn in the ldap database; see ldap module. doesn't return a\n result if transactions enabled.\n \"\"\"\n\n return self._do_with_retry(lambda obj: obj.delete_s(dn))", "def update(zone, name, ttl, rdtype, data, nameserver='127.0.0.1', timeout=5,\n replace=False, port=53, **kwargs):\n '''\n Add, replace, or update a DNS record.\n nameserver must be an IP address and the minion running this module\n must have update privileges on that server.\n If replace is true, first deletes all records for this name and type.\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt ns1 ddns.update example.com host1 60 A 10.0.0.1\n '''\n name = six.text_type(name)\n\n if name[-1:] == '.':\n fqdn = name\n else:\n fqdn = '{0}.{1}'.format(name, zone)\n\n request = dns.message.make_query(fqdn, rdtype)\n answer = dns.query.udp(request, nameserver, timeout, port)\n\n rdtype = dns.rdatatype.from_text(rdtype)\n rdata = dns.rdata.from_text(dns.rdataclass.IN, rdtype, data)\n\n keyring = _get_keyring(_config('keyfile', **kwargs))\n keyname = _config('keyname', **kwargs)\n keyalgorithm = _config('keyalgorithm',\n **kwargs) or 'HMAC-MD5.SIG-ALG.REG.INT'\n\n is_exist = False\n for rrset in answer.answer:\n if rdata in rrset.items:\n if ttl == rrset.ttl:\n if len(answer.answer) >= 1 or len(rrset.items) >= 1:\n is_exist = True\n break\n\n dns_update = dns.update.Update(zone, keyring=keyring, keyname=keyname,\n keyalgorithm=keyalgorithm)\n if replace:\n dns_update.replace(name, ttl, rdata)\n elif not is_exist:\n dns_update.add(name, ttl, rdata)\n else:\n return None\n answer = dns.query.udp(dns_update, nameserver, timeout, port)\n if answer.rcode() > 0:\n return False\n return True", "public static boolean executeModifyOperation(final String currentDn, final ConnectionFactory connectionFactory, final LdapEntry entry) {\n final Map<String, Set<String>> attributes = entry.getAttributes().stream()\n .collect(Collectors.toMap(LdapAttribute::getName, ldapAttribute -> new HashSet<>(ldapAttribute.getStringValues())));\n\n return executeModifyOperation(currentDn, connectionFactory, attributes);\n }" ]
[ 0.8853018879890442, 0.844825029373169, 0.782258152961731, 0.7694916725158691, 0.7637309432029724, 0.7588505148887634, 0.7512859106063843, 0.7361394762992859, 0.7337108254432678, 0.7156156301498413, 0.7155494093894958, 0.713196337223053 ]
Modify a DN in the LDAP database; See ldap module. Doesn't return a result if transactions enabled.
def modify_no_rollback(self, dn: str, mod_list: dict):
        """
        Modify a DN in the LDAP database; See ldap module. Doesn't return a
        result if transactions enabled.
        """

        _debug("modify_no_rollback", self, dn, mod_list)
        result = self._do_with_retry(lambda obj: obj.modify_s(dn, mod_list))
        _debug("--")

        return result
[ "def modify(self, dn: str, mod_list: dict) -> None:\n \"\"\"\n Modify a DN in the LDAP database; See ldap module. Doesn't return a\n result if transactions enabled.\n \"\"\"\n\n return self._do_with_retry(lambda obj: obj.modify_s(dn, mod_list))", "def modify(self, dn: str, mod_list: dict) -> None:\n \"\"\"\n Modify a DN in the LDAP database; See ldap module. Doesn't return a\n result if transactions enabled.\n \"\"\"\n\n _debug(\"modify\", self, dn, mod_list)\n\n # need to work out how to reverse changes in mod_list; result in revlist\n revlist = {}\n\n # get the current cached attributes\n result = self._cache_get_for_dn(dn)\n\n # find the how to reverse mod_list (for rollback) and put result in\n # revlist. Also simulate actions on cache.\n for mod_type, l in six.iteritems(mod_list):\n for mod_op, mod_vals in l:\n\n _debug(\"attribute:\", mod_type)\n if mod_type in result:\n _debug(\"attribute cache:\", result[mod_type])\n else:\n _debug(\"attribute cache is empty\")\n _debug(\"attribute modify:\", (mod_op, mod_vals))\n\n if mod_vals is not None:\n if not isinstance(mod_vals, list):\n mod_vals = [mod_vals]\n\n if mod_op == ldap3.MODIFY_ADD:\n # reverse of MODIFY_ADD is MODIFY_DELETE\n reverse = (ldap3.MODIFY_DELETE, mod_vals)\n\n elif mod_op == ldap3.MODIFY_DELETE and len(mod_vals) > 0:\n # Reverse of MODIFY_DELETE is MODIFY_ADD, but only if value\n # is given if mod_vals is None, this means all values where\n # deleted.\n reverse = (ldap3.MODIFY_ADD, mod_vals)\n\n elif mod_op == ldap3.MODIFY_DELETE \\\n or mod_op == ldap3.MODIFY_REPLACE:\n if mod_type in result:\n # If MODIFY_DELETE with no values or MODIFY_REPLACE\n # then we have to replace all attributes with cached\n # state\n reverse = (\n ldap3.MODIFY_REPLACE,\n tldap.modlist.escape_list(result[mod_type])\n )\n else:\n # except if we have no cached state for this DN, in\n # which case we delete it.\n reverse = (ldap3.MODIFY_DELETE, [])\n\n else:\n raise RuntimeError(\"mod_op of %d not supported\" % mod_op)\n\n reverse = [reverse]\n _debug(\"attribute reverse:\", reverse)\n if mod_type in result:\n _debug(\"attribute cache:\", result[mod_type])\n else:\n _debug(\"attribute cache is empty\")\n\n revlist[mod_type] = reverse\n\n _debug(\"--\")\n _debug(\"mod_list:\", mod_list)\n _debug(\"revlist:\", revlist)\n _debug(\"--\")\n\n # now the hard stuff is over, we get to the easy stuff\n def on_commit(obj):\n obj.modify(dn, mod_list)\n\n def on_rollback(obj):\n obj.modify(dn, revlist)\n\n return self._process(on_commit, on_rollback)", "def modify(connect_spec, dn, directives):\n '''Modify an entry in an LDAP database.\n\n :param connect_spec:\n See the documentation for the ``connect_spec`` parameter for\n :py:func:`connect`.\n\n :param dn:\n Distinguished name of the entry.\n\n :param directives:\n Iterable of directives that indicate how to modify the entry.\n Each directive is a tuple of the form ``(op, attr, vals)``,\n where:\n\n * ``op`` identifies the modification operation to perform.\n One of:\n\n * ``'add'`` to add one or more values to the attribute\n\n * ``'delete'`` to delete some or all of the values from the\n attribute. 
If no values are specified with this\n operation, all of the attribute's values are deleted.\n Otherwise, only the named values are deleted.\n\n * ``'replace'`` to replace all of the attribute's values\n with zero or more new values\n\n * ``attr`` names the attribute to modify\n\n * ``vals`` is an iterable of values to add or delete\n\n :returns:\n ``True`` if successful, raises an exception otherwise.\n\n CLI example:\n\n .. code-block:: bash\n\n salt '*' ldap3.modify \"{\n 'url': 'ldaps://ldap.example.com/',\n 'bind': {\n 'method': 'simple',\n 'password': 'secret'}\n }\" dn='cn=admin,dc=example,dc=com'\n directives=\"('add', 'example', ['example_val'])\"\n '''\n l = connect(connect_spec)\n # convert the \"iterable of values\" to lists in case that's what\n # modify_s() expects (also to ensure that the caller's objects are\n # not modified)\n modlist = [(getattr(ldap, 'MOD_' + op.upper()), attr, list(vals))\n for op, attr, vals in directives]\n\n for idx, mod in enumerate(modlist):\n if mod[1] == 'unicodePwd':\n modlist[idx] = (mod[0], mod[1],\n [_format_unicode_password(x) for x in mod[2]])\n\n modlist = salt.utils.data.decode(modlist, to_str=True, preserve_tuples=True)\n try:\n l.c.modify_s(dn, modlist)\n except ldap.LDAPError as e:\n _convert_exception(e)\n return True", "def change(connect_spec, dn, before, after):\n '''Modify an entry in an LDAP database.\n\n This does the same thing as :py:func:`modify`, but with a simpler\n interface. Instead of taking a list of directives, it takes a\n before and after view of an entry, determines the differences\n between the two, computes the directives, and executes them.\n\n Any attribute value present in ``before`` but missing in ``after``\n is deleted. Any attribute value present in ``after`` but missing\n in ``before`` is added. Any attribute value in the database that\n is not mentioned in either ``before`` or ``after`` is not altered.\n Any attribute value that is present in both ``before`` and\n ``after`` is ignored, regardless of whether that attribute value\n exists in the database.\n\n :param connect_spec:\n See the documentation for the ``connect_spec`` parameter for\n :py:func:`connect`.\n\n :param dn:\n Distinguished name of the entry.\n\n :param before:\n The expected state of the entry before modification. This is\n a dict mapping each attribute name to an iterable of values.\n\n :param after:\n The desired state of the entry after modification. This is a\n dict mapping each attribute name to an iterable of values.\n\n :returns:\n ``True`` if successful, raises an exception otherwise.\n\n CLI example:\n\n .. 
code-block:: bash\n\n salt '*' ldap3.change \"{\n 'url': 'ldaps://ldap.example.com/',\n 'bind': {\n 'method': 'simple',\n 'password': 'secret'}\n }\" dn='cn=admin,dc=example,dc=com'\n before=\"{'example_value': 'before_val'}\"\n after=\"{'example_value': 'after_val'}\"\n '''\n l = connect(connect_spec)\n # convert the \"iterable of values\" to lists in case that's what\n # modifyModlist() expects (also to ensure that the caller's dicts\n # are not modified)\n before = dict(((attr, salt.utils.data.encode(list(vals)))\n for attr, vals in six.iteritems(before)))\n after = dict(((attr, salt.utils.data.encode(list(vals)))\n for attr, vals in six.iteritems(after)))\n\n if 'unicodePwd' in after:\n after['unicodePwd'] = [_format_unicode_password(x) for x in after['unicodePwd']]\n\n modlist = ldap.modlist.modifyModlist(before, after)\n\n try:\n l.c.modify_s(dn, modlist)\n except ldap.LDAPError as e:\n _convert_exception(e)\n return True", "def add(self, dn: str, mod_list: dict) -> None:\n \"\"\"\n Add a DN to the LDAP database; See ldap module. Doesn't return a result\n if transactions enabled.\n \"\"\"\n\n _debug(\"add\", self, dn, mod_list)\n\n # if rollback of add required, delete it\n def on_commit(obj):\n obj.add(dn, None, mod_list)\n\n def on_rollback(obj):\n obj.delete(dn)\n\n # process this action\n return self._process(on_commit, on_rollback)", "def add(self, dn: str, mod_list: dict) -> None:\n \"\"\"\n Add a DN to the LDAP database; See ldap module. Doesn't return a result\n if transactions enabled.\n \"\"\"\n\n return self._do_with_retry(lambda obj: obj.add_s(dn, mod_list))", "def rename(self, dn: str, new_rdn: str, new_base_dn: Optional[str] = None) -> None:\n \"\"\"\n rename a dn in the ldap database; see ldap module. doesn't return a\n result if transactions enabled.\n \"\"\"\n raise NotImplementedError()", "def rename(self, dn: str, new_rdn: str, new_base_dn: Optional[str] = None) -> None:\n \"\"\"\n rename a dn in the ldap database; see ldap module. doesn't return a\n result if transactions enabled.\n \"\"\"\n\n return self._do_with_retry(\n lambda obj: obj.rename_s(dn, new_rdn, new_base_dn))", "def _request_modify_dns_record(self, record):\n \"\"\"Sends Modify_DNS_Record request\"\"\"\n return self._request_internal(\"Modify_DNS_Record\",\n domain=self.domain,\n record=record)", "def delete(self, dn: str) -> None:\n \"\"\"\n delete a dn in the ldap database; see ldap module. doesn't return a\n result if transactions enabled.\n \"\"\"\n\n return self._do_with_retry(lambda obj: obj.delete_s(dn))", "def update(zone, name, ttl, rdtype, data, nameserver='127.0.0.1', timeout=5,\n replace=False, port=53, **kwargs):\n '''\n Add, replace, or update a DNS record.\n nameserver must be an IP address and the minion running this module\n must have update privileges on that server.\n If replace is true, first deletes all records for this name and type.\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt ns1 ddns.update example.com host1 60 A 10.0.0.1\n '''\n name = six.text_type(name)\n\n if name[-1:] == '.':\n fqdn = name\n else:\n fqdn = '{0}.{1}'.format(name, zone)\n\n request = dns.message.make_query(fqdn, rdtype)\n answer = dns.query.udp(request, nameserver, timeout, port)\n\n rdtype = dns.rdatatype.from_text(rdtype)\n rdata = dns.rdata.from_text(dns.rdataclass.IN, rdtype, data)\n\n keyring = _get_keyring(_config('keyfile', **kwargs))\n keyname = _config('keyname', **kwargs)\n keyalgorithm = _config('keyalgorithm',\n **kwargs) or 'HMAC-MD5.SIG-ALG.REG.INT'\n\n is_exist = False\n for rrset in answer.answer:\n if rdata in rrset.items:\n if ttl == rrset.ttl:\n if len(answer.answer) >= 1 or len(rrset.items) >= 1:\n is_exist = True\n break\n\n dns_update = dns.update.Update(zone, keyring=keyring, keyname=keyname,\n keyalgorithm=keyalgorithm)\n if replace:\n dns_update.replace(name, ttl, rdata)\n elif not is_exist:\n dns_update.add(name, ttl, rdata)\n else:\n return None\n answer = dns.query.udp(dns_update, nameserver, timeout, port)\n if answer.rcode() > 0:\n return False\n return True", "public static boolean executeModifyOperation(final String currentDn, final ConnectionFactory connectionFactory, final LdapEntry entry) {\n final Map<String, Set<String>> attributes = entry.getAttributes().stream()\n .collect(Collectors.toMap(LdapAttribute::getName, ldapAttribute -> new HashSet<>(ldapAttribute.getStringValues())));\n\n return executeModifyOperation(currentDn, connectionFactory, attributes);\n }" ]
[ 0.8853018879890442, 0.8312879800796509, 0.782258152961731, 0.7694916725158691, 0.7637309432029724, 0.7588505148887634, 0.7512859106063843, 0.7361394762992859, 0.7337108254432678, 0.7156156301498413, 0.7155494093894958, 0.713196337223053 ]
delete a dn in the ldap database; see ldap module. doesn't return a result if transactions enabled.
def delete(self, dn: str) -> None:
        """
        delete a dn in the ldap database; see ldap module. doesn't return a
        result if transactions enabled.
        """

        _debug("delete", self)

        # get copy of cache
        result = self._cache_get_for_dn(dn)

        # remove special values that can't be added
        def delete_attribute(name):
            if name in result:
                del result[name]
        delete_attribute('entryUUID')
        delete_attribute('structuralObjectClass')
        delete_attribute('modifiersName')
        delete_attribute('subschemaSubentry')
        delete_attribute('entryDN')
        delete_attribute('modifyTimestamp')
        delete_attribute('entryCSN')
        delete_attribute('createTimestamp')
        delete_attribute('creatorsName')
        delete_attribute('hasSubordinates')
        delete_attribute('pwdFailureTime')
        delete_attribute('pwdChangedTime')

        # turn into mod_list list.
        mod_list = tldap.modlist.addModlist(result)
        _debug("revlist:", mod_list)

        # on commit carry out action; on rollback restore cached state
        def on_commit(obj):
            obj.delete(dn)

        def on_rollback(obj):
            obj.add(dn, None, mod_list)

        return self._process(on_commit, on_rollback)
[ "def delete(self, dn: str) -> None:\n \"\"\"\n delete a dn in the ldap database; see ldap module. doesn't return a\n result if transactions enabled.\n \"\"\"\n\n return self._do_with_retry(lambda obj: obj.delete_s(dn))", "def delete(connect_spec, dn):\n '''Delete an entry from an LDAP database.\n\n :param connect_spec:\n See the documentation for the ``connect_spec`` parameter for\n :py:func:`connect`.\n\n :param dn:\n Distinguished name of the entry.\n\n :returns:\n ``True`` if successful, raises an exception otherwise.\n\n CLI example:\n\n .. code-block:: bash\n\n salt '*' ldap3.delete \"{\n 'url': 'ldaps://ldap.example.com/',\n 'bind': {\n 'method': 'simple',\n 'password': 'secret'}\n }\" dn='cn=admin,dc=example,dc=com'\n '''\n l = connect(connect_spec)\n log.info('deleting entry: dn: %s', repr(dn))\n try:\n l.c.delete_s(dn)\n except ldap.LDAPError as e:\n _convert_exception(e)\n return True", "public void delete(String dn) throws LDAPException {\n DeleteRequest deleteRequest = new DeleteRequest(dn);\n delete(deleteRequest);\n }", "def delete_record(self, domain, recordid, params=None):\n ''' /v1/dns/delete_record\n POST - account\n Deletes an individual DNS record\n\n Link: https://www.vultr.com/api/#dns_delete_record\n '''\n params = update_params(params, {\n 'domain': domain,\n 'RECORDID': recordid\n })\n return self.request('/v1/dns/delete_record', params, 'POST')", "def delete(python_data: LdapObject, database: Optional[Database] = None) -> None:\n \"\"\" Delete a LdapObject from the database. \"\"\"\n dn = python_data.get_as_single('dn')\n assert dn is not None\n\n database = get_database(database)\n connection = database.connection\n\n connection.delete(dn)", "public function delete($dn)\n {\n if ($this->suppressErrors) {\n return @ldap_delete($this->getConnection(), $dn);\n }\n\n return ldap_delete($this->getConnection(), $dn);\n }", "def delete_record(self, dns_type, name):\n \"\"\"\n Delete a dns record\n :param dns_type:\n :param name:\n :return:\n \"\"\"\n record = self.get_record(dns_type, name)\n content = self.request(\n urllib.parse.urljoin(self.api_url, self.zone['id'] + '/dns_records/' + record['id']),\n 'delete'\n )\n return content['result']['id']", "def delete(sld, tld, nameserver):\n '''\n Deletes a nameserver. Returns ``True`` if the nameserver was deleted\n successfully\n\n sld\n SLD of the domain name\n\n tld\n TLD of the domain name\n\n nameserver\n Nameserver to delete\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' namecheap_domains_ns.delete sld tld nameserver\n '''\n opts = salt.utils.namecheap.get_opts('namecheap.domains.ns.delete')\n opts['SLD'] = sld\n opts['TLD'] = tld\n opts['Nameserver'] = nameserver\n\n response_xml = salt.utils.namecheap.post_request(opts)\n if response_xml is None:\n return False\n\n domainnsdeleteresult = response_xml.getElementsByTagName('DomainNSDeleteResult')[0]\n return salt.utils.namecheap.string_to_value(domainnsdeleteresult.getAttribute('IsSuccess'))", "public function delete($dn)\n\t{\n\t\tif (!$this->isBound || !$this->isConnected())\n\t\t{\n\t\t\treturn false;\n\t\t}\n\n\t\treturn ldap_delete($this->resource, $dn);\n\t}", "def delete(gandi, fqdn, name, type, force):\n \"\"\"Delete record entry for a domain.\"\"\"\n domains = gandi.dns.list()\n domains = [domain['fqdn'] for domain in domains]\n if fqdn not in domains:\n gandi.echo('Sorry domain %s does not exist' % fqdn)\n gandi.echo('Please use one of the following: %s' % ', '.join(domains))\n return\n\n if not force:\n if not name and not type:\n prompt = (\"Are you sure to delete all records for domain %s ?\" %\n fqdn)\n elif name and not type:\n prompt = (\"Are you sure to delete all '%s' name records for \"\n \"domain %s ?\" % (name, fqdn))\n else:\n prompt = (\"Are you sure to delete all '%s' records of type %s \"\n \"for domain %s ?\" % (name, type, fqdn))\n\n proceed = click.confirm(prompt)\n\n if not proceed:\n return\n\n result = gandi.dns.del_record(fqdn, name, type)\n gandi.echo('Delete successful.')\n return result", "public static base_response delete(nitro_service client, String labelname) throws Exception {\n\t\tdnspolicylabel deleteresource = new dnspolicylabel();\n\t\tdeleteresource.labelname = labelname;\n\t\treturn deleteresource.delete_resource(client);\n\t}", "def delete_domain(self, domain, params=None):\n ''' /v1/dns/delete_domain\n POST - account\n Delete a domain name (and all associated records)\n\n Link: https://www.vultr.com/api/#dns_delete_domain\n '''\n params = update_params(params, {'domain': domain})\n return self.request('/v1/dns/delete_domain', params, 'POST')" ]
[ 0.8880426287651062, 0.7907469868659973, 0.7566363215446472, 0.754315197467804, 0.7537108659744263, 0.7459157705307007, 0.7336453795433044, 0.7319474816322327, 0.7308675050735474, 0.7284370064735413, 0.7258713841438293, 0.7221714854240417 ]
rename a dn in the ldap database; see ldap module. doesn't return a result if transactions enabled.
def rename(self, dn: str, new_rdn: str, new_base_dn: Optional[str] = None) -> None:
        """
        rename a dn in the ldap database; see ldap module. doesn't return a
        result if transactions enabled.
        """

        _debug("rename", self, dn, new_rdn, new_base_dn)

        # split up the parameters
        split_dn = tldap.dn.str2dn(dn)
        split_newrdn = tldap.dn.str2dn(new_rdn)
        assert(len(split_newrdn) == 1)

        # make dn unqualified
        rdn = tldap.dn.dn2str(split_dn[0:1])

        # make newrdn fully qualified dn
        tmplist = [split_newrdn[0]]
        if new_base_dn is not None:
            tmplist.extend(tldap.dn.str2dn(new_base_dn))
            old_base_dn = tldap.dn.dn2str(split_dn[1:])
        else:
            tmplist.extend(split_dn[1:])
            old_base_dn = None
        newdn = tldap.dn.dn2str(tmplist)

        _debug("--> commit ", self, dn, new_rdn, new_base_dn)
        _debug("--> rollback", self, newdn, rdn, old_base_dn)

        # on commit carry out action; on rollback reverse rename
        def on_commit(obj):
            obj.modify_dn(dn, new_rdn, new_superior=new_base_dn)

        def on_rollback(obj):
            obj.modify_dn(newdn, rdn, new_superior=old_base_dn)

        return self._process(on_commit, on_rollback)
[ "def rename(self, dn: str, new_rdn: str, new_base_dn: Optional[str] = None) -> None:\n \"\"\"\n rename a dn in the ldap database; see ldap module. doesn't return a\n result if transactions enabled.\n \"\"\"\n raise NotImplementedError()", "def rename(self, dn: str, new_rdn: str, new_base_dn: Optional[str] = None) -> None:\n \"\"\"\n rename a dn in the ldap database; see ldap module. doesn't return a\n result if transactions enabled.\n \"\"\"\n\n return self._do_with_retry(\n lambda obj: obj.rename_s(dn, new_rdn, new_base_dn))", "def rename(python_data: LdapObject, new_base_dn: str = None,\n database: Optional[Database] = None, **kwargs) -> LdapObject:\n \"\"\" Move/rename a LdapObject in the database. \"\"\"\n table = type(python_data)\n dn = python_data.get_as_single('dn')\n assert dn is not None\n\n database = get_database(database)\n connection = database.connection\n\n # extract key and value from kwargs\n if len(kwargs) == 1:\n name, value = list(kwargs.items())[0]\n\n # work out the new rdn of the object\n split_new_rdn = [[(name, value, 1)]]\n\n field = _get_field_by_name(table, name)\n assert field.db_field\n\n python_data = python_data.merge({\n name: value,\n })\n\n elif len(kwargs) == 0:\n split_new_rdn = [str2dn(dn)[0]]\n else:\n assert False\n\n new_rdn = dn2str(split_new_rdn)\n\n connection.rename(\n dn,\n new_rdn,\n new_base_dn,\n )\n\n if new_base_dn is not None:\n split_base_dn = str2dn(new_base_dn)\n else:\n split_base_dn = str2dn(dn)[1:]\n\n tmp_list = [split_new_rdn[0]]\n tmp_list.extend(split_base_dn)\n\n new_dn = dn2str(tmp_list)\n\n python_data = python_data.merge({\n 'dn': new_dn,\n })\n return python_data", "def rename(dn, new_rdn, delete_old, *args)\n log_dispatch(:rename, dn, new_rdn, delete_old, *args)\n adapter.rename(dn, new_rdn.to_str, delete_old, *args)\n end", "function rename_group($group_id, $new_name, &$new_gid)\n {\n $group_cache = $this->_fetch_groups();\n $old_dn = $group_cache[$group_id]['dn'];\n $new_rdn = \"cn=\" . rcube_ldap_generic::quote_string($new_name, true);\n $new_gid = self::dn_encode($new_rdn . ',' . $this->groups_base_dn);\n\n if (!$this->ldap->rename($old_dn, $new_rdn, null, true)) {\n $this->set_error(self::ERROR_SAVING, 'errorsaving');\n return false;\n }\n\n if ($this->cache) {\n $this->cache->remove('groups');\n }\n\n return $new_name;\n }", "public function rename($dn, $newdn, $newparent, $deleteolddn)\n\t{\n\t\tif (!$this->isBound || !$this->isConnected())\n\t\t{\n\t\t\treturn false;\n\t\t}\n\n\t\treturn ldap_rename($this->resource, $dn, $newdn, $newparent, $deleteolddn);\n\t}", "public function rename($dn, $newRdn, $newParent, $deleteOldRdn = false)\n {\n return ldap_rename($this->getConnection(), $dn, $newRdn, $newParent, $deleteOldRdn);\n }", "def database_rename(object_id, input_params={}, always_retry=True, **kwargs):\n \"\"\"\n Invokes the /database-xxxx/rename API method.\n\n For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Name#API-method%3A-%2Fclass-xxxx%2Frename\n \"\"\"\n return DXHTTPRequest('/%s/rename' % object_id, input_params, always_retry=always_retry, **kwargs)", "public function rename($dn, $newrdn, $newparent = null, $deleteoldrdn = true)\n {\n $this->_debug(\"C: Rename $dn to $newrdn\");\n\n if (!ldap_rename($this->conn, $dn, $newrdn, $newparent, $deleteoldrdn)) {\n $this->_error(\"ldap_rename() failed with \" . 
ldap_error($this->conn));\n return false;\n }\n\n $this->_debug(\"S: OK\");\n return true;\n }", "@Override\n\tpublic void rename(final Name oldDn, final Name newDn) {\n\t\texecuteReadWrite(new ContextExecutor() {\n\t\t\tpublic Object executeWithContext(DirContext ctx) throws javax.naming.NamingException {\n\t\t\t\tctx.rename(oldDn, newDn);\n\t\t\t\treturn null;\n\t\t\t}\n\t\t});\n\t}", "public void rename(String dn, String newDn) throws WIMException {\n TimedDirContext ctx = iContextManager.getDirContext();\n iContextManager.checkWritePermission(ctx);\n try {\n try {\n ctx.rename(dn, newDn);\n } catch (NamingException e) {\n if (!ContextManager.isConnectionException(e)) {\n throw e;\n }\n ctx = iContextManager.reCreateDirContext(ctx, e.toString());\n ctx.rename(dn, newDn);\n }\n } catch (NamingException e) {\n String msg = Tr.formatMessage(tc, WIMMessageKey.NAMING_EXCEPTION, WIMMessageHelper.generateMsgParms(e.toString(true)));\n throw new WIMSystemException(WIMMessageKey.NAMING_EXCEPTION, msg, e);\n } finally {\n iContextManager.releaseDirContext(ctx);\n }\n }", "public void rollback() {\n log.debug(\"Rolling back rename operation\");\n try {\n ldapOperations.rename(newDn, originalDn);\n } catch (Exception e) {\n log.warn(\"Unable to rollback rename operation. \" + \"originalDn: \"\n + newDn + \"; newDn: \" + originalDn);\n }\n }" ]
[ 0.9035313725471497, 0.8929044604301453, 0.8117704391479492, 0.790888786315918, 0.7474083304405212, 0.7388917207717896, 0.7381210923194885, 0.7348619699478149, 0.7344306111335754, 0.7342362999916077, 0.7287878394126892, 0.7264547348022461 ]
for testing purposes only. always fail in commit
def fail(self) -> None:
        """ for testing purposes only. always fail in commit """

        _debug("fail")

        # on commit carry out action; on rollback reverse rename
        def on_commit(_obj):
            raise_testfailure("commit")

        def on_rollback(_obj):
            raise_testfailure("rollback")

        return self._process(on_commit, on_rollback)
[ "BlockInfo[] setReplication(String src,\n short replication,\n int[] oldReplication\n ) throws IOException {\n waitForReady();\n BlockInfo[] fileBlocks = unprotectedSetReplication(src, replication, oldReplication);\n if (fileBlocks != null) // log replication change\n fsImage.getEditLog().logSetReplication(src, replication);\n return fileBlocks;\n }", "public void commitBlockSynchronization(Block block,\n long newgenerationstamp, long newlength,\n boolean closeFile, boolean deleteblock, DatanodeID[] newtargets\n ) throws IOException {\n namesystem.commitBlockSynchronization(block,\n newgenerationstamp, newlength, closeFile, deleteblock, newtargets);\n }", "public void commitBlockSynchronization(Block block,\n long newgenerationstamp, long newlength,\n boolean closeFile, boolean deleteblock, DatanodeID[] newtargets\n ) throws IOException {\n IOException last = new IOException(\"No DatanodeProtocol found.\");\n for (int i = 0; i < numProtocol; i++) {\n try {\n if (node[i] != null) {\n node[i].commitBlockSynchronization(block, newgenerationstamp,\n newlength, closeFile,\n deleteblock, newtargets);\n return;\n }\n } catch (IOException e) {\n last = e;\n LOG.info(\"Server \" + i + \" failed at commitBlockSynchronization.\", e);\n }\n }\n throw last; // fail if all DatanodeProtocol object failed.\n }", "def commit(message, parents = nil, actor = nil, last_tree = nil, head = 'master')\n commit_tree_sha = nil\n if parents.is_a?(Hash)\n commit_tree_sha = parents[:commit_tree_sha]\n actor = parents[:actor]\n committer = parents[:committer]\n author = parents[:author]\n last_tree = parents[:last_tree]\n head = parents[:head]\n committed_date = parents[:committed_date]\n authored_date = parents[:authored_date]\n parents = parents[:parents]\n end\n\n committer ||= actor\n author ||= committer\n\n if commit_tree_sha\n tree_sha1 = commit_tree_sha\n else\n tree_sha1 = write_tree(self.tree, self.current_tree)\n end\n\n # don't write identical commits\n return false if tree_sha1 == last_tree\n\n contents = []\n contents << ['tree', tree_sha1].join(' ')\n parents.each do |p|\n contents << ['parent', p].join(' ')\n end if parents\n\n committer ||= begin\n config = Config.new(self.repo)\n Actor.new(config['user.name'], config['user.email'])\n end\n author ||= committer\n committed_date ||= Time.now\n authored_date ||= committed_date\n\n contents << ['author', author.output(authored_date)].join(' ')\n contents << ['committer', committer.output(committed_date)].join(' ')\n contents << ''\n contents << message\n\n contents = contents.join(\"\\n\")\n @last_commit_size = contents.size\n commit_sha1 = self.repo.git.put_raw_object(contents, 'commit')\n\n self.repo.update_ref(head, commit_sha1) if head\n commit_sha1\n end", "private BlockStoreLocation commitBlockInternal(long sessionId, long blockId)\n throws BlockAlreadyExistsException, InvalidWorkerStateException, BlockDoesNotExistException,\n IOException {\n long lockId = mLockManager.lockBlock(sessionId, blockId, BlockLockType.WRITE);\n try {\n // When committing TempBlockMeta, the final BlockMeta calculates the block size according to\n // the actual file size of this TempBlockMeta. 
Therefore, commitTempBlockMeta must happen\n // after moving actual block file to its committed path.\n BlockStoreLocation loc;\n String srcPath;\n String dstPath;\n TempBlockMeta tempBlockMeta;\n try (LockResource r = new LockResource(mMetadataReadLock)) {\n checkTempBlockOwnedBySession(sessionId, blockId);\n tempBlockMeta = mMetaManager.getTempBlockMeta(blockId);\n srcPath = tempBlockMeta.getPath();\n dstPath = tempBlockMeta.getCommitPath();\n loc = tempBlockMeta.getBlockLocation();\n }\n\n // Heavy IO is guarded by block lock but not metadata lock. This may throw IOException.\n FileUtils.move(srcPath, dstPath);\n\n try (LockResource r = new LockResource(mMetadataWriteLock)) {\n mMetaManager.commitTempBlockMeta(tempBlockMeta);\n } catch (BlockAlreadyExistsException | BlockDoesNotExistException\n | WorkerOutOfSpaceException e) {\n throw Throwables.propagate(e); // we shall never reach here\n }\n return loc;\n } finally {\n mLockManager.unlockBlock(lockId);\n }\n }", "function cloneAndSetCommitTask(commitTask, msg, noVerbose) {\n\tcommitter.cloneAndSetCommit(commitTask.commit, msg, commitTask.namespace, noVerbose);\n}", "ApiFuture<List<WriteResult>> commit(@Nullable ByteString transactionId) {\n Tracing.getTracer()\n .getCurrentSpan()\n .addAnnotation(\n \"CloudFirestore.Commit\",\n ImmutableMap.of(\"numDocuments\", AttributeValue.longAttributeValue(mutations.size())));\n\n final CommitRequest.Builder request = CommitRequest.newBuilder();\n request.setDatabase(firestore.getDatabaseName());\n\n for (Mutation mutation : mutations) {\n Preconditions.checkState(\n mutation.document != null || mutation.transform != null,\n \"Either a write or transform must be set\");\n\n if (mutation.precondition != null) {\n (mutation.document != null ? mutation.document : mutation.transform)\n .setCurrentDocument(mutation.precondition);\n }\n\n if (mutation.document != null) {\n request.addWrites(mutation.document);\n }\n\n if (mutation.transform != null) {\n request.addWrites(mutation.transform);\n }\n }\n\n if (transactionId != null) {\n request.setTransaction(transactionId);\n }\n\n committed = true;\n\n ApiFuture<CommitResponse> response =\n firestore.sendRequest(request.build(), firestore.getClient().commitCallable());\n\n return ApiFutures.transform(\n response,\n new ApiFunction<CommitResponse, List<WriteResult>>() {\n @Override\n public List<WriteResult> apply(CommitResponse commitResponse) {\n List<com.google.firestore.v1.WriteResult> writeResults =\n commitResponse.getWriteResultsList();\n\n List<WriteResult> result = new ArrayList<>();\n\n Preconditions.checkState(\n request.getWritesCount() == writeResults.size(),\n \"Expected one write result per operation, but got %s results for %s operations.\",\n writeResults.size(),\n request.getWritesCount());\n\n Iterator<Mutation> mutationIterator = mutations.iterator();\n Iterator<com.google.firestore.v1.WriteResult> responseIterator =\n writeResults.iterator();\n\n while (mutationIterator.hasNext()) {\n Mutation mutation = mutationIterator.next();\n\n // Don't return both write results for a write that contains a transform, as the fact\n // that we have to split one write operation into two distinct write requests is an\n // implementation detail.\n if (mutation.document != null && mutation.transform != null) {\n // The document transform is always sent last and produces the latest update time.\n responseIterator.next();\n }\n\n result.add(\n WriteResult.fromProto(responseIterator.next(), commitResponse.getCommitTime()));\n }\n\n return result;\n }\n 
});\n }", "@SuppressWarnings({ \"unchecked\", \"rawtypes\", \"unused\" })\n private void commitToVoldemort(List<String> storeNamesToCommit) {\n\n if(logger.isDebugEnabled()) {\n logger.debug(\"Trying to commit to Voldemort\");\n }\n\n boolean hasError = false;\n if(nodesToStream == null || nodesToStream.size() == 0) {\n if(logger.isDebugEnabled()) {\n logger.debug(\"No nodes to stream to. Returning.\");\n }\n return;\n }\n\n for(Node node: nodesToStream) {\n\n for(String store: storeNamesToCommit) {\n if(!nodeIdStoreInitialized.get(new Pair(store, node.getId())))\n continue;\n\n nodeIdStoreInitialized.put(new Pair(store, node.getId()), false);\n\n DataOutputStream outputStream = nodeIdStoreToOutputStreamRequest.get(new Pair(store,\n node.getId()));\n\n try {\n ProtoUtils.writeEndOfStream(outputStream);\n outputStream.flush();\n DataInputStream inputStream = nodeIdStoreToInputStreamRequest.get(new Pair(store,\n node.getId()));\n VAdminProto.UpdatePartitionEntriesResponse.Builder updateResponse = ProtoUtils.readToBuilder(inputStream,\n VAdminProto.UpdatePartitionEntriesResponse.newBuilder());\n if(updateResponse.hasError()) {\n hasError = true;\n }\n\n } catch(IOException e) {\n logger.error(\"Exception during commit\", e);\n hasError = true;\n if(!faultyNodes.contains(node.getId()))\n faultyNodes.add(node.getId());\n }\n }\n\n }\n\n if(streamingresults == null) {\n logger.warn(\"StreamingSession may not have been initialized since Variable streamingresults is null. Skipping callback \");\n return;\n }\n\n // remove redundant callbacks\n if(hasError) {\n\n logger.info(\"Invoking the Recovery Callback\");\n Future future = streamingresults.submit(recoveryCallback);\n try {\n future.get();\n\n } catch(InterruptedException e1) {\n MARKED_BAD = true;\n logger.error(\"Recovery Callback failed\", e1);\n throw new VoldemortException(\"Recovery Callback failed\");\n } catch(ExecutionException e1) {\n MARKED_BAD = true;\n logger.error(\"Recovery Callback failed during execution\", e1);\n throw new VoldemortException(\"Recovery Callback failed during execution\");\n }\n } else {\n if(logger.isDebugEnabled()) {\n logger.debug(\"Commit successful\");\n logger.debug(\"calling checkpoint callback\");\n }\n Future future = streamingresults.submit(checkpointCallback);\n try {\n future.get();\n\n } catch(InterruptedException e1) {\n logger.warn(\"Checkpoint callback failed!\", e1);\n } catch(ExecutionException e1) {\n logger.warn(\"Checkpoint callback failed during execution!\", e1);\n }\n }\n\n }", "function Commit(relLbl, relType, relRx, bumpRx, cmo, pver, nver, gitCliSubstitute, ch, pkgPath, pkgPathBower,\n\t\tpkgPropSync, buildDir, branch, slug, username, reponame, gitToken, npmToken) {\n\tvar cm = typeof cmo === 'string' ? cmo : cmo.message;\n\tthis.versionRegExp = typeof cmo === 'object' && cmo.matcher ? 
cmo.matcher : relRx;\n\tvar rv = cm.match(this.versionRegExp);\n\tif ((!rv || !rv.length) && typeof cmo === 'object' && typeof cmo.altMessage === 'string') {\n\t\tcm = cmo.altMessage;\n\t\tthis.versionRegExp = cmo.altMatcher || cmo.matcher;\n\t\trv = cm.match(this.versionRegExp);\n\t}\n\tif (!rv) {\n\n\t\trv = [];\n\t}\n\tvar self = this;\n\tvar vt = 0, si = -1;\n\tthis.gitCliSubstitute = gitCliSubstitute;\n\tthis.pkgPath = pkgPath;\n\tthis.pkgPathBower = pkgPathBower;\n\tthis.pkgPropSync = pkgPropSync;\n\tthis.hash = ch;\n\tthis.buildDir = buildDir;\n\tthis.branch = branch;\n\tthis.slug = slug;\n\tthis.username = username;\n\tthis.reponame = reponame;\n\tthis.gitToken = gitToken || '';\n\tthis.npmToken = npmToken || '';\n\tthis.releaseId = null;\n\tthis.releaseAssets = [];\n\tthis.skipTasks = [];\n\tthis.skipTaskGen = function() {\n\t\treturn committer.skipTaskGen(false, Array.prototype.slice.call(arguments, 0));\n\t};\n\tthis.skipTaskCheck = function(task) {\n\t\treturn self.skipTasks && self.skipTasks.indexOf(task) >= 0;\n\t};\n\tif (cm) {\n\t\t// extract skip tasks in format: [skip someTask]\n\t\tcm.replace(regexSkips, function(m, t) {\n\t\t\tself.skipTasks.push(t);\n\t\t});\n\t}\n\tthis.hasGitToken = typeof gitToken === 'function' ? gitToken().length > 0 : typeof gitToken === 'string'\n\t\t\t&& gitToken.length > 0;\n\tthis.hasNpmToken = typeof npmToken === 'function' ? npmToken().length > 0 : typeof npmToken === 'string'\n\t\t\t&& npmToken.length > 0;\n\tthis.message = cm;\n\tthis.versionMatch = rv;\n\tthis.versionBumpedIndices = [];\n\tthis.versionPrevIndices = [];\n\tthis.versionVacant = function() {\n\t\treturn isNaN(this.versionMajor) && isNaN(this.versionMinor) && isNaN(this.versionPatch)\n\t\t\t\t&& !this.versionPrerelease;\n\t};\n\tthis.versionLabel = rv.length > 1 ? rv[1] : '';\n\tthis.versionLabelSep = rv.length > 2 ? rv[2] : '';\n\tthis.versionType = rv.length > 3 ? rv[3] : '';\n\tthis.prev = pver instanceof Commit ? pver : typeof pver === 'string' ? new Commit(relLbl, relType, relRx, bumpRx,\n\t\t\t(self.versionLabel || relLbl) + (self.versionLabelSep || ' ') + (self.versionType || relType) + pver, null,\n\t\t\tself) : {\n\t\tversion : '0.0.0',\n\t\tversionMatch : []\n\t};\n\tthis.versionPrereleaseChar = rv.length > 10 ? rv[10] : '';\n\tthis.versionMajor = rv.length > 5 ? verMatchVal(5) : 0;\n\tthis.versionMinor = rv.length > 7 ? verMatchVal(7) : 0;\n\tthis.versionPatch = rv.length > 9 ? verMatchVal(9) : 0;\n\tvar versionSuffix = verSuffix(11);\n\tthis.versionPrerelease = versionSuffix.prerelease;\n\tthis.versionMetadata = versionSuffix.metadata;\n\tthis.version = this.versionPrevIndices.length || this.versionBumpedIndices.length ? vver() : rv.length > 4 ? rv[4]\n\t\t\t: '';\n\tthis.versionTag = rv.length > 4 ? 
rv[3] + this.version : '';\n\tthis.versionTrigger = vmtchs(1, 11, [ 4 ]);\n\tthis.versionPkg = function(opts, cb) {\n\t\tvar pth = opts.altPkgPath || self.pkgPath || pkgPath;\n\t\tvar pthb = opts.altPkgPathBower || self.pkgPathBower || pkgPathBower;\n\t\tvar pkgp = self.pkgPropSync || pkgPropSync;\n\t\t// update package (if needed)\n\t\tvar pd = pkgUpdate(pth);\n\t\t// update bower package when any of the sync properties do not match\n\t\t// what's in the master package\n\t\tvar pdb = pkgUpdate(pthb, pd, pkgp);\n\t\tif (typeof cb === 'function') {\n\t\t\tcb(pd, pdb);\n\t\t}\n\t\treturn {\n\t\t\tpkg : pd,\n\t\t\tpkgBower : pdb\n\t\t};\n\t\t// updates a package for a given path, parent package data (from\n\t\t// previous call to same function)\n\t\t// an optional array of properties to match can be passed that will be\n\t\t// matched against the parent package\n\t\tfunction pkgUpdate(pth, prt, props) {\n\t\t\tvar rtn = {\n\t\t\t\tpath : pth || '',\n\t\t\t\tprops : Array.isArray(props) ? props : null,\n\t\t\t\toldVer : '',\n\t\t\t\tversion : '',\n\t\t\t\tpropChangeCount : 0\n\t\t\t};\n\t\t\tif (!rtn.path) {\n\t\t\t\treturn rtn;\n\t\t\t}\n\t\t\trtn.pkg = rbot.file.readJSON(rtn.path);\n\t\t\trtn.pkgParent = prt;\n\t\t\trtn.oldVer = rtn.pkg.version;\n\t\t\trtn.u = !opts.revert && !opts.next && pkgPropUpd(rtn, true, false, false);\n\t\t\trtn.n = !opts.revert && opts.next && pkgPropUpd(rtn, false, true, false);\n\t\t\trtn.r = opts.revert && pkgPropUpd(rtn, false, false, true);\n\t\t\tif (rtn.u || rtn.n || rtn.r) {\n\t\t\t\tif (rtn.propChangeCount > 0) {\n\t\t\t\t\trtn.pkgStr = JSON.stringify(rtn.pkg, opts.replacer, opts.space);\n\t\t\t\t\tif (!opts.readOnly) {\n\t\t\t\t\t\trbot.file.write(rtn.path, typeof opts.altWrite === 'function' ? opts.altWrite(rtn,\n\t\t\t\t\t\t\t\topts.replacer, opts.space) : rtn.pkgStr);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn rtn;\n\t\t}\n\t\t// updates the package version or a set of properties from a parent\n\t\t// package for a given package data element and flag\n\t\tfunction pkgPropUpd(pd, u, n, r) {\n\t\t\tvar v = null;\n\t\t\tif (u && pd.oldVer !== self.version && self.version) {\n\t\t\t\tv = self.version;\n\t\t\t} else if (n && pd.oldVer !== self.next.version && self.next.version) {\n\t\t\t\tv = self.next.version;\n\t\t\t} else if (r && self.prev.version) {\n\t\t\t\tv = self.prev.version;\n\t\t\t}\n\t\t\tpd.version = v;\n\t\t\tif (v && !pd.props) {\n\t\t\t\tpd.pkg.version = v;\n\t\t\t\tpd.propChangeCount++;\n\t\t\t}\n\t\t\tif (pd.props && pd.pkgParent) {\n\t\t\t\tpd.props.forEach(function pkgProp(p) {\n\t\t\t\t\tif (pd.pkgParent[p] && (!pd.pkg[p] || pd.pkgParent[p] !== pd.pkg[p])) {\n\t\t\t\t\t\t// sync parent package property with the current one\n\t\t\t\t\t\tpd.pkg[p] = pd.pkgParent[p];\n\t\t\t\t\t\tpd.propChangeCount++;\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t}\n\t\t\treturn v;\n\t\t}\n\t};\n\tthis.versionValidate = function() {\n\t\tif (!validate(self.version)) {\n\t\t\treturn false;\n\t\t} else if (self.prev.version && semver.gte(self.prev.version, self.version)) {\n\t\t\tthrow new Error(self.version + ' must be higher than the previous release version ' + self.prev.version);\n\t\t} else if (self.next.version && semver.lte(self.next.version, self.version)) {\n\t\t\tthrow new Error(self.version + ' must be lower than the next release version ' + self.next.version);\n\t\t}\n\t\treturn true;\n\t};\n\tthis.next = nver === true && this.version ? 
new Commit(relLbl, relType, relRx, bumpRx, {\n\t\tmatcher : bumpRx,\n\t\tmessage : cm,\n\t\taltMatcher : relRx,\n\t\taltMessage : (self.versionLabel || relLbl) + (self.versionLabelSep || ' ') + vver(true, true)\n\t}, self) : nver instanceof Commit ? nver : {\n\t\tversion : ''\n\t};\n\tfunction validate(v, q) {\n\t\tif (!v) {\n\t\t\tif (!q) {\n\t\t\t\trbot.log.verbose('Non-release commit ' + (v || ''));\n\t\t\t}\n\t\t\treturn false;\n\t\t} else if (!self.hasGitToken) {\n\t\t\tthrow new Error('No Git token found, version: ' + v);\n\t\t} else if (!semver.valid(v)) {\n\t\t\tthrow new Error('Invalid release version: ' + v);\n\t\t}\n\t\treturn true;\n\t}\n\t// parse out bump/current version characters from version slot\n\tfunction verMatchVal(i) {\n\t\tvar v = self.versionMatch[i];\n\t\tvar vr = 0;\n\t\tvar vl = self.prev.versionMatch && self.prev.versionMatch.length > i && self.prev.versionMatch[i] ? +self.prev.versionMatch[i]\n\t\t\t\t: vr;\n\t\tsi++;\n\t\t// reset the last index so the tests will be accurate\n\t\tregexVerBump.lastIndex = 0;\n\t\tregexVerCurr.lastIndex = 0;\n\t\tif (v && regexVerBump.test(v)) {\n\t\t\t// increment the value for the given slot\n\t\t\tself.versionBumpedIndices.push(si);\n\t\t\tvar bcnt = 0;\n\t\t\tv.replace(regexVerBump, function vBump(m) {\n\t\t\t\tbcnt += m.length;\n\t\t\t});\n\t\t\tvr = vl + bcnt;\n\t\t} else if (v && regexVerCurr.test(v)) {\n\t\t\t// use the last release value for the given slot\n\t\t\tself.versionPrevIndices.push(si);\n\t\t\tvr = vl;\n\t\t} else if (v) {\n\t\t\t// faster parseInt using unary operator\n\t\t\tvr = +v;\n\t\t}\n\t\tvt += vr;\n\t\treturn vr;\n\t}\n\t// parse out bump/current version characters from prerelease/metadata\n\tfunction verSuffix(i) {\n\t\tvar rtn = {\n\t\t\tprerelease : '',\n\t\t\tmetadata : '',\n\t\t\tprereleaseVersions : null\n\t\t};\n\t\tif (self.versionMatch.length <= i) {\n\t\t\treturn rtn;\n\t\t}\n\t\tvar v = self.versionMatch[i];\n\t\t// reset the last index so the tests will be accurate\n\t\tregexVerCurrBumpNum.lastIndex = 0;\n\t\t// replace place holders with current or bumped version\n\t\tvar vi = -1;\n\t\tvar pvers = self.prev && self.prev.versionPrerelease && self.prev.versionPrerelease.match(regexVerNum);\n\t\tvar mdi = v.lastIndexOf(charVerMeta);\n\t\tvar lsti = v.length - 1;\n\t\tv = v.replace(regexVerCurrBumpNum, function verCurrRepl(m, cg, bg, ng, off) {\n\t\t\tvi++;\n\t\t\tif (ng) {\n\t\t\t\treturn m;\n\t\t\t}\n\t\t\tif (cg) {\n\t\t\t\t// match previous rerelease version slots with the current place\n\t\t\t\t// holder slot (if any)\n\t\t\t\tself.versionPrevIndices.push(++si);\n\t\t\t\treturn pvers && pvers.length > vi ? pvers[vi] : 0;\n\t\t\t} else if (bg) {\n\t\t\t\t// only increment when the bump is not the last instance or is\n\t\t\t\t// the last instance, but there is metadata following its\n\t\t\t\t// occurrence\n\t\t\t\tif (mdi === lsti || mdi !== off || (off + m.length - 1) === lsti) {\n\t\t\t\t\tself.versionBumpedIndices.push(++si);\n\t\t\t\t\treturn (pvers && pvers.length > vi ? +pvers[vi] : 0) + m.length;\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn m;\n\t\t});\n\t\t// separate metadata from the prerelease\n\t\tmdi = v.indexOf(charVerMeta);\n\t\tif (mdi >= 0) {\n\t\t\trtn.prerelease = mdi === 0 ? '' : v.substring(0, mdi);\n\t\t\trtn.metadata = v.substring(mdi);\n\t\t} else {\n\t\t\trtn.prerelease = v;\n\t\t}\n\t\treturn rtn;\n\t}\n\t// reconstruct version w/optional incrementation\n\tfunction vver(pt, inc) {\n\t\treturn (pt ? 
vv(3) : '') + vv(5, self.versionMajor) + vv(6) + vv(7, self.versionMinor) + vv(8)\n\t\t\t\t+ vv(9, self.versionPatch, inc && !self.versionPrereleaseChar) + vv(10)\n\t\t\t\t+ vv(11, self.versionPrerelease, inc && self.versionPrereleaseChar) + vv(12, self.versionMetadata);\n\t}\n\t// gets a version slot based upon a match index or passed value w/optional\n\t// incrementation\n\tfunction vv(i, v, inc) {\n\t\tvar vn = !isNaN(v);\n\t\tvar nv = vn ? v : v || self.versionMatch[i] || '';\n\t\tif (inc && vn) {\n\t\t\treturn +nv + 1;\n\t\t} else if (inc) {\n\t\t\t// increment the last numeric value sequence\n\t\t\treturn nv.replace(regexVerLast, function vvInc(m) {\n\t\t\t\treturn +m + 1;\n\t\t\t});\n\t\t}\n\t\treturn nv;\n\t}\n\t// reconstructs the version matches\n\tfunction vmtchs(start, end, skips) {\n\t\tvar s = '';\n\t\tfor (var i = start; i <= end; i++) {\n\t\t\tif (skips && skips.indexOf(i) >= 0) {\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\ts += self.versionMatch[i] || '';\n\t\t}\n\t\treturn s;\n\t}\n}", "private Map<String, Collection<String>> buildCommitGraph(List<Commit> commits) {\r\n\t\t// multimap api doesn't quite fit what we want to do here\r\n\t\tMap<String, Collection<String>> rt = new HashMap<>();\r\n\t\t\r\n\t\t// TODO build graph for svn\r\n\t\t\r\n\t\tfor (Commit commit : commits) {\r\n\t\t\tString revisionNumber = commit.getScmRevisionNumber();\r\n\t\t\tboolean alreadyExists = false;\r\n\t\t\t\r\n\t\t\tList<String> parentRevisionNumbers = commit.getScmParentRevisionNumbers();\r\n\t\t\tif (parentRevisionNumbers == null) {\r\n\t\t\t\talreadyExists = rt.put(revisionNumber, new ArrayList<>()) != null;\r\n\t\t\t} else {\r\n\t\t\t\talreadyExists = rt.put(revisionNumber, parentRevisionNumbers) != null;\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\tif (alreadyExists) {\r\n\t\t\t\tlogger.warn(\"Error encountered building pipeline: multiple commits exist for revision number \" + revisionNumber);\r\n\t\t\t}\r\n\t\t}\r\n\t\t\r\n\t\treturn rt;\r\n\t}", "public NfsCommitResponse commit(long offsetToCommit, int dataSizeToCommit) throws IOException {\n return getNfs().wrapped_sendCommit(makeCommitRequest(offsetToCommit, dataSizeToCommit));\n }", "private Long performCommit(JPACommit commit) throws EDBException {\n synchronized (entityManager) {\n long timestamp = System.currentTimeMillis();\n try {\n beginTransaction();\n persistCommitChanges(commit, timestamp);\n commitTransaction();\n } catch (Exception ex) {\n try {\n rollbackTransaction();\n } catch (Exception e) {\n throw new EDBException(\"Failed to rollback transaction to EDB\", e);\n }\n throw new EDBException(\"Failed to commit transaction to EDB\", ex);\n }\n return timestamp;\n }\n }" ]
[ 0.7161982655525208, 0.7137223482131958, 0.7107377648353577, 0.7037357687950134, 0.7004218101501465, 0.6989426612854004, 0.698544979095459, 0.6984926462173462, 0.6982054710388184, 0.6975885033607483, 0.6955729722976685, 0.6953396797180176 ]
Utility function made to reproduce range() with unit integer step but with the added possibility of specifying a condition on the looping variable (e.g. var % 2 == 0)
def __experimental_range(start, stop, var, cond, loc={}):
    '''Utility function made to reproduce range() with unit integer step
    but with the added possibility of specifying a condition on the looping
    variable (e.g. var % 2 == 0)
    '''
    locals().update(loc)
    if start < stop:
        for __ in range(start, stop):
            locals()[var] = __
            if eval(cond, globals(), locals()):
                yield __
    else:
        for __ in range(start, stop, -1):
            locals()[var] = __
            if eval(cond, globals(), locals()):
                yield __
[ "function(n, num, step, everyFn) {\n return rangeEvery(new Range(n, num), step, false, everyFn);\n }", "def range(start, finish, step):\n \"\"\"Like built-in :func:`~builtins.range`, but with float support\"\"\"\n value = start\n while value <= finish:\n yield value\n value += step", "function createLoopWithCount(count, steps, opts) {\n return function aLoop(context, callback) {\n\n let count2 = count;\n if (typeof count === 'string') {\n count2 = template(count, context);\n }\n\n let from = parseLoopCount(count2).from;\n let to = parseLoopCount(count2).to;\n\n let i = from;\n let newContext = context;\n let loopIndexVar = (opts && opts.loopValue) || '$loopCount';\n let loopElementVar = (opts && opts.loopElement) || '$loopElement';\n // Should we stop early because the value of \"over\" is not an array\n let abortEarly = false;\n\n let overValues = null;\n let loopValue = i; // default to the current iteration of the loop, ie same as $loopCount\n if (typeof opts.overValues !== 'undefined') {\n if (opts.overValues && typeof opts.overValues === 'object') {\n overValues = opts.overValues;\n loopValue = overValues[i];\n } else if (opts.overValues && typeof opts.overValues === 'string') {\n overValues = context.vars[opts.overValues];\n if (L.isArray(overValues)) {\n loopValue = overValues[i];\n } else {\n abortEarly = true;\n }\n }\n }\n\n newContext.vars[loopElementVar] = loopValue;\n newContext.vars[loopIndexVar] = i;\n\n let shouldContinue = true;\n\n A.whilst(\n function test() {\n if (abortEarly) {\n return false;\n }\n if (opts.whileTrue) {\n return shouldContinue;\n }\n if (overValues !== null) {\n return i !== overValues.length;\n } else {\n return i < to || to === -1;\n }\n },\n function repeated(cb) {\n let zero = function(cb2) {\n return cb2(null, newContext);\n };\n let steps2 = L.flatten([zero, steps]);\n\n A.waterfall(steps2, function(err, context2) {\n if (err) {\n return cb(err, context2);\n }\n i++;\n newContext = context2;\n\n newContext.vars[loopIndexVar]++;\n if (overValues !== null) {\n newContext.vars[loopElementVar] = overValues[i];\n }\n\n if (opts.whileTrue) {\n opts.whileTrue(context2, function done(b) {\n shouldContinue = b;\n return cb(err, context2);\n });\n } else {\n return cb(err, context2);\n }\n });\n },\n function(err, finalContext) {\n if (typeof finalContext === 'undefined') {\n // this happens if test() returns false immediately, e.g. 
with\n // nested loops where one of the inner loops goes over an\n // empty array\n return callback(err, newContext);\n }\n return callback(err, finalContext);\n });\n };\n}", "function range(start, stop, step){\n utils.assertType(start, 'unit', 'start');\n utils.assertType(stop, 'unit', 'stop');\n if (step) {\n utils.assertType(step, 'unit', 'step');\n if (0 == step.val) {\n throw new Error('ArgumentError: \"step\" argument must not be zero');\n }\n } else {\n step = new nodes.Unit(1);\n }\n var list = new nodes.Expression;\n for (var i = start.val; i <= stop.val; i += step.val) {\n list.push(new nodes.Unit(i, start.type));\n }\n return list;\n}", "def drange(start, stop, step=1.0, include_stop=False):\n \"\"\"\n Generate between 2 numbers w/ optional step, optionally include upper bound\n \"\"\"\n if step == 0:\n step = 0.01\n r = start\n\n if include_stop:\n while r <= stop:\n yield r\n r += step\n r = round(r, 10)\n else:\n while r < stop:\n yield r\n r += step\n r = round(r, 10)", "private CompiledForeachRangeArgs calculateRangeArgs(ForNode forNode, Scope scope) {\n RangeArgs rangeArgs = RangeArgs.createFromNode(forNode).get();\n ForNonemptyNode nonEmptyNode = (ForNonemptyNode) forNode.getChild(0);\n ImmutableList.Builder<Statement> initStatements = ImmutableList.builder();\n Expression startExpression =\n computeRangeValue(\n SyntheticVarName.foreachLoopRangeStart(nonEmptyNode),\n rangeArgs.start(),\n 0,\n scope,\n initStatements);\n Expression stepExpression =\n computeRangeValue(\n SyntheticVarName.foreachLoopRangeStep(nonEmptyNode),\n rangeArgs.increment(),\n 1,\n scope,\n initStatements);\n Expression endExpression =\n computeRangeValue(\n SyntheticVarName.foreachLoopRangeEnd(nonEmptyNode),\n Optional.of(rangeArgs.limit()),\n Integer.MAX_VALUE,\n scope,\n initStatements);\n\n return new AutoValue_SoyNodeCompiler_CompiledForeachRangeArgs(\n startExpression, endExpression, stepExpression, initStatements.build());\n }", "function range(start, stop, step) {\n\n if (stop === undefined) return (0, _range2._range)(0, start, 1);\n\n if (step === undefined) return (0, _range2._range)(start, stop, 1);\n\n return (0, _range2._range)(start, stop, step);\n}", "public SDVariable range(String name, double from, double to, double step, DataType dataType) {\n SDVariable ret = f().range(from, to, step, dataType);\n return updateVariableNameAndReference(ret, name);\n }", "function RangeIterator(start, end, step) {\n this.start = start;\n this.end = (end !== undefined) ? end : Math.pow(2,32); // or should this be Number.MAX_VALUE\n this.step = (step !== undefined)\n ? step\n : (this.end >= this.start)\n ? 
1\n : -1;\n this.reset();\n}", "function calculateStep(min, max, tickCount, allowDecimals, correctionFactor = 0) {\n // dirty hack (for recharts' test)\n if (!Number.isFinite((max - min) / (tickCount - 1))) {\n return {\n step: new Decimal(0),\n tickMin: new Decimal(0),\n tickMax: new Decimal(0),\n };\n }\n\n // The step which is easy to understand between two ticks\n const step = getFormatStep(\n new Decimal(max).sub(min).div(tickCount - 1),\n allowDecimals,\n correctionFactor,\n );\n\n // A medial value of ticks\n let middle;\n\n // When 0 is inside the interval, 0 should be a tick\n if (min <= 0 && max >= 0) {\n middle = new Decimal(0);\n } else {\n // calculate the middle value\n middle = new Decimal(min).add(max).div(2);\n // minus modulo value\n middle = middle.sub(new Decimal(middle).mod(step));\n }\n\n let belowCount = Math.ceil(middle.sub(min).div(step).toNumber());\n let upCount = Math.ceil(new Decimal(max).sub(middle).div(step)\n .toNumber());\n const scaleCount = belowCount + upCount + 1;\n\n if (scaleCount > tickCount) {\n // When more ticks need to cover the interval, step should be bigger.\n return calculateStep(min, max, tickCount, allowDecimals, correctionFactor + 1);\n } if (scaleCount < tickCount) {\n // When less ticks can cover the interval, we should add some additional ticks\n upCount = max > 0 ? upCount + (tickCount - scaleCount) : upCount;\n belowCount = max > 0 ? belowCount : belowCount + (tickCount - scaleCount);\n }\n\n return {\n step,\n tickMin: middle.sub(new Decimal(belowCount).mul(step)),\n tickMax: middle.add(new Decimal(upCount).mul(step)),\n };\n}", "function range (start, limit, step) {\n start = _forceToNumber(start);\n limit = _forceToNumber(limit);\n step = arguments.length === 3 ? _forceToNumber(step) : 1;\n\n if (step === 0) {\n return limit === start ? [] : [start];\n }\n\n var len = Math.max(Math.ceil((limit - start) / step), 0);\n var result = Array(len);\n\n for (var i = 0, last = start; i < len; i++) {\n result[i] = last;\n last += step;\n }\n\n return result;\n}", "function range(start, stop, step) {\n if (start != null && typeof start != 'number') {\n throw new Error('start must be a number or null');\n }\n if (stop != null && typeof stop != 'number') {\n throw new Error('stop must be a number or null');\n }\n if (step != null && typeof step != 'number') {\n throw new Error('step must be a number or null');\n }\n if (stop == null) {\n stop = start || 0;\n start = 0;\n }\n if (step == null) {\n step = stop > start ? 1 : -1;\n }\n var toReturn = [];\n var increasing = start < stop; //← here’s the change\n for (; increasing ? start < stop : start > stop; start += step) {\n toReturn.push(start);\n }\n return toReturn;\n}" ]
[ 0.7309202551841736, 0.7157431244850159, 0.7096465229988098, 0.6999817490577698, 0.6988758444786072, 0.6909863352775574, 0.6883873343467712, 0.6878114342689514, 0.6876500248908997, 0.6873431205749512, 0.6870449781417847, 0.6841531991958618 ]
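The positive example above rebuilds range() around a condition string that is re-evaluated with eval() on every iteration. The same filtering can be written without eval() by passing a callable predicate; this is only an illustrative sketch, and the helper name conditional_range and the sample predicate are not part of the dataset:

    def conditional_range(start, stop, cond):
        # step is +1 or -1 depending on direction, mirroring __experimental_range
        step = 1 if start < stop else -1
        for value in range(start, stop, step):
            if cond(value):
                yield value

    # keeps only even numbers, like the docstring's example condition "var % 2 == 0"
    evens = list(conditional_range(0, 10, lambda v: v % 2 == 0))  # [0, 2, 4, 6, 8]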
Create a new "for loop" line as a replacement for the original code.
def create_for(line, search_result): '''Create a new "for loop" line as a replacement for the original code. ''' try: return line.format(search_result.group("indented_for"), search_result.group("var"), search_result.group("start"), search_result.group("stop"), search_result.group("cond")) except IndexError: return line.format(search_result.group("indented_for"), search_result.group("var"), search_result.group("start"), search_result.group("stop"))
[ "private void forLoop(Env env, Scope scope, Writer writer) {\r\n\t\tCtrl ctrl = scope.getCtrl();\r\n\t\tObject outer = scope.get(\"for\");\r\n\t\tForLoopStatus forLoopStatus = new ForLoopStatus(outer);\r\n\t\tscope.setLocal(\"for\", forLoopStatus);\r\n\t\t\r\n\t\tExpr init = forCtrl.getInit();\r\n\t\tExpr cond = forCtrl.getCond();\r\n\t\tExpr update = forCtrl.getUpdate();\r\n\t\t\r\n\t\tctrl.setLocalAssignment();\r\n\t\tfor (init.eval(scope); cond == null || Logic.isTrue(cond.eval(scope)); update.eval(scope)) {\r\n\t\t\tctrl.setWisdomAssignment();\r\n\t\t\tstat.exec(env, scope, writer);\r\n\t\t\tctrl.setLocalAssignment();\r\n\t\t\tforLoopStatus.nextState();\r\n\t\t\t\r\n\t\t\tif (ctrl.isJump()) {\r\n\t\t\t\tif (ctrl.isBreak()) {\r\n\t\t\t\t\tctrl.setJumpNone();\r\n\t\t\t\t\tbreak ;\r\n\t\t\t\t} else if (ctrl.isContinue()) {\r\n\t\t\t\t\tctrl.setJumpNone();\r\n\t\t\t\t\tcontinue ;\r\n\t\t\t\t} else {\r\n\t\t\t\t\tctrl.setWisdomAssignment();\r\n\t\t\t\t\treturn ;\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t\t\r\n\t\tctrl.setWisdomAssignment();\r\n\t\tif (_else != null && forLoopStatus.getIndex() == 0) {\r\n\t\t\t_else.exec(env, scope, writer);\r\n\t\t}\r\n\t}", "def loop(self):\n \"\"\"\n loop : 'for' init; ctrl; inc block\n \"\"\"\n self.eat(TokenTypes.FOR_LOOP)\n init = NoOp()\n if self.cur_token.type != TokenTypes.SEMI_COLON:\n init = self.assign_statement()\n else:\n self.eat(TokenTypes.SEMI_COLON)\n\n ctrl = NoOp()\n if self.cur_token.type != TokenTypes.SEMI_COLON:\n ctrl = self.expression()\n self.eat(TokenTypes.SEMI_COLON)\n\n inc = NoOp()\n if self.cur_token.type != TokenTypes.LBRACE:\n inc = self.assign_statement()\n\n block = self.block()\n return ForLoop(init, ctrl, inc, block)", "private Statement handleForeachLoop(\n ForNonemptyNode node,\n Expression limit,\n Function<Expression, Expression> getDataItemFunction) {\n // Build some local variable names.\n String varName = node.getVarName();\n String varPrefix = varName + node.getForNodeId();\n\n // TODO(b/32224284): A more consistent pattern for local variable management.\n String loopIndexName = varPrefix + \"Index\";\n String dataName = varPrefix + \"Data\";\n\n Expression loopIndex = id(loopIndexName);\n VariableDeclaration data =\n VariableDeclaration.builder(dataName).setRhs(getDataItemFunction.apply(loopIndex)).build();\n\n // Populate the local var translations with the translations from this node.\n templateTranslationContext\n .soyToJsVariableMappings()\n .put(varName, id(dataName))\n .put(varName + \"__isFirst\", loopIndex.doubleEquals(number(0)))\n .put(varName + \"__isLast\", loopIndex.doubleEquals(limit.minus(number(1))))\n .put(varName + \"__index\", loopIndex);\n\n // Generate the loop body.\n Statement foreachBody = Statement.of(data, visitChildrenReturningCodeChunk(node));\n\n // Create the entire for block.\n return forLoop(loopIndexName, limit, foreachBody);\n }", "def mangle_mako_loop(node, printer):\n \"\"\"converts a for loop into a context manager wrapped around a for loop\n when access to the `loop` variable has been detected in the for loop body\n \"\"\"\n loop_variable = LoopVariable()\n node.accept_visitor(loop_variable)\n if loop_variable.detected:\n node.nodes[-1].has_loop_context = True\n match = _FOR_LOOP.match(node.text)\n if match:\n printer.writelines(\n 'loop = __M_loop._enter(%s)' % match.group(2),\n 'try:'\n #'with __M_loop(%s) as loop:' % match.group(2)\n )\n text = 'for %s in loop:' % match.group(1)\n else:\n raise SyntaxError(\"Couldn't apply loop context: %s\" % node.text)\n else:\n text = 
node.text\n return text", "protected XExpression _generate(XForLoopExpression forLoop, IAppendable it, IExtraLanguageGeneratorContext context) {\n\t\tit.append(\"for \"); //$NON-NLS-1$\n\t\tfinal String varName = it.declareUniqueNameVariable(forLoop.getDeclaredParam(), forLoop.getDeclaredParam().getSimpleName());\n\t\tit.append(varName);\n\t\tit.append(\" in \"); //$NON-NLS-1$\n\t\tgenerate(forLoop.getForExpression(), it, context);\n\t\tit.append(\":\"); //$NON-NLS-1$\n\t\tit.increaseIndentation().newLine();\n\t\tfinal XExpression last = generate(forLoop.getEachExpression(), it, context);\n\t\tit.decreaseIndentation();\n\t\treturn last;\n\t}", "def parse_for(self):\n \"\"\"Parse a for loop.\"\"\"\n lineno = self.stream.expect('name:for').lineno\n target = self.parse_assign_target(extra_end_rules=('name:in',))\n self.stream.expect('name:in')\n iter = self.parse_tuple(with_condexpr=False,\n extra_end_rules=('name:recursive',))\n test = None\n if self.stream.skip_if('name:if'):\n test = self.parse_expression()\n recursive = self.stream.skip_if('name:recursive')\n body = self.parse_statements(('name:endfor', 'name:else'))\n if next(self.stream).value == 'endfor':\n else_ = []\n else:\n else_ = self.parse_statements(('name:endfor',), drop_needle=True)\n return nodes.For(target, iter, body, else_, test,\n recursive, lineno=lineno)", "def make_for_loop(loop_body_instrs, else_body_instrs, context):\n \"\"\"\n Make an ast.For node.\n \"\"\"\n # Instructions from start until GET_ITER are the builders for the iterator\n # expression.\n iterator_expr = make_expr(\n popwhile(not_a(instrs.GET_ITER), loop_body_instrs, side='left')\n )\n\n # Next is the GET_ITER instruction, which we don't need.\n loop_body_instrs.popleft()\n\n # Next is FOR_ITER, which is the jump target for Continue nodes.\n top_of_loop = loop_body_instrs.popleft()\n\n # This can be a STORE_* or an UNPACK_SEQUENCE followed by some number of\n # stores.\n target = make_assign_target(\n loop_body_instrs.popleft(),\n loop_body_instrs,\n stack=[],\n )\n\n body, orelse_body = make_loop_body_and_orelse(\n top_of_loop, loop_body_instrs, else_body_instrs, context\n )\n\n return ast.For(\n target=target,\n iter=iterator_expr,\n body=body,\n orelse=orelse_body,\n )", "def _compile(cls, lines):\n '''Return both variable names used in the #for loop in the\n current line.'''\n m = cls.RE_FOR.match(lines.current)\n if m is None:\n raise DefineBlockError(\n 'Incorrect block definition at line {}, {}\\nShould be '\n 'something like: #for @item in @items:'\n .format(lines.pos, lines.current))\n return m.group(1), m.group(2).replace('.', '-')", "def _at_for(self, calculator, rule, scope, block):\n \"\"\"\n Implements @for\n \"\"\"\n var, _, name = block.argument.partition(' from ')\n frm, _, through = name.partition(' through ')\n if through:\n inclusive = True\n else:\n inclusive = False\n frm, _, through = frm.partition(' to ')\n frm = calculator.calculate(frm)\n through = calculator.calculate(through)\n try:\n frm = int(float(frm))\n through = int(float(through))\n except ValueError:\n return\n\n if frm > through:\n # DEVIATION: allow reversed '@for .. from .. 
through' (same as enumerate() and range())\n frm, through = through, frm\n rev = reversed\n else:\n rev = lambda x: x\n var = var.strip()\n var = calculator.do_glob_math(var)\n var = normalize_var(var)\n\n inner_rule = rule.copy()\n inner_rule.unparsed_contents = block.unparsed_contents\n if not self.should_scope_loop_in_rule(inner_rule):\n # DEVIATION: Allow not creating a new namespace\n inner_rule.namespace = rule.namespace\n\n if inclusive:\n through += 1\n for i in rev(range(frm, through)):\n inner_rule.namespace.set_variable(var, Number(i))\n self.manage_children(inner_rule, scope)", "protected XExpression _generate(XBasicForLoopExpression forLoop, IAppendable it, IExtraLanguageGeneratorContext context) {\n\t\tfor (final XExpression expr : forLoop.getInitExpressions()) {\n\t\t\tgenerate(expr, it, context);\n\t\t\tit.newLine();\n\t\t}\n\t\tit.append(\"while \"); //$NON-NLS-1$\n\t\tgenerate(forLoop.getExpression(), it, context);\n\t\tit.append(\":\"); //$NON-NLS-1$\n\t\tit.increaseIndentation().newLine();\n\t\tfinal XExpression last = generate(forLoop.getEachExpression(), it, context);\n\t\tfor (final XExpression expr : forLoop.getUpdateExpressions()) {\n\t\t\tit.newLine();\n\t\t\tgenerate(expr, it, context);\n\t\t}\n\t\tit.decreaseIndentation();\n\t\treturn last;\n\t}", "def do_for(parser, token):\n '''\n {% for a, b, c in iterable %}\n\n {% endfor %}\n\n We create the structure:\n\n with ContextWrapper(context) as context:\n for a, b, c in iterable:\n context.update(a=a, b=b, c=c)\n ...\n\n If there is a {% empty %} clause, we create:\n\n if iterable:\n { above code }\n else:\n { empty clause }\n '''\n code = ast.parse('for %s: pass' % token, mode='exec')\n\n # Grab the ast.For node\n loop = code.body[0]\n # Wrap its source iterable\n loop.iter = visitor.visit(loop.iter)\n\n # Get the body of the loop\n body, end = parser.parse_nodes_until('endfor', 'empty')\n\n # Build a list of target variable names\n if isinstance(loop.target, ast.Tuple):\n targets = [elt.id for elt in loop.target.elts]\n else:\n targets = [loop.target.id]\n\n kwargs = [\n ast.keyword(arg=elt, value=_a.Name(elt))\n for elt in targets\n ]\n\n # Insert our update call at the start of the loop body\n body.insert(0, ast.Expr(value=_a.Call(\n _a.Attribute(_a.Name('context'), 'update'),\n keywords=kwargs\n )))\n loop.body = body\n\n node = _create_with_scope([loop], [])\n\n if end == 'empty':\n # Now we wrap our for block in:\n # if loop.iter:\n # else:\n empty, _ = parser.parse_nodes_until('endfor')\n\n node = ast.If(\n test=loop.iter,\n body=[node],\n orelse=empty\n )\n\n return node", "public static Statement forLoop(String localVar, Expression limit, Statement body) {\n return For.create(localVar, Expression.number(0), limit, Expression.number(1), body);\n }" ]
[ 0.7122229933738708, 0.7033212780952454, 0.6984545588493347, 0.6954953074455261, 0.691508948802948, 0.6905860304832458, 0.6879541873931885, 0.6808813214302063, 0.6780126690864563, 0.6776462197303772, 0.6738662123680115, 0.672441840171814 ]
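create_for above only fills a line template from named regex groups; the pattern that produces those groups is not shown in this row. A hedged sketch of how such a match could be built and consumed, where the regular expression and the template string are illustrative assumptions and only the group names (indented_for, var, start, stop, cond) come from the function:

    import re

    # hypothetical pattern with the named groups create_for expects
    pattern = re.compile(
        r"(?P<indented_for>\s*for)\s+(?P<var>\w+)\s+in\s+range\("
        r"(?P<start>\w+),\s*(?P<stop>\w+)\)\s+if\s+(?P<cond>.+):"
    )
    match = pattern.match("for i in range(0, 10) if i % 2 == 0:")
    # hypothetical replacement template in the shape create_for formats
    line = "{0} {1} in __experimental_range({2}, {3}, '{1}', '{4}'):"
    if match:
        print(line.format(match.group("indented_for"), match.group("var"),
                          match.group("start"), match.group("stop"),
                          match.group("cond")))
    # -> for i in __experimental_range(0, 10, 'i', 'i % 2 == 0'):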
Function __setitem__ Set a parameter of a foreman object as a dict @param key: The key to modify @param attribute: The data @return RETURN: The API result
def setOverrideValue(self, attributes, hostName): """ Function __setitem__ Set a parameter of a foreman object as a dict @param key: The key to modify @param attribute: The data @return RETURN: The API result """ self['override'] = True attrType = type(attributes) if attrType is dict: self['parameter_type'] = 'hash' elif attrType is list: self['parameter_type'] = 'array' else: self['parameter_type'] = 'string' orv = self.getOverrideValueForHost(hostName) if orv: orv['value'] = attributes return True else: return self.api.create('{}/{}/{}'.format(self.objName, self.key, 'override_values'), {"override_value": {"match": "fqdn={}".format(hostName), "value": attributes}})
[ "def set(self, key, value, **kwargs):\n \"\"\"Create or update the object.\n\n Args:\n key (str): The key of the object to create/update\n value (str): The value to set for the object\n **kwargs: Extra options to send to the server (e.g. sudo)\n\n Raises:\n GitlabAuthenticationError: If authentication is not correct\n GitlabSetError: If an error occured\n\n Returns:\n obj: The created/updated attribute\n \"\"\"\n path = '%s/%s' % (self.path, key.replace('/', '%2F'))\n data = {'value': value}\n server_data = self.gitlab.http_put(path, post_data=data, **kwargs)\n return self._obj_cls(self, server_data)", "def _set_k8s_attribute(obj, attribute, value):\n \"\"\"\n Set a specific value on a kubernetes object's attribute\n\n obj\n an object from Kubernetes Python API client\n attribute\n Should be a Kubernetes API style attribute (with camelCase)\n value\n Can be anything (string, list, dict, k8s objects) that can be\n accepted by the k8s python client\n \"\"\"\n current_value = None\n attribute_name = None\n # All k8s python client objects have an 'attribute_map' property\n # which has as keys python style attribute names (api_client)\n # and as values the kubernetes JSON API style attribute names\n # (apiClient). We want to allow users to use the JSON API style attribute\n # names only.\n for python_attribute, json_attribute in obj.attribute_map.items():\n if json_attribute == attribute:\n attribute_name = python_attribute\n break\n else:\n raise ValueError('Attribute must be one of {}'.format(obj.attribute_map.values()))\n\n if hasattr(obj, attribute_name):\n current_value = getattr(obj, attribute_name)\n\n if current_value is not None:\n # This will ensure that current_value is something JSONable,\n # so a dict, list, or scalar\n current_value = SERIALIZATION_API_CLIENT.sanitize_for_serialization(\n current_value\n )\n\n if isinstance(current_value, dict):\n # Deep merge our dictionaries!\n setattr(obj, attribute_name, merge_dictionaries(current_value, value))\n elif isinstance(current_value, list):\n # Just append lists\n setattr(obj, attribute_name, current_value + value)\n else:\n # Replace everything else\n setattr(obj, attribute_name, value)", "def _set_property_dict_item(obj, prop, key, value):\n ''' Sets the dict item key of the attr from obj.\n\n Basicaly it does getattr(obj, prop)[key] = value.\n\n\n For the disk device we added some checks to make\n device changes on the CLI saver.\n '''\n attr = getattr(obj, prop)\n if prop == 'devices':\n device_type = value['type']\n\n if device_type == 'disk':\n\n if 'path' not in value:\n raise SaltInvocationError(\n \"path must be given as parameter\"\n )\n\n if value['path'] != '/' and 'source' not in value:\n raise SaltInvocationError(\n \"source must be given as parameter\"\n )\n\n for k in value.keys():\n if k.startswith('__'):\n del value[k]\n\n attr[key] = value\n\n else: # config\n attr[key] = six.text_type(value)\n\n pylxd_save_object(obj)\n\n return _pylxd_model_to_dict(obj)", "def attribute(self, attribute_id, action='GET', params=None):\n \"\"\"\n Gets the attribute from a Group/Indicator or Victim\n\n\n Args:\n action:\n params:\n attribute_id:\n\n Returns: attribute json\n\n \"\"\"\n if params is None:\n params = {}\n if not self.can_update():\n self._tcex.handle_error(910, [self.type])\n\n if action == 'GET':\n return self.tc_requests.get_attribute(\n self.api_type,\n self.api_sub_type,\n self.unique_id,\n attribute_id,\n owner=self.owner,\n params=params,\n )\n\n if action == 'DELETE':\n return 
self.tc_requests.delete_attribute(\n self.api_type, self.api_sub_type, self.unique_id, attribute_id, owner=self.owner\n )\n\n self._tcex.handle_error(925, ['action', 'attribute', 'action', 'action', action])\n return None", "def set(self, key, value, **kwargs):\n \"\"\"\n Set the value of a Parameter in the ParameterSet.\n\n If :func:`get` would retrieve a Parameter, this will set the\n value of that parameter.\n\n Or you can provide 'value@...' or 'default_unit@...', etc\n to specify what attribute to set.\n\n :parameter str key: the twig (called key here to be analagous\n to a normal dict)\n :parameter value: value to set\n :parameter **kwargs: other filter parameters (must result in\n returning a single :class:`Parameter`)\n :return: the value of the :class:`Parameter` after setting the\n new value (including converting units if applicable)\n \"\"\"\n twig = key\n\n method = None\n twigsplit = re.findall(r\"[\\w']+\", twig)\n if twigsplit[0] == 'value':\n twig = '@'.join(twigsplit[1:])\n method = 'set_value'\n elif twigsplit[0] == 'quantity':\n twig = '@'.join(twigsplit[1:])\n method = 'set_quantity'\n elif twigsplit[0] in ['unit', 'default_unit']:\n twig = '@'.join(twigsplit[1:])\n method = 'set_default_unit'\n elif twigsplit[0] in ['timederiv']:\n twig = '@'.join(twigsplit[1:])\n method = 'set_timederiv'\n elif twigsplit[0] in ['description']:\n raise KeyError(\"cannot set {} of {}\".format(twigsplit[0], '@'.join(twigsplit[1:])))\n\n if self._bundle is not None and self._bundle.get_setting('dict_set_all').get_value() and len(self.filter(twig=twig, **kwargs)) > 1:\n # then we need to loop through all the returned parameters and call set on them\n for param in self.filter(twig=twig, **kwargs).to_list():\n self.set('{}@{}'.format(method, param.twig) if method is not None else param.twig, value)\n else:\n\n if method is None:\n return self.set_value(twig=twig, value=value, **kwargs)\n else:\n param = self.get_parameter(twig=twig, **kwargs)\n\n return getattr(param, method)(value)", "def set(self, key, value, **kwargs):\n '''\n Store a new value at the given key\n kwargs can hold `cas` and `flags` params\n '''\n return requests.put(\n '{}/{}/kv/{}'.format(\n self.master, pyconsul.__consul_api_version__, key),\n data=value,\n params=kwargs\n )", "def set_(key, value, service=None, profile=None): # pylint: disable=W0613\n '''\n Set a key/value pair in the REST interface\n '''\n return query(key, value, service, profile)", "def attr(self, *args):\n '''Add the specific attribute to the attribute dictionary\n with key ``name`` and value ``value`` and return ``self``.'''\n attr = self._attr\n if not args:\n return attr or {}\n result, adding = self._attrdata('attr', *args)\n if adding:\n for key, value in result.items():\n if DATARE.match(key):\n self.data(key[5:], value)\n else:\n if attr is None:\n self._extra['attr'] = attr = {}\n attr[key] = value\n result = self\n return result", "def get_item(self, table_name, key, attributes_to_get=None,\n consistent_read=False, object_hook=None):\n \"\"\"\n Return a set of attributes for an item that matches\n the supplied key.\n\n :type table_name: str\n :param table_name: The name of the table containing the item.\n\n :type key: dict\n :param key: A Python version of the Key data structure\n defined by DynamoDB.\n\n :type attributes_to_get: list\n :param attributes_to_get: A list of attribute names.\n If supplied, only the specified attribute names will\n be returned. 
Otherwise, all attributes will be returned.\n\n :type consistent_read: bool\n :param consistent_read: If True, a consistent read\n request is issued. Otherwise, an eventually consistent\n request is issued.\n \"\"\"\n data = {'TableName': table_name,\n 'Key': key}\n if attributes_to_get:\n data['AttributesToGet'] = attributes_to_get\n if consistent_read:\n data['ConsistentRead'] = True\n json_input = json.dumps(data)\n response = self.make_request('GetItem', json_input,\n object_hook=object_hook)\n if not response.has_key('Item'):\n raise dynamodb_exceptions.DynamoDBKeyNotFoundError(\n \"Key does not exist.\"\n )\n return response", "def _set_attrib(name, key, value, param, root=None, validate=True):\n '''\n Set a parameter in /etc/shadow\n '''\n pre_info = info(name, root=root)\n\n # If the user is not present or the attribute is already present,\n # we return early\n if not pre_info['name']:\n return False\n\n if value == pre_info[key]:\n return True\n\n cmd = ['chage']\n\n if root is not None:\n cmd.extend(('-R', root))\n\n cmd.extend((param, value, name))\n\n ret = not __salt__['cmd.run'](cmd, python_shell=False)\n if validate:\n ret = info(name, root=root).get(key) == value\n return ret", "def _direct_set(self, key, value):\n '''\n _direct_set - INTERNAL USE ONLY!!!!\n\n Directly sets a value on the underlying dict, without running through the setitem logic\n\n '''\n dict.__setitem__(self, key, value)\n return value", "def set_attribute(self, id, value, version=1):\n \"\"\"\n Set attribute to a specific value\n\n :param id: id of the attribute\n :param value: value of the attribute\n :param version: version of the attribute (default = 1)\n \"\"\"\n attributes = self._get_attributes(cache=True)\n formatted_id = '{0}'.format(id)\n attributes['attributes_values'][formatted_id] = value\n response = self.requester.patch(\n '/{endpoint}/custom-attributes-values/{id}',\n endpoint=self.endpoint, id=self.id,\n payload={\n 'attributes_values': attributes['attributes_values'],\n 'version': version\n }\n )\n cache_key = self.requester.get_full_url(\n '/{endpoint}/custom-attributes-values/{id}',\n endpoint=self.endpoint, id=self.id\n )\n self.requester.cache.put(cache_key, response)\n return response.json()" ]
[ 0.6773245930671692, 0.6657776832580566, 0.6655950546264648, 0.6570065021514893, 0.6535754799842834, 0.6526169180870056, 0.6494777202606201, 0.646148145198822, 0.6444801092147827, 0.642432689666748, 0.6374896168708801, 0.6358796954154968 ]
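setOverrideValue above derives the Foreman parameter_type from the Python type of the attribute before creating the override. That mapping can be isolated into a small helper; a minimal sketch (the helper name is illustrative, and isinstance() is used instead of the original type() comparison):

    def parameter_type_for(value):
        # dict -> hash, list -> array, anything else -> string,
        # matching the branches in setOverrideValue
        if isinstance(value, dict):
            return "hash"
        if isinstance(value, list):
            return "array"
        return "string"

    assert parameter_type_for({"a": 1}) == "hash"
    assert parameter_type_for([1, 2]) == "array"
    assert parameter_type_for("x") == "string"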
Spits out the timedelta in days.
def get_interval_timedelta(self): """ Spits out the timedelta in days. """ now_datetime = timezone.now() current_month_days = monthrange(now_datetime.year, now_datetime.month)[1] # Two weeks if self.interval == reminders_choices.INTERVAL_2_WEEKS: interval_timedelta = datetime.timedelta(days=14) # One month elif self.interval == reminders_choices.INTERVAL_ONE_MONTH: interval_timedelta = datetime.timedelta(days=current_month_days) # Three months elif self.interval == reminders_choices.INTERVAL_THREE_MONTHS: three_months = now_datetime + relativedelta(months=+3) interval_timedelta = three_months - now_datetime # Six months elif self.interval == reminders_choices.INTERVAL_SIX_MONTHS: six_months = now_datetime + relativedelta(months=+6) interval_timedelta = six_months - now_datetime # One year elif self.interval == reminders_choices.INTERVAL_ONE_YEAR: one_year = now_datetime + relativedelta(years=+1) interval_timedelta = one_year - now_datetime return interval_timedelta
[ "def timedelta_days(days: int) -> timedelta64:\n \"\"\"\n Convert a duration in days to a NumPy ``timedelta64`` object.\n \"\"\"\n int_days = int(days)\n if int_days != days:\n raise ValueError(\"Fractional days passed to timedelta_days: \"\n \"{!r}\".format(days))\n try:\n # Do not pass e.g. 27.0; that will raise a ValueError.\n # Must be an actual int:\n return timedelta64(int_days, 'D')\n except ValueError as e:\n raise ValueError(\"Failure in timedelta_days; value was {!r}; original \"\n \"error was: {}\".format(days, e))", "def days(self,local=False):\n \"\"\" Returns the number of days of difference\n \"\"\"\n delta = self.delta(local)\n return delta.days", "def delta(self,local=False):\n \"\"\" Returns the number of days of difference\n \"\"\"\n (s,e) = self.get(local)\n return e-s", "def timedelta_seconds(timedelta):\r\n \"\"\"Returns the total timedelta duration in seconds.\"\"\"\r\n return (timedelta.total_seconds() if hasattr(timedelta, \"total_seconds\")\r\n else timedelta.days * 24 * 3600 + timedelta.seconds +\r\n timedelta.microseconds / 1000000.)", "def format_timedelta(t, timedelta_format=None):\n \"\"\"Cast given object to a Timestamp and return a nicely formatted string\"\"\"\n timedelta_str = str(pd.Timedelta(t))\n try:\n days_str, time_str = timedelta_str.split(' days ')\n except ValueError:\n # catch NaT and others that don't split nicely\n return timedelta_str\n else:\n if timedelta_format == 'date':\n return days_str + ' days'\n elif timedelta_format == 'time':\n return time_str\n else:\n return timedelta_str", "def _ndays(self, start_date, ndays):\n \"\"\"\n Compute an end date given a start date and a number of days.\n \"\"\"\n if not getattr(self.args, 'start-date') and not self.config.get('start-date', None):\n raise Exception('start-date must be provided when ndays is used.')\n\n d = date(*map(int, start_date.split('-')))\n d += timedelta(days=ndays)\n\n return d.strftime('%Y-%m-%d')", "def timedelta_to_days(td):\n \"\"\"\n Convert a `datetime.timedelta` object to a total number of days.\n\n Parameters\n ----------\n td : `datetime.timedelta` instance\n\n Returns\n -------\n days : float\n Total number of days in the `datetime.timedelta` object.\n\n Examples\n --------\n >>> td = datetime.timedelta(4.5)\n >>> td\n datetime.timedelta(4, 43200)\n >>> timedelta_to_days(td)\n 4.5\n\n \"\"\"\n seconds_in_day = 24. 
* 3600.\n\n days = td.days + (td.seconds + (td.microseconds * 10.e6)) / seconds_in_day\n\n return days", "def format_timedelta_ticks(x, pos, n_decimals):\n \"\"\"\n Convert seconds to 'D days HH:MM:SS.F'\n \"\"\"\n s, ns = divmod(x, 1e9)\n m, s = divmod(s, 60)\n h, m = divmod(m, 60)\n d, h = divmod(h, 24)\n decimals = int(ns * 10**(n_decimals - 9))\n s = r'{:02d}:{:02d}:{:02d}'.format(int(h), int(m), int(s))\n if n_decimals > 0:\n s += '.{{:0{:0d}d}}'.format(n_decimals).format(decimals)\n if d != 0:\n s = '{:d} days '.format(int(d)) + s\n return s", "def utime_delta(days=0, hours=0, minutes=0, seconds=0):\n \"\"\"Gets time delta in microseconds.\n\n Note: Do NOT use this function without keyword arguments.\n It will become much-much harder to add extra time ranges later if positional arguments are used.\n\n \"\"\"\n return (days * DAY) + (hours * HOUR) + (minutes * MINUTE) + (seconds * SECOND)", "static public String getFormattedDays(long dt)\n {\n StringBuffer ret = new StringBuffer();\n\n long days = dt/86400000L;\n long millis = dt-(days*86400000L);\n if(days > 0)\n {\n ret.append(Long.toString(days));\n ret.append(\" days \");\n }\n ret.append(getFormattedTime(millis, false));\n\n return ret.toString();\n }", "public void plusDays(long delta)\r\n {\r\n if (delta != 0)\r\n {\r\n long result = getDay() + delta;\r\n if (result >= 1 && result <= 28)\r\n {\r\n setDay((int) result);\r\n }\r\n else\r\n {\r\n ZonedDateTime zonedDateTime = ZonedDateTime.from(this);\r\n ZonedDateTime plusDays = zonedDateTime.plusDays(delta);\r\n set(plusDays);\r\n }\r\n }\r\n }", "def FromTimedelta(self, td):\n \"\"\"Convertd timedelta to Duration.\"\"\"\n self._NormalizeDuration(td.seconds + td.days * _SECONDS_PER_DAY,\n td.microseconds * _NANOS_PER_MICROSECOND)" ]
[ 0.7762075066566467, 0.7667632102966309, 0.7661012411117554, 0.7383315563201904, 0.7279061675071716, 0.7184981107711792, 0.7174055576324463, 0.7161235213279724, 0.7108481526374817, 0.7064957618713379, 0.7033786773681641, 0.7020765542984009 ]
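get_interval_timedelta above turns a reminder interval choice into a timedelta, using calendar.monthrange for the current month's length and dateutil's relativedelta for the longer spans. A standalone sketch of the same computation without the Django model, assuming python-dateutil is available (function and variable names are illustrative):

    import datetime
    from calendar import monthrange
    from dateutil.relativedelta import relativedelta

    def months_from_now_delta(months, now):
        # difference between now and the same date `months` months later,
        # mirroring how the reminder interval is derived
        return (now + relativedelta(months=+months)) - now

    now = datetime.datetime(2024, 1, 15)
    one_month = datetime.timedelta(days=monthrange(now.year, now.month)[1])
    print(one_month)                        # 31 days, 0:00:00
    print(months_from_now_delta(3, now))    # 91 days, 0:00:00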
Execute a runner without blocking the event loop
async def awaitable_runner(runner: BaseRunner): """Execute a runner without blocking the event loop""" runner_thread = CapturingThread(target=runner.run) runner_thread.start() delay = 0.0 while not runner_thread.join(timeout=0): await asyncio.sleep(delay) delay = min(delay + 0.1, 1.0)
[ "def _run_runner(self):\n '''\n Actually execute specific runner\n :return:\n '''\n import salt.minion\n ret = {}\n low = {'fun': self.opts['fun']}\n try:\n # Allocate a jid\n async_pub = self._gen_async_pub()\n self.jid = async_pub['jid']\n\n fun_args = salt.utils.args.parse_input(\n self.opts['arg'],\n no_parse=self.opts.get('no_parse', []))\n\n verify_fun(self.functions, low['fun'])\n args, kwargs = salt.minion.load_args_and_kwargs(\n self.functions[low['fun']],\n fun_args)\n low['arg'] = args\n low['kwarg'] = kwargs\n\n if self.opts.get('eauth'):\n if 'token' in self.opts:\n try:\n with salt.utils.files.fopen(os.path.join(self.opts['cachedir'], '.root_key'), 'r') as fp_:\n low['key'] = salt.utils.stringutils.to_unicode(fp_.readline())\n except IOError:\n low['token'] = self.opts['token']\n\n # If using eauth and a token hasn't already been loaded into\n # low, prompt the user to enter auth credentials\n if 'token' not in low and 'key' not in low and self.opts['eauth']:\n # This is expensive. Don't do it unless we need to.\n import salt.auth\n resolver = salt.auth.Resolver(self.opts)\n res = resolver.cli(self.opts['eauth'])\n if self.opts['mktoken'] and res:\n tok = resolver.token_cli(\n self.opts['eauth'],\n res\n )\n if tok:\n low['token'] = tok.get('token', '')\n if not res:\n log.error('Authentication failed')\n return ret\n low.update(res)\n low['eauth'] = self.opts['eauth']\n else:\n user = salt.utils.user.get_specific_user()\n\n if low['fun'] in ['state.orchestrate', 'state.orch', 'state.sls']:\n low['kwarg']['orchestration_jid'] = async_pub['jid']\n\n # Run the runner!\n if self.opts.get('async', False):\n if self.opts.get('eauth'):\n async_pub = self.cmd_async(low)\n else:\n async_pub = self.asynchronous(self.opts['fun'],\n low,\n user=user,\n pub=async_pub)\n\n # by default: info will be not enough to be printed out !\n log.warning(\n 'Running in asynchronous mode. Results of this execution may '\n 'be collected by attaching to the master event bus or '\n 'by examing the master job cache, if configured. 
'\n 'This execution is running under tag %s', async_pub['tag']\n )\n return async_pub['jid'] # return the jid\n\n # otherwise run it in the main process\n if self.opts.get('eauth'):\n ret = self.cmd_sync(low)\n if isinstance(ret, dict) and set(ret) == {'data', 'outputter'}:\n outputter = ret['outputter']\n ret = ret['data']\n else:\n outputter = None\n display_output(ret, outputter, self.opts)\n else:\n ret = self._proc_function(self.opts['fun'],\n low,\n user,\n async_pub['tag'],\n async_pub['jid'],\n daemonize=False)\n except salt.exceptions.SaltException as exc:\n evt = salt.utils.event.get_event('master', opts=self.opts)\n evt.fire_event({'success': False,\n 'return': '{0}'.format(exc),\n 'retcode': 254,\n 'fun': self.opts['fun'],\n 'fun_args': fun_args,\n 'jid': self.jid},\n tag='salt/run/{0}/ret'.format(self.jid))\n # Attempt to grab documentation\n if 'fun' in low:\n ret = self.get_docs('{0}*'.format(low['fun']))\n else:\n ret = None\n\n # If we didn't get docs returned then\n # return the `not availble` message.\n if not ret:\n ret = '{0}'.format(exc)\n if not self.opts.get('quiet', False):\n display_output(ret, 'nested', self.opts)\n else:\n # If we don't have any values in ret by now, that's a problem.\n # Otherwise, we shouldn't be overwriting the retcode.\n if not ret:\n ret = {\n 'retcode': salt.defaults.exitcodes.EX_SOFTWARE,\n }\n log.debug('Runner return: %s', ret)\n\n return ret", "def runner(self, fun, **kwargs):\n '''\n Wrap RunnerClient for executing :ref:`runner modules <all-salt.runners>`\n '''\n return self.pool.fire_async(self.client_cache['runner'].low, args=(fun, kwargs))", "def runner_async(self, fun, **kwargs):\n '''\n Run `runner modules <all-salt.runners>` asynchronously\n\n Wraps :py:meth:`salt.runner.RunnerClient.cmd_async`.\n\n Note that runner functions must be called using keyword arguments.\n Positional arguments are not supported.\n\n :return: event data and a job ID for the executed function.\n '''\n kwargs['fun'] = fun\n runner = salt.runner.RunnerClient(self.opts)\n return runner.cmd_async(kwargs)", "def _run_once(runner_list, extra_tick, use_poll):\n \"\"\"\n :return True - success; False - extra_tick failure; runner object - the runner who tick failure\n \"\"\"\n if len(runner_list) > 0:\n asyncore.loop(0, use_poll, None, 1)\n for runner in runner_list:\n if not runner.tick():\n return runner\n if extra_tick is not None:\n code = extra_tick()\n if code is False:\n return False\n return True", "def run(self):\n '''\n Execute the runner sequence\n '''\n # Print documentation only\n if self.opts.get('doc', False):\n self.print_docs()\n else:\n return self._run_runner()", "def runner(name, **kwargs):\n '''\n Execute a runner module on the master\n\n .. versionadded:: 2014.7.0\n\n name\n The name of the function to run\n\n kwargs\n Any keyword arguments to pass to the runner function\n\n asynchronous\n Run the salt command but don't wait for a reply.\n\n .. versionadded:: neon\n\n\n .. 
code-block:: yaml\n\n run-manage-up:\n salt.runner:\n - name: manage.up\n '''\n try:\n jid = __orchestration_jid__\n except NameError:\n log.debug(\n 'Unable to fire args event due to missing __orchestration_jid__'\n )\n jid = None\n\n if __opts__.get('test', False):\n ret = {\n 'name': name,\n 'result': None,\n 'changes': {},\n 'comment': \"Runner function '{0}' would be executed.\".format(name)\n }\n return ret\n\n out = __salt__['saltutil.runner'](name,\n __orchestration_jid__=jid,\n __env__=__env__,\n full_return=True,\n **kwargs)\n\n if kwargs.get('asynchronous'):\n out['return'] = out.copy()\n out['success'] = 'jid' in out and 'tag' in out\n\n runner_return = out.get('return')\n if isinstance(runner_return, dict) and 'Error' in runner_return:\n out['success'] = False\n\n success = out.get('success', True)\n ret = {'name': name,\n 'changes': {'return': runner_return},\n 'result': success}\n ret['comment'] = \"Runner function '{0}' {1}.\".format(\n name,\n 'executed' if success else 'failed',\n )\n\n ret['__orchestration__'] = True\n if 'jid' in out:\n ret['__jid__'] = out['jid']\n\n return ret", "def runner(self, fun, timeout=None, full_return=False, **kwargs):\n '''\n Run `runner modules <all-salt.runners>` synchronously\n\n Wraps :py:meth:`salt.runner.RunnerClient.cmd_sync`.\n\n Note that runner functions must be called using keyword arguments.\n Positional arguments are not supported.\n\n :return: Returns the result from the runner module\n '''\n kwargs['fun'] = fun\n runner = salt.runner.RunnerClient(self.opts)\n return runner.cmd_sync(kwargs, timeout=timeout, full_return=full_return)", "def execute_run(event)\n phase! Run\n @world.logger.debug format('%13s %s:%2d got event %s',\n 'Step', execution_plan_id, @step.id, event) if event\n @input = OutputReference.dereference @input, world.persistence\n\n case\n when state == :running\n raise NotImplementedError, 'recovery after restart is not implemented'\n\n when [:pending, :error, :skipping, :suspended].include?(state)\n if event && state != :suspended\n raise 'event can be processed only when in suspended state'\n end\n\n self.state = :running unless self.state == :skipping\n save_state\n with_error_handling do\n event = Skip if state == :skipping\n\n # we run the Skip event only when the run accepts events\n if event != Skip || run_accepts_events?\n result = catch(SUSPEND) do\n world.middleware.execute(:run, self, *[event].compact) do |*args|\n run(*args)\n end\n end\n\n self.state = :suspended if result == SUSPEND\n end\n\n check_serializable :output\n end\n else\n raise \"wrong state #{state} when event:#{event}\"\n end\n end", "def _local_run(self, run_conf):\n '''\n Execute local runner\n\n :param run_conf:\n :return:\n '''\n try:\n ret = self._get_runner(run_conf).run()\n except SystemExit:\n ret = 'Runner is not available at this moment'\n self.out.error(ret)\n except Exception as ex:\n ret = 'Unhandled exception occurred: {}'.format(ex)\n log.debug(ex, exc_info=True)\n\n return ret", "def runner(coro):\n \"\"\"Function execution decorator.\"\"\"\n\n @wraps(coro)\n def inner(self, *args, **kwargs):\n if self.mode == 'async':\n return coro(self, *args, **kwargs)\n return self._loop.run_until_complete(coro(self, *args, **kwargs))\n\n return inner", "def _disbatch_runner(self, chunk):\n '''\n Disbatch runner client commands\n '''\n full_return = chunk.pop('full_return', False)\n pub_data = self.saltclients['runner'](chunk)\n tag = pub_data['tag'] + '/ret'\n try:\n event = yield self.application.event_listener.get_event(self, 
tag=tag)\n\n # only return the return data\n ret = event if full_return else event['data']['return']\n raise tornado.gen.Return(ret)\n except TimeoutException:\n raise tornado.gen.Return('Timeout waiting for runner to execute')", "def runner(name, arg=None, kwarg=None, full_return=False, saltenv='base', jid=None, asynchronous=False, **kwargs):\n '''\n Execute a runner function. This function must be run on the master,\n either by targeting a minion running on a master or by using\n salt-call on a master.\n\n .. versionadded:: 2014.7.0\n\n name\n The name of the function to run\n\n kwargs\n Any keyword arguments to pass to the runner function\n\n asynchronous\n Run the salt command but don't wait for a reply.\n\n .. versionadded:: neon\n\n CLI Example:\n\n In this example, assume that `master_minion` is a minion running\n on a master.\n\n .. code-block:: bash\n\n salt master_minion saltutil.runner jobs.list_jobs\n salt master_minion saltutil.runner test.arg arg=\"['baz']\" kwarg=\"{'foo': 'bar'}\"\n '''\n if arg is None:\n arg = []\n if kwarg is None:\n kwarg = {}\n jid = kwargs.pop('__orchestration_jid__', jid)\n saltenv = kwargs.pop('__env__', saltenv)\n kwargs = salt.utils.args.clean_kwargs(**kwargs)\n if kwargs:\n kwarg.update(kwargs)\n\n if 'master_job_cache' not in __opts__:\n master_config = os.path.join(os.path.dirname(__opts__['conf_file']),\n 'master')\n master_opts = salt.config.master_config(master_config)\n rclient = salt.runner.RunnerClient(master_opts)\n else:\n rclient = salt.runner.RunnerClient(__opts__)\n\n if name in rclient.functions:\n aspec = salt.utils.args.get_function_argspec(rclient.functions[name])\n if 'saltenv' in aspec.args:\n kwarg['saltenv'] = saltenv\n\n if name in ['state.orchestrate', 'state.orch', 'state.sls']:\n kwarg['orchestration_jid'] = jid\n\n if jid:\n salt.utils.event.fire_args(\n __opts__,\n jid,\n {'type': 'runner', 'name': name, 'args': arg, 'kwargs': kwarg},\n prefix='run'\n )\n\n if asynchronous:\n master_key = salt.utils.master.get_master_key('root', __opts__)\n low = {'arg': arg, 'kwarg': kwarg, 'fun': name, 'key': master_key}\n return rclient.cmd_async(low)\n else:\n return rclient.cmd(name,\n arg=arg,\n kwarg=kwarg,\n print_event=False,\n full_return=full_return)" ]
[ 0.7569867372512817, 0.7517397999763489, 0.7422463297843933, 0.731895923614502, 0.7290554046630859, 0.7282692193984985, 0.728164553642273, 0.726593017578125, 0.7192322611808777, 0.7190721035003662, 0.7166314721107483, 0.716503381729126 ]
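awaitable_runner above waits for a background thread by polling it with a growing delay instead of blocking the event loop in join(). The same pattern with a plain threading.Thread (CapturingThread is specific to the original project; these names are illustrative):

    import asyncio
    import threading
    import time

    def blocking_work():
        time.sleep(0.5)  # stand-in for a long-running runner

    async def await_thread(thread, max_delay=1.0):
        # start the thread, then poll it while yielding control to the loop
        thread.start()
        delay = 0.0
        while thread.is_alive():
            await asyncio.sleep(delay)
            delay = min(delay + 0.1, max_delay)

    asyncio.run(await_thread(threading.Thread(target=blocking_work)))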
Create an ``asyncio`` event loop running in the main thread and watching runners Using ``asyncio`` to handle subprocesses requires a specific loop type to run in the main thread. This function sets up and runs the correct loop in a portable way. In addition, it runs a single :py:class:`~.BaseRunner` until completion or failure. .. seealso:: The `issue #8 <https://github.com/MatterMiners/cobald/issues/8>`_ for details.
def asyncio_main_run(root_runner: BaseRunner): """ Create an ``asyncio`` event loop running in the main thread and watching runners Using ``asyncio`` to handle suprocesses requires a specific loop type to run in the main thread. This function sets up and runs the correct loop in a portable way. In addition, it runs a single :py:class:`~.BaseRunner` until completion or failure. .. seealso:: The `issue #8 <https://github.com/MatterMiners/cobald/issues/8>`_ for details. """ assert threading.current_thread() == threading.main_thread(), 'only main thread can accept asyncio subprocesses' if sys.platform == 'win32': event_loop = asyncio.ProactorEventLoop() asyncio.set_event_loop(event_loop) else: event_loop = asyncio.get_event_loop() asyncio.get_child_watcher().attach_loop(event_loop) event_loop.run_until_complete(awaitable_runner(root_runner))
[ "def event_loop(self):\n \"\"\"asyncio.BaseEventLoop: the running event loop.\n\n This fixture mainly exists to allow for overrides during unit tests.\n\n \"\"\"\n if not self._event_loop:\n self._event_loop = asyncio.get_event_loop()\n return self._event_loop", "def _loop_thread_main(self):\n \"\"\"Main background thread running the event loop.\"\"\"\n\n asyncio.set_event_loop(self.loop)\n self._loop_check.inside_loop = True\n\n try:\n self._logger.debug(\"Starting loop in background thread\")\n self.loop.run_forever()\n self._logger.debug(\"Finished loop in background thread\")\n except: # pylint:disable=bare-except;This is a background worker thread.\n self._logger.exception(\"Exception raised from event loop thread\")\n finally:\n self.loop.close()", "def run_forever(self) -> None:\n '''Execute the tasky/asyncio event loop until terminated.'''\n\n Log.debug('running event loop until terminated')\n asyncio.ensure_future(self.init())\n self.loop.run_forever()\n self.loop.close()", "def startLoop():\n \"\"\"\n Use nested asyncio event loop for Jupyter notebooks.\n \"\"\"\n def _ipython_loop_asyncio(kernel):\n '''\n Use asyncio event loop for the given IPython kernel.\n '''\n loop = asyncio.get_event_loop()\n\n def kernel_handler():\n kernel.do_one_iteration()\n loop.call_later(kernel._poll_interval, kernel_handler)\n\n loop.call_soon(kernel_handler)\n try:\n if not loop.is_running():\n loop.run_forever()\n finally:\n if not loop.is_running():\n loop.run_until_complete(loop.shutdown_asyncgens())\n loop.close()\n\n patchAsyncio()\n loop = asyncio.get_event_loop()\n if not loop.is_running():\n from ipykernel.eventloops import register_integration, enable_gui\n register_integration('asyncio')(_ipython_loop_asyncio)\n enable_gui('asyncio')", "def create_asyncio_eventloop(loop=None):\n \"\"\"\n Returns an asyncio :class:`~prompt_toolkit.eventloop.EventLoop` instance\n for usage in a :class:`~prompt_toolkit.interface.CommandLineInterface`. It\n is a wrapper around an asyncio loop.\n\n :param loop: The asyncio eventloop (or `None` if the default asyncioloop\n should be used.)\n \"\"\"\n # Inline import, to make sure the rest doesn't break on Python 2. (Where\n # asyncio is not available.)\n if is_windows():\n from prompt_toolkit.eventloop.asyncio_win32 import Win32AsyncioEventLoop as AsyncioEventLoop\n else:\n from prompt_toolkit.eventloop.asyncio_posix import PosixAsyncioEventLoop as AsyncioEventLoop\n\n return AsyncioEventLoop(loop)", "def run(entry_point, drivers, loop = None):\n ''' This is a runner wrapping the cyclotron \"run\" implementation. 
It takes\n an additional parameter to provide a custom asyncio mainloop.\n '''\n program = setup(entry_point, drivers)\n dispose = program.run()\n if loop == None:\n loop = asyncio.get_event_loop()\n\n loop.run_forever()\n dispose()", "def runner(coro):\n \"\"\"Function execution decorator.\"\"\"\n\n @wraps(coro)\n def inner(self, *args, **kwargs):\n if self.mode == 'async':\n return coro(self, *args, **kwargs)\n return self._loop.run_until_complete(coro(self, *args, **kwargs))\n\n return inner", "def asyncio_run_forever(setup_coro, shutdown_coro, *,\n stop_signals={signal.SIGINT}, debug=False):\n '''\n A proposed-but-not-implemented asyncio.run_forever() API based on\n @vxgmichel's idea.\n See discussions on https://github.com/python/asyncio/pull/465\n '''\n async def wait_for_stop():\n loop = current_loop()\n future = loop.create_future()\n for stop_sig in stop_signals:\n loop.add_signal_handler(stop_sig, future.set_result, stop_sig)\n try:\n recv_sig = await future\n finally:\n loop.remove_signal_handler(recv_sig)\n\n loop = asyncio.new_event_loop()\n try:\n asyncio.set_event_loop(loop)\n loop.set_debug(debug)\n loop.run_until_complete(setup_coro)\n loop.run_until_complete(wait_for_stop())\n finally:\n try:\n loop.run_until_complete(shutdown_coro)\n _cancel_all_tasks(loop)\n if hasattr(loop, 'shutdown_asyncgens'): # Python 3.6+\n loop.run_until_complete(loop.shutdown_asyncgens())\n finally:\n asyncio.set_event_loop(None)\n loop.close()", "def _run_asyncio(loop, zmq_context):\n \"\"\"\n Run asyncio (should be called in a thread) and close the loop and the zmq context when the thread ends\n :param loop:\n :param zmq_context:\n :return:\n \"\"\"\n try:\n asyncio.set_event_loop(loop)\n loop.run_forever()\n except:\n pass\n finally:\n loop.close()\n zmq_context.destroy(1000)", "def eventloop(self, *args, **kwargs):\n \"\"\"\n Hand crafted event loop, with only one event possible : exit\n More events ( and signals ) can be added later, after converting to asyncio.\n \"\"\"\n\n # Setting status\n status = None\n\n # Starting the clock\n start = time.time()\n\n first_loop = True\n # loop running target, maybe more than once\n while not self.exit.is_set():\n\n if first_loop:\n first_loop = False\n # signalling startup only the first time, just after having check for exit request.\n # We need to return control before starting, but after entering context...\n self.started.set()\n # TODO : check if better outside of loop maybe ??\n # It will change semantics, but might be more intuitive...\n\n # time is ticking\n # TODO : move this out of here. 
this class should require only generic interface to any method.\n now = time.time()\n timedelta = now - start\n start = now\n\n # replacing the original Process.run() call, passing arguments to our target\n if self._target:\n # bwcompat\n kwargs['timedelta'] = timedelta\n\n # TODO : use return code to determine when/how we need to run this the next time...\n # Also we need to keep the exit status to be able to call external process as an update...\n\n logging.debug(\n \"[{self.name}] calling {self._target.__name__} with args {args} and kwargs {kwargs}...\".format(\n **locals()))\n status = self._target(*args, **kwargs)\n\n if status is not None:\n break\n\n if self.started.is_set() and status is None and self.exit.is_set():\n # in the not so special case where we started, we didnt get exit code and we exited,\n # this is expected as a normal result and we set an exitcode here of 0\n # As 0 is the conventional success for unix process successful run\n status = 0\n\n return status", "async def awaitable_runner(runner: BaseRunner):\n \"\"\"Execute a runner without blocking the event loop\"\"\"\n runner_thread = CapturingThread(target=runner.run)\n runner_thread.start()\n delay = 0.0\n while not runner_thread.join(timeout=0):\n await asyncio.sleep(delay)\n delay = min(delay + 0.1, 1.0)", "def schedCoroSafePend(self, coro):\n '''\n Schedules a coroutine to run as soon as possible on the same event loop that this Base is running on\n\n Note:\n This method may *not* be run inside an event loop\n '''\n if __debug__:\n import synapse.lib.threads as s_threads # avoid import cycle\n assert s_threads.iden() != self.tid\n\n task = asyncio.run_coroutine_threadsafe(coro, self.loop)\n return task.result()" ]
[ 0.7763006091117859, 0.7691020965576172, 0.7420839071273804, 0.742028534412384, 0.7414741516113281, 0.7392086982727051, 0.728949248790741, 0.7274221181869507, 0.726391077041626, 0.7239053249359131, 0.7224150896072388, 0.7181748747825623 ]
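asyncio_main_run above chooses a ProactorEventLoop on Windows and attaches a child watcher elsewhere so subprocesses can be managed from the main thread. On Python 3.8 and later the Proactor loop is already the Windows default, and calling asyncio.run() from the main thread is usually enough for subprocess support, so a minimal harness can look like this (names and the spawned command are illustrative):

    import asyncio
    import sys

    async def main():
        # spawning a subprocess relies on the loop/child-watcher setup that
        # asyncio_main_run performs explicitly on older Python versions
        proc = await asyncio.create_subprocess_exec(
            sys.executable, "-c", "print('hello')"
        )
        await proc.wait()

    asyncio.run(main())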
Function enhance Enhance the object with new item or enhanced items
def enhance(self): """ Function enhance Enhance the object with new item or enhanced items """ self.update({'os_default_templates': SubDict(self.api, self.objName, self.payloadObj, self.key, SubItemOsDefaultTemplate)}) self.update({'operatingsystems': SubDict(self.api, self.objName, self.payloadObj, self.key, SubItemOperatingSystem)})
[ "def enhance(self):\n \"\"\" Function enhance\n Enhance the object with new item or enhanced items\n \"\"\"\n self.update({'os_default_templates':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemOsDefaultTemplate)})\n self.update({'config_templates':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemConfigTemplate)})\n self.update({'ptables':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemPTable)})\n self.update({'media':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemMedia)})\n self.update({'architectures':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemArchitecture)})", "def enhance(self):\n \"\"\" Function enhance\n Enhance the object with new item or enhanced items\n \"\"\"\n self.update({'parameters':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemParameter)})\n self.update({'interfaces':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemInterface)})\n self.update({'subnets':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemSubnet)})", "def enhance(self):\n \"\"\" Function enhance\n Enhance the object with new item or enhanced items\n \"\"\"\n self.update({'images':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemImages)})", "def enhance(self):\n \"\"\" Function enhance\n Enhance the object with new item or enhanced items\n \"\"\"\n self.update({'puppetclasses':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemPuppetClasses)})\n self.update({'parameters':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemParameter)})\n self.update({'interfaces':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemInterface)})\n self.update({'smart_class_parameters':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemSmartClassParameter)})", "def enhance(self):\n \"\"\" Function enhance\n Enhance the object with new item or enhanced items\n \"\"\"\n self.update({'puppetclasses':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemPuppetClasses)})\n self.update({'parameters':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemParameter)})\n self.update({'smart_class_parameters':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n ItemSmartClassParameter)})", "def enhance(self):\n \"\"\" Function enhance\n Enhance the object with new item or enhanced items\n \"\"\"\n if self.objName in ['hosts', 'hostgroups',\n 'puppet_classes']:\n from foreman.itemSmartClassParameter\\\n import ItemSmartClassParameter\n self.update({'smart_class_parameters':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n ItemSmartClassParameter)})", "public T enhance(T t) {\n if (!needsEnhancement(t)) {\n return t;\n }\n\n try {\n return getEnhancedClass().getConstructor(baseClass).newInstance(t);\n } catch (Exception e) {\n throw new RuntimeException(String.format(\"Could not enhance object %s (%s)\", t, t.getClass()), e);\n }\n }", "private static void doEnhancement(CtClass cc, Version modelVersion) throws CannotCompileException,\n NotFoundException, ClassNotFoundException {\n CtClass inter = cp.get(OpenEngSBModel.class.getName());\n cc.addInterface(inter);\n addFields(cc);\n addGetOpenEngSBModelTail(cc);\n addSetOpenEngSBModelTail(cc);\n addRetrieveModelName(cc);\n addRetrieveModelVersion(cc, modelVersion);\n addOpenEngSBModelEntryMethod(cc);\n 
addRemoveOpenEngSBModelEntryMethod(cc);\n addRetrieveInternalModelId(cc);\n addRetrieveInternalModelTimestamp(cc);\n addRetrieveInternalModelVersion(cc);\n addToOpenEngSBModelValues(cc);\n addToOpenEngSBModelEntries(cc);\n cc.setModifiers(cc.getModifiers() & ~Modifier.ABSTRACT);\n }", "def enhance(self, inverse=False, gamma=1.0, stretch=\"no\",\n stretch_parameters=None, **kwargs):\n \"\"\"Image enhancement function. It applies **in this order** inversion,\n gamma correction, and stretching to the current image, with parameters\n *inverse* (see :meth:`Image.invert`), *gamma* (see\n :meth:`Image.gamma`), and *stretch* (see :meth:`Image.stretch`).\n \"\"\"\n self.invert(inverse)\n if stretch_parameters is None:\n stretch_parameters = {}\n\n stretch_parameters.update(kwargs)\n self.stretch(stretch, **stretch_parameters)\n self.gamma(gamma)", "def load(self, data):\n \"\"\" Function load\n Store the object data\n \"\"\"\n self.clear()\n self.update(data)\n self.enhance()", "def enhance2dataset(dset):\n \"\"\"Apply enhancements to dataset *dset* and return the resulting data\n array of the image.\"\"\"\n attrs = dset.attrs\n img = get_enhanced_image(dset)\n # Clip image data to interval [0.0, 1.0]\n data = img.data.clip(0.0, 1.0)\n data.attrs = attrs\n\n return data", "function enhancedEcommerceProductAction(track, action, data) {\n enhancedEcommerceTrackProduct(track);\n window.ga('ec:setAction', action, data || {});\n}" ]
[ 0.8752948045730591, 0.8729672431945801, 0.8696154356002808, 0.8679497838020325, 0.8630505800247192, 0.8407313823699951, 0.7447202205657959, 0.7079142928123474, 0.707258939743042, 0.6839107275009155, 0.6810131072998047, 0.6799886226654053 ]
Retry "tries" times, with initial "delay", increasing delay "delay*backoff" each time. Without exception success means when function returns valid object. With exception success when no exceptions
def retry(tries=10, delay=1, backoff=2, retry_exception=None):
    """
    Retry "tries" times, starting with an initial "delay" and multiplying the delay by "backoff" after each attempt.
    Without a retry exception, success means the function returns a truthy object; with a retry exception, success means the call completes without raising.
    """
    assert tries > 0, "tries must be 1 or greater"
    catching_mode = bool(retry_exception)

    def deco_retry(f):
        @functools.wraps(f)
        def f_retry(*args, **kwargs):
            mtries, mdelay = tries, delay
            while mtries > 0:
                time.sleep(mdelay)
                mdelay *= backoff
                try:
                    rv = f(*args, **kwargs)
                    if not catching_mode and rv:
                        return rv
                except retry_exception:
                    pass
                else:
                    if catching_mode:
                        return rv
                mtries -= 1
                if mtries == 0 and not catching_mode:
                    return False
                if mtries == 0 and catching_mode:
                    return f(*args, **kwargs)  # extra try, to avoid except-raise syntax
                log.debug("{0} try, sleeping for {1} sec".format(tries-mtries, mdelay))
            raise Exception("unreachable code")
        return f_retry
    return deco_retry
[ "def retry(tries, delay=0, back_off=1, raise_msg=''):\n \"\"\"Retries a function or method until it got True.\n\n - ``delay`` sets the initial delay in seconds\n - ``back_off`` sets the factor by which\n - ``raise_msg`` if not '', it'll raise an Exception\n \"\"\"\n\n if back_off < 1:\n raise ValueError('back_off must be 1 or greater')\n\n tries = math.floor(tries)\n if tries < 0:\n raise ValueError('tries must be 0 or greater')\n\n if delay < 0:\n raise ValueError('delay must be 0 or greater')\n\n def deco_retry(f):\n def f_retry(*args, **kwargs):\n max_tries, max_delay = tries, delay # make mutable\n\n while max_tries > 0:\n rv = f(*args, **kwargs) # first attempt\n if rv: # Done on success\n return rv\n\n max_tries -= 1 # consume an attempt\n time.sleep(max_delay) # wait...\n max_delay *= back_off # make future wait longer\n else:\n if raise_msg:\n raise Exception(raise_msg)\n return\n\n return f_retry # true decorator -> decorated function\n\n return deco_retry", "def retry(ExceptionToCheck, tries=3, delay=2, backoff=2):\n \"\"\"\n Retry decorator published by Saltry Crane.\n\n http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/\n \"\"\"\n def deco_retry(f):\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n try_one_last_time = True\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n try_one_last_time = False\n break\n except ExceptionToCheck:\n six.print_(\"Retrying in %s seconds\" % str(mdelay))\n time.sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n if try_one_last_time:\n return f(*args, **kwargs)\n return\n return f_retry # true decorator\n return deco_retry", "def retry(tries, CatchExceptions=(Exception,), delay=0.01, backoff=2):\n '''\n 错误重试的修饰器\n :param tries: 重试次数\n :param CatchExceptions: 需要重试的exception列表\n :param delay: 重试前等待\n :param backoff: 重试n次后,需要等待delay * n * backoff\n :return:\n\n\n @retry(5,ValueError)\n def test():\n raise ValueError\n\n '''\n if backoff <= 1:\n raise ValueError(\"backoff must be greater than 1\")\n\n if tries < 0:\n raise ValueError(\"tries must be 0 or greater\")\n\n if delay <= 0:\n raise ValueError(\"delay must be greater than 0\")\n\n def deco_retry(f):\n @wraps(f)\n def f_retry(*args, **kwargs):\n mdelay = delay\n retException = None\n for mtries in range(tries):\n try:\n return f(*args, **kwargs)\n except CatchExceptions as ex:\n logger.warning(\n \"function %s(%s, %s) try %d times error: %s\\n\" % (f.__name__, args, kwargs, mtries, str(ex)))\n logger.warning(\"Retrying in %.4f seconds...\" % (mdelay))\n\n retException = ex\n time.sleep(mdelay)\n mdelay *= backoff\n raise retException\n\n return f_retry\n\n return deco_retry", "def retry(ExceptionToCheck, tries=4, delay=3, backoff=2):\n \"\"\"Retry calling the decorated function using an exponential backoff.\n\n Reference: http://www.saltycrane.com/blog/2009/11/trying-out-retry\n -decorator-python/\n\n :param ExceptionToCheck: the exception to check. may be a tuple of\n exceptions to check\n :param tries: number of times to try (not retry) before giving up\n :param delay: initial delay between retries in seconds\n :param backoff: backoff multiplier e.g. value of 2 will double the delay\n each retry\n \"\"\"\n def deco_retry(f):\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except ExceptionToCheck as e:\n LOG.debug(\"%(err_mess)s. 
Retry calling function \"\n \"'%(f_name)s' in %(delta)d seconds.\",\n {'err_mess': str(e), 'f_name': f.__name__,\n 'delta': mdelay})\n time.sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n LOG.debug(\"Last retry calling function '%s'.\", f.__name__)\n return f(*args, **kwargs)\n\n return f_retry # true decorator\n\n return deco_retry", "def retry(target_exception, tries=4, delay_s=1, backoff=2):\n \"\"\"Retry calling the decorated function using an exponential backoff.\n\n http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/\n original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry\n\n :param target_exception: the exception to check. may be a tuple of\n exceptions to check\n :type target_exception: Exception or tuple\n :param tries: number of times to try (not retry) before giving up\n :type tries: int\n :param delay_s: initial delay between retries in seconds\n :type delay_s: int\n :param backoff: backoff multiplier e.g. value of 2 will double the delay\n each retry\n :type backoff: int\n \"\"\"\n import time\n from functools import wraps\n\n def decorated_retry(f):\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay_s\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except target_exception as e:\n logging.warning(\"Exception: %s, Retrying in %d seconds...\", str(e), mdelay)\n time.sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n return f(*args, **kwargs)\n\n return f_retry # true decorator\n\n return decorated_retry", "def RetryUntilReturnsTrue(tries, delay=2, backoff=1.5):\n '''Retries a function or method until it returns True.\n\n delay sets the initial delay in seconds, and backoff sets the factor by which\n the delay should lengthen after each failure. backoff must be greater than 1,\n or else it isn't really a backoff. 
tries must be at least 0, and delay\n greater than 0.'''\n\n if backoff <= 1:\n raise ValueError(\"backoff must be greater than 1\")\n\n tries = math.floor(tries)\n if tries < 0:\n raise ValueError(\"tries must be 0 or greater\")\n\n if delay <= 0:\n raise ValueError(\"delay must be greater than 0\")\n\n def deco_retry(f):\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay # make mutable\n\n rv = f(*args, **kwargs) # first attempt\n while mtries > 0:\n if rv: # Done on success\n return rv\n\n mtries -= 1 # consume an attempt\n time.sleep(mdelay) # wait...\n mdelay *= backoff # make future wait longer\n \n rv = f(*args, **kwargs) # Try again\n\n return False # Ran out of tries :-(\n\n return f_retry # true decorator -> decorated function\n return deco_retry", "def retry_on_exception(tries=6, delay=1, backoff=2, max_delay=32):\n '''\n Decorator for implementing exponential backoff for retrying on failures.\n\n tries: Max number of tries to execute the wrapped function before failing.\n delay: Delay time in seconds before the FIRST retry.\n backoff: Multiplier to extend the initial delay by for each retry.\n max_delay: Max time in seconds to wait between retries.\n '''\n tries = math.floor(tries)\n if tries < 1:\n raise ValueError('\"tries\" must be greater than or equal to 1.')\n if delay < 0:\n raise ValueError('\"delay\" must be greater than or equal to 0.')\n if backoff < 1:\n raise ValueError('\"backoff\" must be greater than or equal to 1.')\n if max_delay < delay:\n raise ValueError('\"max_delay\" must be greater than or equal to delay.')\n\n def decorated_function_with_retry(func):\n @wraps(func)\n def function_to_retry(*args, **kwargs):\n local_tries, local_delay = tries, delay\n while local_tries > 1:\n try:\n return func(*args, **kwargs)\n except Exception as e:\n if local_delay > max_delay:\n local_delay = max_delay\n logging.exception('%s: Retrying in %d seconds...'\n % (str(e), local_delay))\n time.sleep(local_delay)\n local_tries -= 1\n local_delay *= backoff\n return func(*args, **kwargs)\n return function_to_retry\n return decorated_function_with_retry", "def retry(exceptions, tries=5, delay=1, backoff=2, logger=None):\n \"\"\"\n Retry calling the decorated function using an exponential backoff.\n\n Args:\n exceptions: The exception to check. may be a tuple of\n exceptions to check.\n tries: Number of times to try (not retry) before giving up.\n delay: Initial delay between retries in seconds.\n backoff: Backoff multiplier (e.g. value of 2 will double the delay\n each retry).\n logger: Logger to use. If None, print.\n \"\"\"\n\n def deco_retry(func):\n @wraps(func)\n async def f_retry(self, *args, **kwargs):\n if not iscoroutine(func):\n f = coroutine(func)\n else:\n f = func\n\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return await f(self, *args, **kwargs)\n except exceptions:\n if logger:\n logger.info('Retrying %s after %s seconds', f.__name__, mdelay)\n sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n return await f(self, *args, **kwargs)\n\n return f_retry\n\n return deco_retry", "def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, status_codes=[],\n logger=None):\n \"\"\"\n Decorator function for retrying the decorated function,\n using an exponential or fixed backoff.\n\n Original: https://wiki.python.org/moin/PythonDecoratorLibrary#Retry\n\n ExceptionToCheck: the exception to check. 
Can be a tuple of\n exceptions to check\n tries: number of times to try (not retry) before giving up\n delay: initial delay between tries in seconds\n backoff: backoff multiplier\n status_codes: list of http status codes to check for retrying, only applies\n when ExceptionToCheck is a DataFailureException\n logger: logging.Logger instance\n \"\"\"\n if backoff is None or backoff <= 0:\n raise ValueError(\"backoff must be a number greater than 0\")\n\n tries = math.floor(tries)\n if tries < 0:\n raise ValueError(\"tries must be a number 0 or greater\")\n\n if delay is None or delay <= 0:\n raise ValueError(\"delay must be a number greater than 0\")\n\n def deco_retry(f):\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n\n except ExceptionToCheck as err:\n if (type(err) is DataFailureException and\n len(status_codes) and\n err.status not in status_codes):\n raise\n\n if logger:\n logger.warning('%s: %s, Retrying in %s seconds.' % (\n f.__name__, err, mdelay))\n\n time.sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n\n return f(*args, **kwargs)\n\n return f_retry\n\n return deco_retry", "def retry(tries, delay=3, backoff=2):\n \"\"\"Retries a function or method until it returns True.\"\"\"\n\n if backoff <= 1:\n raise ValueError(\"backoff must be greater than 1\")\n\n tries = math.floor(tries)\n if tries < 0:\n raise ValueError(\"tries must be 0 or greater\")\n\n if delay <= 0:\n raise ValueError(\"delay must be greater than 0\")\n\n def deco_retry(f):\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n\n rv = f(*args, **kwargs) # first attempt\n while mtries > 0:\n if rv is True:\n return True\n\n mtries -= 1\n time.sleep(mdelay)\n mdelay *= backoff\n\n rv = f(*args, **kwargs) # Try again\n print str(tries) + \" attempts. 
Abandoning.\"\n return False # Ran out of tries\n\n return f_retry\n\n return deco_retry", "def retry(self, f, *args, **kwargs):\n \"\"\"\n Retries the given function self.tries times on NetworkErros\n \"\"\"\n backoff = random.random() / 100 # 5ms on average\n for _ in range(self.tries - 1):\n try:\n return f(*args, **kwargs)\n except NetworkError:\n time.sleep(backoff)\n backoff *= 2\n return f(*args, **kwargs)", "def retry(ExceptionToCheck, tries=10, timeout_secs=1.0, logger=None, callback_by_exception=None):\n \"\"\"\n Retry calling the decorated function using an exponential backoff.\n :param callback_by_exception: callback/method invocation on certain exceptions\n :type callback_by_exception: None or dict\n \"\"\"\n def deco_retry(f):\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, timeout_secs\n run_one_last_time = True\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except ExceptionToCheck as e:\n # check if this exception is something the caller wants special handling for\n callback_errors = callback_by_exception or {}\n for error_type in callback_errors:\n if isinstance(e, error_type):\n callback_logic = callback_by_exception[error_type]\n should_break_out = run_one_last_time = False\n if isinstance(callback_logic, (list, tuple)):\n callback_logic, should_break_out = callback_logic\n if isinstance(should_break_out, (list, tuple)):\n should_break_out, run_one_last_time = should_break_out\n callback_logic()\n if should_break_out: # caller requests we stop handling this exception\n break\n # traceback.print_exc()\n half_interval = mdelay * 0.10 # interval size\n actual_delay = random.uniform(mdelay - half_interval, mdelay + half_interval)\n msg = \"Retrying in %.2f seconds ...\" % actual_delay\n if logger is None:\n logging.exception(msg)\n else:\n logger.exception(msg)\n time.sleep(actual_delay)\n mtries -= 1\n mdelay *= 2\n if run_one_last_time: # one exception may be all the caller wanted in certain cases\n return f(*args, **kwargs)\n return f_retry # true decorator\n return deco_retry" ]
[ 0.8513075113296509, 0.8424427509307861, 0.8363626003265381, 0.8320958614349365, 0.829943060874939, 0.828590452671051, 0.8209466338157654, 0.8207831382751465, 0.8187723755836487, 0.8158443570137024, 0.8038476705551147, 0.7988787889480591 ]
Dump initialized object structure to yaml
def dump(node):
    """
    Dump initialized object structure to yaml
    """

    from qubell.api.private.platform import Auth, QubellPlatform
    from qubell.api.private.organization import Organization
    from qubell.api.private.application import Application
    from qubell.api.private.instance import Instance
    from qubell.api.private.revision import Revision
    from qubell.api.private.environment import Environment
    from qubell.api.private.zone import Zone
    from qubell.api.private.manifest import Manifest

    # Exclude keys from dump
    # Format: { 'ClassName': ['fields', 'to', 'exclude']}
    exclusion_list = {
        Auth: ['cookies'],
        QubellPlatform:['auth', ],
        Organization: ['auth', 'organizationId', 'zone'],
        Application: ['auth', 'applicationId', 'organization'],
        Instance: ['auth', 'instanceId', 'application'],
        Manifest: ['name', 'content'],
        Revision: ['auth', 'revisionId'],
        Environment: ['auth', 'environmentId', 'organization'],
        Zone: ['auth', 'zoneId', 'organization'],
    }

    def obj_presenter(dumper, obj):
        for x in exclusion_list.keys():
            if isinstance(obj, x):  # Find class
                fields = obj.__dict__.copy()
                for excl_item in exclusion_list[x]:
                    try:
                        fields.pop(excl_item)
                    except KeyError:
                        log.warn('No item %s in object %s' % (excl_item, x))
                return dumper.represent_mapping('tag:yaml.org,2002:map', fields)
        return dumper.represent_mapping('tag:yaml.org,2002:map', obj.__dict__)


    noalias_dumper = yaml.dumper.Dumper
    noalias_dumper.ignore_aliases = lambda self, data: True

    yaml.add_representer(unicode, lambda dumper, value: dumper.represent_scalar(u'tag:yaml.org,2002:str', value))
    yaml.add_multi_representer(object, obj_presenter)

    serialized = yaml.dump(node, default_flow_style=False, Dumper=noalias_dumper)
    return serialized
[ "def dump\n out = \"\\#<#{self.class}:0x#{format('%08x', object_id)}>\\n\"\n out << \"Magic number : #{format('%0x', @sb['magic_num'])}\\n\"\n out << \"Block size : #{@sb['block_size']} (#{@block_size} bytes)\\n\"\n out << \"Number of blocks : #{@sb['data_blocks']}\\n\"\n out << \"Real-time blocks : #{@sb['realtime_blocks']}\\n\"\n out << \"Real-time extents : #{@sb['realtime_extents']}\\n\"\n out << \"UUID : #{@filesystem_id}\\n\"\n out << \"Journal Log Start block : #{@sb['log_start']}\\n\"\n out << \"Root Inode # : #{@sb['root_inode_num']}\\n\"\n out << \"RealTime Bitmap Inode# : #{@sb['bitmap_inode_num']}\\n\"\n out << \"RealTime Summary Inode# : #{@sb['summary_inode_num']}\\n\"\n out << \"RT Extent Size (Blocks) : #{@sb['realtime_ext_size']}\\n\"\n out << \"Alloc Group Size : #{@sb['ag_blocks']}\\n\"\n out << \"# of Alloc Groups : #{@sb['ag_count']}\\n\"\n out << \"# of RT Bitmap Blocks : #{@sb['bitmap_blocks']}\\n\"\n out << \"# of Journal Log Blocks : #{@sb['log_blocks']}\\n\"\n out << \"Filesystem Version # : #{sb_version_num}\\n\"\n out << \"Disk Sector Size : #{@sb['sector_size']} bytes\\n\"\n out << \"Inode Size : #{@sb['inode_size']} bytes\\n\"\n out << \"Inodes Per Block : #{@sb['inodes_per_blk']}\\n\"\n out << \"Filesystem Name : #{@sb['fs_name']}\\n\"\n out << \"Log Base2 of Block size : #{@sb['block_size_log']}\\n\"\n out << \"Log Base2 of Sector size : #{@sb['sector_size_log']}\\n\"\n out << \"Log Base2 of Inode size : #{@sb['inode_size_log']}\\n\"\n out << \"Log Base2 of Inodes/Blk : #{@sb['inodes_per_blk_log']}\\n\"\n out << \"Log Base2 AllocGrp size : #{@sb['ag_blocks_log']}\\n\"\n out << \"Log Base2 RT Extent sz : #{@sb['rt_ext_size_log']}\\n\"\n out << \"In Progress Flag : #{@sb['in_progress']}\\n\"\n out << \"Inode Space Max Percent : #{@sb['inode_max_pct']}\\n\"\n out << \"Inodes Allocated on FS : #{@sb['inode_count']}\\n\"\n out << \"Free Inodes on FS : #{@sb['inode_free_count']}\\n\"\n out << \"Free Data Blocks on FS : #{@sb['free_data_blocks']}\\n\"\n out << \"Free RT Extents on FS : #{@sb['free_rt_extents']}\\n\"\n out << \"Inode # for User Quotas : #{@sb['user_quota_ino']}\\n\"\n out << \"Inode # for Grp Quotas : #{@sb['group_quota_ino']}\\n\"\n out << \"Quota Flags : #{@sb['quota_flags']}\\n\"\n out << \"Miscellaneous Flags : #{@sb['misc_flags']}\\n\"\n out << \"Shared Version # : #{@sb['shared_vers_no']}\\n\"\n out << \"Inode Chunk Alignment : #{@sb['inode_alignment']}\\n\"\n out << \"Stripe or Raid Unit : #{@sb['stripe_unit']}\\n\"\n out << \"Stripe or Raid Width : #{@sb['stripe_width']}\\n\"\n out << \"Log Base2 Dir Block : #{@sb['dir_block_log']}\\n\"\n out << \"Log Base2 Log Sect Size : #{@sb['log_sect_size_log']}\\n\"\n out << \"External Log Sect Size : #{@sb['log_sector_size']}\\n\"\n out << \"Log Device Stripe Size : #{@sb['log_stripe_unit_sz']}\\n\"\n out << \"Additional Version Flgs : #{@sb['features_2']}\\n\"\n out << \"Compat Features : #{@sb['features_compat']}\\n\"\n out << \"R/O Compat Features : #{@sb['features_ro_compat']}\\n\"\n out << \"Incompat Features : #{@sb['features_incompat']}\\n\"\n out << \"Log Incompat Features : #{@sb['features_log_incompat']}\\n\"\n out << \"Superblock CRC : #{@sb['superblock_crc']}\\n\"\n out << \"Inode # Project Quotas : #{@sb['proj_quota_ino']}\\n\"\n out << \"Last Write Sequence : #{@sb['last_write_seq']}\\n\"\n out\n end", "def YamlDumper(aff4object):\n \"\"\"Dumps the given aff4object into a yaml representation.\"\"\"\n aff4object.Flush()\n\n result = {}\n for attribute, values in 
iteritems(aff4object.synced_attributes):\n result[attribute.predicate] = []\n for value in values:\n # This value is really a LazyDecoder() instance. We need to get at the\n # real data here.\n value = value.ToRDFValue()\n\n result[attribute.predicate].append(\n [value.__class__.__name__,\n value.SerializeToString(),\n str(value.age)])\n\n return yaml.Dump({\n \"aff4_class\": compatibility.GetName(aff4object),\n \"_urn\": aff4object.urn.SerializeToString(),\n \"attributes\": result,\n \"age_policy\": aff4object.age_policy,\n })", "def dump(self, indentation=0):\n \"\"\"Returns a string representation of the structure.\"\"\"\n \n dump = []\n \n dump.append('[%s]' % self.name)\n \n # Refer to the __set_format__ method for an explanation\n # of the following construct.\n for keys in self.__keys__:\n for key in keys:\n \n val = getattr(self, key)\n if isinstance(val, int) or isinstance(val, long):\n val_str = '0x%-8X' % (val)\n if key == 'TimeDateStamp' or key == 'dwTimeStamp':\n try:\n val_str += ' [%s UTC]' % time.asctime(time.gmtime(val))\n except exceptions.ValueError, e:\n val_str += ' [INVALID TIME]'\n else:\n val_str = ''.join(filter(lambda c:c != '\\0', str(val)))\n \n dump.append('0x%-8X 0x%-3X %-30s %s' % (\n self.__field_offsets__[key] + self.__file_offset__, \n self.__field_offsets__[key], key+':', val_str))\n \n return dump", "def dump(self, indentation=0):\n \"\"\"Returns a string representation of the structure.\"\"\"\n\n dump = []\n\n dump.append('[{0}]'.format(self.name))\n\n printable_bytes = [ord(i) for i in string.printable if i not in string.whitespace]\n\n # Refer to the __set_format__ method for an explanation\n # of the following construct.\n for keys in self.__keys__:\n for key in keys:\n\n val = getattr(self, key)\n if isinstance(val, (int, long)):\n if key.startswith('Signature_'):\n val_str = '%-8X' % (val)\n else:\n val_str = '0x%-8X' % (val)\n if key == 'TimeDateStamp' or key == 'dwTimeStamp':\n try:\n val_str += ' [%s UTC]' % time.asctime(time.gmtime(val))\n except ValueError as e:\n val_str += ' [INVALID TIME]'\n else:\n val_str = bytearray(val)\n if key.startswith('Signature'):\n val_str = ''.join(\n ['{:02X}'.format(i) for i in val_str.rstrip(b'\\x00')])\n else:\n val_str = ''.join(\n [chr(i) if (i in printable_bytes) else\n '\\\\x{0:02x}'.format(i) for i in val_str.rstrip(b'\\x00')])\n\n dump.append('0x%-8X 0x%-3X %-30s %s' % (\n self.__field_offsets__[key] + self.__file_offset__,\n self.__field_offsets__[key], key+':', val_str))\n\n return dump", "def dump(self, obj, **kwargs):\n \"\"\"Take obj for later use: using class name to namespace definition.\"\"\"\n self.obj = obj\n return super(JSONSchema, self).dump(obj, **kwargs)", "def to_yaml(self, skip_nulls=True):\n \"\"\"Convert object to a yaml string\"\"\"\n return yaml.safe_dump(self.to_dict(skip_nulls=skip_nulls),\n default_flow_style=False)", "public void dumpObjectMetaData() {\n LOGGER.debug(\"dump class={}\", className);\n LOGGER.debug(\"----------------------------------------\");\n for (FieldMetaData md : fieldList) {\n LOGGER.debug(md.toString());\n }\n }", "def _dump(self):\n \"\"\"For debugging, dump the entire data structure.\"\"\"\n pp = pprint.PrettyPrinter(indent=4)\n\n print(\"=== Variables ===\")\n print(\"-- Globals --\")\n pp.pprint(self._global)\n print(\"-- Bot vars --\")\n pp.pprint(self._var)\n print(\"-- Substitutions --\")\n pp.pprint(self._sub)\n print(\"-- Person Substitutions --\")\n pp.pprint(self._person)\n print(\"-- Arrays --\")\n pp.pprint(self._array)\n\n print(\"=== Topic 
Structure ===\")\n pp.pprint(self._topics)\n print(\"=== %Previous Structure ===\")\n pp.pprint(self._thats)\n\n print(\"=== Includes ===\")\n pp.pprint(self._includes)\n\n print(\"=== Inherits ===\")\n pp.pprint(self._lineage)\n\n print(\"=== Sort Buffer ===\")\n pp.pprint(self._sorted)\n\n print(\"=== Syntax Tree ===\")\n pp.pprint(self._syntax)", "def dumps(self):\n \"\"\"\n Dump this instance as YAML.\n\n \"\"\"\n with closing(StringIO()) as fileobj:\n self.dump(fileobj)\n return fileobj.getvalue()", "def dump(self, fields=None, exclude=None):\n \"\"\"\n Dump current object to dict, but the value is string\n for manytomany fields will not automatically be dumpped, only when\n they are given in fields parameter\n \"\"\"\n exclude = exclude or []\n d = {}\n if fields and self._primary_field not in fields:\n fields = list(fields)\n fields.append(self._primary_field)\n for k, v in self.properties.items():\n if ((not fields) or (k in fields)) and (not exclude or (k not in exclude)):\n if not isinstance(v, ManyToMany):\n t = v.get_value_for_datastore(self)\n if t is Lazy:\n self.refresh()\n t = v.get_value_for_datastore(self)\n if isinstance(t, Model):\n t = t._key\n d[k] = v.to_str(t)\n else:\n if fields:\n d[k] = ','.join([str(x) for x in getattr(self, v._lazy_value(), [])])\n if self._primary_field and d and self._primary_field not in d:\n d[self._primary_field] = str(self._key)\n return d", "def on_yaml_dumps(self, yaml, config, dictionary, **kwargs):\n \"\"\" The `pyyaml <https://pypi.org/project/pyyaml/>`_ dumps method.\n\n :param module yaml: The ``yaml`` module\n :param class config: The instance's config class\n :param dict dictionary: The dictionary to seralize\n :returns: The serialized content\n :rtype: str\n \"\"\"\n\n return yaml.dump(dictionary, Dumper=yaml.Dumper)", "private function dumpStructure() {\n dump(\"Requesters:\");\n dump($this->requesters);\n\n dump(\"Request roots:\");\n dump($this->requestRoots);\n\n dump(\"Request paths:\");\n dump($this->requestPaths);\n\n dump(\"Local names:\");\n dump($this->localNames);\n\n if (isset($this->tree)) {\n dump(\"Containment tree:\");\n dump($this->tree);\n }\n }" ]
[ 0.7340636253356934, 0.7310830950737, 0.7280782461166382, 0.7258572578430176, 0.7238344550132751, 0.7223688960075378, 0.7212656736373901, 0.7188830375671387, 0.7150736451148987, 0.7141214609146118, 0.7138819098472595, 0.713685929775238 ]
Generate environment used for 'org.restore' method :param file: env file :return: env
def load_env(file):
    """
    Generate environment used for 'org.restore' method
    :param file: env file
    :return: env
    """
    with open(file) as f:
        env = yaml.safe_load(f)
    for org in env.get('organizations', []):
        if not org.get('applications'):
            org['applications'] = []

        if org.get('starter-kit'):
            kit_meta = get_starter_kit_meta(org.get('starter-kit'))
            for meta_app in get_applications_from_metadata(kit_meta):
                org['applications'].append(meta_app)

        if org.get('meta'):
            for meta_app in get_applications_from_metadata(org.get('meta')):
                org['applications'].append(meta_app)

        for app in org.get('applications', []):
            if app.get('file'):
                app['file'] = os.path.realpath(os.path.join(os.path.dirname(file), app['file']))
    return env
[ "def generate_env(fname=None):\n \"\"\"Generate file with exports. \n\n By default this is in .config/genomepy/exports.txt.\n\n Parameters\n ----------\n fname: strs, optional\n Name of the output file.\n \"\"\"\n config_dir = user_config_dir(\"genomepy\")\n if os.path.exists(config_dir):\n fname = os.path.join(config_dir, \"exports.txt\")\n with open(fname, \"w\") as fout:\n for env in generate_exports():\n fout.write(\"{}\\n\".format(env))", "def environ(context):\n \"\"\"Retrieves the environment for a particular SETSHELL context\"\"\"\n if 'BASEDIRSETSHELL' not in os.environ:\n # It seems that we are in a hostile environment\n # try to source the Idiap-wide shell\n idiap_source = \"/idiap/resource/software/initfiles/shrc\"\n if os.path.exists(idiap_source):\n logger.debug(\"Sourcing: '%s'\"%idiap_source)\n try:\n command = ['bash', '-c', 'source %s && env' % idiap_source]\n pi = subprocess.Popen(command, stdout = subprocess.PIPE)\n # overwrite the default environment\n for line in pi.stdout:\n line = str_(line)\n (key, _, value) = line.partition(\"=\")\n os.environ[key.strip()] = value.strip()\n except OSError as e:\n # occurs when the file is not executable or not found\n pass\n\n # in case the BASEDIRSETSHELL environment variable is not set,\n # we are not at Idiap,\n # and so we don't have to set any additional variables.\n if 'BASEDIRSETSHELL' not in os.environ:\n return dict(os.environ)\n\n BASEDIRSETSHELL = os.environ['BASEDIRSETSHELL']\n dosetshell = '%s/setshell/bin/dosetshell' % BASEDIRSETSHELL\n\n command = [dosetshell, '-s', 'sh', context]\n\n # First things first, we get the path to the temp file created by dosetshell\n try:\n logger.debug(\"Executing: '%s'\", ' '.join(command))\n p = subprocess.Popen(command, stdout = subprocess.PIPE)\n except OSError as e:\n # occurs when the file is not executable or not found\n raise OSError(\"Error executing '%s': %s (%d)\" % (' '.join(command), e.strerror, e.errno))\n\n try:\n source = str_(p.communicate()[0]).strip()\n except KeyboardInterrupt: # the user CTRL-C'ed\n os.kill(p.pid, signal.SIGTERM)\n sys.exit(signal.SIGTERM)\n\n # We have now the name of the source file, source it and erase it\n command2 = ['bash', '-c', 'source %s && env' % source]\n\n try:\n logger.debug(\"Executing: '%s'\", ' '.join(command2))\n p2 = subprocess.Popen(command2, stdout = subprocess.PIPE)\n except OSError as e:\n # occurs when the file is not executable or not found\n raise OSError(\"Error executing '%s': %s (%d)\" % (' '.join(command2), e.strerror, e.errno))\n\n new_environ = dict(os.environ)\n for line in p2.stdout:\n line = str_(line)\n (key, _, value) = line.partition(\"=\")\n new_environ[key.strip()] = value.strip()\n\n try:\n p2.communicate()\n except KeyboardInterrupt: # the user CTRL-C'ed\n os.kill(p2.pid, signal.SIGTERM)\n sys.exit(signal.SIGTERM)\n\n if os.path.exists(source): os.unlink(source)\n\n logger.debug(\"Discovered environment for context '%s':\", context)\n for k in sorted(new_environ.keys()):\n logger.debug(\" %s = %s\", k, new_environ[k])\n\n return new_environ", "def initialize_env(env_file=None, fail_silently=True, load_globally=True):\n \"\"\"\n Returns an instance of _Environment after reading the system environment an\n optionally provided file.\n \"\"\"\n data = {}\n data.update(os.environ)\n if env_file:\n data.update(read_file_values(env_file, fail_silently))\n \n if load_globally:\n os.environ.update(data)\n \n return Environment(env_dict=data)", "public static Environment of(@NonNull File file) {\n try {\n return 
of(Files.newInputStream(Paths.get(file.getPath())));\n } catch (IOException e) {\n throw new IllegalStateException(e);\n }\n }", "def _get_env(self):\n \"\"\"this returns an environment dictionary we want to use to run the command\n\n this will also create a fake pgpass file in order to make it possible for\n the script to be passwordless\"\"\"\n if hasattr(self, 'env'): return self.env\n\n # create a temporary pgpass file\n pgpass = self._get_file()\n # format: http://www.postgresql.org/docs/9.2/static/libpq-pgpass.html\n pgpass.write('*:*:*:{}:{}\\n'.format(self.username, self.password).encode(\"utf-8\"))\n pgpass.close()\n self.env = dict(os.environ)\n self.env['PGPASSFILE'] = pgpass.name\n\n # we want to assure a consistent environment\n if 'PGOPTIONS' in self.env: del self.env['PGOPTIONS']\n return self.env", "def get_env(env_file='.env'):\n \"\"\"\n Set default environment variables from .env file\n \"\"\"\n try:\n with open(env_file) as f:\n for line in f.readlines():\n try:\n key, val = line.split('=', maxsplit=1)\n os.environ.setdefault(key.strip(), val.strip())\n except ValueError:\n pass\n except FileNotFoundError:\n pass", "def generate(env):\n \"\"\"\n Add Builders and construction variables for the Visual Age FORTRAN\n compiler to an Environment.\n \"\"\"\n path, _f77, _shf77, version = get_xlf77(env)\n if path:\n _f77 = os.path.join(path, _f77)\n _shf77 = os.path.join(path, _shf77)\n\n f77.generate(env)\n\n env['F77'] = _f77\n env['SHF77'] = _shf77", "def environment(self):\n '''Get raw data about this worker.\n\n This is recorded in the :meth:`heartbeat` info, and can be\n retrieved by :meth:`TaskMaster.get_heartbeat`. The dictionary\n includes keys ``worker_id``, ``host``, ``fqdn``, ``version``,\n ``working_set``, and ``memory``.\n\n '''\n hostname = socket.gethostname()\n aliases = ()\n ipaddrs = ()\n\n # This sequence isn't 100% reliable. We might try a socket()\n # sequence like RedisBase._ipaddress(), or just decide that\n # socket.fqdn() and/or socket.gethostname() is good enough.\n try:\n ip = socket.gethostbyname(hostname)\n except socket.herror:\n # If you're here, then $(hostname) doesn't resolve.\n ip = None\n\n try:\n if ip is not None:\n hostname, aliases, ipaddrs = socket.gethostbyaddr(ip)\n except socket.herror:\n # If you're here, then $(hostname) resolves, but the IP\n # address that results in doesn't reverse-resolve. 
This\n # has been observed on OSX at least.\n ipaddrs = (ip,)\n\n env = dict(\n worker_id=self.worker_id,\n parent=self.parent,\n hostname=hostname,\n aliases=tuple(aliases),\n ipaddrs=tuple(ipaddrs),\n fqdn=socket.getfqdn(),\n version=pkg_resources.get_distribution(\"rejester\").version, # pylint: disable=E1103\n working_set=[(dist.key, dist.version) for dist in pkg_resources.WorkingSet()], # pylint: disable=E1103\n # config_hash=self.config['config_hash'],\n # config_json = self.config['config_json'],\n memory=psutil.virtual_memory(),\n pid=os.getpid(),\n )\n return env", "def custom_environment(self, **kwargs):\n \"\"\"\n A context manager around the above ``update_environment`` method to restore the\n environment back to its previous state after operation.\n\n ``Examples``::\n\n with self.custom_environment(GIT_SSH='/bin/ssh_wrapper'):\n repo.remotes.origin.fetch()\n\n :param kwargs: see update_environment\n \"\"\"\n old_env = self.update_environment(**kwargs)\n try:\n yield\n finally:\n self.update_environment(**old_env)", "def load_env_from_file(filename):\n \"\"\"\n Read an env file into a collection of (name, value) tuples.\n \"\"\"\n if not os.path.exists(filename):\n raise FileNotFoundError(\"Environment file {} does not exist.\".format(filename))\n\n with open(filename) as f:\n for lineno, line in enumerate(f):\n line = line.strip()\n if not line or line.startswith(\"#\"):\n continue\n if \"=\" not in line:\n raise SyntaxError(\"Invalid environment file syntax in {} at line {}.\".format(filename, lineno + 1))\n\n name, value = parse_var(line)\n\n yield name, value", "def _run_env(self):\n \"\"\"\n Augment the current environment providing the PYTHONUSERBASE.\n \"\"\"\n env = dict(os.environ)\n env.update(\n getattr(self, 'env', {}),\n PYTHONUSERBASE=self.env_path,\n PIP_USER=\"1\",\n )\n self._disable_venv(env)\n return env", "def environment(**kv):\n \"\"\"\n Context manager to run Python code with a modified UNIX process environment.\n\n All key/value pairs in the keyword arguments are added (or changed, if the\n key names an existing environmental variable) in the process environment\n upon entrance into the context. Changes are undone upon exit: added\n environmental variables are removed from the environment, and those whose\n value was changed are reset to their pristine value.\n \"\"\"\n added = []\n changed = {}\n for key, value in kv.items():\n if key not in os.environ:\n added.append(key)\n else:\n changed[key] = os.environ[key]\n os.environ[key] = value\n\n yield\n\n # restore pristine process environment\n for key in added:\n del os.environ[key]\n for key in changed:\n os.environ[key] = changed[key]" ]
[ 0.7047730684280396, 0.7017318606376648, 0.6866380572319031, 0.6833397150039673, 0.6820922493934631, 0.6766843199729919, 0.6760852932929993, 0.6714118123054504, 0.6713417768478394, 0.6699016690254211, 0.6695418953895569, 0.6689802408218384 ]