Dataset Viewer
Columns:
namespace: string (lengths 12 to 102)
type: string (2 classes)
project_path: string (115 classes)
completion_path: string (lengths 20 to 110)
signature_position: list (length 2)
body_position: list (length 2)
dependency: dict
requirement: dict
tests: list (lengths 1 to 5)
indent: int64 (values 2 to 12)
prompt: string (lengths 358 to 22.1k)
target_function_prompt: string (lengths 15 to 74.4k)

namespace: benedict.utils.type_util.is_json_serializable
type: function
project_path: Text-Processing/python-benedict
completion_path: Text-Processing/python-benedict/benedict/utils/type_util.py
signature_position: [ 53, 53 ]
body_position: [ 54, 55 ]
dependency: { "cross_file": [], "intra_class": [], "intra_file": [] }
requirement: { "Arguments": ":param val: Any. The input value to be checked for JSON serializability.\n:return: Bool. True if the input value is JSON serializable, False otherwise.", "Functionality": "Check if the input value is JSON serializable. It checks if the input value is of the JSON serializable types." }
tests: [ "tests/utils/test_type_util.py::type_util_test_case::test_is_json_serializable" ]
indent: 4
prompt:
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function: # file path: Text-Processing/python-benedict/benedict/utils/type_util.py import pathlib import re from datetime import datetime from decimal import Decimal regex = re.compile("").__class__ uuid_re = re.compile( "^([0-9a-f]{32}){1}$|^([0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}){1}$", flags=re.IGNORECASE, ) def is_bool(val): return isinstance(val, bool) def is_collection(val): return isinstance(val, (dict, list, set, tuple)) def is_datetime(val): return isinstance(val, datetime) def is_decimal(val): return isinstance(val, Decimal) def is_dict(val): return isinstance(val, dict) def is_dict_or_list(val): return isinstance(val, (dict, list)) def is_dict_or_list_or_tuple(val): return isinstance(val, (dict, list, tuple)) def is_float(val): return isinstance(val, float) def is_function(val): return callable(val) def is_integer(val): return isinstance(val, int) Based on the information above, please complete the function in the current file Text-Processing/python-benedict/benedict/utils/type_util.py: def is_json_serializable(val):
target_function_prompt: def is_json_serializable(val):
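
Given the two-line body_position and the isinstance-based helpers shown in the prompt, a minimal sketch of the expected completion (the reference body itself is not part of this record):

def is_json_serializable(val):
    # The types the json module can encode directly; everything else
    # (datetime, Decimal, set, ...) is treated as non-serializable.
    json_types = (type(None), bool, dict, float, int, list, str, tuple)
    return isinstance(val, json_types)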

namespace: feedparser.urls.convert_to_idn
type: function
project_path: Text-Processing/feedparser
completion_path: Text-Processing/feedparser/feedparser/urls.py
signature_position: [ 61, 61 ]
body_position: [ 66, 83 ]
dependency: { "cross_file": [], "intra_class": [], "intra_file": [] }
requirement: { "Arguments": ":param url: String. The URL to be converted to IDN notation.\n:return: String. The URL in IDN notation.", "Functionality": "Convert a URL to IDN notation. It checks if the host can be encoded in ASCII. If not, it converts the host to IDN form." }
tests: [ "tests/runtests.py::TestConvertToIdn::test_port", "tests/runtests.py::TestConvertToIdn::test_idn", "tests/runtests.py::TestConvertToIdn::test_control" ]
indent: 4
prompt:
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function: # file path: Text-Processing/feedparser/feedparser/html.py # lines: 195-206 # def handle_charref(self, ref): # """ # :type ref: str # :rtype: None # """ # # Called for each character reference, e.g. '&#160;' will extract '160' # # Reconstruct the original character reference. # ref = ref.lower() # if ref.startswith('x'): # value = int(ref[1:], 16) # else: # value = int(ref) # file path: Text-Processing/feedparser/feedparser/html.py # lines: 207-221 # if value in _cp1252: # self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:]) # else: # self.pieces.append('&#%s;' % ref) # def handle_entityref(self, ref): # """ # :type ref: str # :rtype: None # """ # # Called for each entity reference, e.g. '&copy;' will extract 'copy' # # Reconstruct the original entity reference. # if ref in html.entities.name2codepoint or ref == 'apos': # self.pieces.append('&%s;' % ref) # else: # self.pieces.append('&amp;%s' % ref) # file path: Text-Processing/feedparser/feedparser/html.py # lines: 247-257 # def handle_decl(self, text): # """ # :type text: str # :rtype: None # """ # # called for the DOCTYPE, if present, e.g. # # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" # # "http://www.w3.org/TR/html4/loose.dtd"> # # Reconstruct original DOCTYPE # self.pieces.append('<!%s>' % text) # _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match # file path: Text-Processing/feedparser/feedparser/html.py # lines: 279-292 # @staticmethod # def convert_charref(name): # """ # :type name: str # :rtype: str # """ # return '&#%s;' % name # @staticmethod # def convert_entityref(name): # """ # :type name: str # :rtype: str # """ # return '&%s;' % name # file path: Text-Processing/feedparser/feedparser/html.py # lines: 16-29 # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' # # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # # POSSIBILITY OF SUCH DAMAGE. # import html.entities # import re # from .sgml import * # file path: Text-Processing/feedparser/feedparser/html.py # lines: 102-115 # # By declaring these methods and overriding their compiled code # # with the code from sgmllib, the original code will execute in # # feedparser's scope instead of sgmllib's. This means that the # # `tagfind` and `charref` regular expressions will be found as # # they're declared above, not as they're declared in sgmllib. # def goahead(self, i): # raise NotImplementedError # # Replace goahead with SGMLParser's goahead() code object. 
# try: # goahead.__code__ = sgmllib.SGMLParser.goahead.__code__ # except AttributeError: # # Python 2 # # noinspection PyUnresolvedReferences # goahead.func_code = sgmllib.SGMLParser.goahead.func_code # file path: Text-Processing/feedparser/feedparser/urls.py # Copyright 2010-2022 Kurt McKee <contactme@kurtmckee.org> # Copyright 2002-2008 Mark Pilgrim # All rights reserved. # # This file is a part of feedparser. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. import re import urllib.parse from .html import _BaseHTMLProcessor # If you want feedparser to allow all URL schemes, set this to () # List culled from Python's urlparse documentation at: # http://docs.python.org/library/urlparse.html # as well as from "URI scheme" at Wikipedia: # https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme # Many more will likely need to be added! ACCEPTABLE_URI_SCHEMES = ( 'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'magnet', 'mailto', 'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu', 'sftp', 'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet', 'wais', # Additional common-but-unofficial schemes 'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs', 'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg', ) _urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)') def _urljoin(base, uri): uri = _urifixer.sub(r'\1\3', uri) try: uri = urllib.parse.urljoin(base, uri) except ValueError: uri = '' return uri Based on the information above, please complete the function in the current file Text-Processing/feedparser/feedparser/urls.py: def convert_to_idn(url): """Convert a URL to IDN notation"""
target_function_prompt: def convert_to_idn(url): """Convert a URL to IDN notation"""
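
A sketch of the body, hedged in that the reference solution is not shown here; it follows the requirement (return the URL untouched when the host is ASCII, otherwise IDNA-encode each host label while preserving any port) using only urllib.parse, which the file already imports:

def convert_to_idn(url):
    """Convert a URL to IDN notation"""
    parts = list(urllib.parse.urlsplit(url))
    try:
        # If the netloc is pure ASCII there is nothing to do.
        parts[1].encode('ascii')
    except UnicodeEncodeError:
        # Split off an optional port before encoding the host labels.
        host = parts[1].rsplit(':', 1)
        newhost = []
        port = ''
        if len(host) == 2:
            port = host.pop()
        for h in host[0].split('.'):
            newhost.append(h.encode('idna').decode('utf-8'))
        parts[1] = '.'.join(newhost)
        if port:
            parts[1] += ':' + port
        return urllib.parse.urlunsplit(parts)
    else:
        return url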

namespace: mistune.toc.add_toc_hook
type: function
project_path: Text-Processing/mistune
completion_path: Text-Processing/mistune/src/mistune/toc.py
signature_position: [ 4, 4 ]
body_position: [ 23, 44 ]
dependency: { "cross_file": [], "intra_class": [], "intra_file": [] }
requirement: { "Arguments": ":param md: Markdown instance. The instance of the Markdown class.\n:param min_level: Integer. The minimum heading level to include in the TOC.\n:param max_level: Integer. The maximum heading level to include in the TOC.\n:param heading_id: Function. A function to generate heading_id.\n:return: No return values.", "Functionality": "This function adds a hook to save table of contents (TOC) items into the state.env. It is usually helpful for doc generator." }
tests: [ "tests/test_hooks.py::TestTocHook::test_customize_heading_id_func" ]
indent: 4
prompt:
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function: # file path: Text-Processing/mistune/src/mistune/util.py # lines: 22-36 # def escape_url(link: str): # """Escape URL for safety.""" # safe = ( # ':/?#@' # gen-delims - '[]' (rfc3986) # '!$&()*+,;=' # sub-delims - "'" (rfc3986) # '%' # leave already-encoded octets alone # ) # return escape(quote(unescape(link), safe=safe)) # def safe_entity(s: str): # """Escape characters for safety.""" # return escape(unescape(s)) # def unikey(s: str): # """Generate a unique key for links and footnotes.""" # key = ' '.join(s.split()).strip() # return key.lower().upper() # file path: Text-Processing/mistune/src/mistune/util.py # lines: 1-12 # import re # from urllib.parse import quote # from html import _replace_charref # _expand_tab_re = re.compile(r'^( {0,3})\t', flags=re.M) # def expand_leading_tab(text: str, width=4): # def repl(m): # s = m.group(1) # return s + ' ' * (width - len(s)) # return _expand_tab_re.sub(repl, text) # def expand_tab(text: str, space: str=' '): # repl = r'\1' + space # return _expand_tab_re.sub(repl, text) # file path: Text-Processing/mistune/src/mistune/util.py # lines: 37-50 # _charref_re = re.compile( # r'&(#[0-9]{1,7};' # r'|#[xX][0-9a-fA-F]+;' # r'|[^\t\n\f <&#;]{1,32};)' # ) # def unescape(s: str): # """ # Copy from `html.unescape`, but `_charref` is different. CommonMark # does not accept entity references without a trailing semicolon # """ # if '&' not in s: # return s # return _charref_re.sub(_replace_charref, s) # _striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)') # file path: Text-Processing/mistune/src/mistune/toc.py from .util import striptags Based on the information above, please complete the function in the current file Text-Processing/mistune/src/mistune/toc.py: def add_toc_hook(md, min_level=1, max_level=3, heading_id=None): """Add a hook to save toc items into ``state.env``. This is usually helpful for doc generator:: import mistune from mistune.toc import add_toc_hook, render_toc_ul md = mistune.create_markdown(...) add_toc_hook(md) html, state = md.parse(text) toc_items = state.env['toc_items'] toc_html = render_toc_ul(toc_items) :param md: Markdown instance :param min_level: min heading level :param max_level: max heading level :param heading_id: a function to generate heading_id """
target_function_prompt: def add_toc_hook(md, min_level=1, max_level=3, heading_id=None): """Add a hook to save toc items into ``state.env``. This is usually helpful for doc generator:: import mistune from mistune.toc import add_toc_hook, render_toc_ul md = mistune.create_markdown(...) add_toc_hook(md) html, state = md.parse(text) toc_items = state.env['toc_items'] toc_html = render_toc_ul(toc_items) :param md: Markdown instance :param min_level: min heading level :param max_level: max heading level :param heading_id: a function to generate heading_id """
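
A sketch of the body, assuming mistune 3.x internals that this record's context does not show: state.tokens holding block tokens with attrs['level'], an md.before_render_hooks list, and a default 'toc_<n>' id scheme are all assumptions; striptags (the file's only shown import) strips markup from the heading text:

def add_toc_hook(md, min_level=1, max_level=3, heading_id=None):
    if heading_id is None:
        # Hypothetical default: number headings toc_1, toc_2, ...
        def heading_id(token, index):
            return 'toc_' + str(index + 1)

    def toc_hook(md, state):
        # Collect heading tokens whose level falls in [min_level, max_level].
        headings = [
            tok for tok in state.tokens
            if tok['type'] == 'heading'
            and min_level <= tok['attrs']['level'] <= max_level
        ]
        toc_items = []
        for i, tok in enumerate(headings):
            tok['attrs']['id'] = heading_id(tok, i)
            toc_items.append(
                (tok['attrs']['level'], tok['attrs']['id'], striptags(tok['text']))
            )
        # Save items into state.env, as the docstring promises.
        state.env['toc_items'] = toc_items

    md.before_render_hooks.append(toc_hook)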

namespace: mistune.plugins.table.table_in_quote
type: function
project_path: Text-Processing/mistune
completion_path: Text-Processing/mistune/src/mistune/plugins/table.py
signature_position: [ 170, 170 ]
body_position: [ 172, 173 ]
dependency: { "cross_file": [], "intra_class": [], "intra_file": [] }
requirement: { "Arguments": ":param md: Markdown. The Markdown instance.\n:return: No return values.", "Functionality": "This function enables the table plugin in block quotes by inserting rules for table and nptable before the paragraph in the block quote rules." }
tests: [ "tests/test_plugins.py::TestExtraPlugins::test_table_in_quote" ]
indent: 4
prompt:
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function: # file path: Text-Processing/mistune/src/mistune/helpers.py # lines: 41-50 # def unescape_char(text): # return _ESCAPE_CHAR_RE.sub(r'\1', text) # def parse_link_text(src, pos): # level = 1 # found = False # start_pos = pos # while pos < len(src): # m = _INLINE_SQUARE_BRACKET_RE.search(src, pos) # if not m: # break # file path: Text-Processing/mistune/src/mistune/helpers.py # lines: 26-40 # BLOCK_TAGS = ( # 'address', 'article', 'aside', 'base', 'basefont', 'blockquote', # 'body', 'caption', 'center', 'col', 'colgroup', 'dd', 'details', # 'dialog', 'dir', 'div', 'dl', 'dt', 'fieldset', 'figcaption', # 'figure', 'footer', 'form', 'frame', 'frameset', 'h1', 'h2', 'h3', # 'h4', 'h5', 'h6', 'head', 'header', 'hr', 'html', 'iframe', # 'legend', 'li', 'link', 'main', 'menu', 'menuitem', 'meta', 'nav', # 'noframes', 'ol', 'optgroup', 'option', 'p', 'param', 'section', # 'source', 'summary', 'table', 'tbody', 'td', 'tfoot', 'th', 'thead', # 'title', 'tr', 'track', 'ul' # ) # PRE_TAGS = ('pre', 'script', 'style', 'textarea') # _INLINE_LINK_LABEL_RE = re.compile(LINK_LABEL + r'\]') # _INLINE_SQUARE_BRACKET_RE = re.compile(PREVENT_BACKSLASH + r'[\[\]]') # _ESCAPE_CHAR_RE = re.compile(r'\\(' + PUNCTUATION + r')') # file path: Text-Processing/mistune/src/mistune/helpers.py # lines: 89-99 # def parse_link_title(src, start_pos, max_pos): # m = LINK_TITLE_RE.match(src, start_pos, max_pos) # if m: # title = m.group(1)[1:-1] # title = unescape_char(title) # return title, m.end() # return None, None # def parse_link(src, pos): # href, href_pos = parse_link_href(src, pos) # if href is None: # return None, None # file path: Text-Processing/mistune/src/mistune/helpers.py # lines: 100-109 # title, title_pos = parse_link_title(src, href_pos, len(src)) # next_pos = title_pos or href_pos # m = PAREN_END_RE.match(src, next_pos) # if not m: # return None, None # href = unescape_char(href) # attrs = {'url': escape_url(href)} # if title: # attrs['title'] = title # return attrs, m.end() # file path: Text-Processing/mistune/src/mistune/helpers.py # lines: 14-25 # LINK_TITLE_RE = re.compile( # r'[ \t\n]+(' # r'"(?:\\' + PUNCTUATION + r'|[^"\x00])*"|' # "title" # r"'(?:\\" + PUNCTUATION + r"|[^'\x00])*'" # 'title' # r')' # ) # PAREN_END_RE = re.compile(r'\s*\)') # HTML_TAGNAME = r'[A-Za-z][A-Za-z0-9-]*' # HTML_ATTRIBUTES = ( # r'(?:\s+[A-Za-z_:][A-Za-z0-9_.:-]*' # r'(?:\s*=\s*(?:[^ !"\'=<>`]+|\'[^\']*?\'|"[^\"]*?"))?)*' # ) # file path: Text-Processing/mistune/src/mistune/helpers.py # lines: 1-13 # import re # import string # from .util import escape_url # PREVENT_BACKSLASH = r'(?<!\\)(?:\\\\)*' # PUNCTUATION = r'[' + re.escape(string.punctuation) + r']' # LINK_LABEL = r'(?:[^\\\[\]]|\\.){0,500}' # LINK_BRACKET_START = re.compile(r'[ \t]*\n?[ \t]*<') # LINK_BRACKET_RE = re.compile(r'<([^<>\n\\\x00]*)>') # LINK_HREF_BLOCK_RE = re.compile(r'[ \t]*\n?[ \t]*([^\s]+)(?:\s|$)') # LINK_HREF_INLINE_RE = re.compile( # r'[ \t]*\n?[ \t]*([^ \t\n]*?)(?:[ \t\n]|' # r'(?:' + PREVENT_BACKSLASH + r'\)))' # ) # file path: Text-Processing/mistune/src/mistune/helpers.py # lines: 64-77 # def parse_link_label(src, start_pos): # m = _INLINE_LINK_LABEL_RE.match(src, start_pos) # if m: # label = m.group(0)[:-1] # return label, m.end() # return None, None # def parse_link_href(src, start_pos, block=False): # m = LINK_BRACKET_START.match(src, start_pos) # if m: # start_pos = m.end() - 1 # m = LINK_BRACKET_RE.match(src, 
start_pos) # if m: # return m.group(1), m.end() # return None, None # file path: Text-Processing/mistune/src/mistune/helpers.py # lines: 78-88 # if block: # m = LINK_HREF_BLOCK_RE.match(src, start_pos) # else: # m = LINK_HREF_INLINE_RE.match(src, start_pos) # if not m: # return None, None # end_pos = m.end() # href = m.group(1) # if block and src[end_pos - 1] == href[-1]: # return href, end_pos # return href, end_pos - 1 # file path: Text-Processing/mistune/src/mistune/plugins/table.py import re from ..helpers import PREVENT_BACKSLASH # https://michelf.ca/projects/php-markdown/extra/#table __all__ = ['table', 'table_in_quote', 'table_in_list'] TABLE_PATTERN = ( r'^ {0,3}\|(?P<table_head>.+)\|[ \t]*\n' r' {0,3}\|(?P<table_align> *[-:]+[-| :]*)\|[ \t]*\n' r'(?P<table_body>(?: {0,3}\|.*\|[ \t]*(?:\n|$))*)\n*' ) NP_TABLE_PATTERN = ( r'^ {0,3}(?P<nptable_head>\S.*\|.*)\n' r' {0,3}(?P<nptable_align>[-:]+ *\|[-| :]*)\n' r'(?P<nptable_body>(?:.*\|.*(?:\n|$))*)\n*' ) TABLE_CELL = re.compile(r'^ {0,3}\|(.+)\|[ \t]*$') CELL_SPLIT = re.compile(r' *' + PREVENT_BACKSLASH + r'\| *') ALIGN_CENTER = re.compile(r'^ *:-+: *$') ALIGN_LEFT = re.compile(r'^ *:-+ *$') ALIGN_RIGHT = re.compile(r'^ *-+: *$') def parse_table(block, m, state): pos = m.end() header = m.group('table_head') align = m.group('table_align') thead, aligns = _process_thead(header, align) if not thead: return rows = [] body = m.group('table_body') for text in body.splitlines(): m = TABLE_CELL.match(text) if not m: # pragma: no cover return row = _process_row(m.group(1), aligns) if not row: return rows.append(row) children = [thead, {'type': 'table_body', 'children': rows}] state.append_token({'type': 'table', 'children': children}) return pos def parse_nptable(block, m, state): header = m.group('nptable_head') align = m.group('nptable_align') thead, aligns = _process_thead(header, align) if not thead: return rows = [] body = m.group('nptable_body') for text in body.splitlines(): row = _process_row(text, aligns) if not row: return rows.append(row) children = [thead, {'type': 'table_body', 'children': rows}] state.append_token({'type': 'table', 'children': children}) return m.end() def _process_thead(header, align): headers = CELL_SPLIT.split(header) aligns = CELL_SPLIT.split(align) if len(headers) != len(aligns): return None, None for i, v in enumerate(aligns): if ALIGN_CENTER.match(v): aligns[i] = 'center' elif ALIGN_LEFT.match(v): aligns[i] = 'left' elif ALIGN_RIGHT.match(v): aligns[i] = 'right' else: aligns[i] = None children = [ { 'type': 'table_cell', 'text': text.strip(), 'attrs': {'align': aligns[i], 'head': True} } for i, text in enumerate(headers) ] thead = {'type': 'table_head', 'children': children} return thead, aligns def _process_row(text, aligns): cells = CELL_SPLIT.split(text) if len(cells) != len(aligns): return None children = [ { 'type': 'table_cell', 'text': text.strip(), 'attrs': {'align': aligns[i], 'head': False} } for i, text in enumerate(cells) ] return {'type': 'table_row', 'children': children} def render_table(renderer, text): return '<table>\n' + text + '</table>\n' def render_table_head(renderer, text): return '<thead>\n<tr>\n' + text + '</tr>\n</thead>\n' def render_table_body(renderer, text): return '<tbody>\n' + text + '</tbody>\n' def render_table_row(renderer, text): return '<tr>\n' + text + '</tr>\n' def render_table_cell(renderer, text, align=None, head=False): if head: tag = 'th' else: tag = 'td' html = ' <' + tag if align: html += ' style="text-align:' + align + '"' return html + '>' + text + '</' + tag 
+ '>\n' def table(md): """A mistune plugin to support table, spec defined at https://michelf.ca/projects/php-markdown/extra/#table Here is an example: .. code-block:: text First Header | Second Header ------------- | ------------- Content Cell | Content Cell Content Cell | Content Cell :param md: Markdown instance """ md.block.register('table', TABLE_PATTERN, parse_table, before='paragraph') md.block.register('nptable', NP_TABLE_PATTERN, parse_nptable, before='paragraph') if md.renderer and md.renderer.NAME == 'html': md.renderer.register('table', render_table) md.renderer.register('table_head', render_table_head) md.renderer.register('table_body', render_table_body) md.renderer.register('table_row', render_table_row) md.renderer.register('table_cell', render_table_cell) Based on the information above, please complete the function in the current file Text-Processing/mistune/src/mistune/plugins/table.py: def table_in_quote(md): """Enable table plugin in block quotes."""
target_function_prompt: def table_in_quote(md): """Enable table plugin in block quotes."""
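
No guesswork is needed for this body: it appears verbatim in the table.py context quoted in the next record, registering the two table rules ahead of paragraph inside block quotes:

def table_in_quote(md):
    """Enable table plugin in block quotes."""
    # Try the table patterns before falling back to plain paragraphs
    # inside block quotes.
    md.block.insert_rule(md.block.block_quote_rules, 'table', before='paragraph')
    md.block.insert_rule(md.block.block_quote_rules, 'nptable', before='paragraph')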

namespace: mistune.plugins.table.table_in_list
type: function
project_path: Text-Processing/mistune
completion_path: Text-Processing/mistune/src/mistune/plugins/table.py
signature_position: [ 176, 176 ]
body_position: [ 178, 179 ]
dependency: { "cross_file": [], "intra_class": [], "intra_file": [] }
requirement: { "Arguments": ":param md: Markdown. The Markdown instance to enable the table plugin in the list.\n:return: No return values.", "Functionality": "This function enables the table plugin in the list. It inserts the table and nptable rules before the paragraph rule in the list." }
tests: [ "tests/test_plugins.py::TestExtraPlugins::test_table_in_list" ]
indent: 4
prompt:
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function: # file path: Text-Processing/mistune/src/mistune/helpers.py # lines: 26-40 # BLOCK_TAGS = ( # 'address', 'article', 'aside', 'base', 'basefont', 'blockquote', # 'body', 'caption', 'center', 'col', 'colgroup', 'dd', 'details', # 'dialog', 'dir', 'div', 'dl', 'dt', 'fieldset', 'figcaption', # 'figure', 'footer', 'form', 'frame', 'frameset', 'h1', 'h2', 'h3', # 'h4', 'h5', 'h6', 'head', 'header', 'hr', 'html', 'iframe', # 'legend', 'li', 'link', 'main', 'menu', 'menuitem', 'meta', 'nav', # 'noframes', 'ol', 'optgroup', 'option', 'p', 'param', 'section', # 'source', 'summary', 'table', 'tbody', 'td', 'tfoot', 'th', 'thead', # 'title', 'tr', 'track', 'ul' # ) # PRE_TAGS = ('pre', 'script', 'style', 'textarea') # _INLINE_LINK_LABEL_RE = re.compile(LINK_LABEL + r'\]') # _INLINE_SQUARE_BRACKET_RE = re.compile(PREVENT_BACKSLASH + r'[\[\]]') # _ESCAPE_CHAR_RE = re.compile(r'\\(' + PUNCTUATION + r')') # file path: Text-Processing/mistune/src/mistune/helpers.py # lines: 41-50 # def unescape_char(text): # return _ESCAPE_CHAR_RE.sub(r'\1', text) # def parse_link_text(src, pos): # level = 1 # found = False # start_pos = pos # while pos < len(src): # m = _INLINE_SQUARE_BRACKET_RE.search(src, pos) # if not m: # break # file path: Text-Processing/mistune/src/mistune/helpers.py # lines: 89-99 # def parse_link_title(src, start_pos, max_pos): # m = LINK_TITLE_RE.match(src, start_pos, max_pos) # if m: # title = m.group(1)[1:-1] # title = unescape_char(title) # return title, m.end() # return None, None # def parse_link(src, pos): # href, href_pos = parse_link_href(src, pos) # if href is None: # return None, None # file path: Text-Processing/mistune/src/mistune/helpers.py # lines: 14-25 # LINK_TITLE_RE = re.compile( # r'[ \t\n]+(' # r'"(?:\\' + PUNCTUATION + r'|[^"\x00])*"|' # "title" # r"'(?:\\" + PUNCTUATION + r"|[^'\x00])*'" # 'title' # r')' # ) # PAREN_END_RE = re.compile(r'\s*\)') # HTML_TAGNAME = r'[A-Za-z][A-Za-z0-9-]*' # HTML_ATTRIBUTES = ( # r'(?:\s+[A-Za-z_:][A-Za-z0-9_.:-]*' # r'(?:\s*=\s*(?:[^ !"\'=<>`]+|\'[^\']*?\'|"[^\"]*?"))?)*' # ) # file path: Text-Processing/mistune/src/mistune/helpers.py # lines: 100-109 # title, title_pos = parse_link_title(src, href_pos, len(src)) # next_pos = title_pos or href_pos # m = PAREN_END_RE.match(src, next_pos) # if not m: # return None, None # href = unescape_char(href) # attrs = {'url': escape_url(href)} # if title: # attrs['title'] = title # return attrs, m.end() # file path: Text-Processing/mistune/src/mistune/helpers.py # lines: 51-63 # pos = m.end() # marker = m.group(0) # if marker == ']': # level -= 1 # if level == 0: # found = True # break # else: # level += 1 # if found: # text = src[start_pos:pos-1] # return text, pos # return None, None # file path: Text-Processing/mistune/src/mistune/helpers.py # lines: 1-13 # import re # import string # from .util import escape_url # PREVENT_BACKSLASH = r'(?<!\\)(?:\\\\)*' # PUNCTUATION = r'[' + re.escape(string.punctuation) + r']' # LINK_LABEL = r'(?:[^\\\[\]]|\\.){0,500}' # LINK_BRACKET_START = re.compile(r'[ \t]*\n?[ \t]*<') # LINK_BRACKET_RE = re.compile(r'<([^<>\n\\\x00]*)>') # LINK_HREF_BLOCK_RE = re.compile(r'[ \t]*\n?[ \t]*([^\s]+)(?:\s|$)') # LINK_HREF_INLINE_RE = re.compile( # r'[ \t]*\n?[ \t]*([^ \t\n]*?)(?:[ \t\n]|' # r'(?:' + PREVENT_BACKSLASH + r'\)))' # ) # file path: Text-Processing/mistune/src/mistune/helpers.py # lines: 64-77 # def parse_link_label(src, start_pos): # m = 
_INLINE_LINK_LABEL_RE.match(src, start_pos) # if m: # label = m.group(0)[:-1] # return label, m.end() # return None, None # def parse_link_href(src, start_pos, block=False): # m = LINK_BRACKET_START.match(src, start_pos) # if m: # start_pos = m.end() - 1 # m = LINK_BRACKET_RE.match(src, start_pos) # if m: # return m.group(1), m.end() # return None, None # file path: Text-Processing/mistune/src/mistune/helpers.py # lines: 78-88 # if block: # m = LINK_HREF_BLOCK_RE.match(src, start_pos) # else: # m = LINK_HREF_INLINE_RE.match(src, start_pos) # if not m: # return None, None # end_pos = m.end() # href = m.group(1) # if block and src[end_pos - 1] == href[-1]: # return href, end_pos # return href, end_pos - 1 # file path: Text-Processing/mistune/src/mistune/plugins/table.py import re from ..helpers import PREVENT_BACKSLASH # https://michelf.ca/projects/php-markdown/extra/#table __all__ = ['table', 'table_in_quote', 'table_in_list'] TABLE_PATTERN = ( r'^ {0,3}\|(?P<table_head>.+)\|[ \t]*\n' r' {0,3}\|(?P<table_align> *[-:]+[-| :]*)\|[ \t]*\n' r'(?P<table_body>(?: {0,3}\|.*\|[ \t]*(?:\n|$))*)\n*' ) NP_TABLE_PATTERN = ( r'^ {0,3}(?P<nptable_head>\S.*\|.*)\n' r' {0,3}(?P<nptable_align>[-:]+ *\|[-| :]*)\n' r'(?P<nptable_body>(?:.*\|.*(?:\n|$))*)\n*' ) TABLE_CELL = re.compile(r'^ {0,3}\|(.+)\|[ \t]*$') CELL_SPLIT = re.compile(r' *' + PREVENT_BACKSLASH + r'\| *') ALIGN_CENTER = re.compile(r'^ *:-+: *$') ALIGN_LEFT = re.compile(r'^ *:-+ *$') ALIGN_RIGHT = re.compile(r'^ *-+: *$') def parse_table(block, m, state): pos = m.end() header = m.group('table_head') align = m.group('table_align') thead, aligns = _process_thead(header, align) if not thead: return rows = [] body = m.group('table_body') for text in body.splitlines(): m = TABLE_CELL.match(text) if not m: # pragma: no cover return row = _process_row(m.group(1), aligns) if not row: return rows.append(row) children = [thead, {'type': 'table_body', 'children': rows}] state.append_token({'type': 'table', 'children': children}) return pos def parse_nptable(block, m, state): header = m.group('nptable_head') align = m.group('nptable_align') thead, aligns = _process_thead(header, align) if not thead: return rows = [] body = m.group('nptable_body') for text in body.splitlines(): row = _process_row(text, aligns) if not row: return rows.append(row) children = [thead, {'type': 'table_body', 'children': rows}] state.append_token({'type': 'table', 'children': children}) return m.end() def _process_thead(header, align): headers = CELL_SPLIT.split(header) aligns = CELL_SPLIT.split(align) if len(headers) != len(aligns): return None, None for i, v in enumerate(aligns): if ALIGN_CENTER.match(v): aligns[i] = 'center' elif ALIGN_LEFT.match(v): aligns[i] = 'left' elif ALIGN_RIGHT.match(v): aligns[i] = 'right' else: aligns[i] = None children = [ { 'type': 'table_cell', 'text': text.strip(), 'attrs': {'align': aligns[i], 'head': True} } for i, text in enumerate(headers) ] thead = {'type': 'table_head', 'children': children} return thead, aligns def _process_row(text, aligns): cells = CELL_SPLIT.split(text) if len(cells) != len(aligns): return None children = [ { 'type': 'table_cell', 'text': text.strip(), 'attrs': {'align': aligns[i], 'head': False} } for i, text in enumerate(cells) ] return {'type': 'table_row', 'children': children} def render_table(renderer, text): return '<table>\n' + text + '</table>\n' def render_table_head(renderer, text): return '<thead>\n<tr>\n' + text + '</tr>\n</thead>\n' def render_table_body(renderer, text): return '<tbody>\n' + text + 
'</tbody>\n' def render_table_row(renderer, text): return '<tr>\n' + text + '</tr>\n' def render_table_cell(renderer, text, align=None, head=False): if head: tag = 'th' else: tag = 'td' html = ' <' + tag if align: html += ' style="text-align:' + align + '"' return html + '>' + text + '</' + tag + '>\n' def table(md): """A mistune plugin to support table, spec defined at https://michelf.ca/projects/php-markdown/extra/#table Here is an example: .. code-block:: text First Header | Second Header ------------- | ------------- Content Cell | Content Cell Content Cell | Content Cell :param md: Markdown instance """ md.block.register('table', TABLE_PATTERN, parse_table, before='paragraph') md.block.register('nptable', NP_TABLE_PATTERN, parse_nptable, before='paragraph') if md.renderer and md.renderer.NAME == 'html': md.renderer.register('table', render_table) md.renderer.register('table_head', render_table_head) md.renderer.register('table_body', render_table_body) md.renderer.register('table_row', render_table_row) md.renderer.register('table_cell', render_table_cell) def table_in_quote(md): """Enable table plugin in block quotes.""" md.block.insert_rule(md.block.block_quote_rules, 'table', before='paragraph') md.block.insert_rule(md.block.block_quote_rules, 'nptable', before='paragraph') Based on the information above, please complete the function in the current file Text-Processing/mistune/src/mistune/plugins/table.py: def table_in_list(md): """Enable table plugin in list."""
target_function_prompt: def table_in_list(md): """Enable table plugin in list."""
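
By direct analogy with table_in_quote above, a sketch targeting the list rule set (the attribute name md.block.list_rules is an assumption mirroring md.block.block_quote_rules):

def table_in_list(md):
    """Enable table plugin in list."""
    # Assumed attribute: list_rules, the list-item counterpart of
    # block_quote_rules used by table_in_quote.
    md.block.insert_rule(md.block.list_rules, 'table', before='paragraph')
    md.block.insert_rule(md.block.list_rules, 'nptable', before='paragraph')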

namespace: xmnlp.utils.parallel_handler
type: function
project_path: Text-Processing/xmnlp
completion_path: Text-Processing/xmnlp/xmnlp/utils/__init__.py
signature_position: [ 90, 92 ]
body_position: [ 101, 107 ]
dependency: { "cross_file": [], "intra_class": [], "intra_file": [] }
requirement: { "Arguments": ":param callback: Callable. The callback function to be applied to the list of texts.\n:param texts: List[str]. The list of texts to be processed.\n:param n_jobs: int. The pool size of threads. Defaults to 2.\n:param kwargs: Any additional keyword arguments to be passed to the callback function.\n:return: Generator[List[Any], None, None]. A generator that yields the results of applying the callback function to the texts in parallel.", "Functionality": "This function is a parallel handler that takes a callback function and a list of texts as input. It then processes the texts using the callback function in parallel using a thread pool executor. If the input `texts` is not a list, raise a ValueError(\"You should pass a list of texts\")." }
tests: [ "tests/test_xmnlp.py::test_radical_parallel", "tests/test_xmnlp.py::test_pinyin_parallel" ]
indent: 4
prompt:
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function: # file path: Text-Processing/PyLaTeX/pylatex/lists.py # lines: 1-15 # # -*- coding: utf-8 -*- # """ # This module implements the classes that deal with LaTeX lists. # These lists are specifically enumerate, itemize and description. # .. :copyright: (c) 2015 by Sean McLemon. # :license: MIT, see License for more details. # """ # from pylatex.utils import NoEscape # from .base_classes import Command, Environment, Options # from .package import Package # class List(Environment): # """A base class that represents a list.""" # #: List environments cause compile errors when they do not contain items. # #: This is why they are omitted fully if they are empty. # omit_if_empty = True # file path: Text-Processing/xmnlp/xmnlp/utils/__init__.py # -*- coding: utf-8 -*- # -------------------------------------------# # author: sean lee # # email: xmlee97@gmail.com # # -------------------------------------------# import os import re import concurrent.futures as futures from functools import partial from typing import Any, Callable, List, Generator import numpy as np re_line_skip = re.compile('[\r\n]') re_delimiter = re.compile('[,。?!;]') def split_text(doc: str) -> List[str]: sents = [] for line in re_line_skip.split(doc): line = line.strip() if not line: continue for sent in re_delimiter.split(line): sent = sent.strip() if not sent: continue sents.append(sent) return sents def filelist(path: str) -> Generator[str, None, None]: if os.path.isdir(path): for root, dirs, files in os.walk(path): if not dirs: for f in files: yield os.sep.join([root, f]) else: yield path def load_stopword(fpath: str) -> List[str]: """load stopwords from file """ stopwords = set() for fname in filelist(fpath): with open(fname, 'r', encoding='utf-8') as f: for line in f: line = line.strip() if not line: continue stopwords.add(line) return stopwords def rematch(offsets): """ rematch bert token """ mapping = [] for offset in offsets: if offset[0] == 0 and offset[1] == 0: mapping.append([]) else: mapping.append([i for i in range(offset[0], offset[1])]) return mapping def topK(matrix, K, axis=1): """ numpy topK """ if axis == 0: row_index = np.arange(matrix.shape[1 - axis]) topk_index = np.argpartition(-matrix, K, axis=axis)[0:K, :] topk_data = matrix[topk_index, row_index] topk_index_sort = np.argsort(-topk_data, axis=axis) topk_data_sort = topk_data[topk_index_sort, row_index] topk_index_sort = topk_index[0:K, :][topk_index_sort, row_index] else: column_index = np.arange(matrix.shape[1 - axis])[:, None] topk_index = np.argpartition(-matrix, K, axis=axis)[:, 0:K] topk_data = matrix[column_index, topk_index] topk_index_sort = np.argsort(-topk_data, axis=axis) topk_data_sort = topk_data[column_index, topk_index_sort] topk_index_sort = topk_index[:, 0:K][column_index, topk_index_sort] return topk_data_sort, topk_index_sort Based on the information above, please complete the function in the current file Text-Processing/xmnlp/xmnlp/utils/__init__.py: def parallel_handler(callback: Callable, texts: List[str], n_jobs: int = 2, **kwargs) -> Generator[ List[Any], None, None ]: """parallel handler Args: callback: callback function texts: List[str] n_jobs: int, pool size of threads Return: Generator[List[str]] """
target_function_prompt: def parallel_handler(callback: Callable, texts: List[str], n_jobs: int = 2, **kwargs) -> Generator[ List[Any], None, None ]: """parallel handler Args: callback: callback function texts: List[str] n_jobs: int, pool size of threads Return: Generator[List[str]] """
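
The requirement fixes both the ValueError message and the thread-pool behaviour, and the file already imports concurrent.futures as futures and functools.partial, so a sketch follows almost mechanically:

def parallel_handler(callback: Callable,
                     texts: List[str],
                     n_jobs: int = 2,
                     **kwargs) -> Generator[List[Any], None, None]:
    """parallel handler"""
    if not isinstance(texts, list):
        raise ValueError('You should pass a list of texts')
    with futures.ThreadPoolExecutor(max_workers=n_jobs) as executor:
        # partial binds the extra keyword arguments to the callback,
        # then map applies it to every text across the pool.
        yield from executor.map(partial(callback, **kwargs), texts)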

namespace: parsel.utils.shorten
type: function
project_path: Text-Processing/parsel
completion_path: Text-Processing/parsel/parsel/utils.py
signature_position: [ 87, 87 ]
body_position: [ 89, 95 ]
dependency: { "cross_file": [], "intra_class": [], "intra_file": [] }
requirement: { "Arguments": ":param text: String. The input text to be shortened.\n:param width: Integer. The width to which the text should be shortened.\n:param suffix: String. The suffix to be added at the end of the shortened text. Defaults to \"...\".\n:return: String. The shortened text.", "Functionality": "Shorten the given text to fit in the given width. If the length of the text is less than or equal to the width, the original text is returned. If the width is greater than the length of the suffix, the text is truncated to fit the width and the suffix is added. If the width is greater than or equal to 0, the suffix is returned based on the width. Otherwise, a ValueError(\"width must be equal or greater than 0\") is raised." }
tests: [ "tests/test_utils.py::test_shorten" ]
indent: 4
prompt:
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function: # file path: Text-Processing/natasha/natasha/extractors.py # lines: 51-59 # def find(self, text): # matches = list(self(text)) # if not matches: # return # matches = sorted(matches, key=lambda _: _.start) # start = matches[0].start # stop = matches[-1].stop # parts = [_.fact for _ in matches] # return Match(start, stop, obj.Addr(parts)) # file path: Text-Processing/PyLaTeX/pylatex/lists.py # lines: 27-40 # def __init__(self, enumeration_symbol=None, *, options=None, **kwargs): # r""" # Args # ---- # enumeration_symbol: str # The enumeration symbol to use, see the `enumitem # <https://www.ctan.org/pkg/enumitem>`_ documentation to see what # can be used here. This argument is not escaped as it usually # should usually contain commands, so do not use user input here. # options: str or list or `.Options` # Custom options to be added to the enumerate list. These options are # merged with the options created by ``enumeration_symbol``. # """ # self._enumeration_symbol = enumeration_symbol # file path: Text-Processing/parsel/parsel/utils.py import re from typing import Any, Iterable, Iterator, List, Match, Pattern, Union, cast from w3lib.html import replace_entities as w3lib_replace_entities def flatten(x: Iterable[Any]) -> List[Any]: """flatten(sequence) -> list Returns a single, flat list which contains all elements retrieved from the sequence and all recursively contained sub-sequences (iterables). Examples: >>> [1, 2, [3,4], (5,6)] [1, 2, [3, 4], (5, 6)] >>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, (8,9,10)]) [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10] >>> flatten(["foo", "bar"]) ['foo', 'bar'] >>> flatten(["foo", ["baz", 42], "bar"]) ['foo', 'baz', 42, 'bar'] """ return list(iflatten(x)) def iflatten(x: Iterable[Any]) -> Iterator[Any]: """iflatten(sequence) -> Iterator Similar to ``.flatten()``, but returns iterator instead""" for el in x: if _is_listlike(el): yield from flatten(el) else: yield el def _is_listlike(x: Any) -> bool: """ >>> _is_listlike("foo") False >>> _is_listlike(5) False >>> _is_listlike(b"foo") False >>> _is_listlike([b"foo"]) True >>> _is_listlike((b"foo",)) True >>> _is_listlike({}) True >>> _is_listlike(set()) True >>> _is_listlike((x for x in range(3))) True >>> _is_listlike(range(5)) True """ return hasattr(x, "__iter__") and not isinstance(x, (str, bytes)) def extract_regex( regex: Union[str, Pattern[str]], text: str, replace_entities: bool = True ) -> List[str]: """Extract a list of strings from the given text/encoding using the following policies: * if the regex contains a named group called "extract" that will be returned * if the regex contains multiple numbered groups, all those will be returned (flattened) * if the regex doesn't contain any group the entire regex matching is returned """ if isinstance(regex, str): regex = re.compile(regex, re.UNICODE) if "extract" in regex.groupindex: # named group try: extracted = cast(Match[str], regex.search(text)).group("extract") except AttributeError: strings = [] else: strings = [extracted] if extracted is not None else [] else: # full regex or numbered groups strings = regex.findall(text) strings = flatten(strings) if not replace_entities: return strings return [w3lib_replace_entities(s, keep=["lt", "amp"]) for s in strings] Based on the information above, please complete the function in the current file Text-Processing/parsel/parsel/utils.py: def shorten(text: str, width: int, suffix: str = 
"...") -> str: """Truncate the given text to fit in the given width."""
target_function_prompt: def shorten(text: str, width: int, suffix: str = "...") -> str: """Truncate the given text to fit in the given width."""
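
The Functionality text enumerates every branch, including the exact ValueError message, so the body is essentially dictated:

def shorten(text: str, width: int, suffix: str = "...") -> str:
    """Truncate the given text to fit in the given width."""
    if len(text) <= width:
        return text
    if width > len(suffix):
        return text[: width - len(suffix)] + suffix
    if width >= 0:
        # Not even the full suffix fits; keep its last `width` characters.
        return suffix[len(suffix) - width:]
    raise ValueError("width must be equal or greater than 0")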

namespace: parsel.xpathfuncs.set_xpathfunc
type: function
project_path: Text-Processing/parsel
completion_path: Text-Processing/parsel/parsel/xpathfuncs.py
signature_position: [ 13, 13 ]
body_position: [ 27, 31 ]
dependency: { "cross_file": [], "intra_class": [], "intra_file": [] }
requirement: { "Arguments": ":param fname: String. The identifier under which the function will be registered.\n:param func: Callable. The function to be registered. If None, the extension function will be removed.\n:return: No return values.", "Functionality": "This function registers a custom extension function to use in XPath expressions. The function registered under the fname identifier will be called for every matching node, being passed a context parameter as well as any parameters passed from the corresponding XPath expression." }
tests: [ "tests/test_xpathfuncs.py::XPathFuncsTestCase::test_set_xpathfunc" ]
indent: 4
prompt:
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function: # file path: Text-Processing/parsel/parsel/xpathfuncs.py import re from typing import Any, Callable, Optional from lxml import etree from w3lib.html import HTML5_WHITESPACE regex = f"[{HTML5_WHITESPACE}]+" replace_html5_whitespaces = re.compile(regex).sub Based on the information above, please complete the function in the current file Text-Processing/parsel/parsel/xpathfuncs.py: def set_xpathfunc(fname: str, func: Optional[Callable]) -> None: # type: ignore[type-arg] """Register a custom extension function to use in XPath expressions. The function ``func`` registered under ``fname`` identifier will be called for every matching node, being passed a ``context`` parameter as well as any parameters passed from the corresponding XPath expression. If ``func`` is ``None``, the extension function will be removed. See more `in lxml documentation`_. .. _`in lxml documentation`: https://lxml.de/extensions.html#xpath-extension-functions """
target_function_prompt: def set_xpathfunc(fname: str, func: Optional[Callable]) -> None: # type: ignore[type-arg] """Register a custom extension function to use in XPath expressions. The function ``func`` registered under ``fname`` identifier will be called for every matching node, being passed a ``context`` parameter as well as any parameters passed from the corresponding XPath expression. If ``func`` is ``None``, the extension function will be removed. See more `in lxml documentation`_. .. _`in lxml documentation`: https://lxml.de/extensions.html#xpath-extension-functions """
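
lxml's etree.FunctionNamespace(None) returns the default (un-prefixed) XPath function namespace, which supports dict-style registration and deletion; a sketch that matches the docstring's contract:

def set_xpathfunc(fname: str, func: Optional[Callable]) -> None:
    ns_fns = etree.FunctionNamespace(None)
    if func is not None:
        # Register: func(context, *args) is called per matching node.
        ns_fns[fname] = func
    else:
        # Remove a previously registered extension function.
        del ns_fns[fname]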

namespace: dominate.dom_tag._get_thread_context
type: function
project_path: Text-Processing/dominate
completion_path: Text-Processing/dominate/dominate/dom_tag.py
signature_position: [ 47, 47 ]
body_position: [ 48, 51 ]
dependency: { "cross_file": [], "intra_class": [], "intra_file": [] }
requirement: { "Arguments": ":param: No input parameters.\n:return: Integer. The hash value of the current thread context.", "Functionality": "This function returns the hash value of the current thread context. It first creates a list of the current thread and greenlet (if available) and then returns the hash value of the tuple of the context list." }
tests: [ "tests/test_dom_tag.py::test___get_thread_context" ]
indent: 2
prompt:
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function: # file path: Text-Processing/dominate/dominate/util.py # lines: 117-130 # def __init__(self, func, *args, **kwargs): # super(lazy, self).__init__() # self.func = func # self.args = args # self.kwargs = kwargs # def _render(self, sb, *a, **kw): # r = self.func(*self.args, **self.kwargs) # sb.append(str(r)) # class text(dom_tag): # ''' # Just a string. Useful for inside context managers # ''' # is_pretty = False # is_inline = True # file path: Text-Processing/dominate/dominate/util.py # lines: 105-116 # class lazy(dom_tag): # ''' # delays function execution until rendered # ''' # def __new__(_cls, *args, **kwargs): # ''' # Need to reset this special method or else # dom_tag will think it's being used as a dectorator. # This means lazy() can't be used as a dectorator, but # thinking about when you might want that just confuses me. # ''' # return object.__new__(_cls) # file path: Text-Processing/dominate/dominate/document.py # lines: 15-21 # from . import tags # from . import util # try: # basestring = basestring # except NameError: # py3 # basestring = str # unicode = str # file path: Text-Processing/dominate/dominate/util.py # lines: 131-145 # def __init__(self, _text, escape=True): # super(text, self).__init__() # self.escape = escape # if escape: # self.text = str_escape(_text) # else: # self.text = _text # def _render(self, sb, *a, **kw): # sb.append(self.text) # return sb # def raw(s): # ''' # Inserts a raw string into the DOM. Unsafe. Alias for text(x, escape=False) # ''' # return text(s, escape=False) # file path: Text-Processing/dominate/dominate/document.py # lines: 37-48 # self.footer = util.container() # self._entry = self.main # def get_title(self): # return self.title_node.text # def set_title(self, title): # if isinstance(title, basestring): # self.title_node.text = title # else: # self.head.remove(self.title_node) # self.head.add(title) # self.title_node = title # title = property(get_title, set_title) # file path: Text-Processing/dominate/dominate/dom_tag.py __license__ = ''' This file is part of Dominate. Dominate is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Dominate is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with Dominate. If not, see <http://www.gnu.org/licenses/>. ''' # pylint: disable=bad-indentation, bad-whitespace, missing-docstring import copy import numbers from collections import defaultdict, namedtuple from functools import wraps import threading try: # Python 3 from collections.abc import Callable except ImportError: # pragma: no cover # Python 2.7 from collections import Callable try: basestring = basestring except NameError: # py3 # pragma: no cover basestring = str unicode = str try: import greenlet except ImportError: greenlet = None Based on the information above, please complete the function in the current file Text-Processing/dominate/dominate/dom_tag.py: def _get_thread_context():
target_function_prompt: def _get_thread_context():
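
The finished body happens to be quoted verbatim in the next record's context (dom_tag.py lines 27-40):

def _get_thread_context():
  # Identify the current execution context: the OS thread, plus the
  # current greenlet when the greenlet package is installed.
  context = [threading.current_thread()]
  if greenlet:
    context.append(greenlet.getcurrent())
  return hash(tuple(context))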

namespace: dominate.util.system
type: function
project_path: Text-Processing/dominate
completion_path: Text-Processing/dominate/dominate/util.py
signature_position: [ 45, 45 ]
body_position: [ 49, 52 ]
dependency: { "cross_file": [], "intra_class": [], "intra_file": [] }
requirement: { "Arguments": ":param cmd: String. The system command to be executed.\n:param data: Bytes. Optional input data to be passed to the command.\n:return: String. The output of the system command as a decoded string.", "Functionality": "This function runs a system command and returns the output as a string. It uses the subprocess module to run the command and capture the output." }
tests: [ "tests/test_utils.py::test_system" ]
indent: 2
prompt:
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function: # file path: Text-Processing/dominate/dominate/dom_tag.py # lines: 41-55 # class dom_tag(object): # is_single = False # Tag does not require matching end tag (ex. <hr/>) # is_pretty = True # Text inside the tag should be left as-is (ex. <pre>) # # otherwise, text will be escaped() and whitespace may be # # modified # is_inline = False # def __new__(_cls, *args, **kwargs): # ''' # Check if bare tag is being used a a decorator # (called with a single function arg). # decorate the function and return # ''' # if len(args) == 1 and isinstance(args[0], Callable) \ # and not isinstance(args[0], dom_tag) and not kwargs: # wrapped = args[0] # file path: Text-Processing/dominate/dominate/dom_tag.py # lines: 261-275 # # open tag # sb.append('<') # sb.append(name) # for attribute, value in sorted(self.attributes.items()): # if value in (False, None): # continue # val = unicode(value) if isinstance(value, util.text) and not value.escape else util.escape(unicode(value), True) # sb.append(' %s="%s"' % (attribute, val)) # sb.append(' />' if self.is_single and xhtml else '>') # if self.is_single: # return sb # inline = self._render_children(sb, indent_level + 1, indent_str, pretty, xhtml) # if pretty and not inline: # sb.append('\n') # sb.append(indent_str * indent_level) # file path: Text-Processing/dominate/dominate/dom_tag.py # lines: 147-161 # if isinstance(obj, basestring): # obj = util.escape(obj) # self.children.append(obj) # elif isinstance(obj, dom_tag): # stack = dom_tag._with_contexts.get(_get_thread_context(), []) # for s in stack: # s.used.add(obj) # self.children.append(obj) # obj.parent = self # elif isinstance(obj, dict): # for attr, value in obj.items(): # self.set_attribute(*dom_tag.clean_pair(attr, value)) # elif hasattr(obj, '__iter__'): # for subobj in obj: # self.add(subobj) # file path: Text-Processing/dominate/dominate/dom_tag.py # lines: 276-280 # # close tag # sb.append('</') # sb.append(name) # sb.append('>') # return sb # file path: Text-Processing/dominate/dominate/dom_tag.py # lines: 247-260 # # String and unicode representations are the same as render() # def __unicode__(self): # return self.render() # __str__ = __unicode__ # def render(self, indent=' ', pretty=True, xhtml=False): # data = self._render([], 0, indent, pretty, xhtml) # return u''.join(data) # def _render(self, sb, indent_level, indent_str, pretty, xhtml): # pretty = pretty and self.is_pretty # name = getattr(self, 'tagname', type(self).__name__) # # Workaround for python keywords and standard classes/methods # # (del, object, input) # if name[-1] == '_': # name = name[:-1] # file path: Text-Processing/dominate/dominate/dom_tag.py # lines: 322-334 # # Workaround for dash # special_prefix = any([attribute.startswith(x) for x in ('data_', 'aria_')]) # if attribute in set(['http_equiv']) or special_prefix: # attribute = attribute.replace('_', '-').lower() # # Workaround for colon # if attribute.split('_')[0] in ('xlink', 'xml', 'xmlns'): # attribute = attribute.replace('_', ':', 1).lower() # return attribute # @classmethod # def clean_pair(cls, attribute, value): # ''' # This will call `clean_attribute` on the attribute and also allows for the # creation of boolean attributes. 
# file path: Text-Processing/dominate/dominate/dom_tag.py # lines: 308-321 # # Shorthand # attribute = { # 'cls': 'class', # 'className': 'class', # 'class_name': 'class', # 'klass': 'class', # 'fr': 'for', # 'html_for': 'for', # 'htmlFor': 'for', # 'phor': 'for', # }.get(attribute, attribute) # # Workaround for Python's reserved words # if attribute[0] == '_': # attribute = attribute[1:] # file path: Text-Processing/dominate/dominate/dom_tag.py # lines: 27-40 # try: # basestring = basestring # except NameError: # py3 # pragma: no cover # basestring = str # unicode = str # try: # import greenlet # except ImportError: # greenlet = None # def _get_thread_context(): # context = [threading.current_thread()] # if greenlet: # context.append(greenlet.getcurrent()) # return hash(tuple(context)) # file path: Text-Processing/dominate/dominate/dom_tag.py # lines: 176-188 # def get(self, tag=None, **kwargs): # ''' # Recursively searches children for tags of a certain # type with matching attributes. # ''' # # Stupid workaround since we can not use dom_tag in the method declaration # if tag is None: tag = dom_tag # attrs = [(dom_tag.clean_attribute(attr), value) # for attr, value in kwargs.items()] # results = [] # for child in self.children: # if (isinstance(tag, basestring) and type(child).__name__ == tag) or \ # (not isinstance(tag, basestring) and isinstance(child, tag)): # file path: Text-Processing/dominate/dominate/document.py # lines: 22-36 # class document(tags.html): # tagname = 'html' # def __init__(self, title='Dominate', doctype='<!DOCTYPE html>', *a, **kw): # ''' # Creates a new document instance. Accepts `title` and `doctype` # ''' # super(document, self).__init__(*a, **kw) # self.doctype = doctype # self.head = super(document, self).add(tags.head()) # self.body = super(document, self).add(tags.body()) # if title is not None: # self.title_node = self.head.add(tags.title(title)) # with self.body: # self.header = util.container() # self.main = util.container() # file path: Text-Processing/dominate/dominate/util.py ''' Utility classes for creating dynamic html documents ''' __license__ = ''' This file is part of Dominate. Dominate is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Dominate is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with Dominate. If not, see <http://www.gnu.org/licenses/>. ''' import re from .dom_tag import dom_tag try: basestring = basestring except NameError: basestring = str unichr = chr def include(f): ''' includes the contents of a file on disk. takes a filename ''' fl = open(f, 'r') data = fl.read() fl.close() return raw(data) Based on the information above, please complete the function in the current file Text-Processing/dominate/dominate/util.py: def system(cmd, data=None): ''' pipes the output of a program '''
target_function_prompt: def system(cmd, data=None): ''' pipes the output of a program '''
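
Again the completed body is visible verbatim in the following record's context for dominate/util.py:

def system(cmd, data=None):
  '''
  pipes the output of a program
  '''
  import subprocess
  # Run via the shell, feeding `data` to stdin and capturing stdout.
  s = subprocess.Popen(cmd, shell=True,
                       stdout=subprocess.PIPE,
                       stdin=subprocess.PIPE)
  out, err = s.communicate(data)
  return out.decode('utf8')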

namespace: dominate.util.url_unescape
type: function
project_path: Text-Processing/dominate
completion_path: Text-Processing/dominate/dominate/util.py
signature_position: [ 118, 118 ]
body_position: [ 119, 120 ]
dependency: { "cross_file": [], "intra_class": [], "intra_file": [] }
requirement: { "Arguments": ":param data: String. The URL-encoded string to be unescaped.\n:return: String. The unescaped string.", "Functionality": "This function takes a string as input and unescapes any URL-encoded characters in the string." }
tests: [ "tests/test_utils.py::test_url" ]
indent: 2
prompt:
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function: # file path: Text-Processing/dominate/dominate/dom_tag.py # lines: 147-161 # if isinstance(obj, basestring): # obj = util.escape(obj) # self.children.append(obj) # elif isinstance(obj, dom_tag): # stack = dom_tag._with_contexts.get(_get_thread_context(), []) # for s in stack: # s.used.add(obj) # self.children.append(obj) # obj.parent = self # elif isinstance(obj, dict): # for attr, value in obj.items(): # self.set_attribute(*dom_tag.clean_pair(attr, value)) # elif hasattr(obj, '__iter__'): # for subobj in obj: # self.add(subobj) # file path: Text-Processing/dominate/dominate/dom_tag.py # lines: 247-260 # # String and unicode representations are the same as render() # def __unicode__(self): # return self.render() # __str__ = __unicode__ # def render(self, indent=' ', pretty=True, xhtml=False): # data = self._render([], 0, indent, pretty, xhtml) # return u''.join(data) # def _render(self, sb, indent_level, indent_str, pretty, xhtml): # pretty = pretty and self.is_pretty # name = getattr(self, 'tagname', type(self).__name__) # # Workaround for python keywords and standard classes/methods # # (del, object, input) # if name[-1] == '_': # name = name[:-1] # file path: Text-Processing/dominate/dominate/dom_tag.py # lines: 261-275 # # open tag # sb.append('<') # sb.append(name) # for attribute, value in sorted(self.attributes.items()): # if value in (False, None): # continue # val = unicode(value) if isinstance(value, util.text) and not value.escape else util.escape(unicode(value), True) # sb.append(' %s="%s"' % (attribute, val)) # sb.append(' />' if self.is_single and xhtml else '>') # if self.is_single: # return sb # inline = self._render_children(sb, indent_level + 1, indent_str, pretty, xhtml) # if pretty and not inline: # sb.append('\n') # sb.append(indent_str * indent_level) # file path: Text-Processing/dominate/dominate/dom_tag.py # lines: 322-334 # # Workaround for dash # special_prefix = any([attribute.startswith(x) for x in ('data_', 'aria_')]) # if attribute in set(['http_equiv']) or special_prefix: # attribute = attribute.replace('_', '-').lower() # # Workaround for colon # if attribute.split('_')[0] in ('xlink', 'xml', 'xmlns'): # attribute = attribute.replace('_', ':', 1).lower() # return attribute # @classmethod # def clean_pair(cls, attribute, value): # ''' # This will call `clean_attribute` on the attribute and also allows for the # creation of boolean attributes. # file path: Text-Processing/dominate/dominate/dom_tag.py # lines: 41-55 # class dom_tag(object): # is_single = False # Tag does not require matching end tag (ex. <hr/>) # is_pretty = True # Text inside the tag should be left as-is (ex. <pre>) # # otherwise, text will be escaped() and whitespace may be # # modified # is_inline = False # def __new__(_cls, *args, **kwargs): # ''' # Check if bare tag is being used a a decorator # (called with a single function arg). 
# decorate the function and return # ''' # if len(args) == 1 and isinstance(args[0], Callable) \ # and not isinstance(args[0], dom_tag) and not kwargs: # wrapped = args[0] # file path: Text-Processing/dominate/dominate/dom_tag.py # lines: 308-321 # # Shorthand # attribute = { # 'cls': 'class', # 'className': 'class', # 'class_name': 'class', # 'klass': 'class', # 'fr': 'for', # 'html_for': 'for', # 'htmlFor': 'for', # 'phor': 'for', # }.get(attribute, attribute) # # Workaround for Python's reserved words # if attribute[0] == '_': # attribute = attribute[1:] # file path: Text-Processing/dominate/dominate/dom_tag.py # lines: 335-343 # Ex. input(selected=True) is equivalent to input(selected="selected") # ''' # attribute = cls.clean_attribute(attribute) # # Check for boolean attributes # # (i.e. selected=True becomes selected="selected") # if value is True: # value = attribute # # Ignore `if value is False`: this is filtered out in render() # return (attribute, value) # file path: Text-Processing/dominate/dominate/dom_tag.py # lines: 133-146 # def delete_attribute(self, key): # if isinstance(key, int): # del self.children[key:key+1] # else: # del self.attributes[key] # __delitem__ = delete_attribute # def add(self, *args): # ''' # Add new child tags. # ''' # for obj in args: # if isinstance(obj, numbers.Number): # # Convert to string so we fall into next if block # obj = str(obj) # file path: Text-Processing/dominate/dominate/dom_tag.py # lines: 162-175 # else: # wtf is it? # raise ValueError('%r not a tag or string.' % obj) # if len(args) == 1: # return args[0] # return args # def add_raw_string(self, s): # self.children.append(s) # def remove(self, obj): # self.children.remove(obj) # def clear(self): # for i in self.children: # if isinstance(i, dom_tag) and i.parent is self: # i.parent = None # self.children = [] # file path: Text-Processing/dominate/dominate/document.py # lines: 15-21 # from . import tags # from . import util # try: # basestring = basestring # except NameError: # py3 # basestring = str # unicode = str # file path: Text-Processing/dominate/dominate/util.py ''' Utility classes for creating dynamic html documents ''' __license__ = ''' This file is part of Dominate. Dominate is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Dominate is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with Dominate. If not, see <http://www.gnu.org/licenses/>. ''' import re from .dom_tag import dom_tag try: basestring = basestring except NameError: basestring = str unichr = chr def include(f): ''' includes the contents of a file on disk. takes a filename ''' fl = open(f, 'r') data = fl.read() fl.close() return raw(data) def system(cmd, data=None): ''' pipes the output of a program ''' import subprocess s = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE) out, err = s.communicate(data) return out.decode('utf8') def escape(data, quote=True): # stolen from std lib cgi ''' Escapes special characters into their html entities Replace special characters "&", "<" and ">" to HTML-safe sequences. 
If the optional flag quote is true, the quotation mark character (") is also translated. This is used to escape content that appears in the body of an HTML document ''' data = data.replace("&", "&amp;") # Must be done first! data = data.replace("<", "&lt;") data = data.replace(">", "&gt;") if quote: data = data.replace('"', "&quot;") return data _unescape = { 'quot': 34, 'amp': 38, 'lt': 60, 'gt': 62, 'nbsp': 32, # more here # http://www.w3.org/TR/html4/sgml/entities.html 'yuml': 255, } str_escape = escape def unescape(data): ''' unescapes html entities. the opposite of escape. ''' cc = re.compile(r'&(?:(?:#(\d+))|([^;]+));') result = [] m = cc.search(data) while m: result.append(data[0:m.start()]) d = m.group(1) if d: d = int(d) result.append(unichr(d)) else: d = _unescape.get(m.group(2), ord('?')) result.append(unichr(d)) data = data[m.end():] m = cc.search(data) result.append(data) return ''.join(result) _reserved = ";/?:@&=+$, " _replace_map = dict((c, '%%%2X' % ord(c)) for c in _reserved) def url_escape(data): return ''.join(_replace_map.get(c, c) for c in data) Based on the information above, please complete the function in the current file Text-Processing/dominate/dominate/util.py: def url_unescape(data):
def url_unescape(data):
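A minimal sketch of `url_unescape`, hedged: it assumes the inverse of the `url_escape` helper shown above is simply decoding every `%XX` hex escape, reusing the `re` and `unichr` names already available in util.py. The exact regex is illustrative, not taken from the repository:

def url_unescape(data):
  # Replace each two-hex-digit escape (e.g. '%3A') with its character.
  return re.sub('%([0-9a-fA-F]{2})',
                lambda m: unichr(int(m.group(1), 16)), data)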
rows.fields.DatetimeField.serialize
method
Text-Processing/rows
Text-Processing/rows/rows/fields.py
[ 390, 390 ]
[ 391, 394 ]
{ "cross_file": [], "intra_class": [], "intra_file": [] }
{ "Arguments": ":param cls: Class. The class instance.\n:param value: Datetime. The datetime value to be serialized.\n:param *args: Additional positional arguments.\n:param **kwargs: Additional keyword arguments.\n:return: String. The serialized datetime value in ISO 8601 format.", "Functionality": "Serialize the given datetime value into a string in ISO 8601 format." }
[ "tests/tests_fields.py::FieldsTestCase::test_DatetimeField" ]
8
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function: # file path: Text-Processing/natasha/natasha/norm.py # lines: 29-43 # def feats_match(a, b): # return ( # a.get(GENDER) == b.get(GENDER) # and a.get(NUMBER) == b.get(NUMBER) # and a.get(CASE) == b.get(CASE) # ) # def form_match(form, pos, feats): # return pos_match(form.pos, pos) and feats_match(form.feats, feats) # def select_form(forms, pos, feats): # for form in forms: # if form_match(form, pos, feats): # return form # def normal_word(word): # word = word.lower() # return word.replace('ё', 'е') # file path: Text-Processing/natasha/natasha/norm.py # lines: 83-91 # def syntax_normalize(vocab, tokens): # ids = set(select_inflectable(tokens)) # words = inflect_words(vocab, tokens, ids) # words = recover_shapes(words, tokens) # return recover_spaces(words, tokens) # def normalize(vocab, tokens): # words = inflect_words(vocab, tokens) # words = recover_shapes(words, tokens) # return recover_spaces(words, tokens) # file path: Text-Processing/natasha/natasha/norm.py # lines: 1-13 # from collections import defaultdict, deque # from .shape import recover_shape # PROPN = 'PROPN' # NOUN = 'NOUN' # ADJ = 'ADJ' # VERB = 'VERB' # GENDER = 'Gender' # NUMBER = 'Number' # CASE = 'Case' # NOM = 'Nom' # def recover_shapes(words, tokens): # for word, token in zip(words, tokens): # yield recover_shape(word, token.text) # file path: Text-Processing/natasha/natasha/norm.py # lines: 44-57 # def inflect_word(vocab, token): # word, pos, feats = token.text, token.pos, token.feats # word = normal_word(word) # if pos not in (PROPN, NOUN, ADJ, VERB): # return word # if feats.get(CASE) == NOM: # return word # forms = vocab(word) # form = select_form(forms, pos, feats) # if form: # form = form.inflect({NOM}) # if form: # return normal_word(form.word) # return word # file path: Text-Processing/natasha/natasha/norm.py # lines: 14-28 # def recover_spaces(words, tokens): # offset = None # parts = [] # for index, (word, token) in enumerate(zip(words, tokens)): # if index > 0: # parts.append(' ' * (token.start - offset)) # parts.append(word) # offset = token.stop # return ''.join(parts) # def normal_pos(pos): # if pos == PROPN: # pos = NOUN # return pos # def pos_match(a, b): # return normal_pos(a) == normal_pos(b) # file path: Text-Processing/natasha/natasha/norm.py # lines: 58-67 # def inflect_words(vocab, tokens, ids=None): # for token in tokens: # if not ids or token.id in ids: # yield inflect_word(vocab, token) # else: # yield token.text # def select_inflectable(tokens): # index = {} # for token in tokens: # index[token.id] = token # file path: Text-Processing/rows/rows/fields.py # coding: utf-8 # Copyright 2014-2019 Álvaro Justen <https://github.com/turicas/rows/> # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
from __future__ import unicode_literals import binascii import datetime import json import locale import re from base64 import b64decode, b64encode from collections import OrderedDict, defaultdict from decimal import Decimal, InvalidOperation from unicodedata import normalize import six if six.PY2: from itertools import izip_longest as zip_longest else: from itertools import zip_longest # Order matters here __all__ = [ "BoolField", "IntegerField", "FloatField", "DatetimeField", "DateField", "DecimalField", "PercentField", "JSONField", "EmailField", "TextField", "BinaryField", "Field", ] NULL = ("-", "null", "none", "nil", "n/a", "na") NULL_BYTES = (b"-", b"null", b"none", b"nil", b"n/a", b"na") REGEXP_ONLY_NUMBERS = re.compile("[^0-9\-]") SHOULD_NOT_USE_LOCALE = True # This variable is changed by rows.locale_manager SLUG_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_" def value_error(value, cls): value = repr(value) if len(value) > 50: value = value[:50] + "..." raise ValueError("Value '{}' can't be {}".format(value, cls.__name__)) class Field(object): """Base Field class - all fields should inherit from this As the fallback for all other field types are the BinaryField, this Field actually implements what is expected in the BinaryField """ TYPE = (type(None),) @classmethod def serialize(cls, value, *args, **kwargs): """Serialize a value to be exported `cls.serialize` should always return an unicode value, except for BinaryField """ if value is None: value = "" return value @classmethod def deserialize(cls, value, *args, **kwargs): """Deserialize a value just after importing it `cls.deserialize` should always return a value of type `cls.TYPE` or `None`. """ if isinstance(value, cls.TYPE): return value elif is_null(value): return None else: return value class BinaryField(Field): """Field class to represent byte arrays Is not locale-aware (does not need to be) """ TYPE = (six.binary_type,) @classmethod def serialize(cls, value, *args, **kwargs): if value is not None: if not isinstance(value, six.binary_type): value_error(value, cls) else: try: return b64encode(value).decode("ascii") except (TypeError, binascii.Error): return value else: return "" @classmethod def deserialize(cls, value, *args, **kwargs): if value is not None: if isinstance(value, six.binary_type): return value elif isinstance(value, six.text_type): try: return b64decode(value) except (TypeError, ValueError, binascii.Error): raise ValueError("Can't decode base64") else: value_error(value, cls) else: return b"" class BoolField(Field): """Base class to representing boolean Is not locale-aware (if you need to, please customize by changing its attributes like `TRUE_VALUES` and `FALSE_VALUES`) """ TYPE = (bool,) SERIALIZED_VALUES = {True: "true", False: "false", None: ""} TRUE_VALUES = ("true", "yes") FALSE_VALUES = ("false", "no") @classmethod def serialize(cls, value, *args, **kwargs): # TODO: should we serialize `None` as well or give it to the plugin? 
return cls.SERIALIZED_VALUES[value] @classmethod def deserialize(cls, value, *args, **kwargs): value = super(BoolField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value value = as_string(value).lower() if value in cls.TRUE_VALUES: return True elif value in cls.FALSE_VALUES: return False else: raise ValueError("Value is not boolean") class IntegerField(Field): """Field class to represent integer Is locale-aware """ TYPE = (int,) @classmethod def serialize(cls, value, *args, **kwargs): if value is None: return "" if SHOULD_NOT_USE_LOCALE: return six.text_type(value) else: grouping = kwargs.get("grouping", None) return locale.format("%d", value, grouping=grouping) @classmethod def deserialize(cls, value, *args, **kwargs): value = super(IntegerField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value elif isinstance(value, float): new_value = int(value) if new_value != value: raise ValueError("It's float, not integer") else: value = new_value value = as_string(value) if value != "0" and value.startswith("0"): raise ValueError("It's string, not integer") return int(value) if SHOULD_NOT_USE_LOCALE else locale.atoi(value) class FloatField(Field): """Field class to represent float Is locale-aware """ TYPE = (float,) @classmethod def serialize(cls, value, *args, **kwargs): if value is None: return "" if SHOULD_NOT_USE_LOCALE: return six.text_type(value) else: grouping = kwargs.get("grouping", None) return locale.format("%f", value, grouping=grouping) @classmethod def deserialize(cls, value, *args, **kwargs): value = super(FloatField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value value = as_string(value) if SHOULD_NOT_USE_LOCALE: return float(value) else: return locale.atof(value) class DecimalField(Field): """Field class to represent decimal data (as Python's decimal.Decimal) Is locale-aware """ TYPE = (Decimal,) @classmethod def serialize(cls, value, *args, **kwargs): if value is None: return "" value_as_string = six.text_type(value) if SHOULD_NOT_USE_LOCALE: return value_as_string else: grouping = kwargs.get("grouping", None) has_decimal_places = value_as_string.find(".") != -1 if not has_decimal_places: string_format = "%d" else: decimal_places = len(value_as_string.split(".")[1]) string_format = "%.{}f".format(decimal_places) return locale.format(string_format, value, grouping=grouping) @classmethod def deserialize(cls, value, *args, **kwargs): value = super(DecimalField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value elif type(value) in (int, float): return Decimal(six.text_type(value)) if SHOULD_NOT_USE_LOCALE: try: return Decimal(value) except InvalidOperation: value_error(value, cls) else: locale_vars = locale.localeconv() decimal_separator = locale_vars["decimal_point"] interesting_vars = ( "decimal_point", "mon_decimal_point", "mon_thousands_sep", "negative_sign", "positive_sign", "thousands_sep", ) chars = ( locale_vars[x].replace(".", r"\.").replace("-", r"\-") for x in interesting_vars ) interesting_chars = "".join(set(chars)) regexp = re.compile(r"[^0-9{} ]".format(interesting_chars)) value = as_string(value) if regexp.findall(value): value_error(value, cls) parts = [ REGEXP_ONLY_NUMBERS.subn("", number)[0] for number in value.split(decimal_separator) ] if len(parts) > 2: raise ValueError("Can't deserialize with this locale.") try: value = Decimal(parts[0]) if len(parts) == 2: decimal_places = len(parts[1]) value = value + 
(Decimal(parts[1]) / (10 ** decimal_places)) except InvalidOperation: value_error(value, cls) return value class PercentField(DecimalField): """Field class to represent percent values Is locale-aware (inherit this behaviour from `rows.DecimalField`) """ @classmethod def serialize(cls, value, *args, **kwargs): if value is None: return "" elif value == Decimal("0"): return "0.00%" value = Decimal(six.text_type(value * 100)[:-2]) value = super(PercentField, cls).serialize(value, *args, **kwargs) return "{}%".format(value) @classmethod def deserialize(cls, value, *args, **kwargs): if isinstance(value, cls.TYPE): return value elif is_null(value): return None value = as_string(value) if "%" not in value: value_error(value, cls) value = value.replace("%", "") return super(PercentField, cls).deserialize(value) / 100 class DateField(Field): """Field class to represent date Is not locale-aware (does not need to be) """ TYPE = (datetime.date,) INPUT_FORMAT = "%Y-%m-%d" OUTPUT_FORMAT = "%Y-%m-%d" @classmethod def serialize(cls, value, *args, **kwargs): if value is None: return "" return six.text_type(value.strftime(cls.OUTPUT_FORMAT)) @classmethod def deserialize(cls, value, *args, **kwargs): value = super(DateField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value value = as_string(value) dt_object = datetime.datetime.strptime(value, cls.INPUT_FORMAT) return datetime.date(dt_object.year, dt_object.month, dt_object.day) Based on the information above, please complete the function in the current file Text-Processing/rows/rows/fields.py: class DatetimeField(Field): """Field class to represent date-time Is not locale-aware (does not need to be) """ TYPE = (datetime.datetime,) DATETIME_REGEXP = re.compile( "^([0-9]{4})-([0-9]{2})-([0-9]{2})[ T]" "([0-9]{2}):([0-9]{2}):([0-9]{2})$" ) @classmethod def serialize(cls, value, *args, **kwargs):
class DatetimeField(Field): """Field class to represent date-time Is not locale-aware (does not need to be) """ TYPE = (datetime.datetime,) DATETIME_REGEXP = re.compile( "^([0-9]{4})-([0-9]{2})-([0-9]{2})[ T]" "([0-9]{2}):([0-9]{2}):([0-9]{2})$" ) @classmethod def serialize(cls, value, *args, **kwargs):
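The completed body for this record can be cross-checked against the copy of fields.py embedded in the later records of this dump; a sketch consistent with that source:

    @classmethod
    def serialize(cls, value, *args, **kwargs):
        if value is None:
            return ""
        # isoformat() yields ISO 8601, e.g. '2019-01-02T03:04:05'
        return six.text_type(value.isoformat())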
rows.fields.Field.serialize
method
Text-Processing/rows
Text-Processing/rows/rows/fields.py
[ 77, 77 ]
[ 84, 86 ]
{ "cross_file": [], "intra_class": [], "intra_file": [] }
{ "Arguments": ":param cls: Class. The class instance.\n:param value: Any. The value to be serialized.\n:param *args: Tuple. Additional positional arguments.\n:param **kwargs: Dictionary. Additional keyword arguments.\n:return: Any. The serialized value.", "Functionality": "This function serializes a value to be exported. It should always return a unicode value, except for BinaryField." }
[ "tests/tests_fields.py::FieldsTestCase::test_Field", "tests/tests_fields.py::FieldsTestCase::test_TextField" ]
8
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function: # file path: Text-Processing/natasha/natasha/norm.py # lines: 29-43 # def feats_match(a, b): # return ( # a.get(GENDER) == b.get(GENDER) # and a.get(NUMBER) == b.get(NUMBER) # and a.get(CASE) == b.get(CASE) # ) # def form_match(form, pos, feats): # return pos_match(form.pos, pos) and feats_match(form.feats, feats) # def select_form(forms, pos, feats): # for form in forms: # if form_match(form, pos, feats): # return form # def normal_word(word): # word = word.lower() # return word.replace('ё', 'е') # file path: Text-Processing/natasha/natasha/norm.py # lines: 83-91 # def syntax_normalize(vocab, tokens): # ids = set(select_inflectable(tokens)) # words = inflect_words(vocab, tokens, ids) # words = recover_shapes(words, tokens) # return recover_spaces(words, tokens) # def normalize(vocab, tokens): # words = inflect_words(vocab, tokens) # words = recover_shapes(words, tokens) # return recover_spaces(words, tokens) # file path: Text-Processing/natasha/natasha/norm.py # lines: 44-57 # def inflect_word(vocab, token): # word, pos, feats = token.text, token.pos, token.feats # word = normal_word(word) # if pos not in (PROPN, NOUN, ADJ, VERB): # return word # if feats.get(CASE) == NOM: # return word # forms = vocab(word) # form = select_form(forms, pos, feats) # if form: # form = form.inflect({NOM}) # if form: # return normal_word(form.word) # return word # file path: Text-Processing/natasha/natasha/norm.py # lines: 1-13 # from collections import defaultdict, deque # from .shape import recover_shape # PROPN = 'PROPN' # NOUN = 'NOUN' # ADJ = 'ADJ' # VERB = 'VERB' # GENDER = 'Gender' # NUMBER = 'Number' # CASE = 'Case' # NOM = 'Nom' # def recover_shapes(words, tokens): # for word, token in zip(words, tokens): # yield recover_shape(word, token.text) # file path: Text-Processing/natasha/natasha/norm.py # lines: 14-28 # def recover_spaces(words, tokens): # offset = None # parts = [] # for index, (word, token) in enumerate(zip(words, tokens)): # if index > 0: # parts.append(' ' * (token.start - offset)) # parts.append(word) # offset = token.stop # return ''.join(parts) # def normal_pos(pos): # if pos == PROPN: # pos = NOUN # return pos # def pos_match(a, b): # return normal_pos(a) == normal_pos(b) # file path: Text-Processing/natasha/natasha/norm.py # lines: 58-67 # def inflect_words(vocab, tokens, ids=None): # for token in tokens: # if not ids or token.id in ids: # yield inflect_word(vocab, token) # else: # yield token.text # def select_inflectable(tokens): # index = {} # for token in tokens: # index[token.id] = token # file path: Text-Processing/rows/rows/fields.py # coding: utf-8 # Copyright 2014-2019 Álvaro Justen <https://github.com/turicas/rows/> # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
from __future__ import unicode_literals import binascii import datetime import json import locale import re from base64 import b64decode, b64encode from collections import OrderedDict, defaultdict from decimal import Decimal, InvalidOperation from unicodedata import normalize import six if six.PY2: from itertools import izip_longest as zip_longest else: from itertools import zip_longest # Order matters here __all__ = [ "BoolField", "IntegerField", "FloatField", "DatetimeField", "DateField", "DecimalField", "PercentField", "JSONField", "EmailField", "TextField", "BinaryField", "Field", ] NULL = ("-", "null", "none", "nil", "n/a", "na") NULL_BYTES = (b"-", b"null", b"none", b"nil", b"n/a", b"na") REGEXP_ONLY_NUMBERS = re.compile("[^0-9\-]") SHOULD_NOT_USE_LOCALE = True # This variable is changed by rows.locale_manager SLUG_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_" def value_error(value, cls): value = repr(value) if len(value) > 50: value = value[:50] + "..." raise ValueError("Value '{}' can't be {}".format(value, cls.__name__)) Based on the information above, please complete the function in the current file Text-Processing/rows/rows/fields.py: class Field(object): """Base Field class - all fields should inherit from this As the fallback for all other field types are the BinaryField, this Field actually implements what is expected in the BinaryField """ TYPE = (type(None),) @classmethod def serialize(cls, value, *args, **kwargs): """Serialize a value to be exported `cls.serialize` should always return an unicode value, except for BinaryField """
class Field(object): """Base Field class - all fields should inherit from this As the fallback for all other field types are the BinaryField, this Field actually implements what is expected in the BinaryField """ TYPE = (type(None),) @classmethod def serialize(cls, value, *args, **kwargs): """Serialize a value to be exported `cls.serialize` should always return an unicode value, except for BinaryField """
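For this base class the serialized form is the value itself, with None mapped to an empty string; the body below matches the fields.py source reproduced elsewhere in this dump:

    @classmethod
    def serialize(cls, value, *args, **kwargs):
        if value is None:
            value = ""
        return value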
rows.fields.EmailField.serialize
method
Text-Processing/rows
Text-Processing/rows/rows/fields.py
[ 438, 438 ]
[ 439, 442 ]
{ "cross_file": [], "intra_class": [], "intra_file": [] }
{ "Arguments": ":param cls: Class. The class itself.\n:param value: Any. The value to be serialized.\n:param *args: Tuple. Additional positional arguments.\n:param **kwargs: Dictionary. Additional keyword arguments.\n:return: String. The serialized value.", "Functionality": "Serialize the value of the email field. If the value is None, it returns an empty string. Otherwise, it returns the string representation of the value." }
[ "tests/tests_fields.py::FieldsTestCase::test_EmailField" ]
8
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function: # file path: Text-Processing/natasha/natasha/norm.py # lines: 83-91 # def syntax_normalize(vocab, tokens): # ids = set(select_inflectable(tokens)) # words = inflect_words(vocab, tokens, ids) # words = recover_shapes(words, tokens) # return recover_spaces(words, tokens) # def normalize(vocab, tokens): # words = inflect_words(vocab, tokens) # words = recover_shapes(words, tokens) # return recover_spaces(words, tokens) # file path: Text-Processing/natasha/natasha/norm.py # lines: 29-43 # def feats_match(a, b): # return ( # a.get(GENDER) == b.get(GENDER) # and a.get(NUMBER) == b.get(NUMBER) # and a.get(CASE) == b.get(CASE) # ) # def form_match(form, pos, feats): # return pos_match(form.pos, pos) and feats_match(form.feats, feats) # def select_form(forms, pos, feats): # for form in forms: # if form_match(form, pos, feats): # return form # def normal_word(word): # word = word.lower() # return word.replace('ё', 'е') # file path: Text-Processing/natasha/natasha/norm.py # lines: 1-13 # from collections import defaultdict, deque # from .shape import recover_shape # PROPN = 'PROPN' # NOUN = 'NOUN' # ADJ = 'ADJ' # VERB = 'VERB' # GENDER = 'Gender' # NUMBER = 'Number' # CASE = 'Case' # NOM = 'Nom' # def recover_shapes(words, tokens): # for word, token in zip(words, tokens): # yield recover_shape(word, token.text) # file path: Text-Processing/natasha/natasha/norm.py # lines: 44-57 # def inflect_word(vocab, token): # word, pos, feats = token.text, token.pos, token.feats # word = normal_word(word) # if pos not in (PROPN, NOUN, ADJ, VERB): # return word # if feats.get(CASE) == NOM: # return word # forms = vocab(word) # form = select_form(forms, pos, feats) # if form: # form = form.inflect({NOM}) # if form: # return normal_word(form.word) # return word # file path: Text-Processing/natasha/natasha/norm.py # lines: 14-28 # def recover_spaces(words, tokens): # offset = None # parts = [] # for index, (word, token) in enumerate(zip(words, tokens)): # if index > 0: # parts.append(' ' * (token.start - offset)) # parts.append(word) # offset = token.stop # return ''.join(parts) # def normal_pos(pos): # if pos == PROPN: # pos = NOUN # return pos # def pos_match(a, b): # return normal_pos(a) == normal_pos(b) # file path: Text-Processing/natasha/natasha/norm.py # lines: 58-67 # def inflect_words(vocab, tokens, ids=None): # for token in tokens: # if not ids or token.id in ids: # yield inflect_word(vocab, token) # else: # yield token.text # def select_inflectable(tokens): # index = {} # for token in tokens: # index[token.id] = token # file path: Text-Processing/rows/rows/fields.py # coding: utf-8 # Copyright 2014-2019 Álvaro Justen <https://github.com/turicas/rows/> # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
from __future__ import unicode_literals import binascii import datetime import json import locale import re from base64 import b64decode, b64encode from collections import OrderedDict, defaultdict from decimal import Decimal, InvalidOperation from unicodedata import normalize import six if six.PY2: from itertools import izip_longest as zip_longest else: from itertools import zip_longest # Order matters here __all__ = [ "BoolField", "IntegerField", "FloatField", "DatetimeField", "DateField", "DecimalField", "PercentField", "JSONField", "EmailField", "TextField", "BinaryField", "Field", ] NULL = ("-", "null", "none", "nil", "n/a", "na") NULL_BYTES = (b"-", b"null", b"none", b"nil", b"n/a", b"na") REGEXP_ONLY_NUMBERS = re.compile("[^0-9\-]") SHOULD_NOT_USE_LOCALE = True # This variable is changed by rows.locale_manager SLUG_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_" def value_error(value, cls): value = repr(value) if len(value) > 50: value = value[:50] + "..." raise ValueError("Value '{}' can't be {}".format(value, cls.__name__)) class Field(object): """Base Field class - all fields should inherit from this As the fallback for all other field types are the BinaryField, this Field actually implements what is expected in the BinaryField """ TYPE = (type(None),) @classmethod def serialize(cls, value, *args, **kwargs): """Serialize a value to be exported `cls.serialize` should always return an unicode value, except for BinaryField """ if value is None: value = "" return value @classmethod def deserialize(cls, value, *args, **kwargs): """Deserialize a value just after importing it `cls.deserialize` should always return a value of type `cls.TYPE` or `None`. """ if isinstance(value, cls.TYPE): return value elif is_null(value): return None else: return value class BinaryField(Field): """Field class to represent byte arrays Is not locale-aware (does not need to be) """ TYPE = (six.binary_type,) @classmethod def serialize(cls, value, *args, **kwargs): if value is not None: if not isinstance(value, six.binary_type): value_error(value, cls) else: try: return b64encode(value).decode("ascii") except (TypeError, binascii.Error): return value else: return "" @classmethod def deserialize(cls, value, *args, **kwargs): if value is not None: if isinstance(value, six.binary_type): return value elif isinstance(value, six.text_type): try: return b64decode(value) except (TypeError, ValueError, binascii.Error): raise ValueError("Can't decode base64") else: value_error(value, cls) else: return b"" class BoolField(Field): """Base class to representing boolean Is not locale-aware (if you need to, please customize by changing its attributes like `TRUE_VALUES` and `FALSE_VALUES`) """ TYPE = (bool,) SERIALIZED_VALUES = {True: "true", False: "false", None: ""} TRUE_VALUES = ("true", "yes") FALSE_VALUES = ("false", "no") @classmethod def serialize(cls, value, *args, **kwargs): # TODO: should we serialize `None` as well or give it to the plugin? 
return cls.SERIALIZED_VALUES[value] @classmethod def deserialize(cls, value, *args, **kwargs): value = super(BoolField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value value = as_string(value).lower() if value in cls.TRUE_VALUES: return True elif value in cls.FALSE_VALUES: return False else: raise ValueError("Value is not boolean") class IntegerField(Field): """Field class to represent integer Is locale-aware """ TYPE = (int,) @classmethod def serialize(cls, value, *args, **kwargs): if value is None: return "" if SHOULD_NOT_USE_LOCALE: return six.text_type(value) else: grouping = kwargs.get("grouping", None) return locale.format("%d", value, grouping=grouping) @classmethod def deserialize(cls, value, *args, **kwargs): value = super(IntegerField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value elif isinstance(value, float): new_value = int(value) if new_value != value: raise ValueError("It's float, not integer") else: value = new_value value = as_string(value) if value != "0" and value.startswith("0"): raise ValueError("It's string, not integer") return int(value) if SHOULD_NOT_USE_LOCALE else locale.atoi(value) class FloatField(Field): """Field class to represent float Is locale-aware """ TYPE = (float,) @classmethod def serialize(cls, value, *args, **kwargs): if value is None: return "" if SHOULD_NOT_USE_LOCALE: return six.text_type(value) else: grouping = kwargs.get("grouping", None) return locale.format("%f", value, grouping=grouping) @classmethod def deserialize(cls, value, *args, **kwargs): value = super(FloatField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value value = as_string(value) if SHOULD_NOT_USE_LOCALE: return float(value) else: return locale.atof(value) class DecimalField(Field): """Field class to represent decimal data (as Python's decimal.Decimal) Is locale-aware """ TYPE = (Decimal,) @classmethod def serialize(cls, value, *args, **kwargs): if value is None: return "" value_as_string = six.text_type(value) if SHOULD_NOT_USE_LOCALE: return value_as_string else: grouping = kwargs.get("grouping", None) has_decimal_places = value_as_string.find(".") != -1 if not has_decimal_places: string_format = "%d" else: decimal_places = len(value_as_string.split(".")[1]) string_format = "%.{}f".format(decimal_places) return locale.format(string_format, value, grouping=grouping) @classmethod def deserialize(cls, value, *args, **kwargs): value = super(DecimalField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value elif type(value) in (int, float): return Decimal(six.text_type(value)) if SHOULD_NOT_USE_LOCALE: try: return Decimal(value) except InvalidOperation: value_error(value, cls) else: locale_vars = locale.localeconv() decimal_separator = locale_vars["decimal_point"] interesting_vars = ( "decimal_point", "mon_decimal_point", "mon_thousands_sep", "negative_sign", "positive_sign", "thousands_sep", ) chars = ( locale_vars[x].replace(".", r"\.").replace("-", r"\-") for x in interesting_vars ) interesting_chars = "".join(set(chars)) regexp = re.compile(r"[^0-9{} ]".format(interesting_chars)) value = as_string(value) if regexp.findall(value): value_error(value, cls) parts = [ REGEXP_ONLY_NUMBERS.subn("", number)[0] for number in value.split(decimal_separator) ] if len(parts) > 2: raise ValueError("Can't deserialize with this locale.") try: value = Decimal(parts[0]) if len(parts) == 2: decimal_places = len(parts[1]) value = value + 
(Decimal(parts[1]) / (10 ** decimal_places)) except InvalidOperation: value_error(value, cls) return value class PercentField(DecimalField): """Field class to represent percent values Is locale-aware (inherit this behaviour from `rows.DecimalField`) """ @classmethod def serialize(cls, value, *args, **kwargs): if value is None: return "" elif value == Decimal("0"): return "0.00%" value = Decimal(six.text_type(value * 100)[:-2]) value = super(PercentField, cls).serialize(value, *args, **kwargs) return "{}%".format(value) @classmethod def deserialize(cls, value, *args, **kwargs): if isinstance(value, cls.TYPE): return value elif is_null(value): return None value = as_string(value) if "%" not in value: value_error(value, cls) value = value.replace("%", "") return super(PercentField, cls).deserialize(value) / 100 class DateField(Field): """Field class to represent date Is not locale-aware (does not need to be) """ TYPE = (datetime.date,) INPUT_FORMAT = "%Y-%m-%d" OUTPUT_FORMAT = "%Y-%m-%d" @classmethod def serialize(cls, value, *args, **kwargs): if value is None: return "" return six.text_type(value.strftime(cls.OUTPUT_FORMAT)) @classmethod def deserialize(cls, value, *args, **kwargs): value = super(DateField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value value = as_string(value) dt_object = datetime.datetime.strptime(value, cls.INPUT_FORMAT) return datetime.date(dt_object.year, dt_object.month, dt_object.day) class DatetimeField(Field): """Field class to represent date-time Is not locale-aware (does not need to be) """ TYPE = (datetime.datetime,) DATETIME_REGEXP = re.compile( "^([0-9]{4})-([0-9]{2})-([0-9]{2})[ T]" "([0-9]{2}):([0-9]{2}):([0-9]{2})$" ) @classmethod def serialize(cls, value, *args, **kwargs): if value is None: return "" return six.text_type(value.isoformat()) @classmethod def deserialize(cls, value, *args, **kwargs): value = super(DatetimeField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value value = as_string(value) # TODO: may use iso8601 groups = cls.DATETIME_REGEXP.findall(value) if not groups: value_error(value, cls) else: return datetime.datetime(*[int(x) for x in groups[0]]) class TextField(Field): """Field class to represent unicode strings Is not locale-aware (does not need to be) """ TYPE = (six.text_type,) @classmethod def deserialize(cls, value, *args, **kwargs): if value is None or isinstance(value, cls.TYPE): return value else: return as_string(value) Based on the information above, please complete the function in the current file Text-Processing/rows/rows/fields.py: class EmailField(TextField): """Field class to represent e-mail addresses Is not locale-aware (does not need to be) """ EMAIL_REGEXP = re.compile( r"^[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]+$", flags=re.IGNORECASE ) @classmethod def serialize(cls, value, *args, **kwargs):
class EmailField(TextField): """Field class to represent e-mail addresses Is not locale-aware (does not need to be) """ EMAIL_REGEXP = re.compile( r"^[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]+$", flags=re.IGNORECASE ) @classmethod def serialize(cls, value, *args, **kwargs):
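The matching body, as shown in the fields.py copy later in this dump: None becomes an empty string, anything else its text representation:

    @classmethod
    def serialize(cls, value, *args, **kwargs):
        if value is None:
            return ""
        return six.text_type(value)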
rows.fields.as_string
function
Text-Processing/rows
Text-Processing/rows/rows/fields.py
[ 478, 478 ]
[ 479, 484 ]
{ "cross_file": [], "intra_class": [], "intra_file": [] }
{ "Arguments": ":param value: Any. The input value to be converted to a string.\n:return: String. The input value converted to a string.", "Functionality": "Convert the input value to a string. If the input value is already a string, it returns the input value. If the input value is a binary type, it raises a ValueError." }
[ "tests/tests_fields.py::FieldsFunctionsTestCase::test_as_string" ]
4
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function: # file path: Text-Processing/natasha/natasha/norm.py # lines: 29-43 # def feats_match(a, b): # return ( # a.get(GENDER) == b.get(GENDER) # and a.get(NUMBER) == b.get(NUMBER) # and a.get(CASE) == b.get(CASE) # ) # def form_match(form, pos, feats): # return pos_match(form.pos, pos) and feats_match(form.feats, feats) # def select_form(forms, pos, feats): # for form in forms: # if form_match(form, pos, feats): # return form # def normal_word(word): # word = word.lower() # return word.replace('ё', 'е') # file path: Text-Processing/natasha/natasha/norm.py # lines: 83-91 # def syntax_normalize(vocab, tokens): # ids = set(select_inflectable(tokens)) # words = inflect_words(vocab, tokens, ids) # words = recover_shapes(words, tokens) # return recover_spaces(words, tokens) # def normalize(vocab, tokens): # words = inflect_words(vocab, tokens) # words = recover_shapes(words, tokens) # return recover_spaces(words, tokens) # file path: Text-Processing/natasha/natasha/norm.py # lines: 44-57 # def inflect_word(vocab, token): # word, pos, feats = token.text, token.pos, token.feats # word = normal_word(word) # if pos not in (PROPN, NOUN, ADJ, VERB): # return word # if feats.get(CASE) == NOM: # return word # forms = vocab(word) # form = select_form(forms, pos, feats) # if form: # form = form.inflect({NOM}) # if form: # return normal_word(form.word) # return word # file path: Text-Processing/natasha/natasha/norm.py # lines: 14-28 # def recover_spaces(words, tokens): # offset = None # parts = [] # for index, (word, token) in enumerate(zip(words, tokens)): # if index > 0: # parts.append(' ' * (token.start - offset)) # parts.append(word) # offset = token.stop # return ''.join(parts) # def normal_pos(pos): # if pos == PROPN: # pos = NOUN # return pos # def pos_match(a, b): # return normal_pos(a) == normal_pos(b) # file path: Text-Processing/natasha/natasha/norm.py # lines: 58-67 # def inflect_words(vocab, tokens, ids=None): # for token in tokens: # if not ids or token.id in ids: # yield inflect_word(vocab, token) # else: # yield token.text # def select_inflectable(tokens): # index = {} # for token in tokens: # index[token.id] = token # file path: Text-Processing/natasha/natasha/norm.py # lines: 1-13 # from collections import defaultdict, deque # from .shape import recover_shape # PROPN = 'PROPN' # NOUN = 'NOUN' # ADJ = 'ADJ' # VERB = 'VERB' # GENDER = 'Gender' # NUMBER = 'Number' # CASE = 'Case' # NOM = 'Nom' # def recover_shapes(words, tokens): # for word, token in zip(words, tokens): # yield recover_shape(word, token.text) # file path: Text-Processing/rows/rows/fields.py # coding: utf-8 # Copyright 2014-2019 Álvaro Justen <https://github.com/turicas/rows/> # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
from __future__ import unicode_literals import binascii import datetime import json import locale import re from base64 import b64decode, b64encode from collections import OrderedDict, defaultdict from decimal import Decimal, InvalidOperation from unicodedata import normalize import six if six.PY2: from itertools import izip_longest as zip_longest else: from itertools import zip_longest # Order matters here __all__ = [ "BoolField", "IntegerField", "FloatField", "DatetimeField", "DateField", "DecimalField", "PercentField", "JSONField", "EmailField", "TextField", "BinaryField", "Field", ] NULL = ("-", "null", "none", "nil", "n/a", "na") NULL_BYTES = (b"-", b"null", b"none", b"nil", b"n/a", b"na") REGEXP_ONLY_NUMBERS = re.compile("[^0-9\-]") SHOULD_NOT_USE_LOCALE = True # This variable is changed by rows.locale_manager SLUG_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_" def value_error(value, cls): value = repr(value) if len(value) > 50: value = value[:50] + "..." raise ValueError("Value '{}' can't be {}".format(value, cls.__name__)) class Field(object): """Base Field class - all fields should inherit from this As the fallback for all other field types are the BinaryField, this Field actually implements what is expected in the BinaryField """ TYPE = (type(None),) @classmethod def serialize(cls, value, *args, **kwargs): """Serialize a value to be exported `cls.serialize` should always return an unicode value, except for BinaryField """ if value is None: value = "" return value @classmethod def deserialize(cls, value, *args, **kwargs): """Deserialize a value just after importing it `cls.deserialize` should always return a value of type `cls.TYPE` or `None`. """ if isinstance(value, cls.TYPE): return value elif is_null(value): return None else: return value class BinaryField(Field): """Field class to represent byte arrays Is not locale-aware (does not need to be) """ TYPE = (six.binary_type,) @classmethod def serialize(cls, value, *args, **kwargs): if value is not None: if not isinstance(value, six.binary_type): value_error(value, cls) else: try: return b64encode(value).decode("ascii") except (TypeError, binascii.Error): return value else: return "" @classmethod def deserialize(cls, value, *args, **kwargs): if value is not None: if isinstance(value, six.binary_type): return value elif isinstance(value, six.text_type): try: return b64decode(value) except (TypeError, ValueError, binascii.Error): raise ValueError("Can't decode base64") else: value_error(value, cls) else: return b"" class BoolField(Field): """Base class to representing boolean Is not locale-aware (if you need to, please customize by changing its attributes like `TRUE_VALUES` and `FALSE_VALUES`) """ TYPE = (bool,) SERIALIZED_VALUES = {True: "true", False: "false", None: ""} TRUE_VALUES = ("true", "yes") FALSE_VALUES = ("false", "no") @classmethod def serialize(cls, value, *args, **kwargs): # TODO: should we serialize `None` as well or give it to the plugin? 
return cls.SERIALIZED_VALUES[value] @classmethod def deserialize(cls, value, *args, **kwargs): value = super(BoolField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value value = as_string(value).lower() if value in cls.TRUE_VALUES: return True elif value in cls.FALSE_VALUES: return False else: raise ValueError("Value is not boolean") class IntegerField(Field): """Field class to represent integer Is locale-aware """ TYPE = (int,) @classmethod def serialize(cls, value, *args, **kwargs): if value is None: return "" if SHOULD_NOT_USE_LOCALE: return six.text_type(value) else: grouping = kwargs.get("grouping", None) return locale.format("%d", value, grouping=grouping) @classmethod def deserialize(cls, value, *args, **kwargs): value = super(IntegerField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value elif isinstance(value, float): new_value = int(value) if new_value != value: raise ValueError("It's float, not integer") else: value = new_value value = as_string(value) if value != "0" and value.startswith("0"): raise ValueError("It's string, not integer") return int(value) if SHOULD_NOT_USE_LOCALE else locale.atoi(value) class FloatField(Field): """Field class to represent float Is locale-aware """ TYPE = (float,) @classmethod def serialize(cls, value, *args, **kwargs): if value is None: return "" if SHOULD_NOT_USE_LOCALE: return six.text_type(value) else: grouping = kwargs.get("grouping", None) return locale.format("%f", value, grouping=grouping) @classmethod def deserialize(cls, value, *args, **kwargs): value = super(FloatField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value value = as_string(value) if SHOULD_NOT_USE_LOCALE: return float(value) else: return locale.atof(value) class DecimalField(Field): """Field class to represent decimal data (as Python's decimal.Decimal) Is locale-aware """ TYPE = (Decimal,) @classmethod def serialize(cls, value, *args, **kwargs): if value is None: return "" value_as_string = six.text_type(value) if SHOULD_NOT_USE_LOCALE: return value_as_string else: grouping = kwargs.get("grouping", None) has_decimal_places = value_as_string.find(".") != -1 if not has_decimal_places: string_format = "%d" else: decimal_places = len(value_as_string.split(".")[1]) string_format = "%.{}f".format(decimal_places) return locale.format(string_format, value, grouping=grouping) @classmethod def deserialize(cls, value, *args, **kwargs): value = super(DecimalField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value elif type(value) in (int, float): return Decimal(six.text_type(value)) if SHOULD_NOT_USE_LOCALE: try: return Decimal(value) except InvalidOperation: value_error(value, cls) else: locale_vars = locale.localeconv() decimal_separator = locale_vars["decimal_point"] interesting_vars = ( "decimal_point", "mon_decimal_point", "mon_thousands_sep", "negative_sign", "positive_sign", "thousands_sep", ) chars = ( locale_vars[x].replace(".", r"\.").replace("-", r"\-") for x in interesting_vars ) interesting_chars = "".join(set(chars)) regexp = re.compile(r"[^0-9{} ]".format(interesting_chars)) value = as_string(value) if regexp.findall(value): value_error(value, cls) parts = [ REGEXP_ONLY_NUMBERS.subn("", number)[0] for number in value.split(decimal_separator) ] if len(parts) > 2: raise ValueError("Can't deserialize with this locale.") try: value = Decimal(parts[0]) if len(parts) == 2: decimal_places = len(parts[1]) value = value + 
(Decimal(parts[1]) / (10 ** decimal_places)) except InvalidOperation: value_error(value, cls) return value class PercentField(DecimalField): """Field class to represent percent values Is locale-aware (inherit this behaviour from `rows.DecimalField`) """ @classmethod def serialize(cls, value, *args, **kwargs): if value is None: return "" elif value == Decimal("0"): return "0.00%" value = Decimal(six.text_type(value * 100)[:-2]) value = super(PercentField, cls).serialize(value, *args, **kwargs) return "{}%".format(value) @classmethod def deserialize(cls, value, *args, **kwargs): if isinstance(value, cls.TYPE): return value elif is_null(value): return None value = as_string(value) if "%" not in value: value_error(value, cls) value = value.replace("%", "") return super(PercentField, cls).deserialize(value) / 100 class DateField(Field): """Field class to represent date Is not locale-aware (does not need to be) """ TYPE = (datetime.date,) INPUT_FORMAT = "%Y-%m-%d" OUTPUT_FORMAT = "%Y-%m-%d" @classmethod def serialize(cls, value, *args, **kwargs): if value is None: return "" return six.text_type(value.strftime(cls.OUTPUT_FORMAT)) @classmethod def deserialize(cls, value, *args, **kwargs): value = super(DateField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value value = as_string(value) dt_object = datetime.datetime.strptime(value, cls.INPUT_FORMAT) return datetime.date(dt_object.year, dt_object.month, dt_object.day) class DatetimeField(Field): """Field class to represent date-time Is not locale-aware (does not need to be) """ TYPE = (datetime.datetime,) DATETIME_REGEXP = re.compile( "^([0-9]{4})-([0-9]{2})-([0-9]{2})[ T]" "([0-9]{2}):([0-9]{2}):([0-9]{2})$" ) @classmethod def serialize(cls, value, *args, **kwargs): if value is None: return "" return six.text_type(value.isoformat()) @classmethod def deserialize(cls, value, *args, **kwargs): value = super(DatetimeField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value value = as_string(value) # TODO: may use iso8601 groups = cls.DATETIME_REGEXP.findall(value) if not groups: value_error(value, cls) else: return datetime.datetime(*[int(x) for x in groups[0]]) class TextField(Field): """Field class to represent unicode strings Is not locale-aware (does not need to be) """ TYPE = (six.text_type,) @classmethod def deserialize(cls, value, *args, **kwargs): if value is None or isinstance(value, cls.TYPE): return value else: return as_string(value) class EmailField(TextField): """Field class to represent e-mail addresses Is not locale-aware (does not need to be) """ EMAIL_REGEXP = re.compile( r"^[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]+$", flags=re.IGNORECASE ) @classmethod def serialize(cls, value, *args, **kwargs): if value is None: return "" return six.text_type(value) @classmethod def deserialize(cls, value, *args, **kwargs): value = super(EmailField, cls).deserialize(value) if value is None or not value.strip(): return None result = cls.EMAIL_REGEXP.findall(value) if not result: value_error(value, cls) else: return result[0] class JSONField(Field): """Field class to represent JSON-encoded strings Is not locale-aware (does not need to be) """ TYPE = (list, dict) @classmethod def serialize(cls, value, *args, **kwargs): return json.dumps(value) @classmethod def deserialize(cls, value, *args, **kwargs): value = super(JSONField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value else: return json.loads(value) Based on the information above, please complete 
the function in the current file Text-Processing/rows/rows/fields.py: def as_string(value):
def as_string(value):
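A hedged sketch of `as_string` satisfying the stated requirement and the test above: bytes are rejected, text passes through unchanged, and anything else is coerced with `six.text_type`. The error message is an assumption, not taken from the source:

def as_string(value):
    if isinstance(value, six.binary_type):
        # Bytes must go through BinaryField, not string coercion.
        raise ValueError("Binary is not supported")
    elif isinstance(value, six.text_type):
        return value
    else:
        return six.text_type(value)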
rows.fields.get_items
function
Text-Processing/rows
Text-Processing/rows/rows/fields.py
[ 506, 506 ]
[ 513, 515 ]
{ "cross_file": [], "intra_class": [], "intra_file": [] }
{ "Arguments": ":param indexes: Tuple. The indexes of the object to be fetched.\n:return: Lambda function. A callable that fetches the given indexes of an object.", "Functionality": "This function returns a callable that fetches the given indexes of an object. It always returns a tuple even when len(indexes) == 1. It is similar to `operator.itemgetter`, but will insert `None` when the object does not have the desired index (instead of raising IndexError)." }
[ "tests/tests_fields.py::FieldsFunctionsTestCase::test_get_items" ]
4
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function: # file path: Text-Processing/natasha/natasha/norm.py # lines: 14-28 # def recover_spaces(words, tokens): # offset = None # parts = [] # for index, (word, token) in enumerate(zip(words, tokens)): # if index > 0: # parts.append(' ' * (token.start - offset)) # parts.append(word) # offset = token.stop # return ''.join(parts) # def normal_pos(pos): # if pos == PROPN: # pos = NOUN # return pos # def pos_match(a, b): # return normal_pos(a) == normal_pos(b) # file path: Text-Processing/natasha/natasha/norm.py # lines: 58-67 # def inflect_words(vocab, tokens, ids=None): # for token in tokens: # if not ids or token.id in ids: # yield inflect_word(vocab, token) # else: # yield token.text # def select_inflectable(tokens): # index = {} # for token in tokens: # index[token.id] = token # file path: Text-Processing/natasha/natasha/norm.py # lines: 29-43 # def feats_match(a, b): # return ( # a.get(GENDER) == b.get(GENDER) # and a.get(NUMBER) == b.get(NUMBER) # and a.get(CASE) == b.get(CASE) # ) # def form_match(form, pos, feats): # return pos_match(form.pos, pos) and feats_match(form.feats, feats) # def select_form(forms, pos, feats): # for form in forms: # if form_match(form, pos, feats): # return form # def normal_word(word): # word = word.lower() # return word.replace('ё', 'е') # file path: Text-Processing/natasha/natasha/norm.py # lines: 83-91 # def syntax_normalize(vocab, tokens): # ids = set(select_inflectable(tokens)) # words = inflect_words(vocab, tokens, ids) # words = recover_shapes(words, tokens) # return recover_spaces(words, tokens) # def normalize(vocab, tokens): # words = inflect_words(vocab, tokens) # words = recover_shapes(words, tokens) # return recover_spaces(words, tokens) # file path: Text-Processing/natasha/natasha/norm.py # lines: 68-82 # roots = set() # children = defaultdict(list) # for token in tokens: # if token.head_id not in index: # roots.add(token.id) # else: # children[token.head_id].append(token.id) # stack = deque(roots) # while stack: # id = stack.popleft() # yield id # for child in children[id]: # token = index[child] # if token.pos in (ADJ, VERB): # stack.append(child) # file path: Text-Processing/rows/rows/fields.py # coding: utf-8 # Copyright 2014-2019 Álvaro Justen <https://github.com/turicas/rows/> # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
from __future__ import unicode_literals import binascii import datetime import json import locale import re from base64 import b64decode, b64encode from collections import OrderedDict, defaultdict from decimal import Decimal, InvalidOperation from unicodedata import normalize import six if six.PY2: from itertools import izip_longest as zip_longest else: from itertools import zip_longest # Order matters here __all__ = [ "BoolField", "IntegerField", "FloatField", "DatetimeField", "DateField", "DecimalField", "PercentField", "JSONField", "EmailField", "TextField", "BinaryField", "Field", ] NULL = ("-", "null", "none", "nil", "n/a", "na") NULL_BYTES = (b"-", b"null", b"none", b"nil", b"n/a", b"na") REGEXP_ONLY_NUMBERS = re.compile("[^0-9\-]") SHOULD_NOT_USE_LOCALE = True # This variable is changed by rows.locale_manager SLUG_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_" def value_error(value, cls): value = repr(value) if len(value) > 50: value = value[:50] + "..." raise ValueError("Value '{}' can't be {}".format(value, cls.__name__)) class Field(object): """Base Field class - all fields should inherit from this As the fallback for all other field types are the BinaryField, this Field actually implements what is expected in the BinaryField """ TYPE = (type(None),) @classmethod def serialize(cls, value, *args, **kwargs): """Serialize a value to be exported `cls.serialize` should always return an unicode value, except for BinaryField """ if value is None: value = "" return value @classmethod def deserialize(cls, value, *args, **kwargs): """Deserialize a value just after importing it `cls.deserialize` should always return a value of type `cls.TYPE` or `None`. """ if isinstance(value, cls.TYPE): return value elif is_null(value): return None else: return value class BinaryField(Field): """Field class to represent byte arrays Is not locale-aware (does not need to be) """ TYPE = (six.binary_type,) @classmethod def serialize(cls, value, *args, **kwargs): if value is not None: if not isinstance(value, six.binary_type): value_error(value, cls) else: try: return b64encode(value).decode("ascii") except (TypeError, binascii.Error): return value else: return "" @classmethod def deserialize(cls, value, *args, **kwargs): if value is not None: if isinstance(value, six.binary_type): return value elif isinstance(value, six.text_type): try: return b64decode(value) except (TypeError, ValueError, binascii.Error): raise ValueError("Can't decode base64") else: value_error(value, cls) else: return b"" class BoolField(Field): """Base class to representing boolean Is not locale-aware (if you need to, please customize by changing its attributes like `TRUE_VALUES` and `FALSE_VALUES`) """ TYPE = (bool,) SERIALIZED_VALUES = {True: "true", False: "false", None: ""} TRUE_VALUES = ("true", "yes") FALSE_VALUES = ("false", "no") @classmethod def serialize(cls, value, *args, **kwargs): # TODO: should we serialize `None` as well or give it to the plugin? 
return cls.SERIALIZED_VALUES[value] @classmethod def deserialize(cls, value, *args, **kwargs): value = super(BoolField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value value = as_string(value).lower() if value in cls.TRUE_VALUES: return True elif value in cls.FALSE_VALUES: return False else: raise ValueError("Value is not boolean") class IntegerField(Field): """Field class to represent integer Is locale-aware """ TYPE = (int,) @classmethod def serialize(cls, value, *args, **kwargs): if value is None: return "" if SHOULD_NOT_USE_LOCALE: return six.text_type(value) else: grouping = kwargs.get("grouping", None) return locale.format("%d", value, grouping=grouping) @classmethod def deserialize(cls, value, *args, **kwargs): value = super(IntegerField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value elif isinstance(value, float): new_value = int(value) if new_value != value: raise ValueError("It's float, not integer") else: value = new_value value = as_string(value) if value != "0" and value.startswith("0"): raise ValueError("It's string, not integer") return int(value) if SHOULD_NOT_USE_LOCALE else locale.atoi(value) class FloatField(Field): """Field class to represent float Is locale-aware """ TYPE = (float,) @classmethod def serialize(cls, value, *args, **kwargs): if value is None: return "" if SHOULD_NOT_USE_LOCALE: return six.text_type(value) else: grouping = kwargs.get("grouping", None) return locale.format("%f", value, grouping=grouping) @classmethod def deserialize(cls, value, *args, **kwargs): value = super(FloatField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value value = as_string(value) if SHOULD_NOT_USE_LOCALE: return float(value) else: return locale.atof(value) class DecimalField(Field): """Field class to represent decimal data (as Python's decimal.Decimal) Is locale-aware """ TYPE = (Decimal,) @classmethod def serialize(cls, value, *args, **kwargs): if value is None: return "" value_as_string = six.text_type(value) if SHOULD_NOT_USE_LOCALE: return value_as_string else: grouping = kwargs.get("grouping", None) has_decimal_places = value_as_string.find(".") != -1 if not has_decimal_places: string_format = "%d" else: decimal_places = len(value_as_string.split(".")[1]) string_format = "%.{}f".format(decimal_places) return locale.format(string_format, value, grouping=grouping) @classmethod def deserialize(cls, value, *args, **kwargs): value = super(DecimalField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value elif type(value) in (int, float): return Decimal(six.text_type(value)) if SHOULD_NOT_USE_LOCALE: try: return Decimal(value) except InvalidOperation: value_error(value, cls) else: locale_vars = locale.localeconv() decimal_separator = locale_vars["decimal_point"] interesting_vars = ( "decimal_point", "mon_decimal_point", "mon_thousands_sep", "negative_sign", "positive_sign", "thousands_sep", ) chars = ( locale_vars[x].replace(".", r"\.").replace("-", r"\-") for x in interesting_vars ) interesting_chars = "".join(set(chars)) regexp = re.compile(r"[^0-9{} ]".format(interesting_chars)) value = as_string(value) if regexp.findall(value): value_error(value, cls) parts = [ REGEXP_ONLY_NUMBERS.subn("", number)[0] for number in value.split(decimal_separator) ] if len(parts) > 2: raise ValueError("Can't deserialize with this locale.") try: value = Decimal(parts[0]) if len(parts) == 2: decimal_places = len(parts[1]) value = value + 
(Decimal(parts[1]) / (10 ** decimal_places)) except InvalidOperation: value_error(value, cls) return value class PercentField(DecimalField): """Field class to represent percent values Is locale-aware (inherit this behaviour from `rows.DecimalField`) """ @classmethod def serialize(cls, value, *args, **kwargs): if value is None: return "" elif value == Decimal("0"): return "0.00%" value = Decimal(six.text_type(value * 100)[:-2]) value = super(PercentField, cls).serialize(value, *args, **kwargs) return "{}%".format(value) @classmethod def deserialize(cls, value, *args, **kwargs): if isinstance(value, cls.TYPE): return value elif is_null(value): return None value = as_string(value) if "%" not in value: value_error(value, cls) value = value.replace("%", "") return super(PercentField, cls).deserialize(value) / 100 class DateField(Field): """Field class to represent date Is not locale-aware (does not need to be) """ TYPE = (datetime.date,) INPUT_FORMAT = "%Y-%m-%d" OUTPUT_FORMAT = "%Y-%m-%d" @classmethod def serialize(cls, value, *args, **kwargs): if value is None: return "" return six.text_type(value.strftime(cls.OUTPUT_FORMAT)) @classmethod def deserialize(cls, value, *args, **kwargs): value = super(DateField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value value = as_string(value) dt_object = datetime.datetime.strptime(value, cls.INPUT_FORMAT) return datetime.date(dt_object.year, dt_object.month, dt_object.day) class DatetimeField(Field): """Field class to represent date-time Is not locale-aware (does not need to be) """ TYPE = (datetime.datetime,) DATETIME_REGEXP = re.compile( "^([0-9]{4})-([0-9]{2})-([0-9]{2})[ T]" "([0-9]{2}):([0-9]{2}):([0-9]{2})$" ) @classmethod def serialize(cls, value, *args, **kwargs): if value is None: return "" return six.text_type(value.isoformat()) @classmethod def deserialize(cls, value, *args, **kwargs): value = super(DatetimeField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value value = as_string(value) # TODO: may use iso8601 groups = cls.DATETIME_REGEXP.findall(value) if not groups: value_error(value, cls) else: return datetime.datetime(*[int(x) for x in groups[0]]) class TextField(Field): """Field class to represent unicode strings Is not locale-aware (does not need to be) """ TYPE = (six.text_type,) @classmethod def deserialize(cls, value, *args, **kwargs): if value is None or isinstance(value, cls.TYPE): return value else: return as_string(value) class EmailField(TextField): """Field class to represent e-mail addresses Is not locale-aware (does not need to be) """ EMAIL_REGEXP = re.compile( r"^[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]+$", flags=re.IGNORECASE ) @classmethod def serialize(cls, value, *args, **kwargs): if value is None: return "" return six.text_type(value) @classmethod def deserialize(cls, value, *args, **kwargs): value = super(EmailField, cls).deserialize(value) if value is None or not value.strip(): return None result = cls.EMAIL_REGEXP.findall(value) if not result: value_error(value, cls) else: return result[0] class JSONField(Field): """Field class to represent JSON-encoded strings Is not locale-aware (does not need to be) """ TYPE = (list, dict) @classmethod def serialize(cls, value, *args, **kwargs): return json.dumps(value) @classmethod def deserialize(cls, value, *args, **kwargs): value = super(JSONField, cls).deserialize(value) if value is None or isinstance(value, cls.TYPE): return value else: return json.loads(value) def as_string(value): if isinstance(value, 
six.binary_type): raise ValueError("Binary is not supported") elif isinstance(value, six.text_type): return value else: return six.text_type(value) def is_null(value): if value is None: return True elif type(value) is six.binary_type: value = value.strip().lower() return not value or value in NULL_BYTES else: value_str = as_string(value).strip().lower() return not value_str or value_str in NULL def unique_values(values): result = [] for value in values: if not is_null(value) and value not in result: result.append(value) return result Based on the information above, please complete the function in the current file Text-Processing/rows/rows/fields.py: def get_items(*indexes): """Return a callable that fetches the given indexes of an object Always return a tuple even when len(indexes) == 1. Similar to `operator.itemgetter`, but will insert `None` when the object does not have the desired index (instead of raising IndexError). """
def get_items(*indexes): """Return a callable that fetches the given indexes of an object Always return a tuple even when len(indexes) == 1. Similar to `operator.itemgetter`, but will insert `None` when the object does not have the desired index (instead of raising IndexError). """
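A minimal sketch of how this body could be completed, consistent with the docstring above. The closure form and the len()-based guard are assumptions (they presume non-negative integer indexes), not the dataset's reference solution:

def get_items(*indexes):
    # Guard each lookup so a missing position yields None instead of
    # raising IndexError, and always build a tuple, even for one index.
    return lambda obj: tuple(
        obj[index] if len(obj) > index else None for index in indexes
    )

For example, get_items(0, 2)(["a", "b"]) would return ("a", None).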
pycorrector.proper_corrector.load_dict_file
function
Text-Processing/pycorrector
Text-Processing/pycorrector/pycorrector/proper_corrector.py
[ 31, 31 ]
[ 37, 52 ]
{ "cross_file": [], "intra_class": [], "intra_file": [] }
{ "Arguments": ":param path: String. The file path from which the dictionary is to be loaded.\n:return: Dictionary. The loaded dictionary from the file. If the file is not found, an empty dictionary is returned.", "Functionality": "Load a dictionary from the given file path. It reads the file line by line and creates a dictionary with the key-value pairs from the file. If the file is not found, an empty dictionary is returned." }
[ "tests/ner_error_test.py::test_common_error" ]
4
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function: # file path: Text-Processing/pycorrector/pycorrector/utils/tokenizer.py # lines: 109-119 # class Tokenizer(object): # def __init__(self, dict_path='', custom_word_freq_dict=None, custom_confusion_dict=None): # self.model = jieba # jieba.setLogLevel("ERROR") # # 初始化大词典 # if os.path.exists(dict_path): # self.model.set_dictionary(dict_path) # # 加载用户自定义词典 # if custom_word_freq_dict: # for w, f in custom_word_freq_dict.items(): # self.model.add_word(w, freq=f) # file path: Text-Processing/pycorrector/pycorrector/config.py # lines: 27-34 # # 专名词典,包括成语、俗语、专业领域词等 format: 词语 # proper_name_path = os.path.join(pwd_path, 'data/proper_name.txt') # # 停用词 # stopwords_path = os.path.join(pwd_path, 'data/stopwords.txt') # # 搭配词 # ngram_words_path = os.path.join(pwd_path, 'data/ngram_words.txt') # # 英文拼写词频文件 # en_dict_path = os.path.join(pwd_path, 'data/en/en.json.gz') # file path: Text-Processing/pycorrector/pycorrector/utils/tokenizer.py # lines: 120-125 # # 加载混淆集词典 # if custom_confusion_dict: # for k, word in custom_confusion_dict.items(): # # 添加到分词器的自定义词典中 # self.model.add_word(k) # self.model.add_word(word) # file path: Text-Processing/pycorrector/pycorrector/config.py # lines: 12-26 # # -----词典文件路径----- # # 通用分词词典文件 format: 词语 词频 # word_freq_path = os.path.join(pwd_path, 'data/word_freq.txt') # # 中文常用字符集 # common_char_path = os.path.join(pwd_path, 'data/common_char_set.txt') # # 同音字 # same_pinyin_path = os.path.join(pwd_path, 'data/same_pinyin.txt') # # 形似字 # same_stroke_path = os.path.join(pwd_path, 'data/same_stroke.txt') # # 五笔笔画字典 # stroke_path = os.path.join(pwd_path, 'data/stroke.txt') # # 知名人名词典 format: 词语 词频 # person_name_path = os.path.join(pwd_path, 'data/person_name.txt') # # 地名词典 format: 词语 词频 # place_name_path = os.path.join(pwd_path, 'data/place_name.txt') # file path: Text-Processing/pycorrector/pycorrector/ernie/tokenizing_ernie.py # lines: 183-197 # if not vocab_path.exists(): # raise ValueError('no vocab file in pretrain dir: %s' % pretrain_dir) # vocab_dict = {j.strip().split('\t')[0]: i for i, j in enumerate(vocab_path.open(encoding='utf8').readlines())} # t = cls(vocab_dict, sp_model_path, **kwargs) # return t # def __init__(self, vocab, sp_model_path, **kwargs): # super(ErnieTinyTokenizer, self).__init__(vocab, **kwargs) # import sentencepiece as spm # import jieba as jb # self.sp_model = spm.SentencePieceProcessor() # self.window_size = 5 # self.sp_model.Load(sp_model_path) # self.jb = jb # def cut(self, sentence): # return self.jb.cut(sentence) # file path: Text-Processing/pycorrector/pycorrector/ernie/tokenizing_ernie.py # lines: 66-80 # @classmethod # def from_pretrained(cls, pretrain_dir_or_url, force_download=False, **kwargs): # if pretrain_dir_or_url in cls.resource_map: # url = cls.resource_map[pretrain_dir_or_url] # logger.info('get pretrain dir from %s' % url) # pretrain_dir = Path(_fetch_from_remote(url, force_download=force_download)) # else: # logger.info('pretrain dir %s not in %s, read from local' % (pretrain_dir_or_url, repr(cls.resource_map))) # pretrain_dir = Path(pretrain_dir_or_url) # if not pretrain_dir.exists(): # raise ValueError('pretrain dir not found: %s' % pretrain_dir) # vocab_path = pretrain_dir / 'vocab.txt' # if not vocab_path.exists(): # raise ValueError('no vocab file in pretrain dir: %s' % pretrain_dir) # vocab_dict = {j.strip().split('\t')[0]: i for i, j in enumerate(vocab_path.open(encoding='utf8').readlines())} # file 
path: Text-Processing/pycorrector/pycorrector/utils/tokenizer.py # lines: 156-166 # if __name__ == '__main__': # text = "这个消息在北京城里不胫儿走,你好,我才来到这里。你呢?" # print(text) # t = Tokenizer() # print('deault', t.tokenize(text, 'default')) # print('search', t.tokenize(text, 'search')) # print('ngram', t.tokenize(text, 'ngram')) # paragraph = "The first time I heard that song was in Hawaii on radio. " \ # "I was just a kid, and loved it very much! What a fantastic song!" # cutwords1 = whitespace_tokenize(paragraph) # 分词 # print('【my分词结果:】', cutwords1) # file path: Text-Processing/pycorrector/pycorrector/config.py # lines: 35-49 # # -----深度模型文件路径 ----- # # bert模型文件夹路径 # bert_model_dir = os.path.join(USER_DATA_DIR, 'bert_models/chinese_finetuned_lm/') # os.makedirs(bert_model_dir, exist_ok=True) # # ernie模型文件夹路径: /Users/name/.paddle-ernie-cache/ # # electra模型文件夹路径 # electra_D_model_dir = os.path.join(USER_DATA_DIR, "electra_models/chinese_electra_base_discriminator_pytorch/") # electra_G_model_dir = os.path.join(USER_DATA_DIR, "electra_models/chinese_electra_base_generator_pytorch/") # # macbert模型文件路径 # macbert_model_dir = os.path.join(USER_DATA_DIR, 'macbert_models/chinese_finetuned_correction/') # os.makedirs(macbert_model_dir, exist_ok=True) # # t5模型文件路径 # t5_model_dir = os.path.join(USER_DATA_DIR, 't5_models/mengzi-t5-base-chinese-correction/') # os.makedirs(t5_model_dir, exist_ok=True) # copyt5_model_dir = os.path.join(USER_DATA_DIR, 't5_models/copyt5-base-chinese-correction/') # file path: Text-Processing/pycorrector/pycorrector/ernie/tokenizing_ernie.py # lines: 14-28 # import io # import re # from functools import partial # from pathlib import Path # import numpy as np # import six # from loguru import logger # from .file_utils import _fetch_from_remote # open = partial(io.open, encoding='utf8') # _max_input_chars_per_word = 100 # def _wordpiece(token, vocab, unk_token, prefix='##', sentencepiece_prefix=''): # """ wordpiece: helloworld => [hello, ##world] """ # chars = list(token) # if len(chars) > _max_input_chars_per_word: # return [unk_token], [(0, len(chars))] # file path: Text-Processing/pycorrector/pycorrector/ernie/tokenizing_ernie.py # lines: 170-182 # @classmethod # def from_pretrained(cls, pretrain_dir_or_url, force_download=False, **kwargs): # if pretrain_dir_or_url in cls.resource_map: # url = cls.resource_map[pretrain_dir_or_url] # logger.info('get pretrain dir from %s' % url) # pretrain_dir = _fetch_from_remote(url, force_download) # else: # logger.info('pretrain dir %s not in %s, read from local' % (pretrain_dir_or_url, repr(cls.resource_map))) # pretrain_dir = Path(pretrain_dir_or_url) # if not pretrain_dir.exists(): # raise ValueError('pretrain dir not found: %s' % pretrain_dir) # vocab_path = pretrain_dir / 'vocab.txt' # sp_model_path = pretrain_dir / 'subword/spm_cased_simp_sampled.model' # file path: Text-Processing/pycorrector/pycorrector/proper_corrector.py # -*- coding: utf-8 -*- """ @author:XuMing(xuming624@qq.com) @description: 专名纠错,包括成语纠错、人名纠错、机构名纠错、领域词纠错等 """ import os from codecs import open import pypinyin from loguru import logger from pycorrector import config from pycorrector.utils.math_utils import edit_distance from pycorrector.utils.ngram_util import NgramUtil from pycorrector.utils.text_utils import is_chinese from pycorrector.utils.tokenizer import segment, split_2_short_text def load_set_file(path): words = set() if path and os.path.exists(path): with open(path, 'r', encoding='utf-8') as f: for w in f: w = w.strip() if w.startswith('#'): continue if w: 
words.add(w) return words Based on the information above, please complete the function in the current file Text-Processing/pycorrector/pycorrector/proper_corrector.py: def load_dict_file(path): """ 加载词典 :param path: :return: """
def load_dict_file(path): """ 加载词典 :param path: :return: """
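A hedged sketch of the expected completion, mirroring the load_set_file helper shown in the prompt context. The whitespace split into key and value follows the "format: 词语 词频" convention from config.py, but the exact separator and value handling are assumptions (os and open are already imported at the top of proper_corrector.py):

def load_dict_file(path):
    result = {}
    if path and os.path.exists(path):
        with open(path, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                # Skip comments and blank lines, as load_set_file does.
                if not line or line.startswith('#'):
                    continue
                terms = line.split()
                if len(terms) < 2:
                    continue
                result[terms[0]] = terms[1]
    return result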
natasha.span.envelop_spans
function
Text-Processing/natasha
Text-Processing/natasha/natasha/span.py
[ 23, 23 ]
[ 24, 37 ]
{ "cross_file": [], "intra_class": [], "intra_file": [] }
{ "Arguments": ":param spans: List of spans. The spans to be enveloped.\n:param envelopes: List of envelopes. The envelopes used to envelop the spans.\n:return: Yield the chunk of spans for each envelope.", "Functionality": "This function envelops the spans based on the given envelopes. It iterates through the spans and envelopes and yields the chunk of spans that are enveloped by each envelope." }
[ "tests/test_span.py::test_envelope_spans" ]
4
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function: # file path: Text-Processing/natasha/natasha/record.py # lines: 1-12 # from collections import OrderedDict # def parse_annotation(annotation): # type = annotation or str # repeatable = False # if isinstance(annotation, list): # [Fact] # repeatable = True # type = annotation[0] # is_record = issubclass(type, Record) # return type, repeatable, is_record # class Record(object): # __attributes__ = [] # __annotations__ = {} # file path: Text-Processing/natasha/natasha/record.py # lines: 73-78 # if repeatable and is_record: # value = [_.as_json for _ in value] # elif is_record: # value = value.as_json # data[key] = value # return data # file path: Text-Processing/natasha/natasha/record.py # lines: 27-30 # def __iter__(self): # return (getattr(self, _) for _ in self.__attributes__) # def __hash__(self): # return hash(tuple(self)) # file path: Text-Processing/natasha/natasha/record.py # lines: 59-72 # if index < size - 1: # printer.text(',') # printer.break_() # printer.break_() # printer.text(')') # @property # def as_json(self): # data = OrderedDict() # for key in self.__attributes__: # annotation = self.__annotations__.get(key) # _, repeatable, is_record = parse_annotation(annotation) # value = getattr(self, key) # if value is None: # continue # file path: Text-Processing/natasha/natasha/record.py # lines: 44-58 # def _repr_pretty_(self, printer, cycle): # name = self.__class__.__name__ # if cycle: # printer.text('{name}(...)'.format(name=name)) # else: # printer.text('{name}('.format(name=name)) # keys = self.__attributes__ # size = len(keys) # if size: # with printer.indent(4): # printer.break_() # for index, key in enumerate(keys): # printer.text(key + '=') # value = getattr(self, key) # printer.pretty(value) # file path: Text-Processing/natasha/natasha/record.py # lines: 13-26 # def __init__(self, *args, **kwargs): # for key, value in zip(self.__attributes__, args): # self.__dict__[key] = value # self.__dict__.update(kwargs) # def __eq__(self, other): # return ( # type(self) == type(other) # and all( # (getattr(self, _) == getattr(other, _)) # for _ in self.__attributes__ # ) # ) # def __ne__(self, other): # return not self == other # file path: Text-Processing/natasha/natasha/record.py # lines: 31-43 # def __repr__(self): # name = self.__class__.__name__ # args = ', '.join( # '{key}={value!r}'.format( # key=_, # value=getattr(self, _) # ) # for _ in self.__attributes__ # ) # return '{name}({args})'.format( # name=name, # args=args # ) # file path: Text-Processing/natasha/natasha/record.py # lines: 79-93 # @classmethod # def from_json(cls, data): # args = [] # for key in cls.__attributes__: # annotation = cls.__annotations__.get(key) # type, repeatable, is_record = parse_annotation(annotation) # value = data.get(key) # if value is None and repeatable: # value = [] # elif value is not None: # if repeatable and is_record: # value = [type.from_json(_) for _ in value] # elif is_record: # value = type.from_json(value) # args.append(value) # file path: Text-Processing/natasha/natasha/span.py from .record import Record class Span(Record): __attributes__ = ['start', 'stop', 'type'] def adapt_spans(spans): for span in spans: yield Span(span.start, span.stop, span.type) def offset_spans(spans, offset): for span in spans: yield Span( offset + span.start, offset + span.stop, span.type ) Based on the information above, please complete the function in the current file 
Text-Processing/natasha/natasha/span.py: def envelop_spans(spans, envelopes):
def envelop_spans(spans, envelopes):
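One plausible completion matching this requirement. It assumes both spans and envelopes arrive sorted by start offset, and that "enveloped" means fully contained; both assumptions go beyond what the row states:

def envelop_spans(spans, envelopes):
    spans = iter(spans)
    span = next(spans, None)
    for envelope in envelopes:
        chunk = []
        while span:
            if span.start < envelope.start:
                # Span lies before this envelope; drop it.
                span = next(spans, None)
            elif span.stop <= envelope.stop:
                # Span fits entirely inside the envelope.
                chunk.append(span)
                span = next(spans, None)
            else:
                # Span extends past this envelope; try the next one.
                break
        yield chunk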
googleapiclient._helpers.parse_unique_urlencoded
function
Internet/google-api-python-client
Internet/google-api-python-client/googleapiclient/_helpers.py
[ 141, 141 ]
[ 153, 163 ]
{ "cross_file": [], "intra_class": [], "intra_file": [] }
{ "Arguments": ":param content: string. URL-encoded key-value pairs.\n:return: dict. The key-value pairs from the input content.\nRaises:\nValueError: if one of the keys is repeated.", "Functionality": "This function parses unique key-value parameters from URL-encoded content. It first parses the URL-encoded content and then checks for repeated keys. If a repeated key is found, it raises a ValueError." }
[ "tests/test__helpers.py::Test_parse_unique_urlencoded::test_without_repeats", "tests/test__helpers.py::Test_parse_unique_urlencoded::test_with_repeats" ]
4
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function: # file path: Internet/Authlib/authlib/oauth2/rfc7523/validator.py # lines: 1-15 # import time # import logging # from authlib.jose import jwt, JoseError, JWTClaims # from ..rfc6749 import TokenMixin # from ..rfc6750 import BearerTokenValidator # logger = logging.getLogger(__name__) # class JWTBearerToken(TokenMixin, JWTClaims): # def check_client(self, client): # return self['client_id'] == client.get_client_id() # def get_scope(self): # return self.get('scope') # def get_expires_in(self): # return self['exp'] - self['iat'] # def is_expired(self): # return self['exp'] < time.time() # file path: Internet/google-api-python-client/googleapiclient/_helpers.py # Copyright 2015 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helper functions for commonly used utilities.""" import functools import inspect import logging import urllib logger = logging.getLogger(__name__) POSITIONAL_WARNING = "WARNING" POSITIONAL_EXCEPTION = "EXCEPTION" POSITIONAL_IGNORE = "IGNORE" POSITIONAL_SET = frozenset( [POSITIONAL_WARNING, POSITIONAL_EXCEPTION, POSITIONAL_IGNORE] ) positional_parameters_enforcement = POSITIONAL_WARNING _SYM_LINK_MESSAGE = "File: {0}: Is a symbolic link." _IS_DIR_MESSAGE = "{0}: Is a directory" _MISSING_FILE_MESSAGE = "Cannot access {0}: No such file or directory" def positional(max_positional_args): """A decorator to declare that only the first N arguments may be positional. This decorator makes it easy to support Python 3 style keyword-only parameters. For example, in Python 3 it is possible to write:: def fn(pos1, *, kwonly1=None, kwonly2=None): ... All named parameters after ``*`` must be a keyword:: fn(10, 'kw1', 'kw2') # Raises exception. fn(10, kwonly1='kw1') # Ok. Example ^^^^^^^ To define a function like above, do:: @positional(1) def fn(pos1, kwonly1=None, kwonly2=None): ... If no default value is provided to a keyword argument, it becomes a required keyword argument:: @positional(0) def fn(required_kw): ... This must be called with the keyword parameter:: fn() # Raises exception. fn(10) # Raises exception. fn(required_kw=10) # Ok. When defining instance or class methods always remember to account for ``self`` and ``cls``:: class MyClass(object): @positional(2) def my_method(self, pos1, kwonly1=None): ... @classmethod @positional(2) def my_method(cls, pos1, kwonly1=None): ... The positional decorator behavior is controlled by ``_helpers.positional_parameters_enforcement``, which may be set to ``POSITIONAL_EXCEPTION``, ``POSITIONAL_WARNING`` or ``POSITIONAL_IGNORE`` to raise an exception, log a warning, or do nothing, respectively, if a declaration is violated. Args: max_positional_arguments: Maximum number of positional arguments. All parameters after this index must be keyword only. Returns: A decorator that prevents using arguments after max_positional_args from being used as positional parameters. 
Raises: TypeError: if a keyword-only argument is provided as a positional parameter, but only if _helpers.positional_parameters_enforcement is set to POSITIONAL_EXCEPTION. """ def positional_decorator(wrapped): @functools.wraps(wrapped) def positional_wrapper(*args, **kwargs): if len(args) > max_positional_args: plural_s = "" if max_positional_args != 1: plural_s = "s" message = ( "{function}() takes at most {args_max} positional " "argument{plural} ({args_given} given)".format( function=wrapped.__name__, args_max=max_positional_args, args_given=len(args), plural=plural_s, ) ) if positional_parameters_enforcement == POSITIONAL_EXCEPTION: raise TypeError(message) elif positional_parameters_enforcement == POSITIONAL_WARNING: logger.warning(message) return wrapped(*args, **kwargs) return positional_wrapper if isinstance(max_positional_args, int): return positional_decorator else: args, _, _, defaults, _, _, _ = inspect.getfullargspec(max_positional_args) return positional(len(args) - len(defaults))(max_positional_args) Based on the information above, please complete the function in the current file Internet/google-api-python-client/googleapiclient/_helpers.py: def parse_unique_urlencoded(content): """Parses unique key-value parameters from urlencoded content. Args: content: string, URL-encoded key-value pairs. Returns: dict, The key-value pairs from ``content``. Raises: ValueError: if one of the keys is repeated. """
def parse_unique_urlencoded(content): """Parses unique key-value parameters from urlencoded content. Args: content: string, URL-encoded key-value pairs. Returns: dict, The key-value pairs from ``content``. Raises: ValueError: if one of the keys is repeated. """
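A sketch of the body built on the standard library's urllib.parse.parse_qs (the module already imports urllib); the exact wording of the error message is an assumption:

def parse_unique_urlencoded(content):
    urlencoded_params = urllib.parse.parse_qs(content)
    params = {}
    for key, value in urlencoded_params.items():
        if len(value) != 1:
            # parse_qs collects repeated keys into lists longer than one.
            raise ValueError(
                "URL-encoded content contains a repeated value: "
                "%s -> %s" % (key, ", ".join(value))
            )
        params[key] = value[0]
    return params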
jinja2.async_utils.auto_aiter
function
Internet/Jinja2
Internet/Jinja2/src/jinja2/async_utils.py
[ 70, 72 ]
[ 73, 78 ]
{ "cross_file": [], "intra_class": [], "intra_file": [] }
{ "Arguments": ":param iterable: Union of AsyncIterable and Iterable. The input iterable from which the iterator is created.\n:return: AsyncIterator. The created asynchronous iterator.", "Functionality": "This function creates an asynchronous iterator from the given iterable. It checks if the iterable has an __aiter__ attribute and if so, it yields items asynchronously, otherwise, it yields items synchronously." }
[ "tests/test_async.py::test_async_iteration_in_templates_extended" ]
4
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function: # file path: Internet/Jinja2/src/jinja2/utils.py # lines: 444-454 # def select_autoescape( # enabled_extensions: t.Collection[str] = ("html", "htm", "xml"), # disabled_extensions: t.Collection[str] = (), # default_for_string: bool = True, # default: bool = False, # ) -> t.Callable[[t.Optional[str]], bool]: # """Intelligently sets the initial value of autoescaping based on the # filename of the template. This is the recommended way to configure # autoescaping if you do not want to write a custom function yourself. # If you want to enable it for all templates created from strings or # for all templates with `.html` and `.xml` extensions:: # file path: Internet/Jinja2/src/jinja2/utils.py # lines: 513-526 # return markupsafe.Markup( # dumps(obj, **kwargs) # .replace("<", "\\u003c") # .replace(">", "\\u003e") # .replace("&", "\\u0026") # .replace("'", "\\u0027") # ) # class Cycler: # """Cycle through values by yield them one at a time, then restarting # once the end is reached. Available as ``cycler`` in templates. # Similar to ``loop.cycle``, but can be used outside loops or across # multiple loops. For example, render a list of folders and files in a # list, alternating giving them "odd" and "even" classes. # .. code-block:: html+jinja # file path: Internet/Jinja2/src/jinja2/utils.py # lines: 80-93 # def consume(iterable: t.Iterable[t.Any]) -> None: # """Consumes an iterable without doing anything with it.""" # for _ in iterable: # pass # def clear_caches() -> None: # """Jinja keeps internal caches for environments and lexers. These are # used so that Jinja doesn't have to recreate environments and lexers all # the time. Normally you don't have to care about that but if you are # measuring memory consumption you may want to clean the caches. # """ # from .environment import get_spontaneous_environment # from .lexer import _lexer_cache # get_spontaneous_environment.cache_clear() # _lexer_cache.clear() # file path: Internet/Jinja2/src/jinja2/utils.py # lines: 578-589 # def __getattribute__(self, name: str) -> t.Any: # # __class__ is needed for the awaitable check in async mode # if name in {"_Namespace__attrs", "__class__"}: # return object.__getattribute__(self, name) # try: # return self.__attrs[name] # except KeyError: # raise AttributeError(name) from None # def __setitem__(self, name: str, value: t.Any) -> None: # self.__attrs[name] = value # def __repr__(self) -> str: # return f"<Namespace {self.__attrs!r}>" # file path: Internet/Jinja2/src/jinja2/utils.py # lines: 540-553 # def __init__(self, *items: t.Any) -> None: # if not items: # raise RuntimeError("at least one item has to be provided") # self.items = items # self.pos = 0 # def reset(self) -> None: # """Resets the current item to the first item.""" # self.pos = 0 # @property # def current(self) -> t.Any: # """Return the current item. Equivalent to the item that will be # returned next time :meth:`next` is called. 
# """ # return self.items[self.pos] # file path: Internet/Jinja2/src/jinja2/utils.py # lines: 430-443 # def values(self) -> t.Iterable[t.Any]: # """Return a list of all values.""" # return [x[1] for x in self.items()] # def keys(self) -> t.Iterable[t.Any]: # """Return a list of all keys ordered by most recent usage.""" # return list(self) # def __iter__(self) -> t.Iterator[t.Any]: # return reversed(tuple(self._queue)) # def __reversed__(self) -> t.Iterator[t.Any]: # """Iterate over the keys in the cache dict, oldest items # coming first. # """ # return iter(tuple(self._queue)) # __copy__ = copy # file path: Internet/Jinja2/src/jinja2/utils.py # lines: 554-566 # def next(self) -> t.Any: # """Return the current item, then advance :attr:`current` to the # next item. # """ # rv = self.current # self.pos = (self.pos + 1) % len(self.items) # return rv # __next__ = next # class Joiner: # """A joining helper for templates.""" # def __init__(self, sep: str = ", ") -> None: # self.sep = sep # self.used = False # file path: Internet/Jinja2/src/jinja2/utils.py # lines: 1-15 # import enum # import json # import os # import re # import typing as t # from collections import abc # from collections import deque # from random import choice # from random import randrange # from threading import Lock # from types import CodeType # from urllib.parse import quote_from_bytes # import markupsafe # if t.TYPE_CHECKING: # import typing_extensions as te # file path: Internet/Jinja2/src/jinja2/async_utils.py import inspect import typing as t from functools import WRAPPER_ASSIGNMENTS from functools import wraps from .utils import _PassArg from .utils import pass_eval_context V = t.TypeVar("V") def async_variant(normal_func): # type: ignore def decorator(async_func): # type: ignore pass_arg = _PassArg.from_obj(normal_func) need_eval_context = pass_arg is None if pass_arg is _PassArg.environment: def is_async(args: t.Any) -> bool: return t.cast(bool, args[0].is_async) else: def is_async(args: t.Any) -> bool: return t.cast(bool, args[0].environment.is_async) # Take the doc and annotations from the sync function, but the # name from the async function. Pallets-Sphinx-Themes # build_function_directive expects __wrapped__ to point to the # sync function. async_func_attrs = ("__module__", "__name__", "__qualname__") normal_func_attrs = tuple(set(WRAPPER_ASSIGNMENTS).difference(async_func_attrs)) @wraps(normal_func, assigned=normal_func_attrs) @wraps(async_func, assigned=async_func_attrs, updated=()) def wrapper(*args, **kwargs): # type: ignore b = is_async(args) if need_eval_context: args = args[1:] if b: return async_func(*args, **kwargs) return normal_func(*args, **kwargs) if need_eval_context: wrapper = pass_eval_context(wrapper) wrapper.jinja_async_variant = True return wrapper return decorator _common_primitives = {int, float, bool, str, list, dict, tuple, type(None)} async def auto_await(value: t.Union[t.Awaitable["V"], "V"]) -> "V": # Avoid a costly call to isawaitable if type(value) in _common_primitives: return t.cast("V", value) if inspect.isawaitable(value): return await t.cast("t.Awaitable[V]", value) return t.cast("V", value) async Based on the information above, please complete the function in the current file Internet/Jinja2/src/jinja2/async_utils.py: def auto_aiter( iterable: "t.Union[t.AsyncIterable[V], t.Iterable[V]]", ) -> "t.AsyncIterator[V]":
def auto_aiter( iterable: "t.Union[t.AsyncIterable[V], t.Iterable[V]]", ) -> "t.AsyncIterator[V]":
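A sketch of a completion written as an async generator (note the trailing "async" where the prompt context cuts off: it is the start of this very definition). The hasattr check on __aiter__ follows the requirement text; t and V are the typing alias and TypeVar already defined in the module:

async def auto_aiter(
    iterable: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
) -> "t.AsyncIterator[V]":
    if hasattr(iterable, "__aiter__"):
        # Asynchronous source: delegate with async for.
        async for item in t.cast("t.AsyncIterable[V]", iterable):
            yield item
    else:
        # Plain iterable: yield its items one by one.
        for item in t.cast("t.Iterable[V]", iterable):
            yield item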
jinja2.utils.consume
function
Internet/Jinja2
Internet/Jinja2/src/jinja2/utils.py
[ 112, 112 ]
[ 114, 115 ]
{ "cross_file": [], "intra_class": [], "intra_file": [] }
{ "Arguments": ":param iterable: Iterable. The iterable to be consumed.\n:return: No return values.", "Functionality": "This function consumes an iterable without doing anything with it. It iterates through the given iterable and does nothing with the elements." }
[ "tests/test_utils.py::test_consume" ]
4
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function: # file path: Internet/Jinja2/src/jinja2/compiler.py # lines: 93-107 # generator = environment.code_generator_class( # environment, name, filename, stream, defer_init, optimized # ) # generator.visit(node) # if stream is None: # return generator.stream.getvalue() # type: ignore # return None # def has_safe_repr(value: t.Any) -> bool: # """Does the node have a safe representation?""" # if value is None or value is NotImplemented or value is Ellipsis: # return True # if type(value) in {bool, int, float, complex, range, str, Markup}: # return True # if type(value) in {tuple, list, set, frozenset}: # return all(has_safe_repr(v) for v in value) # file path: Internet/Jinja2/src/jinja2/compiler.py # lines: 486-496 # def leave_frame(self, frame: Frame, with_python_scope: bool = False) -> None: # if not with_python_scope: # undefs = [] # for target in frame.symbols.loads: # undefs.append(target) # if undefs: # self.writeline(f"{' = '.join(undefs)} = missing") # def choose_async(self, async_value: str = "async ", sync_value: str = "") -> str: # return async_value if self.environment.is_async else sync_value # def func(self, name: str) -> str: # return f"{self.choose_async()}def {name}" # file path: Internet/Jinja2/src/jinja2/compiler.py # lines: 1199-1211 # else: # src = f"{src}{pass_arg}, " # if pass_arg == "environment": # def finalize(value: t.Any) -> t.Any: # return default(env_finalize(self.environment, value)) # self._finalize = self._FinalizeInfo(finalize, src) # return self._finalize # def _output_const_repr(self, group: t.Iterable[t.Any]) -> str: # """Given a group of constant values converted from ``Output`` # child nodes, produce a string to write to the template module # source. # """ # return repr(concat(group)) # file path: Internet/Jinja2/src/jinja2/compiler.py # lines: 616-627 # def mark_parameter_stored(self, target: str) -> None: # """Marks a parameter in the current parameter definitions as stored. # This will skip the enforced undefined checks. # """ # if self._param_def_block: # self._param_def_block[-1].discard(target) # def push_context_reference(self, target: str) -> None: # self._context_reference_stack.append(target) # def pop_context_reference(self) -> None: # self._context_reference_stack.pop() # def get_context_ref(self) -> str: # return self._context_reference_stack[-1] # file path: Internet/Jinja2/src/jinja2/compiler.py # lines: 80-92 # return visitor # def generate( # node: nodes.Template, # environment: "Environment", # name: t.Optional[str], # filename: t.Optional[str], # stream: t.Optional[t.TextIO] = None, # defer_init: bool = False, # optimized: bool = True, # ) -> t.Optional[str]: # """Generate the python source for a node tree.""" # if not isinstance(node, nodes.Template): # raise TypeError("Can't compile non template nodes") # file path: Internet/Jinja2/src/jinja2/compiler.py # lines: 381-395 # def signature( # self, # node: t.Union[nodes.Call, nodes.Filter, nodes.Test], # frame: Frame, # extra_kwargs: t.Optional[t.Mapping[str, t.Any]] = None, # ) -> None: # """Writes a function call to the stream for the current node. # A leading comma is added automatically. The extra keyword # arguments may not include python keywords otherwise a syntax # error could occur. The extra keyword arguments should be given # as python dict. 
# """ # # if any of the given keyword arguments is a python keyword # # we have to make sure that no invalid call is created. # kwarg_workaround = any( # file path: Internet/Jinja2/src/jinja2/compiler.py # lines: 471-485 # def enter_frame(self, frame: Frame) -> None: # undefs = [] # for target, (action, param) in frame.symbols.loads.items(): # if action == VAR_LOAD_PARAMETER: # pass # elif action == VAR_LOAD_RESOLVE: # self.writeline(f"{target} = {self.get_resolve_func()}({param!r})") # elif action == VAR_LOAD_ALIAS: # self.writeline(f"{target} = {param}") # elif action == VAR_LOAD_UNDEFINED: # undefs.append(target) # else: # raise NotImplementedError("unknown load instruction") # if undefs: # self.writeline(f"{' = '.join(undefs)} = missing") # file path: Internet/Jinja2/src/jinja2/compiler.py # lines: 1159-1173 # class _FinalizeInfo(t.NamedTuple): # const: t.Optional[t.Callable[..., str]] # src: t.Optional[str] # @staticmethod # def _default_finalize(value: t.Any) -> t.Any: # """The default finalize function if the environment isn't # configured with one. Or, if the environment has one, this is # called on that function's output for constants. # """ # return str(value) # _finalize: t.Optional[_FinalizeInfo] = None # def _make_finalize(self) -> _FinalizeInfo: # """Build the finalize function to be used on constants and at # runtime. Cached so it's only created once for all output nodes. # Returns a ``namedtuple`` with the following attributes: # file path: Internet/Jinja2/src/jinja2/compiler.py # lines: 864-876 # def visit_Include(self, node: nodes.Include, frame: Frame) -> None: # """Handles includes.""" # if node.ignore_missing: # self.writeline("try:") # self.indent() # func_name = "get_or_select_template" # if isinstance(node.template, nodes.Const): # if isinstance(node.template.value, str): # func_name = "get_template" # elif isinstance(node.template.value, (tuple, list)): # func_name = "select_template" # elif isinstance(node.template, (nodes.Tuple, nodes.List)): # func_name = "select_template" # file path: Internet/Jinja2/src/jinja2/compiler.py # lines: 1352-1366 # # If we are looking up a variable we might have to deal with the # # case where it's undefined. We can skip that case if the load # # instruction indicates a parameter which are always defined. # if node.ctx == "load": # load = frame.symbols.find_load(ref) # if not ( # load is not None # and load[0] == VAR_LOAD_PARAMETER # and not self.parameter_is_undeclared(ref) # ): # self.write( # f"(undefined(name={node.name!r}) if {ref} is missing else {ref})" # ) # return # self.write(ref) # file path: Internet/Jinja2/src/jinja2/utils.py import enum import json import os import re import typing as t from collections import abc from collections import deque from random import choice from random import randrange from threading import Lock from types import CodeType from urllib.parse import quote_from_bytes import markupsafe if t.TYPE_CHECKING: import typing_extensions as te F = t.TypeVar("F", bound=t.Callable[..., t.Any]) # special singleton representing missing values for the runtime missing: t.Any = type("MissingType", (), {"__repr__": lambda x: "missing"})() internal_code: t.MutableSet[CodeType] = set() concat = "".join def pass_context(f: F) -> F: """Pass the :class:`~jinja2.runtime.Context` as the first argument to the decorated function when called while rendering a template. Can be used on functions, filters, and tests. If only ``Context.eval_context`` is needed, use :func:`pass_eval_context`. 
If only ``Context.environment`` is needed, use :func:`pass_environment`. .. versionadded:: 3.0.0 Replaces ``contextfunction`` and ``contextfilter``. """ f.jinja_pass_arg = _PassArg.context # type: ignore return f def pass_eval_context(f: F) -> F: """Pass the :class:`~jinja2.nodes.EvalContext` as the first argument to the decorated function when called while rendering a template. See :ref:`eval-context`. Can be used on functions, filters, and tests. If only ``EvalContext.environment`` is needed, use :func:`pass_environment`. .. versionadded:: 3.0.0 Replaces ``evalcontextfunction`` and ``evalcontextfilter``. """ f.jinja_pass_arg = _PassArg.eval_context # type: ignore return f def pass_environment(f: F) -> F: """Pass the :class:`~jinja2.Environment` as the first argument to the decorated function when called while rendering a template. Can be used on functions, filters, and tests. .. versionadded:: 3.0.0 Replaces ``environmentfunction`` and ``environmentfilter``. """ f.jinja_pass_arg = _PassArg.environment # type: ignore return f class _PassArg(enum.Enum): context = enum.auto() eval_context = enum.auto() environment = enum.auto() @classmethod def from_obj(cls, obj: F) -> t.Optional["_PassArg"]: if hasattr(obj, "jinja_pass_arg"): return obj.jinja_pass_arg # type: ignore return None def internalcode(f: F) -> F: """Marks the function as internally used""" internal_code.add(f.__code__) return f def is_undefined(obj: t.Any) -> bool: """Check if the object passed is undefined. This does nothing more than performing an instance check against :class:`Undefined` but looks nicer. This can be used for custom filters or tests that want to react to undefined variables. For example a custom default filter can look like this:: def default(var, default=''): if is_undefined(var): return default return var """ from .runtime import Undefined return isinstance(obj, Undefined) Based on the information above, please complete the function in the current file Internet/Jinja2/src/jinja2/utils.py: def consume(iterable: t.Iterable[t.Any]) -> None: """Consumes an iterable without doing anything with it."""
def consume(iterable: t.Iterable[t.Any]) -> None: """Consumes an iterable without doing anything with it."""
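Here the expected body is already visible verbatim in the previous row's prompt context (jinja2/utils.py, lines 80-93): a bare loop that exhausts the iterator purely for its side effects:

def consume(iterable: t.Iterable[t.Any]) -> None:
    """Consumes an iterable without doing anything with it."""
    for _ in iterable:
        pass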