desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
def upload_from_stream_with_id(self, file_id, filename, source, chunk_size_bytes=None, metadata=None):
    """Upload the contents of `source` to GridFS under a caller-chosen id.

    `source` may be a byte string or a file-like object; it is written to
    the file named `filename` with ``_id`` `file_id`.
    """
    stream = self.open_upload_stream_with_id(
        file_id, filename, chunk_size_bytes, metadata)
    with stream as gin:
        gin.write(source)
def open_download_stream(self, file_id):
    """Return a readable GridOut stream for the stored file `file_id`.

    Raises NoFile (via ``_ensure_file``) if no such file exists.
    """
    out = GridOut(self._collection, file_id)
    # Fail fast if the files document does not exist.
    out._ensure_file()
    return out
def download_to_stream(self, file_id, destination):
    """Copy the stored file `file_id`, chunk by chunk, into `destination`."""
    for block in self.open_download_stream(file_id):
        destination.write(block)
def delete(self, file_id):
    """Delete the files document and every chunk stored for `file_id`.

    Raises NoFile if no files document matched `file_id`. Chunks are
    removed regardless, so orphaned chunks are cleaned up either way.
    """
    result = self._files.delete_one({'_id': file_id})
    self._chunks.delete_many({'files_id': file_id})
    if not result.deleted_count:
        raise NoFile(
            'no file could be deleted because none matched %s' % file_id)
def find(self, *args, **kwargs):
    """Return a GridOutCursor over files documents matching ``filter``.

    All positional and keyword arguments are forwarded to GridOutCursor,
    so the usual query modifiers (skip, limit, sort, ...) apply.
    """
    return GridOutCursor(self._collection, *args, **kwargs)
'Opens a Stream from which the application can read the contents of `filename` and optional `revision`. For example:: my_db = MongoClient().test fs = GridFSBucket(my_db) grid_out = fs.open_download_stream_by_name("test_file") contents = grid_out.read() Returns an instance of :class:`~gridfs.grid_file.GridOut`. Raises :...
def open_download_stream_by_name(self, filename, revision=(-1)):
validate_string('filename', filename) query = {'filename': filename} cursor = self._files.find(query) if (revision < 0): skip = (abs(revision) - 1) cursor.limit((-1)).skip(skip).sort('uploadDate', DESCENDING) else: cursor.limit((-1)).skip(revision).sort('uploadDate', ASCENDIN...
def download_to_stream_by_name(self, filename, destination, revision=(-1)):
    """Write the contents of `filename` (optional `revision`) to `destination`.

    Raises NoFile (via ``open_download_stream_by_name``) if no such
    version of the file exists.
    """
    source = self.open_download_stream_by_name(filename, revision)
    for block in source:
        destination.write(block)
def rename(self, file_id, new_filename):
    """Rename the stored file with `file_id` to `new_filename`.

    Raises NoFile if no files document matched `file_id`.
    """
    result = self._files.update_one({'_id': file_id},
                                    {'$set': {'filename': new_filename}})
    if not result.matched_count:
        # Use %s, not %i: file_id is typically an ObjectId, and %i would
        # raise TypeError here, masking the intended NoFile exception.
        raise NoFile('no files could be renamed %r because none matched '
                     'file_id %s' % (new_filename, file_id))
'Write a file to GridFS Application developers should generally not need to instantiate this class directly - instead see the methods provided by :class:`~gridfs.GridFS`. Raises :class:`TypeError` if `root_collection` is not an instance of :class:`~pymongo.collection.Collection`. Any of the file level options specified...
def __init__(self, root_collection, **kwargs):
if (not isinstance(root_collection, Collection)): raise TypeError('root_collection must be an instance of Collection') if (not root_collection.write_concern.acknowledged): raise ConfigurationError('root_collection must use acknowledged write_concern') if ('conte...
def abort(self):
    """Remove any chunks/files document already uploaded and close the file."""
    oid = self._file['_id']
    self._coll.chunks.delete_many({'files_id': oid})
    self._coll.files.delete_one({'_id': oid})
    # GridIn customizes attribute assignment, so bypass __setattr__.
    object.__setattr__(self, '_closed', True)
@property
def closed(self):
    """True once this file has been closed."""
    return self._closed
'Flush `data` to a chunk.'
def __flush_data(self, data):
self.__ensure_indexes() self._file['md5'].update(data) if (not data): return assert (len(data) <= self.chunk_size) chunk = {'files_id': self._file['_id'], 'n': self._chunk_number, 'data': Binary(data)} try: self._chunks.insert_one(chunk) except DuplicateKeyError: self...
def __flush_buffer(self):
    """Flush the buffered bytes out as a chunk and start a fresh buffer."""
    self.__flush_data(self._buffer.getvalue())
    self._buffer.close()
    self._buffer = StringIO()
'Flush the file to the database.'
def __flush(self):
try: self.__flush_buffer() self._file['md5'] = self._file['md5'].hexdigest() self._file['length'] = self._position self._file['uploadDate'] = datetime.datetime.utcnow() return self._coll.files.insert_one(self._file) except DuplicateKeyError: self._raise_file_exist...
def _raise_file_exists(self, file_id):
    """Raise FileExists: a file with ``_id`` `file_id` already exists."""
    raise FileExists('file with _id %r already exists' % file_id)
def close(self):
    """Flush the file to the database and close it.

    A closed file cannot be written any more. Calling close() more than
    once is allowed (subsequent calls are no-ops).
    """
    if self._closed:
        return
    self.__flush()
    # Bypass GridIn's custom __setattr__ when flipping the flag.
    object.__setattr__(self, '_closed', True)
'Write data to the file. There is no return value. `data` can be either a string of bytes or a file-like object (implementing :meth:`read`). If the file has an :attr:`encoding` attribute, `data` can also be a :class:`unicode` (:class:`str` in python 3) instance, which will be encoded as :attr:`encoding` before being wr...
def write(self, data):
if self._closed: raise ValueError('cannot write to a closed file') try: read = data.read except AttributeError: if (not isinstance(data, (text_type, bytes))): raise TypeError('can only write strings or file-like objects') if isinst...
def writelines(self, sequence):
    """Write each string in `sequence` to the file; no separators are added."""
    for item in sequence:
        self.write(item)
def __enter__(self):
    """Support the context manager protocol; returns the file itself."""
    return self
def __exit__(self, exc_type, exc_val, exc_tb):
    """Close the file on context exit; returns False so exceptions propagate."""
    self.close()
    return False
'Read a file from GridFS Application developers should generally not need to instantiate this class directly - instead see the methods provided by :class:`~gridfs.GridFS`. Either `file_id` or `file_document` must be specified, `file_document` will be given priority if present. Raises :class:`TypeError` if `root_collect...
def __init__(self, root_collection, file_id=None, file_document=None):
if (not isinstance(root_collection, Collection)): raise TypeError('root_collection must be an instance of Collection') self.__chunks = root_collection.chunks self.__files = root_collection.files self.__file_id = file_id self.__buffer = EMPTY self.__position = 0 self...
'Reads a chunk at a time. If the current position is within a chunk the remainder of the chunk is returned.'
def readchunk(self):
received = len(self.__buffer) chunk_data = EMPTY chunk_size = int(self.chunk_size) if (received > 0): chunk_data = self.__buffer elif (self.__position < int(self.length)): chunk_number = int(((received + self.__position) / chunk_size)) chunk = self.__chunks.find_one({'files_i...
'Read at most `size` bytes from the file (less if there isn\'t enough data). The bytes are returned as an instance of :class:`str` (:class:`bytes` in python 3). If `size` is negative or omitted all data is read. :Parameters: - `size` (optional): the number of bytes to read'
def read(self, size=(-1)):
self._ensure_file() if (size == 0): return EMPTY remainder = (int(self.length) - self.__position) if ((size < 0) or (size > remainder)): size = remainder received = 0 data = StringIO() while (received < size): chunk_data = self.readchunk() received += len(chun...
'Read one line or up to `size` bytes from the file. :Parameters: - `size` (optional): the maximum number of bytes to read'
def readline(self, size=(-1)):
if (size == 0): return '' remainder = (int(self.length) - self.__position) if ((size < 0) or (size > remainder)): size = remainder received = 0 data = StringIO() while (received < size): chunk_data = self.readchunk() pos = chunk_data.find(NEWLN, 0, size) i...
def tell(self):
    """Return the current read position within this file."""
    return self.__position
'Set the current position of this file. :Parameters: - `pos`: the position (or offset if using relative positioning) to seek to - `whence` (optional): where to seek from. :attr:`os.SEEK_SET` (``0``) for absolute file positioning, :attr:`os.SEEK_CUR` (``1``) to seek relative to the current position, :attr:`os.SEEK_END` ...
def seek(self, pos, whence=_SEEK_SET):
if (whence == _SEEK_SET): new_pos = pos elif (whence == _SEEK_CUR): new_pos = (self.__position + pos) elif (whence == _SEEK_END): new_pos = (int(self.length) + pos) else: raise IOError(22, 'Invalid value for `whence`') if (new_pos < 0): raise IOError(...
def __iter__(self):
    """Return an iterator yielding this file's data in chunk-sized pieces.

    Useful when serving files through a webserver that consumes iterators.
    """
    return GridOutIterator(self, self.__chunks)
def close(self):
    """No-op close(), present only to make GridOut generically file-like."""
    pass
def __enter__(self):
    """Enter the context manager protocol; returns this GridOut."""
    return self
def __exit__(self, exc_type, exc_val, exc_tb):
    """Exit the context manager protocol; exceptions propagate (False)."""
    return False
def __init__(self, collection, filter=None, skip=0, limit=0, no_cursor_timeout=False, sort=None, batch_size=0):
    """Create a cursor over a GridFS files collection.

    Should not be called directly; use GridFS.find() instead. The root
    collection is retained so results can be wrapped in GridOut objects.
    """
    self.__root_collection = collection
    super(GridOutCursor, self).__init__(
        collection.files, filter, skip=skip, limit=limit,
        no_cursor_timeout=no_cursor_timeout, sort=sort,
        batch_size=batch_size)
def next(self):
    """Advance the cursor and wrap the next files document in a GridOut."""
    doc = super(GridOutCursor, self).next()
    return GridOut(self.__root_collection, file_document=doc)
def _clone_base(self):
    """Return an empty GridOutCursor for clone() to copy state into."""
    return GridOutCursor(self.__root_collection)
'Create a new command cursor.'
def __init__(self, collection, cursor_info, address, retrieved=0):
self.__collection = collection self.__id = cursor_info['id'] self.__address = address self.__data = deque(cursor_info['firstBatch']) self.__retrieved = retrieved self.__batch_size = 0 self.__killed = (self.__id == 0) if ('ns' in cursor_info): self.__ns = cursor_info['ns'] els...
def __die(self):
    """Kill this cursor on the server if it is open and not already killed."""
    if self.__id and not self.__killed:
        self.__collection.database.client.close_cursor(
            self.__id, _CursorAddress(self.__address, self.__ns))
        self.__killed = True
def close(self):
    """Explicitly close / kill this cursor.

    Required for PyPy, Jython and other implementations without
    reference-counting garbage collection.
    """
    self.__die()
def batch_size(self, batch_size):
    """Limit the number of documents returned per batch; returns self.

    Raises TypeError if `batch_size` is not an integer and ValueError if
    it is negative.
    """
    if not isinstance(batch_size, integer_types):
        raise TypeError('batch_size must be an integer')
    if batch_size < 0:
        raise ValueError('batch_size must be >= 0')
    # A requested size of 1 is bumped to 2 (same mapping as the original
    # `and/or` trick, written as a clear conditional expression);
    # presumably because a getMore batchSize of 1 would close the cursor.
    self.__batch_size = 2 if batch_size == 1 else batch_size
    return self
'Send a getmore message and handle the response.'
def __send_message(self, operation):
client = self.__collection.database.client listeners = client._event_listeners publish = listeners.enabled_for_commands try: response = client._send_message_with_response(operation, address=self.__address) except AutoReconnect: self.__killed = True raise cmd_duration = re...
'Refreshes the cursor with more data from the server. Returns the length of self.__data after refresh. Will exit early if self.__data is already non-empty. Raises OperationFailure when the cursor cannot be refreshed due to an error on the query.'
def _refresh(self):
if (len(self.__data) or self.__killed): return len(self.__data) if self.__id: (dbname, collname) = self.__ns.split('.', 1) self.__send_message(_GetMore(dbname, collname, self.__batch_size, self.__id, self.__collection.codec_options)) else: self.__killed = True return len(...
@property
def alive(self):
    """Whether this cursor may still return more data.

    Even when True, next() can raise StopIteration; prefer iterating
    with a for loop.
    """
    return bool(len(self.__data) or not self.__killed)
@property
def cursor_id(self):
    """The server-side id of this cursor."""
    return self.__id
@property
def address(self):
    """The (host, port) of the server used, or None."""
    return self.__address
def next(self):
    """Advance the cursor, raising StopIteration when it is exhausted."""
    if not (len(self.__data) or self._refresh()):
        raise StopIteration
    coll = self.__collection
    return coll.database._fix_outgoing(self.__data.popleft(), coll)
def __init__(self, interval, min_interval, target, name=None):
    """Run `target` periodically on a background thread; stop when it returns false.

    :Parameters:
      - `interval`: seconds between calls to `target`.
      - `min_interval`: minimum seconds between calls when wake() is
        called very often.
      - `target`: the function to run.
      - `name` (optional): a name for the underlying thread.
    """
    self._event = False
    self._interval = interval
    self._min_interval = min_interval
    self._target = target
    self._stopped = False
    self._thread = None
    self._name = name
    self._thread_will_exit = False
    self._lock = threading.Lock()
'Start. Multiple calls have no effect. Not safe to call from multiple threads at once.'
def open(self):
with self._lock: if self._thread_will_exit: try: self._thread.join() except ReferenceError: pass self._thread_will_exit = False self._stopped = False started = False try: started = (self._thread and self._thread.is_alive...
def close(self, dummy=None):
    """Stop the executor; call open() to restart.

    The `dummy` parameter lets this method serve as a weakref callback.
    """
    self._stopped = True
def wake(self):
    """Request that the target function be executed soon."""
    self._event = True
'Representation of a deployment of MongoDB servers. :Parameters: - `topology_type`: initial type - `server_descriptions`: dict of (address, ServerDescription) for all seeds - `replica_set_name`: replica set name or None - `max_set_version`: greatest setVersion seen from a primary, or None - `max_election_id`: greatest ...
def __init__(self, topology_type, server_descriptions, replica_set_name, max_set_version, max_election_id, topology_settings):
self._topology_type = topology_type self._replica_set_name = replica_set_name self._server_descriptions = server_descriptions self._max_set_version = max_set_version self._max_election_id = max_election_id self._topology_settings = topology_settings self._incompatible_err = None for s in...
def check_compatible(self):
    """Raise ConfigurationError if any server is wire-incompatible with PyMongo."""
    err = self._incompatible_err
    if err:
        raise ConfigurationError(err)
def reset_server(self, address):
    """Return a copy of this description with `address` marked Unknown."""
    return updated_topology_description(self, ServerDescription(address))
'A copy of this description, with all servers marked Unknown.'
def reset(self):
if (self._topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary): topology_type = TOPOLOGY_TYPE.ReplicaSetNoPrimary else: topology_type = self._topology_type sds = dict(((address, ServerDescription(address)) for address in self._server_descriptions)) return TopologyDescription(topology_ty...
def server_descriptions(self):
    """Return a shallow copy of the (address -> ServerDescription) dict."""
    return self._server_descriptions.copy()
@property
def topology_type(self):
    """The type of this topology."""
    return self._topology_type
@property
def topology_type_name(self):
    """Human-readable name of this topology's type."""
    return TOPOLOGY_TYPE._fields[self._topology_type]
@property
def replica_set_name(self):
    """The replica set name, or None."""
    return self._replica_set_name
@property
def max_set_version(self):
    """Greatest setVersion seen from a primary, or None."""
    return self._max_set_version
@property
def max_election_id(self):
    """Greatest electionId seen from a primary, or None."""
    return self._max_election_id
@property
def known_servers(self):
    """List of servers whose type is known (i.e. not Unknown)."""
    return [sd for sd in self._server_descriptions.values()
            if sd.is_server_type_known]
@property
def common_wire_version(self):
    """Minimum of all known servers' max wire versions, or None.

    Returns None when no server's type is known.
    """
    servers = self.known_servers
    if servers:
        # Use the local snapshot; the original re-evaluated the
        # known_servers property a second time inside min().
        return min(s.max_wire_version for s in servers)
    return None
def has_readable_server(self, read_preference=ReadPreference.PRIMARY):
    """Whether any readable server matches `read_preference`.

    Note: when connected directly to a single server this check reflects
    whatever the selector yields for that server.
    """
    common.validate_read_preference('read_preference', read_preference)
    return any(self.apply_selector(read_preference, None))
def has_writable_server(self):
    """Whether a writable (primary-eligible) server is available."""
    return self.has_readable_server(ReadPreference.PRIMARY)
@property
def document(self):
    """A copy of the document representation of this collation.

    Collation is immutable; mutating the returned dict does not
    mutate this Collation.
    """
    return self.__document.copy()
@property
def address(self):
    """The (host, port) address of this server."""
    return self._address
@property
def server_type(self):
    """The type of this server."""
    return self._server_type
@property
def server_type_name(self):
    """Human-readable name of this server's type."""
    return SERVER_TYPE._fields[self._server_type]
@property
def all_hosts(self):
    """Hosts, passives, and arbiters known to this server."""
    return self._all_hosts
@property
def replica_set_name(self):
    """Replica set name or None."""
    return self._replica_set_name
@property
def primary(self):
    """This server's opinion about who the primary is, or None."""
    return self._primary
@property
def round_trip_time(self):
    """The current average latency, or None.

    A per-host override in ``_host_to_round_trip_time`` takes precedence
    over the measured value.
    """
    try:
        return self._host_to_round_trip_time[self._address]
    except KeyError:
        return self._round_trip_time
@property
def error(self):
    """The last error attempting to connect to this server, or None."""
    return self._error
def __init__(self, data, address, request_id, duration, from_command):
    """Represent a response from the server.

    :Parameters:
      - `data`: raw BSON bytes.
      - `address`: (host, port) of the source server.
      - `request_id`: the request id of this operation.
      - `duration`: the duration of the operation.
      - `from_command`: whether this is the result of a db command.
    """
    self._data = data
    self._address = address
    self._request_id = request_id
    self._duration = duration
    self._from_command = from_command
@property
def data(self):
    """The server response's raw BSON bytes."""
    return self._data
@property
def address(self):
    """(host, port) of the source server."""
    return self._address
@property
def request_id(self):
    """The request id of this operation."""
    return self._request_id
@property
def duration(self):
    """The duration of the operation."""
    return self._duration
@property
def from_command(self):
    """Whether the response is the result of a db command."""
    return self._from_command
def __init__(self, data, address, socket_info, pool, request_id, duration, from_command):
    """Represent a response to an exhaust cursor's initial query.

    In addition to the base Response fields, records the SocketInfo used
    for the initial query and the Pool it came from.
    """
    super(ExhaustResponse, self).__init__(
        data, address, request_id, duration, from_command)
    self._socket_info = socket_info
    self._pool = pool
@property
def socket_info(self):
    """The SocketInfo used for the initial query.

    The server sends batches on this socket, without waiting for
    getMores, until the result set is exhausted or an error occurs.
    """
    return self._socket_info
@property
def pool(self):
    """The Pool from which the SocketInfo came."""
    return self._pool
@property
def _options(self):
    """The original options used to create this ClientOptions."""
    return self.__options
@property
def connect(self):
    """Whether to begin discovering a MongoDB topology automatically."""
    return self.__connect
@property
def codec_options(self):
    """A bson.codec_options.CodecOptions instance."""
    return self.__codec_options
@property
def credentials(self):
    """A pymongo.auth.MongoCredentials instance, or None."""
    return self.__credentials
@property
def local_threshold_ms(self):
    """The local threshold for this instance."""
    return self.__local_threshold_ms
@property
def server_selection_timeout(self):
    """The server selection timeout for this instance, in seconds."""
    return self.__server_selection_timeout
@property
def heartbeat_frequency(self):
    """The monitoring frequency, in seconds."""
    return self.__heartbeat_frequency
@property
def pool_options(self):
    """A pymongo.pool.PoolOptions instance."""
    return self.__pool_options
@property
def read_preference(self):
    """A read preference instance."""
    return self.__read_preference
@property
def replica_set_name(self):
    """Replica set name or None."""
    return self.__replica_set_name
@property
def write_concern(self):
    """A pymongo.write_concern.WriteConcern instance."""
    return self.__write_concern
@property
def read_concern(self):
    """A pymongo.read_concern.ReadConcern instance."""
    return self.__read_concern
'Get a database by client and name. Raises :class:`TypeError` if `name` is not an instance of :class:`basestring` (:class:`str` in python 3). Raises :class:`~pymongo.errors.InvalidName` if `name` is not a valid database name. :Parameters: - `client`: A :class:`~pymongo.mongo_client.MongoClient` instance. - `name`: The ...
def __init__(self, client, name, codec_options=None, read_preference=None, write_concern=None, read_concern=None):
super(Database, self).__init__((codec_options or client.codec_options), (read_preference or client.read_preference), (write_concern or client.write_concern), (read_concern or client.read_concern)) if (not isinstance(name, string_type)): raise TypeError(('name must be an instance of %s'...
'Add a new son manipulator to this database. **DEPRECATED** - `add_son_manipulator` is deprecated. .. versionchanged:: 3.0 Deprecated add_son_manipulator.'
def add_son_manipulator(self, manipulator):
warnings.warn('add_son_manipulator is deprecated', DeprecationWarning, stacklevel=2) base = SONManipulator() def method_overwritten(instance, method): 'Test if this method has been overridden.' return (getattr(instance, method).__func__ != getattr(base, method).__func...
@property
def system_js(self):
    """**DEPRECATED**: a SystemJS helper for this Database."""
    return SystemJS(self)
@property
def client(self):
    """The client instance for this Database."""
    return self.__client
@property
def name(self):
    """The name of this Database."""
    return self.__name
@property
def incoming_manipulators(self):
    """**DEPRECATED**: class names of all incoming SON manipulators."""
    warnings.warn('Database.incoming_manipulators() is deprecated',
                  DeprecationWarning, stacklevel=2)
    return [m.__class__.__name__ for m in self.__incoming_manipulators]
@property
def incoming_copying_manipulators(self):
    """**DEPRECATED**: class names of all incoming SON copying manipulators."""
    warnings.warn('Database.incoming_copying_manipulators() is deprecated',
                  DeprecationWarning, stacklevel=2)
    return [m.__class__.__name__
            for m in self.__incoming_copying_manipulators]
@property
def outgoing_manipulators(self):
    """**DEPRECATED**: class names of all outgoing SON manipulators."""
    warnings.warn('Database.outgoing_manipulators() is deprecated',
                  DeprecationWarning, stacklevel=2)
    return [m.__class__.__name__ for m in self.__outgoing_manipulators]
@property
def outgoing_copying_manipulators(self):
    """**DEPRECATED**: class names of all outgoing SON copying manipulators."""
    warnings.warn('Database.outgoing_copying_manipulators() is deprecated',
                  DeprecationWarning, stacklevel=2)
    return [m.__class__.__name__
            for m in self.__outgoing_copying_manipulators]