Dataset schema (column · type · range):
  _id               string, 2–7 chars
  title             string, 1–88 chars
  partition         string, 3 classes
  text              string, 31–13.1k chars
  language          string, 1 class
  meta_information  dict
q255100
WsgiDAVApp.resolve_provider
validation
def resolve_provider(self, path):
    """Get the registered DAVProvider for a given path.

    Returns:
        tuple: (share, provider)
    """
    # Find DAV provider that matches the share
    share = None
    lower_path = path.lower()
    for r in self.sorted_share_list:
        # @@: Case sensitivity should be an option of some sort here;
        #     os.path.normpath might give the preferred case for a filename.
        if r == "/":
python
{ "resource": "" }
q255101
HTTPAuthenticator.compute_digest_response
validation
def compute_digest_response(
    self, realm, user_name, method, uri, nonce, cnonce, qop, nc, environ
):
    """Computes digest hash.

    Calculation of the A1 (HA1) part is delegated to the dc interface
    method `digest_auth_user()`.

    Args:
        realm (str):
        user_name (str):
        method (str): WebDAV Request Method
        uri (str):
        nonce (str): server generated nonce value
        cnonce (str): client generated cnonce value
python
{ "resource": "" }
q255102
FileLikeQueue.read
validation
def read(self, size=0):
    """Read a chunk of bytes from queue.

    size = 0: Read next chunk (arbitrary length)
         > 0: Read one chunk of `size` bytes (or less if stream was closed)
         < 0: Read all bytes as single chunk (i.e. blocks until stream is closed)

    This method blocks until the requested size become available.
    However, if close() was called, '' is returned immediately.
    """
    res = self.unread
    self.unread = ""
    # Get next chunk, cumulating requested size as needed
python
{ "resource": "" }
q255103
StreamingFile.read
validation
def read(self, size=None):
    """Read bytes from an iterator."""
    while size is None or len(self.buffer) < size:
        try:
            self.buffer += next(self.data_stream)
        except StopIteration:
            break
python
{ "resource": "" }
q255104
ExtServer.handle_error
validation
def handle_error(self, request, client_address):
    """Handle an error gracefully.  May be overridden.

    The default is to _logger.info a traceback and continue.
    """
    ei = sys.exc_info()
    e = ei[1]
    # Suppress stack trace when client aborts connection disgracefully:
    # 10053: Software caused connection abort
    # 10054: Connection reset by peer
    if e.args[0] in (10053, 10054):
        _logger.error("*** Caught socket.error: {}".format(e))
        return
    # This is what BaseHTTPServer.HTTPServer.handle_error does, but with
    # added thread ID and using stderr
    _logger.error("-" * 40, file=sys.stderr)
python
{ "resource": "" }
q255105
HgResource.end_write
validation
def end_write(self, with_errors):
    """Called when PUT has finished writing.

    See DAVResource.end_write()
    """
    if
python
{ "resource": "" }
q255106
HgResource.handle_copy
validation
def handle_copy(self, dest_path, depth_infinity):
    """Handle a COPY request natively."""
    destType, destHgPath = util.pop_path(dest_path)
    destHgPath = destHgPath.strip("/")
    ui = self.provider.ui
    repo = self.provider.repo
    _logger.info("handle_copy %s -> %s" % (self.localHgPath, destHgPath))
    if self.rev is None and destType == "edit":
        # COPY /edit/a/b to /edit/c/d: turn into 'hg copy -f a/b c/d'
        commands.copy(ui, repo, self.localHgPath, destHgPath, force=True)
    elif self.rev is None and destType == "released":
python
{ "resource": "" }
q255107
HgResourceProvider._get_log
validation
def _get_log(self, limit=None):
    """Read log entries into a list of dictionaries."""
    self.ui.pushbuffer()
    commands.log(self.ui, self.repo, limit=limit, date=None, rev=None, user=None)
    res = self.ui.popbuffer().strip()

    logList = []
    for logentry in res.split("\n\n"):
        log = {}
        logList.append(log)
        for line in logentry.split("\n"):
            k, v = line.split(":", 1)
            assert k in ("changeset", "tag", "user", "date",
python
{ "resource": "" }
q255108
HgResourceProvider._get_repo_info
validation
def _get_repo_info(self, environ, rev, reload=False):
    """Return a dictionary containing all files under source control.

    dirinfos:
        Dictionary containing direct members for every collection.
        {folderpath: (collectionlist, filelist), ...}
    files:
        Sorted list of all file paths in the manifest.
    filedict:
        Dictionary containing all files under source control.

    ::

        {'dirinfos': {'': (['wsgidav', 'tools', 'WsgiDAV.egg-info', 'tests'],
                           ['index.rst',
                            'wsgidav MAKE_DAILY_BUILD.launch',
                            'wsgidav run_server.py DEBUG.launch',
                            'wsgidav-paste.conf',
                            ...
                            'setup.py']),
                      'wsgidav': (['addons', 'samples', 'server', 'interfaces'],
                                  ['__init__.pyc',
                                   'dav_error.pyc',
                                   'dav_provider.pyc',
                                   ...
                                   'wsgidav_app.py']),
                      },
         'files': ['.hgignore',
                   'ADDONS.txt',
                   'wsgidav/samples/mysql_dav_provider.py',
                   ...
                   ],
         'filedict': {'.hgignore': True,
                      'README.txt': True,
                      'WsgiDAV.egg-info/PKG-INFO': True,
                      }
         }
    """
    caches = environ.setdefault("wsgidav.hg.cache", {})
python
{ "resource": "" }
q255109
HgResourceProvider.get_resource_inst
validation
def get_resource_inst(self, path, environ):
    """Return HgResource object for path.

    See DAVProvider.get_resource_inst()
    """
    self._count_get_resource_inst += 1

    # HG expects the resource paths without leading '/'
    localHgPath = path.strip("/")
    rev = None
    cmd, rest = util.pop_path(path)

    if cmd == "":
        return VirtualCollection(
            path, environ, "root", ["edit", "released", "archive"]
        )
    elif cmd == "edit":
        localHgPath = rest.strip("/")
        rev = None
    elif cmd == "released":
        localHgPath = rest.strip("/")
        rev = "tip"
    elif cmd == "archive":
        if rest == "/":
            # Browse /archive: return a list of revision folders:
            loglist = self._get_log(limit=10)
            members = [compat.to_native(l["local_id"]) for l in loglist]
            return VirtualCollection(path, environ, "Revisions", members)
        revid, rest = util.pop_path(rest)
        try:
            int(revid)
python
{ "resource": "" }
q255110
_DAVResource.get_preferred_path
validation
def get_preferred_path(self):
    """Return preferred mapping for a resource mapping.

    Different URLs may map to the same resource, e.g.:
        '/a/b' == '/A/b' == '/a/b/'

    get_preferred_path() returns the same value for all these variants, e.g.:
        '/a/b/'   (assuming resource names considered case insensitive)

    @param path: a UTF-8 encoded, unquoted byte string.
    @return: a UTF-8 encoded, unquoted byte string.
    """
    if self.path in ("", "/"):
        return "/"
    # Append '/' for collections
    if
python
{ "resource": "" }
q255111
_DAVResource.get_href
validation
def get_href(self):
    """Convert path to a URL that can be passed to XML responses.

    Byte string, UTF-8 encoded, quoted.

    See http://www.webdav.org/specs/rfc4918.html#rfc.section.8.3
    We are using the path-absolute option. i.e. starting with '/'.
python
{ "resource": "" }
q255112
_DAVResource.set_property_value
validation
def set_property_value(self, name, value, dry_run=False):
    """Set a property value or remove a property.

    value == None means 'remove property'.
    Raise HTTP_FORBIDDEN if property is read-only, or not supported.

    When dry_run is True, this function should raise errors, as in a real
    run, but MUST NOT change any data.

    This default implementation

    - raises HTTP_FORBIDDEN, if trying to modify a locking property
    - raises HTTP_FORBIDDEN, if trying to modify an immutable {DAV:} property
    - handles Windows' Win32LastModifiedTime to set the getlastmodified
      property, if enabled
    - stores everything else as dead property, if a property manager is
      present.
    - raises HTTP_FORBIDDEN, else

    Removing a non-existing prop is NOT an error.

    Note: RFC 4918 states that {DAV:}displayname 'SHOULD NOT be protected'

    A resource provider may override this method, to update supported custom
    live properties.
    """
    assert value is None or xml_tools.is_etree_element(value)

    if name in _lockPropertyNames:
        # Locking properties are always read-only
        raise DAVError(
            HTTP_FORBIDDEN, err_condition=PRECONDITION_CODE_ProtectedProperty
        )

    # Live property
    config = self.environ["wsgidav.config"]
    # hotfixes = config.get("hotfixes", {})
    mutableLiveProps = config.get("mutable_live_props", [])
    # Accept custom live property updates on resources if configured.
    if (
        name.startswith("{DAV:}")
        and name in _standardLivePropNames
        and name in mutableLiveProps
    ):
        # Please note that some properties should not be mutable according
        # to RFC4918. This includes the 'getlastmodified' property, which
        # it may still make sense to make mutable in order to support time
        # stamp changes from e.g. utime calls or the touch or rsync -a
        # commands.
        if name in ("{DAV:}getlastmodified", "{DAV:}last_modified"):
            try:
                return self.set_last_modified(self.path, value.text, dry_run)
            except Exception:
                _logger.warning(
python
{ "resource": "" }
q255113
_DAVResource.remove_all_properties
validation
def remove_all_properties(self, recursive):
    """Remove all associated dead properties."""
python
{ "resource": "" }
q255114
_DAVResource.is_locked
validation
def is_locked(self):
    """Return True, if URI is locked."""
    if self.provider.lock_manager is None:
        return False
python
{ "resource": "" }
q255115
DAVProvider.set_share_path
validation
def set_share_path(self, share_path):
    """Set application location for this resource provider.

    @param share_path: a UTF-8 encoded, unquoted byte string.
    """
    # if isinstance(share_path, unicode):
    #     share_path = share_path.encode("utf8")
    assert share_path == "" or share_path.startswith("/")
python
{ "resource": "" }
q255116
DAVProvider.ref_url_to_path
validation
def ref_url_to_path(self, ref_url):
    """Convert a refUrl to a path, by stripping the share prefix.

    Used to calculate the <path> from a storage key by inverting
    get_ref_url().
    """
python
{ "resource": "" }
q255117
DAVProvider.is_collection
validation
def is_collection(self, path, environ):
    """Return True, if path maps to an existing collection resource.

    This method should only be used, if no other information is queried
    for <path>.
python
{ "resource": "" }
q255118
string_to_xml
validation
def string_to_xml(text):
    """Convert XML string into etree.Element."""
    try:
        return etree.XML(text)
    except Exception:
        # TODO:
        # ExpatError: reference to invalid character number: line 1, column 62
        # litmus fails, when xml is used instead of lxml
        # 18. propget............... FAIL (PROPFIND on `/temp/litmus/prop2':
        #     Could not read status line: connection was closed by server)
        # text = <ns0:high-unicode xmlns:ns0="http://example.com/neon/litmus/">&#55296;&#56320;
        # </ns0:high-unicode>
        # t2 = text.encode("utf8")
python
{ "resource": "" }
q255119
xml_to_bytes
validation
def xml_to_bytes(element, pretty_print=False):
    """Wrapper for etree.tostring, that takes care of unsupported pretty_print
    option and prepends an encoding header."""
    if use_lxml:
        xml = etree.tostring(
            element, encoding="UTF-8", xml_declaration=True, pretty_print=pretty_print
        )
    else:
        xml = etree.tostring(element, encoding="UTF-8")
python
{ "resource": "" }
q255120
make_sub_element
validation
def make_sub_element(parent, tag, nsmap=None):
    """Wrapper for etree.SubElement, that takes care of unsupported nsmap option."""
    if use_lxml:
python
{ "resource": "" }
q255121
element_content_as_string
validation
def element_content_as_string(element):
    """Serialize etree.Element.

    Note: element may contain more than one child or only text (i.e. no
    child at all). Therefore the resulting string may raise an exception,
    when passed back to etree.XML().
    """
    if len(element) == 0:
        return element.text or ""  # Make sure, None is returned as ''
python
{ "resource": "" }
q255122
_get_checked_path
validation
def _get_checked_path(path, config, must_exist=True, allow_none=True):
    """Convert path to absolute if not None."""
    if path in (None, ""):
        if allow_none:
            return None
        raise ValueError("Invalid path {!r}".format(path))
    # Evaluate path relative to the folder of the config file (if any)
    config_file = config.get("_config_file")
    if config_file and not os.path.isabs(path):
        path
python
{ "resource": "" }
q255123
_read_config_file
validation
def _read_config_file(config_file, verbose):
    """Read configuration file options into a dictionary."""
    config_file = os.path.abspath(config_file)

    if not os.path.exists(config_file):
        raise RuntimeError("Couldn't open configuration file '{}'.".format(config_file))

    if config_file.endswith(".json"):
        with io.open(config_file, mode="r", encoding="utf-8") as json_file:
            # Minify the JSON file to strip embedded comments
            minified = jsmin(json_file.read())
        conf = json.loads(minified)
    elif config_file.endswith(".yaml"):
        with io.open(config_file, mode="r", encoding="utf-8") as yaml_file:
            conf = yaml.safe_load(yaml_file)
    else:
        try:
            import imp

            conf = {}
            configmodule = imp.load_source("configuration_module", config_file)

            for k, v in vars(configmodule).items():
                if k.startswith("__"):
                    continue
python
{ "resource": "" }
q255124
_run_paste
validation
def _run_paste(app, config, mode):
    """Run WsgiDAV using paste.httpserver, if Paste is installed.

    See http://pythonpaste.org/modules/httpserver.html for more options
    """
    from paste import httpserver

    version = "WsgiDAV/{} {} Python {}".format(
        __version__, httpserver.WSGIHandler.server_version, util.PYTHON_VERSION
    )
    _logger.info("Running {}...".format(version))

    # See http://pythonpaste.org/modules/httpserver.html for more options
    server = httpserver.serve(
        app,
        host=config["host"],
        port=config["port"],
        server_version=version,
        # This option enables handling of keep-alive
        # and expect-100:
        protocol_version="HTTP/1.1",
        start_loop=False,
    )

    if config["verbose"] >= 5:
        __handle_one_request = server.RequestHandlerClass.handle_one_request

        def handle_one_request(self):
            __handle_one_request(self)
            if self.close_connection == 1:
                _logger.debug("HTTP Connection : close")
            else:
                _logger.debug("HTTP Connection : continue")

        server.RequestHandlerClass.handle_one_request = handle_one_request

        # __handle = server.RequestHandlerClass.handle
python
{ "resource": "" }
q255125
_run_gevent
validation
def _run_gevent(app, config, mode):
    """Run WsgiDAV using gevent if gevent is installed.

    See
      https://github.com/gevent/gevent/blob/master/src/gevent/pywsgi.py#L1356
      https://github.com/gevent/gevent/blob/master/src/gevent/server.py#L38
    for more options
    """
    import gevent
    import gevent.monkey

    gevent.monkey.patch_all()
    from gevent.pywsgi import WSGIServer

    server_args = {
        "bind_addr": (config["host"], config["port"]),
        "wsgi_app": app,
        # TODO: SSL support
        "keyfile": None,
        "certfile": None,
    }
    protocol = "http"
    # Override or add custom args
    server_args.update(config.get("server_args", {}))
python
{ "resource": "" }
q255126
_run__cherrypy
validation
def _run__cherrypy(app, config, mode):
    """Run WsgiDAV using cherrypy.wsgiserver if CherryPy is installed."""
    assert mode == "cherrypy-wsgiserver"

    try:
        from cherrypy import wsgiserver
        from cherrypy.wsgiserver.ssl_builtin import BuiltinSSLAdapter

        _logger.warning("WARNING: cherrypy.wsgiserver is deprecated.")
        _logger.warning(
            "         Starting with CherryPy 9.0 the functionality from cherrypy.wsgiserver"
        )
        _logger.warning("         was moved to the cheroot project.")
        _logger.warning("         Consider using --server=cheroot.")
    except ImportError:
        _logger.error("*" * 78)
        _logger.error("ERROR: Could not import cherrypy.wsgiserver.")
        _logger.error(
            "Try `pip install cherrypy` or specify another server using the --server option."
        )
        _logger.error("Note that starting with CherryPy 9.0, the server was moved to")
        _logger.error(
            "the cheroot project, so it is recommended to use `-server=cheroot`"
        )
        _logger.error("and run `pip install cheroot` instead.")
        _logger.error("*" * 78)
        raise

    server_name = "WsgiDAV/{} {} Python/{}".format(
        __version__, wsgiserver.CherryPyWSGIServer.version, util.PYTHON_VERSION
    )
    wsgiserver.CherryPyWSGIServer.version = server_name

    # Support SSL
    ssl_certificate = _get_checked_path(config.get("ssl_certificate"), config)
    ssl_private_key = _get_checked_path(config.get("ssl_private_key"), config)
    ssl_certificate_chain = _get_checked_path(
        config.get("ssl_certificate_chain"), config
    )
    protocol = "http"
    if ssl_certificate:
        assert ssl_private_key
        wsgiserver.CherryPyWSGIServer.ssl_adapter = BuiltinSSLAdapter(
            ssl_certificate, ssl_private_key, ssl_certificate_chain
        )
        protocol = "https"
python
{ "resource": "" }
q255127
_run_cheroot
validation
def _run_cheroot(app, config, mode):
    """Run WsgiDAV using cheroot.server if Cheroot is installed."""
    assert mode == "cheroot"
    try:
        from cheroot import server, wsgi

        # from cheroot.ssl.builtin import BuiltinSSLAdapter
        # import cheroot.ssl.pyopenssl
    except ImportError:
        _logger.error("*" * 78)
        _logger.error("ERROR: Could not import Cheroot.")
        _logger.error(
            "Try `pip install cheroot` or specify another server using the --server option."
        )
        _logger.error("*" * 78)
        raise

    server_name = "WsgiDAV/{} {} Python/{}".format(
        __version__, wsgi.Server.version, util.PYTHON_VERSION
    )
    wsgi.Server.version = server_name

    # Support SSL
    ssl_certificate = _get_checked_path(config.get("ssl_certificate"), config)
    ssl_private_key = _get_checked_path(config.get("ssl_private_key"), config)
    ssl_certificate_chain = _get_checked_path(
        config.get("ssl_certificate_chain"), config
    )
    ssl_adapter = config.get("ssl_adapter", "builtin")
    protocol = "http"
    if ssl_certificate and ssl_private_key:
        ssl_adapter = server.get_ssl_adapter_class(ssl_adapter)
        wsgi.Server.ssl_adapter = ssl_adapter(
            ssl_certificate, ssl_private_key, ssl_certificate_chain
        )
        protocol = "https"
        _logger.info("SSL / HTTPS enabled. Adapter: {}".format(ssl_adapter))
    elif ssl_certificate or ssl_private_key:
        raise RuntimeError(
            "Option 'ssl_certificate' and 'ssl_private_key' must be used together."
        )
    # elif ssl_adapter:
    #     print("WARNING: Ignored
python
{ "resource": "" }
q255128
_run_flup
validation
def _run_flup(app, config, mode):
    """Run WsgiDAV using flup.server.fcgi if Flup is installed."""
    # http://trac.saddi.com/flup/wiki/FlupServers
    if mode == "flup-fcgi":
        from flup.server.fcgi import WSGIServer, __version__ as flupver
    elif mode == "flup-fcgi-fork":
        from flup.server.fcgi_fork import WSGIServer, __version__ as
python
{ "resource": "" }
q255129
_run_wsgiref
validation
def _run_wsgiref(app, config, mode):
    """Run WsgiDAV using wsgiref.simple_server, on Python 2.5+."""
    # http://www.python.org/doc/2.5.2/lib/module-wsgiref.html
    from wsgiref.simple_server import make_server, software_version

    version = "WsgiDAV/{} {}".format(__version__, software_version)
    _logger.info("Running {}...".format(version))
    _logger.warning(
        "WARNING: This single threaded server (wsgiref) is not meant for production."
    )
python
{ "resource": "" }
q255130
_run_ext_wsgiutils
validation
def _run_ext_wsgiutils(app, config, mode):
    """Run WsgiDAV using ext_wsgiutils_server from the wsgidav package."""
    from wsgidav.server import ext_wsgiutils_server

    _logger.info(
        "Running WsgiDAV {} on wsgidav.ext_wsgiutils_server...".format(__version__)
    )
    _logger.warning(
python
{ "resource": "" }
q255131
RequestServer.do_PROPPATCH
validation
def do_PROPPATCH(self, environ, start_response):
    """Handle PROPPATCH request to set or remove a property.

    @see http://www.webdav.org/specs/rfc4918.html#METHOD_PROPPATCH
    """
    path = environ["PATH_INFO"]
    res = self._davProvider.get_resource_inst(path, environ)

    # Only accept Depth: 0 (but assume this, if omitted)
    environ.setdefault("HTTP_DEPTH", "0")
    if environ["HTTP_DEPTH"] != "0":
        self._fail(HTTP_BAD_REQUEST, "Depth must be '0'.")
    if res is None:
        self._fail(HTTP_NOT_FOUND)

    self._evaluate_if_headers(res, environ)
    self._check_write_permission(res, "0", environ)

    # Parse request
    requestEL = util.parse_xml_body(environ)

    if requestEL.tag != "{DAV:}propertyupdate":
        self._fail(HTTP_BAD_REQUEST)

    # Create a list of update request tuples: (name, value)
    propupdatelist = []

    for ppnode in requestEL:
        propupdatemethod = None
        if ppnode.tag == "{DAV:}remove":
            propupdatemethod = "remove"
        elif ppnode.tag == "{DAV:}set":
            propupdatemethod = "set"
        else:
            self._fail(
                HTTP_BAD_REQUEST, "Unknown tag (expected 'set' or 'remove')."
            )

        for propnode in ppnode:
            if propnode.tag != "{DAV:}prop":
                self._fail(HTTP_BAD_REQUEST, "Unknown tag (expected 'prop').")

            for propertynode in propnode:
                propvalue = None
                if propupdatemethod == "remove":
                    propvalue = None  # Mark as 'remove'
                    if len(propertynode) > 0:
                        # 14.23: All the XML elements in a 'prop' XML
                        # element inside of a 'remove' XML element MUST be
                        # empty
                        self._fail(
                            HTTP_BAD_REQUEST,
                            "prop element must be empty for 'remove'.",
                        )
                else:
                    propvalue = propertynode

                propupdatelist.append((propertynode.tag, propvalue))

    # Apply updates in SIMULATION MODE and create a result list (name, result)
    successflag = True
    writeresultlist = []

    for (name, propvalue) in propupdatelist:
        try:
            res.set_property_value(name, propvalue, dry_run=True)
        except Exception as e:
            writeresult = as_DAVError(e)
        else:
            writeresult = "200 OK"
        writeresultlist.append((name, writeresult))
        successflag = successflag and writeresult == "200 OK"

    # Generate response list of 2-tuples (name, value)
    # <value> is None on success, or an instance of DAVError
    propResponseList = []
    responsedescription = []

    if not successflag:
        # If dry run failed: convert all OK to FAILED_DEPENDENCY.
        for (name, result) in writeresultlist:
            if result == "200 OK":
python
{ "resource": "" }
q255132
RequestServer.do_MKCOL
validation
def do_MKCOL(self, environ, start_response):
    """Handle MKCOL request to create a new collection.

    @see http://www.webdav.org/specs/rfc4918.html#METHOD_MKCOL
    """
    path = environ["PATH_INFO"]
    provider = self._davProvider
    # res = provider.get_resource_inst(path, environ)

    # Do not understand ANY request body entities
    if util.get_content_length(environ) != 0:
        self._fail(
            HTTP_MEDIATYPE_NOT_SUPPORTED,
            "The server does not handle any body content.",
        )

    # Only accept Depth: 0 (but assume this, if omitted)
python
{ "resource": "" }
q255133
RequestServer._stream_data_chunked
validation
def _stream_data_chunked(self, environ, block_size):
    """Get the data from a chunked transfer."""
    # Chunked Transfer Coding
    # http://www.servlets.com/rfcs/rfc2616-sec3.html#sec3.6.1

    if "Darwin" in environ.get("HTTP_USER_AGENT", "") and environ.get(
        "HTTP_X_EXPECTED_ENTITY_LENGTH"
    ):
        # Mac Finder, that does not prepend chunk-size + CRLF ,
        # like it should to comply with the spec. It sends chunk
        # size as integer in a HTTP header instead.
        WORKAROUND_CHUNK_LENGTH = True
        buf = environ.get("HTTP_X_EXPECTED_ENTITY_LENGTH", "0")
        length = int(buf)
    else:
        WORKAROUND_CHUNK_LENGTH = False
        buf = environ["wsgi.input"].readline()
        environ["wsgidav.some_input_read"] = 1
        if buf == compat.b_empty:
            length = 0
        else:
            length = int(buf, 16)

    while length > 0:
        buf =
python
{ "resource": "" }
q255134
RequestServer._stream_data
validation
def _stream_data(self, environ, content_length, block_size):
    """Get the data from a non-chunked transfer."""
    if content_length == 0:
        # TODO: review this
        # XP and Vista MiniRedir submit PUT with Content-Length 0,
        # before LOCK and the real PUT. So we have to accept this.
        _logger.info("PUT: Content-Length == 0. Creating empty file...")

    # elif content_length < 0:
    #     # TODO: review this
    #     # If CONTENT_LENGTH is invalid, we may try to workaround this
    #     # by reading until the end of the stream. This may block however!
    #     # The iterator produced small chunks of varying size, but not
    #     # sure, if we always get everything before it times out.
    #     _logger.warning("PUT with invalid Content-Length (%s). "
    #                     "Trying to read all (this may timeout)..."
    #                     .format(environ.get("CONTENT_LENGTH")))
    #     nb = 0
    #     try:
    #         for s in environ["wsgi.input"]:
    #             environ["wsgidav.some_input_read"] = 1
    #             _logger.debug("PUT: read from wsgi.input.__iter__, len=%s" % len(s))
    #             yield s
    #             nb += len(s)
    #     except socket.timeout:
python
{ "resource": "" }
q255135
CouchPropertyManager._find
validation
def _find(self, url):
    """Return properties document for path."""
    # Query the permanent view to find a url
    vr = self.db.view("properties/by_url", key=url, include_docs=True)
    _logger.debug("find(%r) returned %s" % (url, len(vr)))
python
{ "resource": "" }
q255136
SimpleDomainController.get_domain_realm
validation
def get_domain_realm(self, path_info, environ):
    """Resolve a relative url to the appropriate realm name."""
python
{ "resource": "" }
q255137
SimpleDomainController.digest_auth_user
validation
def digest_auth_user(self, realm, user_name, environ):
    """Computes digest hash A1 part."""
    user = self._get_realm_entry(realm, user_name)
    if user is None:
        return False
    password =
python
{ "resource": "" }
q255138
LockStorageDict.get
validation
def get(self, token):
    """Return a lock dictionary for a token.

    If the lock does not exist or is expired, None is returned.

    token:
        lock token
    Returns:
        Lock dictionary or <None>

    Side effect: if lock is expired, it will be purged and None is returned.
    """
    self._lock.acquire_read()
    try:
        lock = self._dict.get(token)
        if lock is None:
            # Lock not found: purge dangling URL2TOKEN entries
            _logger.debug("Lock purged dangling: {}".format(token))
python
{ "resource": "" }
q255139
LockStorageDict.create
validation
def create(self, path, lock):
    """Create a direct lock for a resource path.

    path:
        Normalized path (utf8 encoded string, no trailing '/')
    lock:
        lock dictionary, without a token entry
    Returns:
        New unique lock token.: <lock

    **Note:** the lock dictionary may be modified on return:

    - lock['root'] is ignored and set to the normalized <path>
    - lock['timeout'] may be normalized and shorter than requested
    - lock['token'] is added
    """
    self._lock.acquire_write()
    try:
        # We expect only a lock definition, not an existing lock
        assert lock.get("token") is None
        assert lock.get("expire") is None, "Use timeout instead of expire"
        assert path and "/" in path

        # Normalize root: /foo/bar
        org_path = path
        path = normalize_lock_root(path)
        lock["root"] = path

        # Normalize timeout from ttl to expire-date
        timeout = float(lock.get("timeout"))
        if timeout is None:
            timeout = LockStorageDict.LOCK_TIME_OUT_DEFAULT
        elif timeout < 0 or timeout > LockStorageDict.LOCK_TIME_OUT_MAX:
            timeout = LockStorageDict.LOCK_TIME_OUT_MAX
python
{ "resource": "" }
q255140
LockStorageDict.refresh
validation
def refresh(self, token, timeout):
    """Modify an existing lock's timeout.

    token:
        Valid lock token.
    timeout:
        Suggested lifetime in seconds (-1 for infinite).
        The real expiration time may be shorter than requested!
    Returns:
        Lock dictionary.
        Raises ValueError, if token is invalid.
    """
    assert token in self._dict, "Lock must exist"
    assert timeout == -1 or timeout > 0
    if timeout < 0 or timeout > LockStorageDict.LOCK_TIME_OUT_MAX:
        timeout = LockStorageDict.LOCK_TIME_OUT_MAX

    self._lock.acquire_write()
    try:
python
{ "resource": "" }
q255141
LockStorageDict.delete
validation
def delete(self, token):
    """Delete lock.

    Returns True on success. False, if token does not exist, or is expired.
    """
    self._lock.acquire_write()
    try:
        lock = self._dict.get(token)
        _logger.debug("delete {}".format(lock_string(lock)))
        if lock is None:
            return False
        # Remove url to lock mapping
        key = "URL2TOKEN:{}".format(lock.get("root"))
python
{ "resource": "" }
q255142
LockStorageShelve.clear
validation
def clear(self):
    """Delete all entries."""
    self._lock.acquire_write()  # TODO: read access is enough?
    try:
        was_closed = self._dict is None
python
{ "resource": "" }
q255143
FileResource.set_last_modified
validation
def set_last_modified(self, dest_path, time_stamp, dry_run):
    """Set last modified time for destPath to timeStamp on epoch-format"""
    # Translate time from RFC 1123 to seconds since epoch format
python
{ "resource": "" }
q255144
lock_string
validation
def lock_string(lock_dict):
    """Return readable rep."""
    if not lock_dict:
        return "Lock: None"

    if lock_dict["expire"] < 0:
        expire = "Infinite ({})".format(lock_dict["expire"])
    else:
        expire = "{} (in {} seconds)".format(
            util.get_log_time(lock_dict["expire"]), lock_dict["expire"] - time.time()
        )

    return "Lock(<{}..>, '{}', {}, {}, depth-{}, until {}".format(
python
{ "resource": "" }
q255145
LockManager._generate_lock
validation
def _generate_lock(
    self, principal, lock_type, lock_scope, lock_depth, lock_owner, path, timeout
):
    """Acquire lock and return lock_dict.

    principal
        Name of the principal.
    lock_type
        Must be 'write'.
    lock_scope
        Must be 'shared' or 'exclusive'.
    lock_depth
        Must be '0' or 'infinity'.
    lock_owner
        String identifying the owner.
python
{ "resource": "" }
q255146
LockManager.acquire
validation
def acquire(
    self,
    url,
    lock_type,
    lock_scope,
    lock_depth,
    lock_owner,
    timeout,
    principal,
    token_list,
):
    """Check for permissions and acquire a lock.

    On success return new lock dictionary.
    On error raise a DAVError with an embedded DAVErrorCondition.
    """
    url = normalize_lock_root(url)
    self._lock.acquire_write()
    try:
        # Raises DAVError on conflict:
        self._check_lock_permission(
python
{ "resource": "" }
q255147
LockManager.refresh
validation
def refresh(self, token, timeout=None):
    """Set new timeout for lock, if existing and valid."""
    if timeout is None:
python
{ "resource": "" }
q255148
LockManager.get_lock
validation
def get_lock(self, token, key=None):
    """Return lock_dict, or None, if not found or invalid.

    Side effect: if lock is expired, it will be purged and None is returned.

    key:
        name of lock attribute that will be returned instead of a dictionary.
    """
    assert key in (
        None,
        "type",
        "scope",
python
{ "resource": "" }
q255149
ReadWriteLock.acquire_read
validation
def acquire_read(self, timeout=None):
    """Acquire a read lock for the current thread, waiting at most
    timeout seconds or doing a non-blocking check in case timeout is <= 0.

    In case timeout is None, the call to acquire_read blocks until the
    lock request can be serviced.

    In case the timeout expires before the lock could be serviced, a
    RuntimeError is thrown."""

    if timeout is not None:
        endtime = time() + timeout
    me = currentThread()
    self.__condition.acquire()
    try:
        if self.__writer is me:
            # If we are the writer, grant a new read lock, always.
            self.__writercount += 1
            return
        while True:
            if self.__writer is None:
                # Only test anything if there is no current writer.
                if self.__upgradewritercount or self.__pendingwriters:
                    if me in self.__readers:
                        # Only grant a read lock if we already have one
                        # in case writers are waiting for their turn.
                        # This means that writers can't easily get starved
                        # (but see below, readers can).
                        self.__readers[me] += 1
                        return
                    # No, we aren't a reader (yet), wait for
python
{ "resource": "" }
q255150
ReadWriteLock.acquire_write
validation
def acquire_write(self, timeout=None):
    """Acquire a write lock for the current thread, waiting at most
    timeout seconds or doing a non-blocking check in case timeout is <= 0.

    In case the write lock cannot be serviced due to the deadlock
    condition mentioned above, a ValueError is raised.

    In case timeout is None, the call to acquire_write blocks until the
    lock request can be serviced.

    In case the timeout expires before the lock could be serviced, a
    RuntimeError is thrown."""

    if timeout is not None:
        endtime = time() + timeout
    me, upgradewriter = currentThread(), False
    self.__condition.acquire()
    try:
        if self.__writer is me:
            # If we are the writer, grant a new write lock, always.
            self.__writercount += 1
            return
        elif me in self.__readers:
            # If we are a reader, no need to add us to pendingwriters,
            # we get the upgradewriter slot.
            if self.__upgradewritercount:
                # If we are a reader and want to upgrade, and someone
                # else also wants to upgrade, there is no way we can do
                # this except if one of us releases all his read locks.
                # Signal this to user.
                raise ValueError("Inevitable dead lock, denying write lock")
            upgradewriter = True
            self.__upgradewritercount = self.__readers.pop(me)
        else:
            # We aren't a reader, so add us to the pending writers queue
            # for synchronization with the readers.
            self.__pendingwriters.append(me)
        while True:
            if not self.__readers and self.__writer is None:
                # Only test anything if there are no readers and writers.
                if self.__upgradewritercount:
                    if upgradewriter:
                        # There is a writer to upgrade, and it's us. Take
                        # the write lock.
                        self.__writer = me
                        self.__writercount = self.__upgradewritercount + 1
                        self.__upgradewritercount = 0
                        return
                    # There is a writer to upgrade, but it's not us.
                    # Always leave the upgrade writer the advance slot,
                    # because he presumes he'll get a write lock directly
                    # from a previously held read lock.
                elif self.__pendingwriters[0] is me:
                    # If there are no readers and writers, it's always
                    # fine for us to take the writer slot, removing us
                    # from the pending writers queue.
                    # This might mean starvation for readers, though.
python
{ "resource": "" }
q255151
ReadWriteLock.release
validation
def release(self):
    """Release the currently held lock.

    In case the current thread holds no lock, a ValueError is thrown."""

    me = currentThread()
    self.__condition.acquire()
    try:
        if self.__writer is me:
            # We are the writer, take one nesting depth away.
            self.__writercount -= 1
            if not self.__writercount:
                # No more write locks; take our writer position away and
                # notify waiters of the new circumstances.
                self.__writer = None
                self.__condition.notifyAll()
        elif me in self.__readers:
            # We are a reader currently, take one nesting depth away.
            self.__readers[me] -= 1
            if not self.__readers[me]:
python
{ "resource": "" }
q255152
init_logging
validation
def init_logging(config):
    """Initialize base logger named 'wsgidav'.

    The base logger is filtered by the `verbose` configuration option.
    Log entries will have a time stamp and thread id.

    :Parameters:
        verbose : int
            Verbosity configuration (0..5)
        enable_loggers : string list
            List of module logger names, that will be switched to DEBUG level.

    Module loggers
    ~~~~~~~~~~~~~~
    Module loggers (e.g 'wsgidav.lock_manager') are named loggers, that can be
    independently switched to DEBUG mode.

    Except for verbosity, they will inherit settings from the base logger.

    They will suppress DEBUG level messages, unless they are enabled by
    passing their name to util.init_logging().

    If enabled, module loggers will print DEBUG messages, even if
    verbose == 3.

    Example initialize and use a module logger, that will generate output,
    if enabled (and verbose >= 2)::

        _logger = util.get_module_logger(__name__)
        [..]
        _logger.debug("foo: '{}'".format(s))

    This logger would be enabled by passing its name to init_logging()::

        enable_loggers = ["lock_manager",
                          "property_manager",
                          ]
        util.init_logging(2, enable_loggers)

    Log Level Matrix
    ~~~~~~~~~~~~~~~~
    +---------+--------+---------------------------------------------------------------+
    | Verbose | Option | Log level                                                     |
    | level   |        +-------------+------------------------+------------------------+
    |         |        | base logger | module logger(default) | module logger(enabled) |
    +=========+========+=============+========================+========================+
    |  0      | -qqq   | CRITICAL    | CRITICAL               | CRITICAL               |
    +---------+--------+-------------+------------------------+------------------------+
    |  1      | -qq    | ERROR       | ERROR                  | ERROR                  |
    +---------+--------+-------------+------------------------+------------------------+
    |  2      | -q     | WARN        | WARN                   | WARN                   |
    +---------+--------+-------------+------------------------+------------------------+
    |  3      |        | INFO        | INFO                   | **DEBUG**              |
    +---------+--------+-------------+------------------------+------------------------+
    |  4      | -v     | DEBUG       | DEBUG                  | DEBUG                  |
    +---------+--------+-------------+------------------------+------------------------+
    |  5
python
{ "resource": "" }
q255153
dynamic_instantiate_middleware
validation
def dynamic_instantiate_middleware(name, args, expand=None):
    """Import a class and instantiate with custom args.

    Example:
        name = "my.module.Foo"
        args_dict = {
            "bar": 42,
            "baz": "qux"
        }
        =>
        from my.module import Foo
        return Foo(bar=42, baz="qux")
    """

    def _expand(v):
        """Replace some string templates with defined values."""
        if expand and compat.is_basestring(v) and v.lower() in expand:
            return expand[v]
        return v

    try:
        the_class = dynamic_import_class(name)
        inst = None
        if type(args) in (tuple, list):
python
{ "resource": "" }
q255154
string_repr
validation
def string_repr(s):
    """Return a string as hex dump."""
    if compat.is_bytes(s):
        res = "{!r}: ".format(s)
        for b in s:
            if type(b) is str:  # Py2
python
{ "resource": "" }
q255155
byte_number_string
validation
def byte_number_string(
    number, thousandsSep=True, partition=False, base1024=True, appendBytes=True
):
    """Convert bytes into human-readable representation."""
    magsuffix = ""
    bytesuffix = ""

    if partition:
        magnitude = 0
        if base1024:
            while number >= 1024:
                magnitude += 1
                number = number >> 10
        else:
            while number >= 1000:
                magnitude += 1
                number /= 1000.0
        # TODO: use "9 KB" instead of "9K Bytes"?
        # TODO use 'kibi' for base 1024?
        # http://en.wikipedia.org/wiki/Kibi-#IEC_standard_prefixes
        magsuffix = ["", "K", "M", "G", "T", "P"][magnitude]

    if appendBytes:
python
{ "resource": "" }
q255156
read_and_discard_input
validation
def read_and_discard_input(environ):
    """Read 1 byte from wsgi.input, if this has not been done yet.

    Returning a response without reading from a request body might confuse the
    WebDAV client.
    This may happen, if an exception like '401 Not authorized', or
    '500 Internal error' was raised BEFORE anything was read from the request
    stream.

    See GC issue 13, issue 23
    See http://groups.google.com/group/paste-users/browse_frm/thread/fc0c9476047e9a47?hl=en

    Note that with persistent sessions (HTTP/1.1) we must make sure, that the
    'Connection: closed' header is set with the response, to prevent reusing
    the current stream.
    """
    if environ.get("wsgidav.some_input_read") or environ.get("wsgidav.all_input_read"):
        return
    cl = get_content_length(environ)
    assert cl >= 0
    if cl == 0:
        return

    READ_ALL = True

    environ["wsgidav.some_input_read"] = 1
    if READ_ALL:
        environ["wsgidav.all_input_read"] = 1

    wsgi_input = environ["wsgi.input"]

    # TODO: check if still required after GC issue 24 is fixed
    if hasattr(wsgi_input, "_consumed") and hasattr(wsgi_input, "length"):
        # Seems to be Paste's httpserver.LimitedLengthFile
        # see http://groups.google.com/group/paste-users/browse_thread/thread/fc0c9476047e9a47/aa4a3aa416016729?hl=en&lnk=gst&q=.input#aa4a3aa416016729  # noqa
        # Consume something if nothing was consumed *and* work
        # around a bug where paste.httpserver allows negative lengths
        if wsgi_input._consumed == 0 and wsgi_input.length > 0:
            # This seems to work even if there's 10K of input.
            if READ_ALL:
                n = wsgi_input.length
            else:
                n = 1
            body = wsgi_input.read(n)
            _logger.debug(
                "Reading {} bytes from potentially unread httpserver.LimitedLengthFile: '{}'...".format(
                    n, body[:50]
                )
            )
    elif hasattr(wsgi_input, "_sock") and hasattr(wsgi_input._sock, "settimeout"):
        # Seems to be a socket
        try:
python
{ "resource": "" }
q255157
join_uri
validation
def join_uri(uri, *segments):
    """Append segments to URI.

    Example: join_uri("/a/b", "c", "d")
    """
    sub = "/".join(segments)
python
{ "resource": "" }
q255158
is_child_uri
validation
def is_child_uri(parentUri, childUri):
    """Return True, if childUri is a child of parentUri.

    This function accounts for the fact that '/a/b/c' and 'a/b/c/' are
python
{ "resource": "" }
q255159
is_equal_or_child_uri
validation
def is_equal_or_child_uri(parentUri, childUri):
    """Return True, if childUri is a child of parentUri or maps to the same
    resource.

    Similar to <util.is_child_uri>_ , but this method also returns True, if
    parent equals child. ('/a/b' is considered
python
{ "resource": "" }
q255160
make_complete_url
validation
def make_complete_url(environ, localUri=None):
    """URL reconstruction according to PEP 333.

    @see https://www.python.org/dev/peps/pep-3333/#url-reconstruction
    """
    url = environ["wsgi.url_scheme"] + "://"

    if environ.get("HTTP_HOST"):
        url += environ["HTTP_HOST"]
    else:
        url += environ["SERVER_NAME"]

        if environ["wsgi.url_scheme"] == "https":
            if environ["SERVER_PORT"] != "443":
                url += ":" + environ["SERVER_PORT"]
        else:
            if environ["SERVER_PORT"] != "80":
python
{ "resource": "" }
q255161
parse_xml_body
validation
def parse_xml_body(environ, allow_empty=False):
    """Read request body XML into an etree.Element.

    Return None, if no request body was sent.
    Raise HTTP_BAD_REQUEST, if something else went wrong.

    TODO: this is a very relaxed interpretation: should we raise
    HTTP_BAD_REQUEST instead, if CONTENT_LENGTH is missing, invalid, or 0?

    RFC: For compatibility with HTTP/1.0 applications, HTTP/1.1 requests
    containing a message-body MUST include a valid Content-Length header field
    unless the server is known to be HTTP/1.1 compliant.
    If a request contains a message-body and a Content-Length is not given,
    the server SHOULD respond with 400 (bad request) if it cannot determine
    the length of the message, or with 411 (length required) if it wishes to
    insist on receiving a valid Content-Length."

    So I'd say, we should accept a missing CONTENT_LENGTH, and try to read
    the content anyway.
    But WSGI doesn't guarantee to support input.read() without length(?).
    At least it locked, when I tried it with a request that had a missing
    content-type and no body.

    Current approach: if CONTENT_LENGTH is

    - valid and >0: read body (exactly <CONTENT_LENGTH> bytes) and parse the
      result.
    - 0: Assume empty body and return None or raise exception.
    - invalid (negative or not a number): raise HTTP_BAD_REQUEST
    - missing: NOT: Try to read body until end and parse the result.
      BUT: assume '0'
    - empty string: WSGI allows it to be empty or absent: treated like
      'missing'.
    """
    #
    clHeader = environ.get("CONTENT_LENGTH", "").strip()
    #     content_length = -1  # read all of stream
    if clHeader == "":
        # No Content-Length given: read to end of stream
        # TODO: etree.parse() locks, if input is invalid?
        #     pfroot = etree.parse(environ["wsgi.input"]).getroot()
        # requestbody = environ["wsgi.input"].read()
        # TODO: read() should be called in a loop?
        requestbody = ""
    else:
        try:
            content_length = int(clHeader)
            if content_length < 0:
                raise DAVError(HTTP_BAD_REQUEST, "Negative content-length.")
        except ValueError:
python
{ "resource": "" }
q255162
send_status_response
validation
def send_status_response(environ, start_response, e, add_headers=None, is_head=False):
    """Start a WSGI response for a DAVError or status code."""
    status = get_http_status_string(e)
    headers = []
    if add_headers:
        headers.extend(add_headers)
    # if 'keep-alive' in environ.get('HTTP_CONNECTION', '').lower():
    #     headers += [
    #         ('Connection', 'keep-alive'),
    #     ]

    if e in (HTTP_NOT_MODIFIED, HTTP_NO_CONTENT):
        # See paste.lint: these code don't have content
        start_response(
            status, [("Content-Length", "0"), ("Date", get_rfc1123_time())] + headers
        )
        return [b""]

    if e in (HTTP_OK, HTTP_CREATED):
python
{ "resource": "" }
q255163
calc_base64
validation
def calc_base64(s):
    """Return base64 encoded binarystring."""
    s = compat.to_bytes(s)
python
{ "resource": "" }
q255164
read_timeout_value_header
validation
def read_timeout_value_header(timeoutvalue):
    """Return -1 if infinite, else return numofsecs."""
    timeoutsecs = 0
    timeoutvaluelist = timeoutvalue.split(",")
    for timeoutspec in timeoutvaluelist:
        timeoutspec = timeoutspec.strip()
        if timeoutspec.lower() == "infinite":
            return -1
        else:
            listSR = reSecondsReader.findall(timeoutspec)
            for secs in listSR:
python
{ "resource": "" }
q255165
parse_if_header_dict
validation
def parse_if_header_dict(environ):
    """Parse HTTP_IF header into a dictionary and lists, and cache the result.

    @see http://www.webdav.org/specs/rfc4918.html#HEADER_If
    """
    if "wsgidav.conditions.if" in environ:
        return

    if "HTTP_IF" not in environ:
        environ["wsgidav.conditions.if"] = None
        environ["wsgidav.ifLockTokenList"] = []
        return

    iftext = environ["HTTP_IF"].strip()
    if not iftext.startswith("<"):
        iftext = "<*>" + iftext

    ifDict = dict([])
    ifLockList = []

    resource1 = "*"
    for (tmpURLVar, URLVar, _tmpContentVar, contentVar) in reIfSeparator.findall(
        iftext
    ):
        if tmpURLVar != "":
            resource1 = URLVar
        else:
python
{ "resource": "" }
q255166
guess_mime_type
validation
def guess_mime_type(url):
    """Use the mimetypes module to lookup the type for an extension.

    This function also adds some extensions required for HTML5
    """
    (mimetype, _mimeencoding) = mimetypes.guess_type(url)
    if not mimetype:
        ext = os.path.splitext(url)[1]
        mimetype = _MIME_TYPES.get(ext)
python
{ "resource": "" }
q255167
Group.add_members
validation
def add_members(self, new_members):
    """
    Add objects to the group.

    Parameters
    ----------
    new_members : list
        A list of cobrapy objects to add to the group.
    """
    if isinstance(new_members, string_types) or \
python
{ "resource": "" }
q255168
Group.remove_members
validation
def remove_members(self, to_remove):
    """
    Remove objects from the group.

    Parameters
    ----------
    to_remove : list
        A list of cobra objects to remove from the group
    """
    if isinstance(to_remove, string_types) or \
python
{ "resource": "" }
q255169
geometric_fba
validation
def geometric_fba(model, epsilon=1E-06, max_tries=200, processes=None):
    """
    Perform geometric FBA to obtain a unique, centered flux distribution.

    Geometric FBA [1]_ formulates the problem as a polyhedron and
    then solves it by bounding the convex hull of the polyhedron.
    The bounding forms a box around the convex hull which reduces
    with every iteration and extracts a unique solution in this way.

    Parameters
    ----------
    model: cobra.Model
        The model to perform geometric FBA on.
    epsilon: float, optional
        The convergence tolerance of the model (default 1E-06).
    max_tries: int, optional
        Maximum number of iterations (default 200).
    processes : int, optional
        The number of parallel processes to run. If not explicitly passed,
        will be set from the global configuration singleton.

    Returns
    -------
    cobra.Solution
        The solution object containing all the constraints required
        for geometric FBA.

    References
    ----------
    .. [1] Smallbone, Kieran & Simeonidis, Vangelis. (2009).
           Flux balance analysis: A geometric perspective.
           Journal of theoretical biology. 258. 311-5.
           10.1016/j.jtbi.2009.01.027.
    """
    with model:
        # Variables' and constraints' storage variables.
        consts = []
        obj_vars = []
        updating_vars_cons = []

        # The first iteration.
        prob = model.problem
        add_pfba(model)  # Minimize the solution space to a convex hull.
        model.optimize()
        fva_sol = flux_variability_analysis(model, processes=processes)
        mean_flux = (fva_sol["maximum"] + fva_sol["minimum"]).abs() / 2

        # Set the gFBA constraints.
        for rxn in model.reactions:
            var = prob.Variable("geometric_fba_" + rxn.id,
python
{ "resource": "" }
q255170
DictList._generate_index
validation
def _generate_index(self):
    """rebuild the _dict index"""
python
{ "resource": "" }
q255171
DictList.get_by_any
validation
def get_by_any(self, iterable):
    """
    Get a list of members using several different ways of indexing

    Parameters
    ----------
    iterable : list (if not, turned into single element list)
        list where each element is either int (referring to an index in
        this DictList), string (a id of a member in this DictList) or
        member of this DictList for pass-through
python
{ "resource": "" }
q255172
DictList.query
validation
def query(self, search_function, attribute=None):
    """Query the list

    Parameters
    ----------
    search_function : a string, regular expression or function
        Used to find the matching elements in the list.
        - a regular expression (possibly compiled), in which case the
          given attribute of the object should match the regular expression.
        - a function which takes one argument and returns True for
          desired values

    attribute : string or None
        the name attribute of the object to passed as argument to the
        `search_function`. If this is None, the object itself is used.

    Returns
    -------
    DictList
        a new list of objects which match the query

    Examples
    --------
    >>> import cobra.test
    >>> model = cobra.test.create_test_model('textbook')
    >>> model.reactions.query(lambda x: x.boundary)
    >>> import re
    >>> regex = re.compile('^g', flags=re.IGNORECASE)
    >>> model.metabolites.query(regex, attribute='name')
    """

    def select_attribute(x):
        if attribute is None:
            return x
        else:
python
{ "resource": "" }
q255173
DictList._replace_on_id
validation
def _replace_on_id(self, new_object):
    """Replace an object by another with the same id."""
    the_id = new_object.id
python
{ "resource": "" }
q255174
DictList.append
validation
def append(self, object):
    """append object to end"""
    the_id = object.id
    self._check(the_id)
python
{ "resource": "" }
q255175
DictList.union
validation
def union(self, iterable):
    """adds elements with id's not already in the model"""
    _dict = self._dict
    append =
python
{ "resource": "" }
q255176
DictList.extend
validation
def extend(self, iterable):
    """extend list by appending elements from the iterable"""
    # Sometimes during initialization from an older pickle, _dict
    # will not have initialized yet, because the initialization class was
    # left unspecified. This is an issue because unpickling calls
    # DictList.extend, which requires the presence of _dict. Therefore,
    # the issue is caught and addressed here.
    if not hasattr(self, "_dict") or self._dict is None:
        self._dict = {}
    _dict = self._dict
    current_length = len(self)
    list.extend(self, iterable)
    for i, obj in enumerate(islice(self, current_length, None), current_length):
        the_id = obj.id
        if the_id not in _dict:
            _dict[the_id] = i
python
{ "resource": "" }
q255177
DictList._extend_nocheck
validation
def _extend_nocheck(self, iterable):
    """extends without checking for uniqueness

    This function should only be used internally by DictList when it
    can guarantee elements are already unique (as in when coming from
    self or other DictList). It will be faster
python
{ "resource": "" }
q255178
DictList.index
validation
def index(self, id, *args):
    """Determine the position in the list

    id: A string or a :class:`~cobra.core.Object.Object`
    """
    # because values are unique, start and stop are not relevant
    if isinstance(id, string_types):
        try:
            return self._dict[id]
        except KeyError:
            raise ValueError("%s not found" % id)
    try:
python
{ "resource": "" }
q255179
DictList.insert
validation
def insert(self, index, object):
    """insert object before index"""
    self._check(object.id)
    list.insert(self, index, object)
    # all subsequent entries now have been shifted up by 1
    _dict = self._dict
python
{ "resource": "" }
q255180
Metabolite.elements
validation
def elements(self):
    """Dictionary of elements as keys and their count in the metabolite
    as integer. When set, the `formula` property is update accordingly"""
    tmp_formula = self.formula
    if tmp_formula is None:
        return {}
    # necessary for some old pickles which use the deprecated
    # Formula class
    tmp_formula = str(self.formula)
    # commonly occurring characters in incorrectly constructed formulas
    if "*" in tmp_formula:
        warn("invalid character '*' found in formula '%s'" % self.formula)
        tmp_formula = tmp_formula.replace("*", "")
    if "(" in tmp_formula or ")" in tmp_formula:
        warn("invalid formula (has parenthesis) in '%s'" % self.formula)
        return None
    composition = {}
    parsed = element_re.findall(tmp_formula)
    for (element, count) in parsed:
        if count == '':
            count = 1
        else:
            try:
python
{ "resource": "" }
q255181
Metabolite.shadow_price
validation
def shadow_price(self):
    """
    The shadow price in the most recent solution.

    Shadow price is the dual value of the corresponding constraint in
    the model.

    Warnings
    --------
    * Accessing shadow prices through a `Solution` object is the safer,
      preferred, and only guaranteed to be correct way. You can see how
      to do so easily in the examples.
    * Shadow price is retrieved from the currently defined
      `self._model.solver`. The solver status is checked but there are no
      guarantees that the current solver state is the one you are looking
      for.
    * If you modify the underlying model after an optimization, you will
      retrieve the old optimization values.

    Raises
    ------
    RuntimeError
        If the underlying model was never optimized beforehand or the
        metabolite is not part of a model.
    OptimizationError
        If the solver status is anything other than 'optimal'.

    Examples
    --------
    >>> import cobra
    >>> import cobra.test
    >>> model = cobra.test.create_test_model("textbook")
    >>> solution = model.optimize()
    >>> model.metabolites.glc__D_e.shadow_price
    -0.09166474637510488
python
{ "resource": "" }
q255182
to_yaml
validation
def to_yaml(model, sort=False, **kwargs):
    """
    Return the model as a YAML document.

    ``kwargs`` are passed on to ``yaml.dump``.

    Parameters
    ----------
    model : cobra.Model
        The cobra model to represent.
    sort : bool, optional
        Whether to sort the metabolites, reactions, and
python
{ "resource": "" }
q255183
save_yaml_model
validation
def save_yaml_model(model, filename, sort=False, **kwargs):
    """
    Write the cobra model to a file in YAML format.

    ``kwargs`` are passed on to ``yaml.dump``.

    Parameters
    ----------
    model : cobra.Model
        The cobra model to represent.
    filename : str or file-like
        File path or descriptor that the YAML representation should be
        written to.
    sort : bool, optional
        Whether to sort the metabolites, reactions, and genes or maintain the
        order defined in the model.

    See Also
    --------
    to_yaml : Return a string representation.
python
{ "resource": "" }
q255184
load_yaml_model
validation
def load_yaml_model(filename):
    """
    Load a cobra model from a file in YAML format.

    Parameters
    ----------
    filename : str or file-like
        File path or descriptor that contains the YAML document describing
        the cobra model.

    Returns
    -------
    cobra.Model
        The cobra model as represented in the YAML document.

    See Also
python
{ "resource": "" }
q255185
add_pfba
validation
def add_pfba(model, objective=None, fraction_of_optimum=1.0):
    """Add pFBA objective

    Add objective to minimize the summed flux of all reactions to the
    current objective.

    See Also
    -------
    pfba

    Parameters
    ----------
    model : cobra.Model
        The model to add the objective to
    objective :
        An objective to set in combination with the pFBA objective.
    fraction_of_optimum : float
        Fraction of optimum which must be maintained. The original objective
        reaction is constrained to be greater than maximal_value *
        fraction_of_optimum.
    """
    if objective is not None:
        model.objective = objective
    if model.solver.objective.name == '_pfba_objective':
        raise ValueError('The model already has
python
{ "resource": "" }
q255186
_process_flux_dataframe
validation
def _process_flux_dataframe(flux_dataframe, fva, threshold, floatfmt):
    """Some common methods for processing a database of flux information into
    print-ready formats. Used in both model_summary and metabolite_summary."""

    abs_flux = flux_dataframe['flux'].abs()
    flux_threshold = threshold * abs_flux.max()

    # Drop unused boundary fluxes
    if fva is None:
        flux_dataframe = flux_dataframe.loc[
            abs_flux >= flux_threshold, :].copy()
    else:
        flux_dataframe = flux_dataframe.loc[
            (abs_flux >= flux_threshold) |
            (flux_dataframe['fmin'].abs() >= flux_threshold) |
            (flux_dataframe['fmax'].abs() >= flux_threshold), :].copy()

        # Why set to zero? If included show true value?
        # flux_dataframe.loc[
        #     flux_dataframe['flux'].abs() < flux_threshold, 'flux'] = 0

    # Make all fluxes positive
    if fva is None:
        flux_dataframe['is_input'] = (flux_dataframe['flux'] >= 0)
        flux_dataframe['flux'] = flux_dataframe['flux'].abs()
    else:

        def get_direction(flux, fmin, fmax):
            """ decide whether or not to reverse a flux to make it positive """

            if flux < 0:
                return -1
            elif flux > 0:
                return 1
            elif (fmax > 0) & (fmin <= 0):
                return 1
            elif (fmax < 0) & (fmin >= 0):
                return -1
            elif ((fmax + fmin) / 2) < 0:
                return -1
            else:
                return 1

        sign = flux_dataframe.apply(
            lambda x: get_direction(x.flux, x.fmin, x.fmax), 1)
python
{ "resource": "" }
q255187
linear_reaction_coefficients
validation
def linear_reaction_coefficients(model, reactions=None):
    """Coefficient for the reactions in a linear objective.

    Parameters
    ----------
    model : cobra model
        the model object that defined the objective
    reactions : list
        an optional list for the reactions to get the coefficients for. All
        reactions if left missing.

    Returns
    -------
    dict
        A dictionary where the key is the reaction object and the value is
        the corresponding coefficient. Empty dictionary if there are no
        linear terms in the objective.
    """
    linear_coefficients = {}
    reactions = model.reactions if not reactions else reactions
    try:
        objective_expression = model.solver.objective.expression
        coefficients = objective_expression.as_coefficients_dict()
python
{ "resource": "" }
q255188
_valid_atoms
validation
def _valid_atoms(model, expression):
    """Check whether a sympy expression references the correct variables.

    Parameters
    ----------
    model : cobra.Model
        The model in which to check for variables.
    expression : sympy.Basic
        A sympy expression.

    Returns
    -------
    boolean
python
{ "resource": "" }
q255189
set_objective
validation
def set_objective(model, value, additive=False):
    """Set the model objective.

    Parameters
    ----------
    model : cobra model
       The model to set the objective for
    value : model.problem.Objective,
            e.g. optlang.glpk_interface.Objective, sympy.Basic or dict

        If the model objective is linear, the value can be a new Objective
        object or a dictionary with linear coefficients where each key is a
        reaction and the element the new coefficient (float).

        If the objective is not linear and `additive` is true, only values
        of class Objective.

    additive : bool
        If true, add the terms to the current objective, otherwise start with
        an empty objective.
    """
    interface = model.problem
    reverse_value = model.solver.objective.expression
    reverse_value = interface.Objective(
        reverse_value, direction=model.solver.objective.direction, sloppy=True)

    if isinstance(value, dict):
        if not model.objective.is_Linear:
            raise ValueError('can only update
python
{ "resource": "" }
q255190
interface_to_str
validation
def interface_to_str(interface):
    """Give a string representation for an optlang interface.

    Parameters
    ----------
    interface : string, ModuleType
        Full name of the interface in optlang or cobra representation.
        For instance 'optlang.glpk_interface' or 'optlang-glpk'.

    Returns
    -------
    string
python
{ "resource": "" }
q255191
get_solver_name
validation
def get_solver_name(mip=False, qp=False):
    """Select a solver for a given optimization problem.

    Parameters
    ----------
    mip : bool
        Does the solver require mixed integer linear programming capabilities?
    qp : bool
        Does the solver require quadratic programming capabilities?

    Returns
    -------
    string
        The name of feasible solver.

    Raises
    ------
    SolverNotFound
        If no suitable solver could be found.
    """
    if len(solvers) == 0:
        raise SolverNotFound("no solvers installed")
    # Those lists need to be updated as optlang implements
python
{ "resource": "" }
q255192
choose_solver
validation
def choose_solver(model, solver=None, qp=False):
    """Choose a solver given a solver name and model.

    This will choose a solver compatible with the model and required
    capabilities. Also respects model.solver where it can.

    Parameters
    ----------
    model : a cobra model
        The model for which to choose the solver.
    solver : str, optional
        The name of the solver to be used.
    qp : boolean, optional
        Whether the solver needs Quadratic Programming capabilities.

    Returns
    -------
    solver : an optlang solver interface
        Returns a valid solver for the problem.

    Raises
    ------
python
{ "resource": "" }
q255193
add_cons_vars_to_problem
validation
def add_cons_vars_to_problem(model, what, **kwargs):
    """Add variables and constraints to a Model's solver object.

    Useful for variables and constraints that can not be expressed with
    reactions and lower/upper bounds. Will integrate with the Model's context
    manager in order to revert changes upon leaving the context.

    Parameters
python
{ "resource": "" }
q255194
remove_cons_vars_from_problem
validation
def remove_cons_vars_from_problem(model, what):
    """Remove variables and constraints from a Model's solver object.

    Useful to temporarily remove variables and constraints from a Models's
    solver object.

    Parameters
    ----------
    model : a cobra model
        The model from which to remove the variables and constraints.
    what : list or tuple of optlang variables or constraints.
        The variables or
python
{ "resource": "" }
q255195
add_absolute_expression
validation
def add_absolute_expression(model, expression, name="abs_var", ub=None,
                            difference=0, add=True):
    """Add the absolute value of an expression to the model.

    Also defines a variable for the absolute value that can be used in other
    objectives or constraints.

    Parameters
    ----------
    model : a cobra model
        The model to which to add the absolute expression.
    expression : A sympy expression
        Must be a valid expression within the Model's solver object. The
        absolute value is applied automatically on the expression.
    name : string
        The name of the newly created variable.
    ub : positive float
        The upper bound for the variable.
    difference : positive float
        The difference between the expression and the variable.
    add : bool
        Whether to add the variable to the model at once.

    Returns
    -------
    namedtuple
        A named tuple with variable and two constraints (upper_constraint,
        lower_constraint) describing the new variable and the constraints
        that assign the absolute value of the expression to it.
    """
    Components = namedtuple('Components', ['variable', 'upper_constraint',
python
{ "resource": "" }
q255196
fix_objective_as_constraint
validation
def fix_objective_as_constraint(model, fraction=1, bound=None,
                                name='fixed_objective_{}'):
    """Fix current objective as an additional constraint.

    When adding constraints to a model, such as done in pFBA which minimizes
    total flux, these constraints can become too powerful, resulting in
    solutions that satisfy optimality but sacrifice too much for the original
    objective function. To avoid that, we can fix the current objective value
    as a constraint to ignore solutions that give a lower (or higher
    depending on the optimization direction) objective value than the
    original model.

    When done with the model as a context, the modification to the objective
    will be reverted when exiting that context.

    Parameters
    ----------
    model : cobra.Model
        The model to operate on
    fraction : float
        The fraction of the optimum the objective is allowed to reach.
    bound : float, None
        The bound to use instead of fraction of maximum optimal value. If
        not None, fraction is ignored.
    name : str
        Name of the objective. May contain one `{}` placeholder which is
        filled with the name of the old objective.
python
{ "resource": "" }
q255197
check_solver_status
validation
def check_solver_status(status, raise_error=False):
    """Perform standard checks on a solver's status."""
    if status == OPTIMAL:
        return
    elif (status in has_primals) and not raise_error:
        warn("solver status is '{}'".format(status), UserWarning)
    elif status is None:
        raise OptimizationError(
python
{ "resource": "" }
q255198
assert_optimal
validation
def assert_optimal(model, message='optimization failed'):
    """Assert model solver status is optimal.

    Do nothing if model solver status is optimal, otherwise throw
    appropriate exception depending on the status.

    Parameters
    ----------
    model : cobra.Model
        The model to check the solver status for.
    message : str (optional)
        Message to
python
{ "resource": "" }
q255199
add_lp_feasibility
validation
def add_lp_feasibility(model):
    """
    Add a new objective and variables to ensure a feasible solution.

    The optimized objective will be zero for a feasible solution and
    otherwise represent the distance from feasibility (please see [1]_
    for more information).

    Parameters
    ----------
    model : cobra.Model
        The model whose feasibility is to be tested.

    References
    ----------
    .. [1] Gomez, Jose A., Kai Höffner, and Paul I. Barton. “DFBAlab: A Fast
       and Reliable MATLAB Code for Dynamic Flux Balance Analysis.” BMC
       Bioinformatics 15, no. 1 (December 18, 2014): 409.
       https://doi.org/10.1186/s12859-014-0409-8.
    """
    obj_vars = []
    prob = model.problem
    for met in model.metabolites:
        s_plus = prob.Variable("s_plus_" + met.id, lb=0)
python
{ "resource": "" }