code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def block_ranges(start_block, last_block, step=5):
    """Yield inclusive (from_block, to_block) pairs from start_block to last_block.

    Ranges do not overlap, so each pair can be used directly as the inclusive
    ``fromBlock``/``toBlock`` JSON-RPC arguments.
    """
    if last_block is not None and start_block > last_block:
        raise TypeError(
            "Incompatible start and stop arguments.",
            "Start must be less than or equal to stop.")
    # NOTE(review): `last_block + 1` raises TypeError when last_block is None,
    # even though the guard above tolerates None -- confirm callers never pass None.
    return (
        (segment_start, segment_stop - 1)
        for segment_start, segment_stop
        in segment_count(start_block, last_block + 1, step)
    )
Returns 2-tuple ranges describing ranges of block from start_block to last_block Ranges do not overlap to facilitate use as ``toBlock``, ``fromBlock`` json-rpc arguments, which are both inclusive.
def _xml_for_episode_index(self, ep_ind):
    """Return the model XML string recorded for the episode at index ``ep_ind``."""
    episode_attrs = self.demo_file["data/{}".format(ep_ind)].attrs
    model_path = os.path.join(self.demo_path, "models", episode_attrs["model_file"])
    with open(model_path, "r") as xml_handle:
        return xml_handle.read()
Helper method to retrieve the corresponding model xml string for the passed episode index.
def get_bucket_props(self, bucket):
    """Serialize a bucket-properties request and deserialize the response."""
    msg_code = riak.pb.messages.MSG_CODE_GET_BUCKET_REQ
    codec = self._get_codec(msg_code)
    request = codec.encode_get_bucket_props(bucket)
    _resp_code, response = self._request(request, codec)
    return codec.decode_bucket_props(response.props)
Serialize bucket property request and deserialize response
def doFindAny(self, WHAT=None, SORT=None, SKIP=None, MAX=None, LOP='AND', **params):
    """Perform the ``-findany`` command.

    Extra keyword arguments are registered as database parameters before the
    action is executed.

    Fix: the mutable default arguments ``WHAT={}`` and ``SORT=[]`` were
    replaced with ``None`` sentinels so state cannot leak between calls if
    the defaults are mutated downstream (e.g. by ``_preFind``).
    """
    if WHAT is None:
        WHAT = {}
    if SORT is None:
        SORT = []
    self._preFind(WHAT, SORT, SKIP, MAX, LOP)
    for key in params:
        self._addDBParam(key, params[key])
    return self._doAction('-findany')
This function will perform the command -findany.
def next_url(request):
    """Return the redirect target from the ``next`` request parameter, or None.

    The target is only returned when it is non-empty and safe for this host.
    """
    target = request.GET.get("next", request.POST.get("next", ""))
    if target and is_safe_url(target, host=request.get_host()):
        return target
    return None
Returns URL to redirect to from the ``next`` param in the request.
def cli_forms(self, *args):
    """Log which schemata define a form and which are missing one."""
    with_form = []
    without_form = []
    for key, item in schemastore.items():
        if 'form' in item and len(item['form']) > 0:
            with_form.append(key)
        else:
            without_form.append(key)
    self.log('Schemata with form:', with_form)
    self.log('Missing forms:', without_form)
List all available form definitions
def get_program_path():
    """Return the pyspread installation directory (with a trailing separator).

    The path is the parent of the directory containing this module.
    """
    source_dir = os.path.dirname(__file__)
    parent_parts = source_dir.split(os.sep)[:-1]
    return os.sep.join(parent_parts) + os.sep
Returns the path in which pyspread is installed
def register_measurements(self, end, rows, between, refresh_presision):
    """Append the collected rows (if any) and close this configuration.

    Does nothing when the configuration already ended or no rows were given.
    """
    if self.end or len(rows) == 0:
        return
    self.append_rows(rows, between, refresh_presision)
    self.go_inactive(end)
    self.save()
Register the measurements if there are any and close the configuration; if there are no measurements, clean the temporary file on disk. Keyword arguments: f -- open memory file end -- datetime of the moment when the configuration goes inactive between -- time between integral_measurements in seconds refresh_presision -- time between sensor values that compose the integral_measurements
def upload_from_fileobject(f, profile=None, label=None):
    """Save the image in ``f`` with a TMP prefix and return its img_id.

    Raises RuntimeError when the file's format is not allowed by the profile.
    """
    if profile is None:
        profile = 'default'
    conf = get_profile_configs(profile)
    f.seek(0)
    if not is_image(f, types=conf['TYPES']):
        allowed = ', '.join(fmt.upper() for fmt in conf['TYPES'])
        msg = (('Format of uploaded file is not allowed. '
                'Allowed formats is: %(formats)s.') % {'formats': allowed})
        raise RuntimeError(msg)
    return _custom_upload(f, profile, label, conf)
Saves image from f with TMP prefix and returns img_id.
def load_directory(self, directory, ext=None):
    """Recursively load RiveScript documents from ``directory``.

    :param str directory: directory to load replies from.
    :param ext: file extension(s) to treat as RiveScript documents;
        defaults to ``['.rive', '.rs']``.
    """
    self._say("Loading from directory: " + directory)
    if ext is None:
        ext = ['.rive', '.rs']
    elif type(ext) == str:
        ext = [ext]
    if not os.path.isdir(directory):
        self._warn("Error: " + directory + " is not a directory.")
        return
    suffixes = tuple(ext)
    for root, _subdirs, files in os.walk(directory):
        for filename in files:
            if filename.lower().endswith(suffixes):
                self.load_file(os.path.join(root, filename))
Load RiveScript documents from a directory. :param str directory: The directory of RiveScript documents to load replies from. :param []str ext: List of file extensions to consider as RiveScript documents. The default is ``[".rive", ".rs"]``.
def _resolve_device_type(self, device):
    """Classify ``device`` as 'server' or 'loadbalancer'.

    Raises exc.InvalidDeviceType for anything else. Fake test classes are
    accepted when the test package is importable.
    """
    try:
        from tests.unit import fakes
        server_types = (pyrax.CloudServer, fakes.FakeServer)
        lb_types = (CloudLoadBalancer, fakes.FakeLoadBalancer,
                fakes.FakeDNSDevice)
    except ImportError:
        server_types = (pyrax.CloudServer, )
        lb_types = (CloudLoadBalancer, )
    if isinstance(device, server_types):
        return "server"
    if isinstance(device, lb_types):
        return "loadbalancer"
    raise exc.InvalidDeviceType("The device '%s' must be a CloudServer "
            "or a CloudLoadBalancer." % device)
Given a device, determines if it is a CloudServer, a CloudLoadBalancer, or an invalid device.
def find_rt_jar(javahome=None):
    """Locate the Java standard-library jar at ``<javahome>/jre/lib/rt.jar``.

    When ``javahome`` is not given it is taken from JAVA_HOME, an OS X
    specific lookup, or the ``java`` binary on PATH, in that order.

    Raises ExtensionError when the jar is not found.
    """
    if not javahome:
        if 'JAVA_HOME' in os.environ:
            javahome = os.environ['JAVA_HOME']
        elif sys.platform == 'darwin':
            javahome = _find_osx_javahome()
        else:
            javahome = _get_javahome_from_java(_find_java_binary())
    rt_path = os.path.join(javahome, 'jre', 'lib', 'rt.jar')
    if not os.path.isfile(rt_path):
        raise ExtensionError(
            'Could not find rt.jar: {} is not a file'.format(rt_path))
    return rt_path
Find the path to the Java standard library jar. The jar is expected to exist at the path 'jre/lib/rt.jar' inside a standard Java installation directory. The directory is found using the following procedure: 1. If the javahome argument is provided, use the value as the directory. 2. If the JAVA_HOME environment variable is set, use the value as the directory. 3. Find the location of the ``java`` binary in the current PATH and compute the installation directory from this location. Args: javahome: A path to a Java installation directory (optional).
def _from_dict(cls, _dict):
    """Initialize an EntitiesResult object from a json dictionary."""
    args = {}
    # Scalars are copied through unchanged.
    for simple_key in ('type', 'text', 'relevance', 'count'):
        if simple_key in _dict:
            args[simple_key] = _dict.get(simple_key)
    # Nested structures are rebuilt through their own _from_dict hooks.
    if 'mentions' in _dict:
        args['mentions'] = [
            EntityMention._from_dict(x) for x in (_dict.get('mentions'))
        ]
    if 'emotion' in _dict:
        args['emotion'] = EmotionScores._from_dict(_dict.get('emotion'))
    if 'sentiment' in _dict:
        args['sentiment'] = FeatureSentimentResults._from_dict(
            _dict.get('sentiment'))
    if 'disambiguation' in _dict:
        args['disambiguation'] = DisambiguationResult._from_dict(
            _dict.get('disambiguation'))
    return cls(**args)
Initialize an EntitiesResult object from a json dictionary.
def transform_to(ext):
    """Decorator that derives ``out_file`` from the input path using ``ext``.

    Wraps functions shaped like ``f(in_file, out_dir=None, out_file=None)``.
    When no ``out_file`` is supplied, one is built next to the input (or in
    ``out_dir``) with the extension replaced; the wrapped function is only
    called when that output does not already exist.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            out_file = kwargs.get("out_file", None)
            if not out_file:
                in_path = kwargs.get("in_file", args[0])
                out_dir = kwargs.get("out_dir", os.path.dirname(in_path))
                safe_mkdir(out_dir)
                out_name = replace_suffix(os.path.basename(in_path), ext)
                out_file = os.path.join(out_dir, out_name)
            kwargs["out_file"] = out_file
            if not file_exists(out_file):
                out_file = func(*args, **kwargs)
            return out_file
        return wrapper
    return decorator
Decorator to create an output filename from an input filename with the specified extension. Changes the extension, in_file is transformed to a new type. Takes functions like this to decorate: f(in_file, out_dir=None, out_file=None) or, f(in_file=in_file, out_dir=None, out_file=None) examples: @transform(".bam") f("the/input/path/file.sam") -> f("the/input/path/file.sam", out_file="the/input/path/file.bam") @transform(".bam") f("the/input/path/file.sam", out_dir="results") -> f("the/input/path/file.sam", out_file="results/file.bam")
def _item_to_blob(iterator, item):
    """Convert one JSON resource dict into a Blob bound to ``iterator.bucket``.

    Assumes the ``bucket`` attribute was attached to the iterator after
    creation.
    """
    blob = Blob(item.get("name"), bucket=iterator.bucket)
    blob._set_properties(item)
    return blob
Convert a JSON blob to the native object. .. note:: This assumes that the ``bucket`` attribute has been added to the iterator after being created. :type iterator: :class:`~google.api_core.page_iterator.Iterator` :param iterator: The iterator that has retrieved the item. :type item: dict :param item: An item to be converted to a blob. :rtype: :class:`.Blob` :returns: The next blob in the page.
def from_geojson(geojson, srid=4326):
    """Create a Geometry from a GeoJSON mapping.

    The SRID can be overridden from the expected 4326.
    """
    geo_type = geojson["type"].lower()
    if geo_type == "geometrycollection":
        members = [Geometry.from_geojson(member, srid=None)
                   for member in geojson["geometries"]]
        return GeometryCollection(members, srid)
    simple_types = {"point": Point, "linestring": LineString,
                    "polygon": Polygon}
    if geo_type in simple_types:
        return simple_types[geo_type](geojson["coordinates"], srid=srid)
    multi_types = {"multipoint": (MultiPoint, Point),
                   "multilinestring": (MultiLineString, LineString),
                   "multipolygon": (MultiPolygon, Polygon)}
    if geo_type in multi_types:
        container_cls, member_cls = multi_types[geo_type]
        parts = _MultiGeometry._multi_from_geojson(geojson, member_cls)
        return container_cls(parts, srid=srid)
Create a Geometry from a GeoJSON. The SRID can be overridden from the expected 4326.
def load(cls, path_to_file):
    """Load image bytes from disk, guessing the MIME type from the file name.

    :param path_to_file: path to the source file
    :type path_to_file: str
    :return: a `pyowm.image.Image` instance
    """
    import mimetypes
    mimetypes.init()
    mime = mimetypes.guess_type('file://%s' % path_to_file)[0]
    guessed_type = ImageTypeEnum.lookup_by_mime_type(mime)
    with open(path_to_file, 'rb') as source:
        payload = source.read()
    return Image(payload, image_type=guessed_type)
Loads the image data from a file on disk and tries to guess the image MIME type :param path_to_file: path to the source file :type path_to_file: str :return: a `pyowm.image.Image` instance
def visit_set(self, node):
    """Return an astroid.Set node rendered as source text."""
    rendered_elements = ", ".join(element.accept(self) for element in node.elts)
    return "{%s}" % rendered_elements
return an astroid.Set node as string
def _write_header(self):
    """Write all header lines, then the column-header row.

    Sample names are appended to the column header when present.
    """
    for header_line in self.header.lines:
        print(header_line.serialize(), file=self.stream)
    sample_names = self.header.samples.names
    if sample_names:
        columns = list(parser.REQUIRE_SAMPLE_HEADER) + sample_names
        print("\t".join(columns), file=self.stream)
    else:
        print("\t".join(parser.REQUIRE_NO_SAMPLE_HEADER), file=self.stream)
Write out the header
def notebook_to_md(notebook):
    """Convert a notebook to its Markdown representation, using Pandoc.

    The notebook is serialized to a temporary file which Pandoc converts in
    place; the result is read back and returned with normalized line endings.

    Fix: the temporary file is now removed in a ``finally`` block so it is
    not leaked when serialization or the Pandoc call fails.
    """
    tmp_file = tempfile.NamedTemporaryFile(delete=False)
    try:
        tmp_file.write(ipynb_writes(notebook).encode('utf-8'))
        tmp_file.close()
        pandoc(u'--from ipynb --to markdown -s --atx-headers --wrap=preserve --preserve-tabs',
               tmp_file.name, tmp_file.name)
        with open(tmp_file.name, encoding='utf-8') as opened_file:
            text = opened_file.read()
    finally:
        tmp_file.close()
        os.unlink(tmp_file.name)
    return '\n'.join(text.splitlines())
Convert a notebook to its Markdown representation, using Pandoc
def memberships(self, group, include=None):
    """Return the GroupMemberships for this group.

    :param group: Group object or id
    :param include: list of objects to sideload. `Side-loading API Docs
        <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__.
    """
    url = self._build_url(self.endpoint.memberships(id=group, include=include))
    return self._get(url)
Return the GroupMemberships for this group. :param include: list of objects to sideload. `Side-loading API Docs <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__. :param group: Group object or id
def install(self, version):
    """Install the given version with pyenv.

    The ``-s`` flag makes pyenv skip versions that are already installed.
    """
    return self._pyenv(
        'install',
        '-s',
        str(version),
        timeout=PIPENV_INSTALL_TIMEOUT,
    )
Install the given version with pyenv. The version must be a ``Version`` instance representing a version found in pyenv. A ValueError is raised if the given version does not have a match in pyenv. A PyenvError is raised if the pyenv command fails.
def get_templates(self):
    """Return the list of template names this object uses.

    ``use`` may be a list of names or a comma-separated string; blank
    entries are dropped.

    :return: list of templates
    :rtype: list
    """
    use = getattr(self, 'use', '')
    raw_names = use if isinstance(use, list) else use.split(',')
    return [name.strip() for name in raw_names if name.strip()]
Get the list of templates this object uses :return: list of templates :rtype: list
def evaluate(self, source, cards=None) -> str:
    """Pick from a single combined card pool without replacement,
    weighting each filtered set of cards against the total."""
    from ..utils import weighted_card_choice
    if cards:
        # Explicit cards: a single, uniformly weighted pool.
        self.weights = [1]
        card_sets = [list(cards)]
    elif not self.weightedfilters:
        self.weights = [1]
        card_sets = [self.find_cards(source)]
    else:
        merged_filters = [{**weighted, **self.filters}
                          for weighted in self.weightedfilters]
        card_sets = [self.find_cards(source, **flt) for flt in merged_filters]
    return weighted_card_choice(source, self.weights, card_sets, self.count)
This picks from a single combined card pool without replacement, weighting each filtered set of cards against the total
def _GetAppYamlHostname(application_path, open_func=open):
    """Build the hostname for this app based on the name in app.yaml.

    Args:
      application_path: Directory containing the app.yaml file.
      open_func: File opener, overridable for unit tests.

    Returns:
      "<application>.appspot.com", or None if the file can't be read or a
      usable application name can't be derived (missing name, or a name
      containing ':').
    """
    try:
        app_yaml_file = open_func(os.path.join(application_path or '.', 'app.yaml'))
        config = yaml.safe_load(app_yaml_file.read())
    except IOError:
        return None
    application = config.get('application')
    if not application:
        return None
    if ':' in application:
        return None
    # Drop any "partition~" prefix, keeping only what follows the last '~'.
    application = application.rpartition('~')[2]
    if not application:
        return None
    return '%s.appspot.com' % application
Build the hostname for this app based on the name in app.yaml. Args: application_path: A string with the path to the AppEngine application. This should be the directory containing the app.yaml file. open_func: Function to call to open a file. Used to override the default open function in unit tests. Returns: A hostname, usually in the form of "myapp.appspot.com", based on the application name in the app.yaml file. If the file can't be found or there's a problem building the name, this will return None.
def bump(match):
    """Bump the patch number of a dotted version, carrying over at 10.

    ``match`` must provide three groups: prefix, "major.minor.patch" and
    suffix. The old and new versions are printed, and the full replacement
    string is returned.
    """
    prefix, old_version, suffix = match.groups()
    major, minor, patch = (int(part) for part in old_version.split('.'))
    patch += 1
    if patch == 10:
        patch = 0
        minor += 1
        if minor == 10:
            minor = 0
            major += 1
    new_version = '{0}.{1}.{2}'.format(major, minor, patch)
    print('{0} => {1}'.format(old_version, new_version))
    return prefix + new_version + suffix
Bumps the version
def sign(self, byts):
    """Compute the ECDSA signature for the given bytestream.

    Args:
        byts (bytes): The bytes to sign.

    Returns:
        bytes: The signature bytes.
    """
    digest_algo = c_hashes.SHA256()
    hasher = c_hashes.Hash(digest_algo, default_backend())
    hasher.update(byts)
    digest = hasher.finalize()
    # The digest is already computed, so tell the backend not to hash again.
    return self.priv.sign(digest, c_ec.ECDSA(c_utils.Prehashed(digest_algo)))
Compute the ECC signature for the given bytestream. Args: byts (bytes): The bytes to sign. Returns: bytes: The ECC signature bytes.
def _to_chimera(M, N, L, q):
    "Converts a qubit's linear index to chimera coordinates."
    row, remainder = divmod(q, N * L * 2)
    column, remainder = divmod(remainder, L * 2)
    half, unit = divmod(remainder, L)
    return (row, column, half, unit)
Converts a qubit's linear index to chimera coordinates.
def remove_from_s3(self, file_name, bucket_name):
    """Delete ``file_name`` from ``bucket_name`` on S3.

    Returns True on success, False when the bucket is missing or the
    delete call fails.
    """
    try:
        self.s3_client.head_bucket(Bucket=bucket_name)
    except botocore.exceptions.ClientError as e:
        if int(e.response['Error']['Code']) == 404:
            return False
    try:
        self.s3_client.delete_object(Bucket=bucket_name, Key=file_name)
    except (botocore.exceptions.ParamValidationError,
            botocore.exceptions.ClientError):
        return False
    return True
Given a file name and a bucket, remove it from S3. There's no reason to keep the file hosted on S3 once its been made into a Lambda function, so we can delete it from S3. Returns True on success, False on failure.
def _convert_to_var(self, graph, var_res):
    """Create tf.Variables (named "tf_<key>") from a dict of numpy arrays.

    Keys mapping to None are kept as None in the result.
    """
    with graph.as_default():
        variables = {}
        for name, array in var_res.items():
            if array is None:
                variables[name] = None
            else:
                variables[name] = tf.Variable(array, name="tf_%s" % name)
        return variables
Create tf.Variables from a list of numpy arrays var_res: dictionary of numpy arrays with the key names corresponding to var
def red(cls):
    "Make the text foreground color red."
    attributes = cls._get_text_attributes()
    # Clear the current foreground bits, then set the red bit.
    attributes = (attributes & ~win32.FOREGROUND_MASK) | win32.FOREGROUND_RED
    cls._set_text_attributes(attributes)
Make the text foreground color red.
def inet_pton(address_family, ip_string):
    """A platform independent version of inet_pton.

    The implementation is resolved lazily and cached in the module-level
    ``__inet_pton``: the native socket version when available, otherwise a
    Windows fallback.
    """
    global __inet_pton
    if __inet_pton is None:
        native = getattr(socket, 'inet_pton', None)
        if native is not None:
            __inet_pton = native
        else:
            from ospd import win_socket
            __inet_pton = win_socket.inet_pton
    return __inet_pton(address_family, ip_string)
A platform independent version of inet_pton
def createPlotPanel(self):
    """Create the figure, axes and canvas for the plotting panel."""
    self.figure = Figure()
    self.axes = self.figure.add_subplot(111)
    self.canvas = FigureCanvas(self, -1, self.figure)
    self.canvas.SetSize(wx.Size(300, 300))
    self.axes.axis('off')
    # Use the full canvas area for the axes (no margins).
    self.figure.subplots_adjust(left=0, right=1, top=1, bottom=0)
    self.sizer = wx.BoxSizer(wx.VERTICAL)
    self.sizer.Add(self.canvas, 1, wx.EXPAND, wx.ALL)
    self.SetSizerAndFit(self.sizer)
    self.Fit()
Creates the figure and axes for the plotting panel.
def next(self):
    """Return the next available message.

    Blocks indefinitely unless consumer_timeout_ms > 0.

    Raises:
        ConsumerTimeout after consumer_timeout_ms with no message.
    """
    self._set_consumer_timeout_start()
    while True:
        try:
            return six.next(self._get_message_iterator())
        except StopIteration:
            # Iterator exhausted: rebuild it and check whether we timed out.
            self._reset_message_iterator()
            self._check_consumer_timeout()
Return the next available message Blocks indefinitely unless consumer_timeout_ms > 0 Returns: a single KafkaMessage from the message iterator Raises: ConsumerTimeout after consumer_timeout_ms and no message Note: This is also the method called internally during iteration
def value_right(self, other):
    """Return the operand to use when this instance appears on the right
    of an operator: the instance itself when ``other`` is the same type,
    otherwise its ``value``."""
    if isinstance(other, self.__class__):
        return self
    return self.value
Returns the value of the type instance to use in an operator method, namely when the method's instance is on the right side of the expression.
def _load_script(self, filename: str) -> Script:
    """Load a Lua script file and register it with Redis.

    Scripts whose source begins with the magic header
    ``-- idempotency protected script`` are additionally recorded as
    requiring an idempotency token to execute.
    """
    script_path = path.join(here, 'redis_scripts', filename)
    with open(script_path, mode='rb') as script_file:
        source = script_file.read()
    script = self._r.register_script(source)
    if source.startswith(b'-- idempotency protected script'):
        self._idempotency_protected_scripts.append(script)
    return script
Load a Lua script. Read the Lua script file to generate its Script object. If the script starts with a magic string, add it to the list of scripts requiring an idempotency token to execute.
def link_markdown_cells(cells, modules):
    "Create documentation links for all cells in markdown with backticks."
    for cell in cells:
        if cell['cell_type'] == 'markdown':
            cell['source'] = link_docstring(modules, cell['source'])
Create documentation links for all cells in markdown with backticks.
def has_field(mc, field_name):
    """Detect whether the model class ``mc`` declares field ``field_name``.

    :param mc: the model class to inspect
    :param field_name: name of the field to look up
    :return: True if the field exists, False otherwise
    """
    try:
        mc._meta.get_field(field_name)
        return True
    except FieldDoesNotExist:
        return False
Detect if a model has a given field. :param mc: the model class :param field_name: name of the field to look up :return: True if the field exists, False otherwise
def highlight_differences(s1, s2, color):
    """Print s1 then s2, highlighting the characters of s2 that differ.

    Length mismatches are marked with colored underscores after the
    shorter string.
    """
    len1, len2 = len(s1), len(s2)
    diff_indices = [idx for idx, (a, b) in enumerate(zip(s1, s2)) if a != b]
    print(s1)
    if len2 > len1:
        colorise.cprint('_' * (len2 - len1), fg=color)
    else:
        print()
    colorise.highlight(s2, indices=diff_indices, fg=color, end='')
    if len1 > len2:
        colorise.cprint('_' * (len1 - len2), fg=color)
    else:
        print()
Highlight the characters in s2 that differ from those in s1.
def getNonDefaultsDict(self):
    """Recursively collect non-default values as a dict for persistence.

    Only values differing from the defaults are stored; an empty dict is
    returned when neither this CTI nor any child differs from its default,
    keeping the JSON representation small. Descendants should typically
    override _nodeGetNonDefaultsDict instead of this method.
    """
    dct = self._nodeGetNonDefaultsDict()
    child_dicts = [child.getNonDefaultsDict() for child in self.childItems]
    child_dicts = [child_dct for child_dct in child_dicts if child_dct]
    if child_dicts:
        dct['childItems'] = child_dicts
    if dct:
        dct['nodeName'] = self.nodeName
    return dct
Recursively retrieves values as a dictionary to be used for persistence. Does not save defaultData and other properties, only stores values if they differ from the defaultData. If the CTI and none of its children differ from their default, a completely empty dictionary is returned. This is to achieve a smaller json representation. Typically descendants should override _nodeGetNonDefaultsDict instead of this function.
def make_map(declarations):
    """Process route declarations to create a Route Mapper.

    NOTE(review): ``declarations`` is unused; routes come from the
    module-level ROUTE_LIST -- confirm this is intentional.
    """
    mapper = routes.Mapper()
    for route, methods in ROUTE_LIST:
        allowed = []
        for verb, handler in methods.items():
            mapper.connect(route, action=handler,
                           conditions=dict(method=[verb]))
            allowed.append(verb)
        # Catch-all entry so unsupported verbs reach the not-allowed handler.
        mapper.connect(route, action=handle_not_allowed,
                       _methods=', '.join(allowed))
    return mapper
Process route declarations to create a Route Mapper.
def subscribe(self, connection, destination):
    """Subscribe a client connection to the given topic destination.

    @param connection: The client connection to subscribe.
    @param destination: The topic destination (e.g. '/topic/foo').
    """
    self.log.debug("Subscribing %s to %s" % (connection, destination))
    self._topics[destination].add(connection)
Subscribes a connection to the specified topic destination. @param connection: The client connection to subscribe. @type connection: L{coilmq.server.StompConnection} @param destination: The topic destination (e.g. '/topic/foo') @type destination: C{str}
def remove_listener(self, listener):
    """Unregister ``listener``; a no-op if it was never registered."""
    self.logger.debug('discarding listener %r', listener)
    with self._lock:
        self._listeners.discard(listener)
Unregister some listener; ignore if the listener was never registered. :type listener: :class:`SessionListener`
def setsebools(pairs, persist=False):
    """Set the value of multiple SELinux booleans.

    CLI Example:

    .. code-block:: bash

        salt '*' selinux.setsebools '{virt_use_usb: on, squid_use_tproxy: off}'
    """
    if not isinstance(pairs, dict):
        return {}
    # Note: the accumulated command intentionally mirrors the original
    # formatting (including its spacing).
    cmd = 'setsebool -P ' if persist else 'setsebool '
    for boolean, value in six.iteritems(pairs):
        cmd = '{0} {1}={2}'.format(cmd, boolean, value)
    return not __salt__['cmd.retcode'](cmd, python_shell=False)
Set the value of multiple booleans CLI Example: .. code-block:: bash salt '*' selinux.setsebools '{virt_use_usb: on, squid_use_tproxy: off}'
def remove_infinite_values(self):
    """Ensure every vertex and face consists of finite numbers.

    Faces and vertices containing np.nan or np.inf are masked out via
    update_faces/update_vertices.
    """
    if util.is_shape(self.faces, (-1, 3)):
        self.update_faces(np.isfinite(self.faces).all(axis=1))
    if util.is_shape(self.vertices, (-1, 3)):
        self.update_vertices(np.isfinite(self.vertices).all(axis=1))
Ensure that every vertex and face consists of finite numbers. This will remove vertices or faces containing np.nan and np.inf Alters ---------- self.faces : masked to remove np.inf/np.nan self.vertices : masked to remove np.inf/np.nan
def __execute_str(self, instr):
    """Execute the STR instruction: store operand 0's value into operand 2."""
    stored_value = self.read_operand(instr.operands[0])
    self.write_operand(instr.operands[2], stored_value)
    return None
Execute STR instruction.
def slice(self, x, y, width):
    """Return ``width`` cells from the double buffer at row ``y``, column ``x``.

    :param x: The X origin
    :param y: The Y origin
    :param width: The width of slice required
    :return: The slice from the current double-buffer
    """
    row = self._double_buffer[y]
    return row[x:x + width]
Provide a slice of data from the buffer at the specified location :param x: The X origin :param y: The Y origin :param width: The width of slice required :return: The slice of tuples from the current double-buffer
def has_permission(cls, user):
    """Check permissions using our role system instead of the default one."""
    if not cls.requires_login:
        return True
    if not user.is_authenticated:
        return False
    perms = cls.get_permission_required()
    if not perms:
        return True
    role = user.urole.role.name
    if role == cls.ADMIN:
        return True
    return any(cls.check_permission(role, permission) for permission in perms)
We override this method to customize the way permissions are checked. Using our roles to check permissions.
def persistant_warning(request, message, extra_tags='', fail_silently=False,
                       *args, **kwargs):
    """Add a persistent message with the ``WARNING`` level."""
    add_message(request, WARNING_PERSISTENT, message, extra_tags=extra_tags,
                fail_silently=fail_silently, *args, **kwargs)
Adds a persistent message with the ``WARNING`` level.
def purity(labels, true_labels):
    """Calculate the purity score for the given clustering.

    Args:
        labels (array): 1D array of cluster assignments.
        true_labels (array): 1D array of ground-truth labels.

    Returns:
        A float between 0 and 1; closer to 1 is better.
    """
    matched = 0.0
    for cluster_id in set(labels):
        members = true_labels[labels == cluster_id]
        if len(members) == 0:
            continue
        # Count of the most common true label within this cluster.
        _, top_count = Counter(members).most_common(1)[0]
        matched += top_count
    return float(matched) / len(labels)
Calculates the purity score for the given labels. Args: labels (array): 1D array of integers true_labels (array): 1D array of integers - true labels Returns: purity score - a float between 0 and 1. Closer to 1 is better.
def remove_none_dict_values(obj):
    """Recursively remove dict entries whose value is None.

    Lists, tuples and sets are rebuilt with their elements cleaned; all
    other values are returned unchanged.
    """
    if isinstance(obj, dict):
        return type(obj)(
            (key, remove_none_dict_values(value))
            for key, value in obj.items()
            if value is not None
        )
    if isinstance(obj, (list, tuple, set)):
        return type(obj)(remove_none_dict_values(item) for item in obj)
    return obj
Remove None values from dict.
def auth_user_oauth(self, userinfo):
    """Authenticate (and optionally self-register) an OAuth user.

    :param userinfo: dict with user information; keys match User model
        columns. Returns the User on success, None otherwise.
    """
    if "username" in userinfo:
        user = self.find_user(username=userinfo["username"])
    elif "email" in userinfo:
        user = self.find_user(email=userinfo["email"])
    else:
        log.error("User info does not have username or email {0}".format(userinfo))
        return None
    # Deactivated accounts may never log in.
    if user and not user.is_active:
        log.info(LOGMSG_WAR_SEC_LOGIN_FAILED.format(userinfo))
        return None
    if not user:
        # Unknown user: only continue when self-registration is enabled.
        if not self.auth_user_registration:
            return None
        user = self.add_user(
            username=userinfo["username"],
            first_name=userinfo.get("first_name", ""),
            last_name=userinfo.get("last_name", ""),
            email=userinfo.get("email", ""),
            role=self.find_role(self.auth_user_registration_role),
        )
        if not user:
            log.error("Error creating a new OAuth user %s" % userinfo["username"])
            return None
    self.update_user_auth_stat(user)
    return user
OAuth user Authentication :userinfo: dict with user information the keys have the same name as User model columns.
def get_view(self, columns: Sequence[str], query: str=None) -> PopulationView:
    """Return a configured PopulationView.

    Unless 'tracked' is requested as a column, the view's query is extended
    to restrict it to tracked simulants.

    Fix: the original concatenated ``query + 'and tracked == True'`` with no
    separating space, producing an invalid query expression (e.g.
    ``"age > 5and tracked == True"``) whenever a caller supplied a query.
    """
    if 'tracked' not in columns:
        query_with_track = query + ' and tracked == True' if query else 'tracked == True'
        return PopulationView(self, columns, query_with_track)
    return PopulationView(self, columns, query)
Return a configured PopulationView Notes ----- Client code should only need this (and only through the version exposed as ``population_view`` on the builder during setup) if it uses dynamically generated column names that aren't known at definition time. Otherwise components should use ``uses_columns``.
def resolve(self, pointer):
    """Resolve a document pointer into its object.

    :param pointer: the pointer to resolve
    :type pointer: DocumentPointer
    """
    doc_pointer = DocumentPointer(pointer)
    obj, fetcher = self.prototype(doc_pointer)
    for token in doc_pointer.pointer:
        obj = token.extract(obj, bypass_ref=True)
        # Follow any $ref the extracted object carries.
        reference = ref(obj)
        if reference:
            obj = fetcher.resolve(reference)
    return obj
Resolve from documents. :param pointer: foo :type pointer: DocumentPointer
def __getContributions(self, web):
    """Scrape the contribution count from a parsed GitHub profile page.

    :param web: parsed web page.
    :type web: BeautifulSoup node.

    Fix: on a scrape failure the original fell through to
    ``int(contrText)`` with ``contrText`` unbound, raising NameError;
    it now returns early after logging the error.
    """
    contributions_raw = web.find_all('h2', {'class': 'f4 text-normal mb-2'})
    try:
        contrText = contributions_raw[0].text
        contrText = contrText.lstrip().split(" ")[0]
        contrText = contrText.replace(",", "")
    except (IndexError, AttributeError) as error:
        print("There was an error with the user " + self.name)
        print(error)
        return
    self.contributions = int(contrText)
Scrap the contributions from a GitHub profile. :param web: parsed web. :type web: BeautifulSoup node.
def _prepare_filtering_params(domain=None, category=None, sponsored_source=None,
                              has_field=None, has_fields=None,
                              query_params_match=None, query_person_match=None,
                              **kwargs):
    """Transform the params to the API format; return a list of params.

    Raises ValueError when query_params_match or query_person_match is
    anything other than None or True.

    Fix: the caller's ``has_fields`` list is no longer mutated when
    ``has_field`` is also given (a copy is built instead).
    """
    if query_params_match not in (None, True):
        raise ValueError('query_params_match can only be `True`')
    if query_person_match not in (None, True):
        raise ValueError('query_person_match can only be `True`')
    params = []
    if domain is not None:
        params.append('domain:%s' % domain)
    if category is not None:
        Source.validate_categories([category])
        params.append('category:%s' % category)
    if sponsored_source is not None:
        params.append('sponsored_source:%s' % sponsored_source)
    if query_params_match is not None:
        params.append('query_params_match')
    if query_person_match is not None:
        params.append('query_person_match')
    fields = list(has_fields) if has_fields else []
    if has_field is not None:
        fields.append(has_field)
    for field in fields:
        params.append('has_field:%s' % field.__name__)
    return params
Transform the params to the API format, return a list of params.
def get_open_orders(self, asset=None):
    """Retrieve all of the current open orders.

    Parameters
    ----------
    asset : Asset
        If passed and not None, return only the open orders for this asset.

    Returns
    -------
    open_orders : dict[list[Order]] or list[Order]
        A mapping of asset to its open orders when no asset is given,
        otherwise the list of open orders for the given asset.
    """
    open_orders = self.blotter.open_orders
    if asset is None:
        return {
            key: [order.to_api_obj() for order in orders]
            for key, orders in iteritems(open_orders)
            if orders
        }
    if asset in open_orders:
        return [order.to_api_obj() for order in open_orders[asset]]
    return []
Retrieve all of the current open orders. Parameters ---------- asset : Asset If passed and not None, return only the open orders for the given asset instead of all open orders. Returns ------- open_orders : dict[list[Order]] or list[Order] If no asset is passed this will return a dict mapping Assets to a list containing all the open orders for the asset. If an asset is passed then this will return a list of the open orders for this asset.
def connect(self, func=None, event=None, set_method=False):
    """Register a callback function for an event.

    Usable as a decorator (``@obj.connect``); when ``event`` is not given
    it is derived from the function name. Several callbacks may be
    registered for one event and their registration order is preserved.
    """
    if func is None:
        # Called with options only: return a decorator that re-enters here.
        return partial(self.connect, set_method=set_method)
    if event is None:
        event = self._get_on_name(func)
    self._callbacks[event].append(func)
    if set_method:
        self._create_emitter(event)
    return func
Register a callback function to a given event. To register a callback function to the `spam` event, where `obj` is an instance of a class deriving from `EventEmitter`: ```python @obj.connect def on_spam(arg1, arg2): pass ``` This is called when `obj.emit('spam', arg1, arg2)` is called. Several callback functions can be registered for a given event. The registration order is conserved and may matter in applications.
def delete_label_by_id(self, content_id, label_name, callback=None):
    """Delete a label from the specified content.

    :param content_id: id of the label's content container.
    :param label_name: name of the label to remove.
    :param callback: optional callback applied to the response data.
    :return: Empty if successful, or the result of the callback.
    """
    url = "rest/api/content/{id}/label".format(id=content_id)
    return self._service_delete_request(url,
                                        params={"name": label_name},
                                        callback=callback)
Deletes a labels to the specified content. There is an alternative form of this delete method that is not implemented. A DELETE request to /rest/api/content/{id}/label/{label} will also delete a label, but is more limited in the label name that can be accepted (and has no real apparent upside). :param content_id (string): A string containing the id of the labels content container. :param label_name (string): OPTIONAL: The name of the label to be removed from the content. Default: Empty (probably deletes all labels). :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: Empty if successful, or the results of the callback. Will raise requests.HTTPError on bad input, potentially.
def _database_exists(self):
    """Check whether the configured database exists.

    Fixes: the query is now parameterized instead of interpolating
    ``self.dbname`` into the SQL string (SQL-injection prone), and the
    connection is closed when done (it was previously leaked).
    """
    con = psycopg2.connect(host=self.host, database="postgres",
                           user=self.user, password=self.password,
                           port=self.port)
    try:
        c = con.cursor()
        c.execute(
            "select datname from pg_catalog.pg_database"
            " where datname = %s;",
            (self.dbname,))
        return len(c.fetchall()) > 0
    finally:
        con.close()
Check if the database exists.
def transform_audio(self, y):
    """Compute the CQT, replacing the phase field with its differential.

    Parameters
    ----------
    y : np.ndarray
        The audio buffer.

    Returns
    -------
    data : dict
        The parent's feature dict with 'phase' replaced by 'dphase'.
    """
    features = super(CQTPhaseDiff, self).transform_audio(y)
    phase = features.pop('phase')
    features['dphase'] = self.phase_diff(phase)
    return features
Compute the CQT with unwrapped phase Parameters ---------- y : np.ndarray The audio buffer Returns ------- data : dict data['mag'] : np.ndarray, shape=(n_frames, n_bins) CQT magnitude data['dphase'] : np.ndarray, shape=(n_frames, n_bins) Unwrapped phase differential
def parse_timespan_value(s):
    """Parse a time-span string with an optional unit into seconds.

    Allowed units are s, min, h and d; no unit means seconds.

    @return the number of seconds encoded by the string
    """
    number, unit = split_number_and_unit(s)
    if not unit or unit == "s":
        return number
    factors = {"min": 60, "h": 60 * 60, "d": 24 * 60 * 60}
    try:
        return number * factors[unit]
    except KeyError:
        raise ValueError('unknown unit: {} (allowed are s, min, h, and d)'.format(unit))
Parse a string that contains a time span, optionally with a unit like s. @return the number of seconds encoded by the string
def write_ds9region(self, region, *args, **kwargs):
    """Write the ROI as a ds9-compatible region file.

    All extra arguments are forwarded to ``to_ds9``; see that method for
    the accepted parameters and options.

    Parameters
    ----------
    region : str
        Name of the region file.
    """
    lines = self.to_ds9(*args, **kwargs)
    with open(region, 'w') as region_file:
        region_file.write("\n".join(lines))
Create a ds9 compatible region file from the ROI. It calls the `to_ds9` method and write the result to the region file. Only the file name is required. All other parameters will be forwarded to the `to_ds9` method, see the documentation of that method for all accepted parameters and options. Parameters ---------- region : str name of the region file (string)
def _get_site_amplification_term(self, C, vs30):
    """Return the site amplification term computed directly from Vs30."""
    vs30_ratio = vs30 / self.CONSTS["Vref"]
    return C["gamma"] * np.log10(vs30_ratio)
Returns the site amplification term for the case in which Vs30 is used directly
def fullversion():
    """Return all server information from ``catalina.sh version``.

    CLI Example:

    .. code-block:: bash

        salt '*' tomcat.fullversion
    """
    cmd = __catalina_home() + '/bin/catalina.sh version'
    info = {}
    for line in __salt__['cmd.run'](cmd).splitlines():
        if not line or ': ' not in line:
            continue
        parts = line.split(': ')
        info[parts[0]] = parts[1].lstrip()
    return info
Return all server information from catalina.sh version CLI Example: .. code-block:: bash salt '*' tomcat.fullversion
def ended(self):
    """Record the end time (and end memory when memory profiling is on)."""
    self._end_time = time.time()
    if setting(key='memory_profile', expected_type=bool):
        self._end_memory = get_free_memory()
We call this method when the function is finished.
def add_ability(self, phase, ability):
    """Add ``ability`` to this Card under ``phase``.

    Returns the number of abilities registered for that phase after the
    addition.
    """
    phase_abilities = self.abilities.setdefault(phase, [])
    phase_abilities.append(ability)
    return len(phase_abilities)
Add the given ability to this Card under the given phase. Returns the length of the abilities for the given phase after the addition.
def get_brain_by_uid(self, uid):
    """Look up a brain for ``uid`` from the right catalog.

    Raises ValueError when zero or multiple results are found.
    """
    if uid == "0":
        return api.get_portal()
    # Resolve and cache the appropriate catalog on first use.
    if self._catalog is None:
        uid_catalog = api.get_tool("uid_catalog")
        matches = uid_catalog({"UID": uid})
        if len(matches) != 1:
            raise ValueError("No object found for UID '{}'".format(uid))
        self._catalog = self.get_catalog_for(matches[0])
    results = self.catalog({"UID": uid})
    if not results:
        raise ValueError("No results found for UID '{}'".format(uid))
    if len(results) != 1:
        raise ValueError("Found more than one object for UID '{}'"
                         .format(uid))
    return results[0]
Lookup brain from the right catalog
async def abort(self, *, comment: str = None):
    """Abort the current action.

    :param comment: Reason for aborting the action.
    :type comment: `str`
    """
    params = dict(system_id=self.system_id)
    if comment:
        params["comment"] = comment
    # Refresh our cached data with the handler's response.
    self._data = await self._handler.abort(**params)
    return self
Abort the current action. :param comment: Reason for aborting the action. :type comment: `str`
def parse(region_string):
    """Parse a DS9 region string into a ShapeList.

    Parameters
    ----------
    region_string : str
        Region string

    Returns
    -------
    shapes : `ShapeList`
        List of `~pyregion.Shape`
    """
    parser = RegionParser()
    parsed = parser.parse(region_string)
    with_attrs = parser.convert_attr(parsed)
    wcs_checked = _check_wcs(with_attrs)
    shapes, comments = parser.filter_shape2(wcs_checked)
    return ShapeList(shapes, comment_list=comments)
Parse DS9 region string into a ShapeList. Parameters ---------- region_string : str Region string Returns ------- shapes : `ShapeList` List of `~pyregion.Shape`
def remove_node(self, node):
    """Remove *node* from the graph together with all its connections.

    Both normal and exception (catch) edges to and from the node are
    unlinked, and the node is dropped from the reverse post-order list
    when present.

    :param androguard.decompiler.dad.node.Node node: the node to remove
    """
    # Unlink incoming normal edges (predecessor -> node).
    for pred in self.reverse_edges.get(node, []):
        self.edges[pred].remove(node)
    # Unlink outgoing normal edges (node -> successor).
    for suc in self.edges.get(node, []):
        self.reverse_edges[suc].remove(node)
    # Unlink incoming exception edges; pop() also clears this node's own
    # reverse-catch entry.
    for pred in self.reverse_catch_edges.pop(node, []):
        self.catch_edges[pred].remove(node)
    # Unlink outgoing exception edges.
    for suc in self.catch_edges.pop(node, []):
        self.reverse_catch_edges[suc].remove(node)
    # NOTE(review): the node's own entries in self.edges/self.reverse_edges
    # are left in place (only the catch-edge maps are popped) — preserved
    # as-is; confirm whether that asymmetry is intentional.
    self.nodes.remove(node)
    if node in self.rpo:
        self.rpo.remove(node)
    # The original ended with a pointless ``del node``, which only unbound
    # the local name; it has been removed.
Remove the node from the graph, removes also all connections. :param androguard.decompiler.dad.node.Node node: the node to remove
async def chain(*sources):
    """Chain asynchronous sequences together, in the order they are given.

    Note: the sequences are not iterated until it is required, so if the
    operation is interrupted, the remaining sequences will be left
    untouched.
    """
    for sequence in sources:
        async with streamcontext(sequence) as stream:
            async for element in stream:
                yield element
Chain asynchronous sequences together, in the order they are given. Note: the sequences are not iterated until it is required, so if the operation is interrupted, the remaining sequences will be left untouched.
def isfortran(env, source):
    """Return 1 if any of code in source has fortran files in it, 0
    otherwise.

    A node counts as Fortran when the extension of its first source file
    appears in the environment's FORTRANSUFFIXES list.
    """
    try:
        fortran_exts = env['FORTRANSUFFIXES']
    except KeyError:
        return 0
    if not source:
        return 0
    for node in source:
        if node.sources:
            _, ext = os.path.splitext(str(node.sources[0]))
            if ext in fortran_exts:
                return 1
    return 0
Return 1 if any of code in source has fortran files in it, 0 otherwise.
def html_to_dom(html, default_encoding=DEFAULT_ENCODING, encoding=None,
        errors=DEFAULT_ENC_ERRORS):
    """Converts HTML to DOM.

    Accepts either unicode or byte input; both a decoded and an encoded
    form are kept so the byte form can serve as a parsing fallback.
    """
    if isinstance(html, unicode):
        decoded_html = html
        # Already unicode: re-encode so the byte-based fallback below
        # still has input to work with.
        html = html.encode(encoding if encoding else default_encoding, errors)
    else:
        decoded_html = decode_html(html, default_encoding, encoding, errors)
    try:
        return lxml.html.fromstring(decoded_html, parser=lxml.html.HTMLParser())
    except ValueError:
        # lxml refuses some decoded strings (e.g. ones carrying an
        # encoding declaration) -- parse the raw bytes instead.
        return lxml.html.fromstring(html, parser=lxml.html.HTMLParser())
Converts HTML to DOM.
def cmd_output_remove(self, args):
    '''remove an output'''
    device = args[0]
    outputs = self.mpstate.mav_outputs
    # Match either by list index or by connection address; stop after the
    # first match, so mutating the list while enumerating is safe.
    for index, conn in enumerate(outputs):
        if str(index) == device or conn.address == device:
            print("Removing output %s" % conn.address)
            try:
                mp_util.child_fd_list_add(conn.port.fileno())
            except Exception:
                pass
            conn.close()
            outputs.pop(index)
            return
remove an output
def release(ctx, yes, latest):
    """Create a new release in github"""
    manager = RepoManager(ctx.obj['agile'])
    api = manager.github_repo()
    if latest:
        # Query-only mode: print the latest release tag, if any.
        current = api.releases.latest()
        if current:
            click.echo(current['tag_name'])
    elif manager.can_release('sandbox'):
        branch = manager.info['branch']
        version = manager.validate_version()
        tag = 'v%s' % version
        payload = dict(
            tag_name=tag,
            target_commitish=branch,
            name=tag,
            body='\n\n'.join(['Release %s from agiletoolkit' % tag]),
            draft=False,
            prerelease=False
        )
        if yes:
            payload = api.releases.create(data=payload)
            manager.message('Successfully created a new Github release')
            click.echo(niceJson(payload))
        else:
            # Dry run: nothing is sent to Github unless --yes was given.
            click.echo('skipped')
Create a new release in github
def validate(self, output_type, output_params):
    """Check that a subscription is defined correctly.

    Uses API documented at
    http://dev.datasift.com/docs/api/rest-api/endpoints/pushvalidate

    :param output_type: One of DataSift's supported output types, e.g. s3
    :type output_type: str
    :param output_params: The set of parameters required by the specified
        output_type
    :type output_params: str
    :returns: dict with extra response data
    """
    payload = dict(output_type=output_type, output_params=output_params)
    return self.request.post('validate', payload)
Check that a subscription is defined correctly. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/pushvalidate :param output_type: One of DataSift's supported output types, e.g. s3 :type output_type: str :param output_params: The set of parameters required by the specified output_type for docs on all available connectors see http://dev.datasift.com/docs/push/connectors/ :type output_params: str :returns: dict with extra response data :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
def add(self, config, strip_app_name=False, filter_by_app_name=False,
        key_normalisation_func=default_key_normalisation_func):
    """Add a dict of config data.

    Values from later dicts take precedence over those added earlier, so
    the order data is added matters.

    Key names are normalised by recursively applying
    ``key_normalisation_func`` so keys from different sources (CLI args,
    env vars, files) can override each other.

    :param config dict: config data
    :param strip_app_name boolean: If True, the configured app_name will
        be stripped from the start of top-level input keys if present.
    :param filter_by_app_name boolean: If True, keys that don't begin
        with the app name will be discarded.
    :return: self, to allow chaining
    """
    # Normalise every key (recursively) before anything else looks at them.
    config = walk_recursive(key_normalisation_func, OrderedDict(config))
    if filter_by_app_name:
        # Keep only top-level keys prefixed with the app name, dropping
        # any entries whose value is falsy (funcy.compact).
        config = funcy.compact(funcy.select_keys(
            lambda k: k.startswith(self._app_name), config))
    if strip_app_name:
        # NOTE(review): self._app_name is interpolated into the pattern
        # unescaped — confirm app names never contain regex metacharacters.
        strip_app_name_regex = re.compile("^%s" % self._app_name)
        config = funcy.walk_keys(
            lambda k: re.sub(strip_app_name_regex, '', k), config)
    self._sources.append(config)
    return self
Add a dict of config data. Values from later dicts will take precedence over those added earlier, so the order data is added matters. Note: Double underscores can be used to indicate dict key name boundaries. i.e. if we have a dict like: { 'logging': { 'level': INFO ... } } we could pass an environment variable LOGGING__LEVEL=DEBUG to override the log level. Note: Key names will be normalised by recursively applying the key_normalisation_func function. By default this will: 1) Convert keys to lowercase 2) Replace hyphens with underscores 3) Strip leading underscores This allows key names from different sources (e.g. CLI args, env vars, etc.) to be able to override each other. :param config dict: config data :param strip_app_name boolean: If True, the configured app_name will stripped from the start of top-level input keys if present. :param filter_by_app_name boolean: If True, keys that don't begin with the app name will be discarded. :return:
def editor_interfaces(self):
    """Provides access to editor interface management methods for the
    given content type.

    API reference:
    https://www.contentful.com/developers/docs/references/content-management-api/#/reference/editor-interface

    :return: :class:`ContentTypeEditorInterfacesProxy
        <contentful_management.content_type_editor_interfaces_proxy.ContentTypeEditorInterfacesProxy>`
        object.
    """
    proxy_args = (self._client, self.space.id, self._environment_id, self.id)
    return ContentTypeEditorInterfacesProxy(*proxy_args)
Provides access to editor interface management methods for the given content type. API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/editor-interface :return: :class:`ContentTypeEditorInterfacesProxy <contentful_management.content_type_editor_interfaces_proxy.ContentTypeEditorInterfacesProxy>` object. :rtype: contentful.content_type_editor_interfaces_proxy.ContentTypeEditorInterfacesProxy Usage: >>> content_type_editor_interfaces_proxy = content_type.editor_interfaces() <ContentTypeEditorInterfacesProxy space_id="cfexampleapi" environment_id="master" content_type_id="cat">
def GetEntries(self, parser_mediator, cache=None, database=None, **kwargs):
    """Extracts event objects from the database.

    Args:
      parser_mediator (ParserMediator): mediates interactions between
          parsers and other components, such as storage and dfvfs.
      cache (Optional[ESEDBCache]): cache.
      database (Optional[pyesedb.file]): ESE database.

    Raises:
      ValueError: If the database attribute is not valid.
    """
    if database is None:
        raise ValueError('Invalid database.')

    for table_name, callback_method in self._tables.items():
        if parser_mediator.abort:
            break

        if not callback_method:
            # Table listed without a parsing callback: nothing to do.
            continue

        callback = getattr(self, callback_method, None)
        if callback is None:
            logger.warning(
                '[{0:s}] missing callback method: {1:s} for table: {2:s}'.format(
                    self.NAME, callback_method, table_name))
            continue

        esedb_table = database.get_table_by_name(table_name)
        if not esedb_table:
            logger.warning('[{0:s}] missing table: {1:s}'.format(
                self.NAME, table_name))
            continue

        callback(
            parser_mediator, cache=cache, database=database,
            table=esedb_table, **kwargs)
Extracts event objects from the database. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. cache (Optional[ESEDBCache]): cache. database (Optional[pyesedb.file]): ESE database. Raises: ValueError: If the database attribute is not valid.
def stick_perm(presenter, egg, dist_dict, strategy):
    """Computes weights for one reordering using stick-breaking method.

    Returns a (weights, orders) tuple where *orders* maps each reordered
    item back to its index in the original presentation.
    """
    # Re-seed so each permutation draw is independent.
    np.random.seed()
    pres, _, _, _ = parse_egg(egg)
    reordered = order_stick(presenter, egg, dist_dict, strategy)
    r_pres, _, r_features, _ = parse_egg(reordered)
    r_pres = list(r_pres)
    pres = list(pres)
    orders = [pres.index(item) for item in r_pres]
    weights = compute_feature_weights_dict(
        list(r_pres), list(r_pres), list(r_features), dist_dict)
    return weights, orders
Computes weights for one reordering using stick-breaking method
def render_headers(self):
    """Render this request field's headers as a CRLF-joined string.

    Content-Disposition, Content-Type and Content-Location are emitted
    first (in that order) when present; the remaining headers follow in
    mapping order. Headers with falsy values are skipped. The result
    always ends with a blank line.
    """
    priority = ['Content-Disposition', 'Content-Type', 'Content-Location']
    rendered = []
    for name in priority:
        value = self.headers.get(name, False)
        if value:
            rendered.append('%s: %s' % (name, value))
    for name, value in self.headers.items():
        if name not in priority and value:
            rendered.append('%s: %s' % (name, value))
    rendered.append('\r\n')
    return '\r\n'.join(rendered)
Renders the headers for this request field.
def bm_create_button(self, **kwargs):
    """Shortcut to the BMCreateButton method.

    See the docs for details on arguments:
    https://cms.paypal.com/mx/cgi-bin/?cmd=_render-content&content_ID=developer/e_howto_api_nvp_BMCreateButton

    The L_BUTTONVARn fields are especially important, so make sure to
    read those and act accordingly. See unit tests for some examples.
    """
    # locals() here is {'self': ..., 'kwargs': ...}; _sanitize_locals
    # presumably filters/flattens that mapping before merging it back
    # into kwargs — do not rename these locals without checking it.
    kwargs.update(self._sanitize_locals(locals()))
    return self._call('BMCreateButton', **kwargs)
Shortcut to the BMCreateButton method. See the docs for details on arguments: https://cms.paypal.com/mx/cgi-bin/?cmd=_render-content&content_ID=developer/e_howto_api_nvp_BMCreateButton The L_BUTTONVARn fields are especially important, so make sure to read those and act accordingly. See unit tests for some examples.
def GenerarPDF(self, archivo="", dest="F"):
    "Generate the output file in PDF format"
    # Render the previously built template into *archivo*; ``dest`` is
    # forwarded to the renderer (e.g. "F" = write to file).
    try:
        self.template.render(archivo, dest=dest)
        return True
    except Exception, e:
        # Python 2 except syntax. COM-style error handling: store the
        # message for the caller and report failure instead of raising.
        self.Excepcion = str(e)
        return False
Generate the output file in PDF format.
def is_feeder(self, team_id=None):
    """Return whether the resource has the FEEDER role.

    With no ``team_id`` the resource's own flag is returned. Otherwise
    the role is looked up for that team; unknown teams yield False.
    """
    if team_id is None:
        return self._is_feeder
    normalized = uuid.UUID(str(team_id))
    if normalized not in self.teams_ids:
        return False
    return self.teams[normalized]['role'] == 'FEEDER'
Return whether the resource has the role FEEDER.
def iter_query(query):
    """Accept a filename, stream, or string.

    Returns an iterator over lines of the query: the file's lines when
    *query* can be opened, otherwise the string itself as a single line.
    """
    try:
        return click.open_file(query).readlines()
    except IOError:
        # Not an openable file: treat the argument as a literal query.
        return [query]
Accept a filename, stream, or string. Returns an iterator over lines of the query.
def insertBefore(self, child: Node, ref_node: Node) -> Node:
    """Insert new child node before the reference child node.

    If the reference node is not a child of this node, raise ValueError.
    When this instance is connected to the node on the browser, the
    child is mirrored there as well.
    """
    connected_to_browser = self.connected
    if connected_to_browser:
        self._insert_before_web(child, ref_node)
    return self._insert_before(child, ref_node)
Insert new child node before the reference child node. If the reference node is not a child of this node, raise ValueError. If this instance is connected to the node on browser, the child node is also added to it.
def getSigned(self, ns_uri, ns_key, default=None):
    """Return the specified field only if it was signed; otherwise
    return *default*.
    """
    if not self.isSigned(ns_uri, ns_key):
        return default
    return self.message.getArg(ns_uri, ns_key, default)
Return the specified signed field if available, otherwise return default
def get_ratings(data):
    """Ratings of all the episodes of all the seasons.

    Returns an OrderedDict keyed by season, with each season's episodes
    collapsed via `collapse`.
    """
    episodes = data['episodes']
    ratings = {season: collapse(eps) for season, eps in episodes.items()}
    return co.OrderedDict(sorted(ratings.items()))
Ratings of all the episodes of all the seasons
def __updateNavButtons(self):
    '''
    Updates the navigation buttons that might be on the device screen.
    '''
    navButtons = None
    # Locate the systemui container that holds the soft navigation buttons.
    for v in self.views:
        if v.getId() == 'com.android.systemui:id/nav_buttons':
            navButtons = v
            break
    if navButtons:
        self.navBack = self.findViewById('com.android.systemui:id/back', navButtons)
        self.navHome = self.findViewById('com.android.systemui:id/home', navButtons)
        self.navRecentApps = self.findViewById('com.android.systemui:id/recent_apps', navButtons)
    else:
        # No soft buttons: likely a device with hardware keys.
        if self.uiAutomatorHelper:
            print >> sys.stderr, "WARNING: nav buttons not found. Perhaps the device has hardware buttons."
        self.navBack = None
        self.navHome = None
        self.navRecentApps = None
Updates the navigation buttons that might be on the device screen.
def read_sex_problems(file_name):
    """Reads the sex problem file.

    :param file_name: the name of the file containing sex problems.
    :type file_name: str

    :returns: a :py:class:`frozenset` containing samples with sex
        problem. If there is no ``file_name`` (*i.e.* is ``None``), then
        an empty :py:class:`frozenset` is returned.
    """
    if file_name is None:
        return frozenset()
    with open(file_name, 'r') as input_file:
        # First line is a tab-separated header; map column name -> index.
        header = input_file.readline().rstrip("\r\n").split("\t")
        header_index = {col_name: i for i, col_name in enumerate(header)}
        if "IID" not in header_index:
            raise ProgramError("{}: no column named IID".format(file_name))
        iid_col = header_index["IID"]
        return frozenset(
            line.rstrip("\r\n").split("\t")[iid_col]
            for line in input_file.readlines())
Reads the sex problem file. :param file_name: the name of the file containing sex problems. :type file_name: str :returns: a :py:class:`frozenset` containing samples with sex problem. If there is no ``file_name`` (*i.e.* is ``None``), then an empty :py:class:`frozenset` is returned.
def measurement_key(
        val: Any,
        default: Any = RaiseTypeErrorIfNotProvided):
    """Get the measurement key for the given value.

    Args:
        val: The value which has the measurement key.
        default: Fallback when *val* doesn't have a measurement key. If
            not set, a TypeError is raised; otherwise that value is
            returned.

    Returns:
        The result of ``val._measurement_key_()`` when available and not
        NotImplemented; otherwise *default* if one was supplied.

    Raises:
        TypeError: no ``_measurement_key_`` method (or it returned
            NotImplemented) and no default was specified.
    """
    getter = getattr(val, '_measurement_key_', None)
    if getter is not None:
        result = getter()
        if result is not NotImplemented:
            return result
    if default is not RaiseTypeErrorIfNotProvided:
        return default
    if getter is None:
        raise TypeError(
            "object of type '{}' has no _measurement_key_ method."
            .format(type(val)))
    raise TypeError("object of type '{}' does have a _measurement_key_ method, "
                    "but it returned NotImplemented.".format(type(val)))
Get the measurement key for the given value. Args: val: The value which has the measurement key.. default: Determines the fallback behavior when `val` doesn't have a measurement key. If `default` is not set, a TypeError is raised. If default is set to a value, that value is returned if the value does not have `_measurement_key_`. Returns: If `val` has a `_measurement_key_` method and its result is not `NotImplemented`, that result is returned. Otherwise, if a default value was specified, the default value is returned. Raises: TypeError: `val` doesn't have a _measurement_key_ method (or that method returned NotImplemented) and also no default value was specified.
def close(self):
    """Close the contactless reader device.

    Safe to call repeatedly; IOError from the underlying close is
    swallowed, and the handle is cleared either way.
    """
    with self.lock:
        if self.device is None:
            return
        try:
            self.device.close()
        except IOError:
            pass
        self.device = None
Close the contacless reader device.
def get_fba_obj_flux(self, objective):
    """Return the maximum objective flux solved by FBA."""
    solution = self.solve_fba(objective)
    return solution.get_value(self._v_wt[objective])
Return the maximum objective flux solved by FBA.
def getStartingApplication(self, pchAppKeyBuffer, unAppKeyBufferLen):
    """Returns the app key for the application that is starting up."""
    # Thin wrapper over the OpenVR C function table entry.
    return self.function_table.getStartingApplication(
        pchAppKeyBuffer, unAppKeyBufferLen)
Returns the app key for the application that is starting up
def get_all_for(self, key):
    """Returns all values stored under the given key."""
    if not isinstance(key, _string_type):
        raise TypeError("Key needs to be a string.")
    count = self.__kcount[key]
    return [self[(idx, key)] for idx in _range(count)]
Returns all values of the given key
def fetch(cls, client, _id, symbol):
    """fetch option chain for instrument

    Queries the active, tradable chains for the given instrument id and
    returns the single entry matching *symbol*.
    """
    url = "https://api.robinhood.com/options/chains/"
    params = {
        "equity_instrument_ids": _id,
        "state": "active",
        "tradability": "tradable"
    }
    payload = client.get(url, params=params)
    matches = [entry for entry in payload["results"]
               if entry["symbol"] == symbol]
    return matches[0]
fetch option chain for instrument
def _load_data_and_files(self):
    """Parses the request content into `self.data`.

    Parsing happens only once; subsequent calls are no-ops. When file
    parts are present, ``self._full_data`` is a merged copy of data and
    files, otherwise it aliases ``self._data``.
    """
    if _hasattr(self, '_data'):
        return
    self._data, self._files = self._parse()
    if self._files:
        self._full_data = self._data.copy()
        self._full_data.update(self._files)
    else:
        self._full_data = self._data
Parses the request content into `self.data`.
def connect(self, addr):
    """Initiate a connection request to the peer router at *addr*.

    Registers a new connection state with the multiplexer and sends a
    router-to-router service request; returns the connection object.
    """
    if _debug:
        RouterToRouterService._debug("connect %r", addr)
    conn = ConnectionState(addr)
    conn.service = self
    conn.pendingNPDU = []
    self.multiplexer.connections[addr] = conn
    # Kick off the handshake toward the peer.
    request = ServiceRequest(ROUTER_TO_ROUTER_SERVICE_ID)
    request.pduDestination = addr
    self.service_request(request)
    return conn
Initiate a connection request to the peer router.
def freeze(self, number=None):
    """Freeze the first *number* layers of the model.

    When *number* is None, ``self.head_layers`` layers are frozen.
    """
    limit = self.head_layers if number is None else number
    for idx, child in enumerate(self.model.children()):
        if idx < limit:
            mu.freeze_layer(child)
Freeze given number of layers in the model