code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def sync(self, force=None):
    """Synchronize between the file in the filesystem and the field record.

    :param force: optional sync direction, overriding ``self.sync_dir()``.
    :return: the direction that was synced, or ``None`` if nothing was done.
    """
    try:
        direction = force if force else self.sync_dir()
        if direction == self.SYNC_DIR.FILE_TO_RECORD:
            # A forced file->record sync is a no-op when the file is missing.
            if force and not self.exists():
                return None
            self.fs_to_record()
        elif direction == self.SYNC_DIR.RECORD_TO_FILE:
            self.record_to_fs()
        else:
            return None
        # Remember when this direction was last synced.
        self._dataset.config.sync[self.file_const][direction] = time.time()
        return direction
    except Exception as e:
        self._bundle.rollback()
        self._bundle.error("Failed to sync '{}': {}".format(self.file_const, e))
        raise
Synchronize between the file in the file system and the field record.
def user_remove(name, user=None, password=None, host=None, port=None,
                database='admin', authdb=None):
    """Remove a MongoDB user.

    :param name: name of the user to remove.
    :param user/password/host/port: connection credentials.
    :param database: database holding the user (default ``admin``).
    :param authdb: unused here; kept for interface compatibility.
    :return: True on success, otherwise an error-message string.
    """
    conn = _connect(user, password, host, port)
    if not conn:
        return 'Failed to connect to mongo database'
    try:
        log.info('Removing user %s', name)
        mdb = pymongo.database.Database(conn, database)
        mdb.remove_user(name)
    except pymongo.errors.PyMongoError as err:
        # BUG FIX: the message previously said "Creating database %s failed",
        # which is wrong for a user-removal failure.
        log.error('Removing user %s failed with error: %s', name, err)
        return six.text_type(err)
    return True
Remove a MongoDB user CLI Example: .. code-block:: bash salt '*' mongodb.user_remove <name> <user> <password> <host> <port> <database>
def create_rule(self):
    """Return a block-rule services facade bound to this client's credentials."""
    return BlockRule(self.networkapi_url, self.user,
                     self.password, self.user_ldap)
Get an instance of block rule services facade.
def del_current_vrf(self):
    """Remove a VRF id from the 'current_vrfs' session filter; return the rest as JSON."""
    vrf_id = int(request.json['vrf_id'])
    current = session['current_vrfs']
    if vrf_id in current:
        del current[vrf_id]
        session.save()
    return json.dumps(session.get('current_vrfs', {}))
Remove a VRF from the filter-list session variable.
def update_score(self, node, addToScore):
    """Add ``addToScore`` to the node's ``gravityScore`` attribute.

    Reads the current score (0 when absent) and writes back the sum.
    """
    existing = self.parser.getAttribute(node, 'gravityScore')
    base = int(existing) if existing else 0
    self.parser.setAttribute(node, "gravityScore", str(base + addToScore))
Add a score to the gravityScore attribute we put on divs. Fetches the current score, then adds the score passed in to it.
def get_long_description():
    """Return README.rst contents with any ``.. contents::`` directive lines stripped."""
    import codecs
    with codecs.open('README.rst', encoding='UTF-8') as readme_file:
        kept = [ln for ln in readme_file if not ln.startswith('.. contents::')]
    return ''.join(kept)
Strip the content index from the long description.
def hashing_type(self, cluster='main'):
    """Return the hashing type configured for *cluster*, defaulting to 'carbon_ch'."""
    if not self.config.has_section(cluster):
        raise SystemExit("Cluster '%s' not defined in %s" % (cluster, self.config_file))
    try:
        return self.config.get(cluster, 'hashing_type')
    except NoOptionError:
        # Option absent: fall back to the default hashing scheme.
        return 'carbon_ch'
Hashing type of cluster.
def import_symbol(name=None, path=None, typename=None, base_path=None):
    """Import and return a symbol by (possibly relative) dotted name.

    ``typename`` and ``base_path`` are deprecated aliases for ``name`` / ``path``.
    """
    _, symbol = _import(name or typename, path or base_path)
    return symbol
Import a module, or a typename within a module from its name. Arguments: name: An absolute or relative (starts with a .) Python path path: If name is relative, path is prepended to it. base_path: (DEPRECATED) Same as path typename: (DEPRECATED) Same as path
def execute_sql(self, sql):
    """Execute *sql* and return the cursor used, so results can be fetched."""
    cur = self.get_cursor()
    cur.execute(sql)
    return cur
Executes SQL and returns cursor for it
def facets_boundary(self):
    """Return, per facet, the (n, 2) vertex-index pairs forming its boundary edges."""
    packed = self.edges_sorted.reshape((-1, 6))
    per_facet = [packed[idx].reshape((-1, 2)) for idx in self.facets]
    # An edge appearing exactly once within a facet lies on its boundary.
    return np.array([e[grouping.group_rows(e, require_count=1)]
                     for e in per_facet])
Return the edges which represent the boundary of each facet Returns --------- edges_boundary : sequence of (n, 2) int Indices of self.vertices
def smartfields_get_field_status(self, field_name):
    """Return the status dict for *field_name*; 'ready' when it has no manager."""
    manager = self._smartfields_managers.get(field_name)
    if manager is None:
        return {'state': 'ready'}
    return manager.get_status(self)
A way to find out the status of a field.
def create_connection(cls, address, timeout=None, source_address=None):
    """Open a TCP connection to *address* and wrap the resulting socket in this class.

    Note: the remote end must already speak SLIP; this only wraps the local socket.
    """
    return cls(socket.create_connection(address, timeout, source_address))
Create a SlipSocket connection. This convenience method creates a connection to the the specified address using the :func:`socket.create_connection` function. The socket that is returned from that call is automatically wrapped in a :class:`SlipSocket` object. .. note:: The :meth:`create_connection` method does not magically turn the socket at the remote address into a SlipSocket. For the connection to work properly, the remote socket must already have been configured to use the SLIP protocol.
def _reverse_convert(x, factor1, factor2): return x * factor1 / ((1-x) * factor2 + x * factor1)
Converts mixing ratio x in c1 - c2 tie line to that in comp1 - comp2 tie line. Args: x (float): Mixing ratio x in c1 - c2 tie line, a float between 0 and 1. factor1 (float): Compositional ratio between composition c1 and processed composition comp1. E.g., factor for Composition('SiO2') and Composition('O') is 2. factor2 (float): Compositional ratio between composition c2 and processed composition comp2. Returns: Mixing ratio in comp1 - comp2 tie line, a float between 0 and 1.
def do_placeholder(parser, token):
    """Parse the ``{% placeholder %}`` template tag and return its node."""
    tag_name, tag_params = parse_placeholder(parser, token)
    return PlaceholderNode(tag_name, **tag_params)
Method that parse the placeholder template tag. Syntax:: {% placeholder <name> [on <page>] [with <widget>] \ [parsed] [as <varname>] %} Example usage:: {% placeholder about %} {% placeholder body with TextArea as body_text %} {% placeholder welcome with TextArea parsed as welcome_text %} {% placeholder teaser on next_page with TextArea parsed %}
def export(datastore_key, calc_id=-1, exports='csv', export_dir='.'):
    """Export *datastore_key* from the datastore of *calc_id* in the given formats."""
    dstore = util.read(calc_id)
    parent_id = dstore['oqparam'].hazard_calculation_id
    if parent_id:
        # Hazard inputs live in the parent calculation's datastore.
        dstore.parent = util.read(parent_id)
    dstore.export_dir = export_dir
    with performance.Monitor('export', measuremem=True) as mon:
        for fmt in exports.split(','):
            fnames = export_((datastore_key, fmt), dstore)
            total_bytes = sum(os.path.getsize(fname) for fname in fnames)
            print('Exported %s in %s' % (general.humansize(total_bytes), fnames))
    if mon.duration > 1:
        print(mon)
    dstore.close()
Export an output from the datastore.
def get_oauth_request(self):
    """Return an OAuth Request object for the current request.

    POST/PUT bodies are passed along as the query string for signing.
    """
    # BUG FIX: was a bare ``except:`` around the environ lookup, which also
    # swallowed SystemExit/KeyboardInterrupt; use a plain .get() default.
    method = os.environ.get('REQUEST_METHOD', 'GET')
    postdata = self.request.body if method in ('POST', 'PUT') else None
    return oauth.Request.from_request(method, self.request.uri,
                                      headers=self.request.headers,
                                      query_string=postdata)
Return an OAuth Request object for the current request.
def plugins(self):
    """Lazily load and cache iotile plugins.

    Loading extensions is slow on machines with a slow filesystem, so the
    registry is built only on first access and memoized on ``self._plugins``.
    """
    if self._plugins is None:
        self._plugins = {}
        for _, plugin in self.load_extensions('iotile.plugin'):
            for link_name, link_value in plugin():
                self._plugins[link_name] = link_value
    return self._plugins
Lazily load iotile plugins only on demand. This is a slow operation on computers with a slow FS and is rarely accessed information, so only compute it when it is actually asked for.
def yield_sorted_by_type(*typelist):
    """Decorator for ``collect_impl``: re-emit yielded changes grouped by *typelist*.

    Types are grouped by exact match in the given order; unlisted types come
    last in no guaranteed order.
    """
    def _wrap(fun):
        @wraps(fun)
        def _invoke(*args, **kwds):
            return iterate_by_type(fun(*args, **kwds), typelist)
        return _invoke
    return _wrap
a useful decorator for the collect_impl method of SuperChange subclasses. Caches the yielded changes, and re-emits them collected by their type. The order of the types can be specified by listing the types as arguments to this decorator. Unlisted types will be yielded last in no guaranteed order. Grouping happens by exact type match only. Inheritance is not taken into consideration for grouping.
def dry_run_scan(self, scan_id, targets):
    """Dry-run a scan: resolve each target and log instead of scanning.

    :param scan_id: id of the scan being simulated.
    :param targets: sequence whose items carry the hostname at index 0.
    """
    os.setsid()
    # FIX: the enumerate() index was never used; iterate the targets directly.
    for target in targets:
        host = resolve_hostname(target[0])
        if host is None:
            logger.info("Couldn't resolve %s.", target[0])
            continue
        port = self.get_scan_ports(scan_id, target=target[0])
        logger.info("%s:%s: Dry run mode.", host, port)
        self.add_scan_log(scan_id, name='', host=host, value='Dry run result')
    self.finish_scan(scan_id)
Dry runs a scan.
def get_attribute_selected(self, attribute):
    """Return *attribute* of the first selected option, or None when nothing is selected."""
    selected_values = [opt.get_attribute(attribute)
                       for opt in self.get_options()
                       if opt.is_selected()]
    return next(iter(selected_values), None)
Search the web list for the selected item and return the given attribute of it. @params attribute - string attribute name
def get_dm_online(self):
    """Fetch the MagIC 3.0 data model from EarthRef.

    Returns the requests Response on success, or False when requests is
    unavailable, the server errors, or the connection times out.
    """
    if not requests:
        # requests may be None when its import failed elsewhere in the module.
        return False
    try:
        resp = requests.get("https://earthref.org/MagIC/data-models/3.0.json",
                            timeout=3)
    except (requests.exceptions.ConnectTimeout,
            requests.exceptions.ConnectionError,
            requests.exceptions.ReadTimeout):
        return False
    return resp if resp.ok else False
Use requests module to get data model from Earthref. If this fails or times out, return false. Returns --------- result : requests.models.Response, False if unsuccessful
def get_content_dict(vocabularies, content_vocab):
    """Return the vocabulary entry for *content_vocab*.

    Raises UNTLFormException when the vocabulary is absent (or stored as None).
    """
    content = vocabularies.get(content_vocab)
    if content is None:
        raise UNTLFormException(
            'Could not retrieve content vocabulary "%s" for the form.' % (content_vocab)
        )
    return content
Get the content dictionary based on the element's content vocabulary.
def get_edited(self, subreddit='mod', *args, **kwargs):
    """Return a get_content generator of edited items for *subreddit*.

    The default 'mod' covers all subreddits the user moderates. Extra
    arguments are forwarded to get_content; ``url`` cannot be overridden.
    """
    edited_url = self.config['edited'].format(subreddit=six.text_type(subreddit))
    return self.get_content(edited_url, *args, **kwargs)
Return a get_content generator of edited items. :param subreddit: Either a Subreddit object or the name of the subreddit to return the edited items for. Defaults to `mod` which includes items for all the subreddits you moderate. The additional parameters are passed directly into :meth:`.get_content`. Note: the `url` parameter cannot be altered.
def flush(self):
    """Flush the underlying stream while holding the handler lock.

    I/O errors (e.g. an already-closed stream) are deliberately swallowed.
    """
    self.acquire()
    try:
        self.stream.flush()
    except (EnvironmentError, ValueError):
        pass  # best-effort flush
    finally:
        self.release()
Flushes all log files.
def listdir_matches(match):
    """Return entries in the directory part of *match* whose name starts with
    its final path component; directories get a trailing slash."""
    import os
    slash_pos = match.rfind('/')
    if slash_pos == -1:
        dirname, match_prefix, result_prefix = '.', match, ''
    elif slash_pos == 0:
        dirname, match_prefix, result_prefix = '/', match[1:], '/'
    else:
        dirname = match[0:slash_pos]
        match_prefix = match[slash_pos + 1:]
        result_prefix = dirname + '/'

    def mark_if_dir(filename):
        # 0x4000 is the S_IFDIR mode bit; stat may fail for dangling entries.
        try:
            if (os.stat(filename)[0] & 0x4000) != 0:
                return filename + '/'
        except FileNotFoundError:
            pass
        return filename

    return [mark_if_dir(result_prefix + entry)
            for entry in os.listdir(dirname)
            if entry.startswith(match_prefix)]
Returns a list of filenames contained in the named directory. Only filenames which start with `match` will be returned. Directories will have a trailing slash.
def loadTextureD3D11_Async(self, textureId, pD3D11Device):
    """Create a D3D11 texture for *textureId* and load its data.

    Returns (error code, texture pointer value).
    """
    fn = self.function_table.loadTextureD3D11_Async
    texture_out = c_void_p()
    error = fn(textureId, pD3D11Device, byref(texture_out))
    return error, texture_out.value
Creates a D3D11 texture and loads data into it.
def OECDas(self, to='name_short'):
    """Return OECD member states in the classification(s) named by *to*.

    *to* may be a single column name or a list of them; the result is the
    corresponding columns of rows where ``OECD > 0``.
    """
    columns = [to] if isinstance(to, str) else to
    return self.data[self.data.OECD > 0][columns]
Return OECD member states in the specified classification Parameters ---------- to : str, optional Output classification (valid str for an index of country_data file), default: name_short Returns ------- Pandas DataFrame
def add_field(self, key, field):
    """Add *field* to the Payload under *key*.

    Raises PayloadFieldAlreadyDefinedError when the key exists, and
    PayloadFrozenError when the payload is frozen.
    """
    if key in self._fields:
        raise PayloadFieldAlreadyDefinedError(
            'Key {key} is already set on this payload. The existing field was {existing_field}.'
            ' Tried to set new field {field}.'
            .format(key=key, existing_field=self._fields[key], field=field))
    if self._frozen:
        raise PayloadFrozenError(
            'Payload is frozen, field with key {key} cannot be added to it.'
            .format(key=key))
    self._fields[key] = field
    # Invalidate the memoized fingerprint; it must include the new field.
    self._fingerprint_memo = None
Add a field to the Payload. :API: public :param string key: The key for the field. Fields can be accessed using attribute access as well as `get_field` using `key`. :param PayloadField field: A PayloadField instance. None is an allowable value for `field`, in which case it will be skipped during hashing.
async def _get_person_json(self, id_, url_params=None):
    """Retrieve raw person JSON from TMDb by *id_*.

    ``url_params`` optionally supplies extra URL parameters.
    """
    url = self.url_builder(
        'person/{person_id}',
        dict(person_id=id_),
        url_params=url_params or OrderedDict(),
    )
    return await self.get_data(url)
Retrieve raw person JSON by ID. Arguments: id_ (:py:class:`int`): The person's TMDb ID. url_params (:py:class:`dict`): Any additional URL parameters. Returns: :py:class:`dict`: The JSON data.
def container(self, name, length, type, *parameters):
    """Define a container Struct *name* of *length* whose fields come from *type*.

    Convenience wrapper: opens a Struct, runs the field-defining keyword,
    then closes the Struct.
    """
    self.new_struct('Container', name, 'length=%s' % length)
    BuiltIn().run_keyword(type, *parameters)
    self.end_struct()
Define a container with given length. This is a convenience method creating a `Struct` with `length` containing fields defined in `type`.
def request(self, requests):
    """Forward resource requests to the Mesos driver after encoding them.

    Offers arrive asynchronously via Scheduler.resourceOffers.
    """
    logging.info('Request resources from Mesos')
    encoded = map(encode, requests)
    return self.driver.requestResources(encoded)
Requests resources from Mesos. (see mesos.proto for a description of Request and how, for example, to request resources from specific slaves.) Any resources available are offered to the framework via Scheduler.resourceOffers callback, asynchronously.
def ship_move(ship, x, y, speed):
    """Move SHIP to the new location X,Y at the given SPEED."""
    click.echo('Moving ship %s to %s,%s with speed %s' % (ship, x, y, speed))
Moves SHIP to the new location X,Y.
def check_alert(self, text):
    """Assert an alert showing *text* is present.

    A missing/vanished alert (WebDriverException) is tolerated silently.
    """
    try:
        current = Alert(world.browser)
        if current.text != text:
            raise AssertionError(
                "Alert text expected to be {!r}, got {!r}.".format(
                    text, current.text))
    except WebDriverException:
        pass
Assert an alert is showing with the given text.
def collect_metrics():
    """Register the decorated function to run for the collect-metrics hook."""
    def _register(action):
        handler = Handler.get(action)
        # Restrict execution to the collect-metrics hook.
        handler.add_predicate(partial(_restricted_hook, 'collect-metrics'))
        return action
    return _register
Register the decorated function to run for the collect_metrics hook.
def verify_signature(self, signature_filename, data_filename, keystore=None):
    """Verify a detached signature with gpg.

    Returns True when the signature checks out, False for a bad signature;
    raises DistlibException when gpg is unavailable or the command fails.
    """
    if not self.gpg:
        raise DistlibException('verification unavailable because gpg '
                               'unavailable')
    cmd = self.get_verify_command(signature_filename, data_filename, keystore)
    rc, stdout, stderr = self.run_command(cmd)
    # gpg exits 0 (good) or 1 (bad signature); anything else is a failure.
    if rc not in (0, 1):
        raise DistlibException('verify command failed with error '
                               'code %s' % rc)
    return rc == 0
Verify a signature for a file. :param signature_filename: The pathname to the file containing the signature. :param data_filename: The pathname to the file containing the signed data. :param keystore: The path to a directory which contains the keys used in verification. If not specified, the instance's ``gpg_home`` attribute is used instead. :return: True if the signature was verified, else False.
def write(entries):
    """Write *entries* as the entire ~/.vacationrc file; report I/O failures."""
    try:
        with open(get_rc_path(), 'w') as rc_file:
            rc_file.writelines(entries)
    except IOError:
        print('Error writing your ~/.vacationrc file!')
Write an entire rc file.
def update(self, resource, timeout=-1):
    """Update a Logical Switch; waits up to *timeout* seconds for completion.

    Returns the updated resource dict.
    """
    self.__set_default_values(resource)
    uri = self._client.build_uri(resource['logicalSwitch']['uri'])
    return self._client.update(resource, uri=uri, timeout=timeout)
Updates a Logical Switch. Args: resource (dict): Object to update. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView, just stop waiting for its completion. Returns: dict: Updated resource.
def main() -> None:
    """Command-line validation checks for the murmur3 implementations."""
    # FIX: removed a pointless ``_ =`` alias chained onto the assignment.
    testdata = [
        "hello",
        1,
        ["bongos", "today"],
    ]
    for data in testdata:
        compare_python_to_reference_murmur3_32(data, seed=0)
        compare_python_to_reference_murmur3_64(data, seed=0)
    print("All OK")
Command-line validation checks.
def _expand_subsystems(self, scope_infos):
    """Yield each scope info followed by the infos of the subsystems it pulls in."""
    def _iter_subsystem_deps(subsystem_client_cls):
        # Walk subsystem dependencies transitively, skipping global-scoped deps.
        for dep in subsystem_client_cls.subsystem_dependencies_iter():
            if dep.scope != GLOBAL_SCOPE:
                yield self._scope_to_info[dep.options_scope]
                for transitive in _iter_subsystem_deps(dep.subsystem_cls):
                    yield transitive

    for scope_info in scope_infos:
        yield scope_info
        if scope_info.optionable_cls is not None:
            if issubclass(scope_info.optionable_cls, GlobalOptionsRegistrar):
                # The global scope implicitly includes every subsystem installed
                # directly at global scope, plus their dependencies.
                for scope, info in self._scope_to_info.items():
                    if info.category == ScopeInfo.SUBSYSTEM and enclosing_scope(scope) == GLOBAL_SCOPE:
                        yield info
                        for dep_info in _iter_subsystem_deps(info.optionable_cls):
                            yield dep_info
            elif issubclass(scope_info.optionable_cls, SubsystemClientMixin):
                for dep_info in _iter_subsystem_deps(scope_info.optionable_cls):
                    yield dep_info
Add all subsystems tied to a scope, right after that scope.
def message(self):
    """The message contents, decoded according to this message's type.

    Returns None implicitly for unknown types.
    """
    if self.type == 'cleartext':
        return self.bytes_to_text(self._message)
    if self.type == 'literal':
        return self._message.contents
    if self.type == 'encrypted':
        return self._message
The message contents
def _get_id(self, id_, pkg_name):
    """Return the primary Id for an alias within the Id Alias namespace.

    Ids not present in the alias table are returned unchanged.
    """
    collection = JSONClientValidated('id',
                                     collection=pkg_name + 'Ids',
                                     runtime=self._runtime)
    try:
        match = collection.find_one({'aliasIds': {'$in': [str(id_)]}})
    except errors.NotFound:
        return id_
    return Id(match['_id'])
Returns the primary id given an alias. If the id provided is not in the alias table, it will simply be returned as is. Only looks within the Id Alias namespace for the session package
def handle_api_exception(error):
    """Convert an API exception into a JSON error response (and track it)."""
    _mp_track(
        type="exception",
        status_code=error.status_code,
        message=error.message,
    )
    response = jsonify(dict(message=error.message))
    response.status_code = error.status_code
    return response
Converts an API exception into an error response.
def np2str(value):
    """Convert a 1-element numpy string array/scalar to ``str``.

    Args:
        value (ndarray): scalar or 1-element numpy array to convert.

    Raises:
        ValueError: if *value* is larger than one element or is not a numpy
            bytes/object array.
    """
    if hasattr(value, 'dtype') and \
            issubclass(value.dtype.type, (np.bytes_, np.object_)) and value.size == 1:
        # FIX: np.asscalar() was removed in modern numpy; ndarray.item() is the
        # supported equivalent. np.string_ (an alias of np.bytes_) is gone in
        # numpy 2.0, so test against np.bytes_ directly.
        value = value.item()
        if not isinstance(value, str):
            value = value.decode()
        return value
    raise ValueError("Array is not a string type or is larger than 1")
Convert an `numpy.string_` to str. Args: value (ndarray): scalar or 1-element numpy array to convert Raises: ValueError: if value is array larger than 1-element or it is not of type `numpy.string_` or it is not a numpy array
def DeregisterAnalyzer(cls, analyzer_class):
    """Deregister an analyzer class, identified by its lower-cased NAME.

    Raises:
        KeyError: if no analyzer is registered under that name.
    """
    registry_key = analyzer_class.NAME.lower()
    if registry_key not in cls._analyzer_classes:
        raise KeyError('analyzer class not set for name: {0:s}'.format(
            analyzer_class.NAME))
    del cls._analyzer_classes[registry_key]
Deregisters a analyzer class. The analyzer classes are identified based on their lower case name. Args: analyzer_class (type): class object of the analyzer. Raises: KeyError: if analyzer class is not set for the corresponding name.
def add_callback(self, fn, *args, **kwargs):
    """Queue ``fn(*args, **kwargs)`` to run after the batch executes.

    Callbacks only fire on batch success; multiple callbacks are supported.
    Raises ValueError when *fn* is not callable.
    """
    if not callable(fn):
        raise ValueError("Value for argument 'fn' is {0} and is not a callable object.".format(type(fn)))
    self._callbacks.append((fn, args, kwargs))
Add a function and arguments to be passed to it to be executed after the batch executes. A batch can support multiple callbacks. Note, that if the batch does not execute, the callbacks are not executed. A callback, thus, is an "on batch success" handler. :param fn: Callable object :type fn: callable :param \*args: Positional arguments to be passed to the callback at the time of execution :param \*\*kwargs: Named arguments to be passed to the callback at the time of execution
def get_snapshot_policy(self, name, view=None):
    """Retrieve one snapshot policy by *name* (API v6).

    *view* optionally selects the materialization ('full', 'summary',
    'export', 'export_redacted').
    """
    params = dict(view=view) if view else None
    return self._get("snapshots/policies/%s" % name, ApiSnapshotPolicy,
                     params=params, api_version=6)
Retrieve a single snapshot policy. @param name: The name of the snapshot policy to retrieve. @param view: View to materialize. Valid values are 'full', 'summary', 'export', 'export_redacted'. @return: The requested snapshot policy. @since: API v6
def get_vectors_loss(ops, docs, prediction, objective="L2"):
    """Compute the loss and gradient between predicted and actual doc vectors.

    :param objective: "L2" for squared error, "cosine" for cosine loss.
    :returns: (loss, d_target) pair.
    :raises ValueError: on an unknown objective. (Previously an unknown
        objective fell through and raised a confusing NameError on ``loss``.)
    """
    ids = ops.flatten([doc.to_array(ID).ravel() for doc in docs])
    target = docs[0].vocab.vectors.data[ids]
    if objective == "L2":
        d_target = prediction - target
        loss = (d_target ** 2).sum()
    elif objective == "cosine":
        loss, d_target = get_cossim_loss(prediction, target)
    else:
        raise ValueError("Unsupported objective: %r" % objective)
    return loss, d_target
Compute a mean-squared error loss between the documents' vectors and the prediction. Note that this is ripe for customization! We could compute the vectors in some other word, e.g. with an LSTM language model, or use some other type of objective.
def create_game(self, map_name, bot_difficulty=sc_pb.VeryEasy,
                bot_race=sc_common.Random, bot_first=False):
    """Create a game: one remote agent vs the specified bot.

    Args:
        map_name: the map to use.
        bot_difficulty: difficulty of the bot opponent.
        bot_race: the bot's race.
        bot_first: whether the bot should be player 1.
    """
    self._controller.ping()
    map_inst = maps.get(map_name)
    map_data = map_inst.data(self._run_config)
    if map_name not in self._saved_maps:
        # Upload each map to the controller only once per session.
        self._controller.save_map(map_inst.path, map_data)
        self._saved_maps.add(map_name)
    create = sc_pb.RequestCreateGame(
        local_map=sc_pb.LocalMap(map_path=map_inst.path, map_data=map_data),
        disable_fog=False)
    # Slot order determines who is player 1.
    if bot_first:
        create.player_setup.add(type=sc_pb.Computer, race=bot_race,
                                difficulty=bot_difficulty)
        create.player_setup.add(type=sc_pb.Participant)
    else:
        create.player_setup.add(type=sc_pb.Participant)
        create.player_setup.add(type=sc_pb.Computer, race=bot_race,
                                difficulty=bot_difficulty)
    self._controller.create_game(create)
Create a game, one remote agent vs the specified bot. Args: map_name: The map to use. bot_difficulty: The difficulty of the bot to play against. bot_race: The race for the bot. bot_first: Whether the bot should be player 1 (else is player 2).
def filter(self, *args, **kwargs):
    """AND the given Django-style filters into this node set; returns self."""
    if args or kwargs:
        self.q_filters = Q(self.q_filters & Q(*args, **kwargs))
    return self
Apply filters to the existing nodes in the set. :param kwargs: filter parameters Filters mimic Django's syntax with the double '__' to separate field and operators. e.g `.filter(salary__gt=20000)` results in `salary > 20000`. The following operators are available: * 'lt': less than * 'gt': greater than * 'lte': less than or equal to * 'gte': greater than or equal to * 'ne': not equal to * 'in': matches one of list (or tuple) * 'isnull': is null * 'regex': matches supplied regex (neo4j regex format) * 'exact': exactly match string (just '=') * 'iexact': case insensitive match string * 'contains': contains string * 'icontains': case insensitive contains * 'startswith': string starts with * 'istartswith': case insensitive string starts with * 'endswith': string ends with * 'iendswith': case insensitive string ends with :return: self
def get_connection_params(self):
    """Build the LDAP connection parameters from settings (computed at startup)."""
    settings = self.settings_dict
    raw_options = settings.get('CONNECTION_OPTIONS', {})
    return {
        'uri': settings['NAME'],
        'tls': settings.get('TLS', False),
        'bind_dn': settings['USER'],
        'bind_pw': settings['PASSWORD'],
        'retry_max': settings.get('RETRY_MAX', 1),
        'retry_delay': settings.get('RETRY_DELAY', 60.0),
        # Option keys may be ints (LDAP option constants) or strings;
        # string keys are normalized to lower case.
        'options': {
            key if isinstance(key, int) else key.lower(): val
            for key, val in raw_options.items()
        },
    }
Compute appropriate parameters for establishing a new connection. Computed at system startup.
def multi_pop(d, *args):
    """Pop each key in *args* (when present) off dict-like *d*.

    Returns a dict of the popped key/value pairs.
    """
    popped = {}
    for key in args:
        if key in d:
            popped[key] = d.pop(key)
    return popped
Pops multiple keys off a dict-like object.
def parse_input(self, text):
    """Split ctl user input into (command, args).

    Double quotes group multi-word arguments; the command is lower-cased.
    """
    tokens = util.split(text)
    command = tokens[0] if text and tokens else None
    command = command.lower() if command else None
    args = tokens[1:] if len(tokens) > 1 else []
    return (command, args)
Parse ctl user input. Double quotes are used to group together multi words arguments.
def get_json(self):
    """Fetch and decode the usernotes wiki page JSON; caches the expanded result.

    Raises:
        RuntimeError: when the stored schema version doesn't match this puni.
    """
    try:
        raw_md = self.subreddit.wiki[self.page_name].content_md
        notes = json.loads(raw_md)
    except NotFound:
        # No usernotes page yet: create one.
        self._init_notes()
    else:
        if notes['ver'] != self.schema:
            raise RuntimeError(
                'Usernotes schema is v{0}, puni requires v{1}'.
                format(notes['ver'], self.schema)
            )
        self.cached_json = self._expand_json(notes)
    return self.cached_json
Get the JSON stored on the usernotes wiki page. Returns a dict representation of the usernotes (with the notes BLOB decoded). Raises: RuntimeError if the usernotes version is incompatible with this version of puni.
def get_error_code_msg(cls, full_error_message):
    """Extract (code, message) from a clickhouse-server error string.

    Falls back to (0, original message) when no pattern matches.
    """
    for pattern in cls.ERROR_PATTERNS:
        found = pattern.match(full_error_message)
        if found:
            return int(found.group('code')), found.group('msg').strip()
    return 0, full_error_message
Extract the code and message of the exception that clickhouse-server generated. See the list of error codes here: https://github.com/yandex/ClickHouse/blob/master/dbms/src/Common/ErrorCodes.cpp
def read_json(file_path):
    """Read a JSON file and return its dict; print a hint on syntax errors, then re-raise."""
    try:
        with open(file_path, 'r') as fh:
            config = json_tricks.load(fh)
    except ValueError:
        banner = ' ' + '!' * 58
        print(banner)
        print(' Woops! Looks the JSON syntax is not valid in:')
        print(' {}'.format(file_path))
        print(' Note: commonly this is a result of having a trailing comma \n in the file')
        print(banner)
        raise
    return config
Read in a json file and return a dictionary representation
def set_encode_key_value(self, value, store_type=PUBLIC_KEY_STORE_TYPE_BASE64):
    """Store the RSA key *value*, exporting PEM or DER depending on *store_type*."""
    if store_type == PUBLIC_KEY_STORE_TYPE_PEM:
        encoded = value.exportKey('PEM').decode()
    else:
        encoded = value.exportKey('DER')
    PublicKeyBase.set_encode_key_value(self, encoded, store_type)
Set the value based on the type of encoding supported by RSA.
def init_body_buffer(self, method, headers):
    """Set up ``body_buffer`` and ``content_length`` from *method* and *headers*.

    Raises:
        HTTPErrorBadRequest: when CONTENT-LENGTH is required (POST/PUT) but
            missing, or present on a method that must not carry one.
    """
    content_length = headers.get("CONTENT-LENGTH", None)
    if method in (HTTPMethod.POST, HTTPMethod.PUT):
        if content_length is None:
            raise HTTPErrorBadRequest("HTTP Method requires a CONTENT-LENGTH header")
        self.content_length = int(content_length)
        self.body_buffer = bytearray(0)
    elif content_length is not None:
        # BUG FIX: the '%s' placeholder was never interpolated with the method.
        raise HTTPErrorBadRequest(
            "HTTP method %s may NOT have a CONTENT-LENGTH header" % method)
Sets up the body_buffer and content_length attributes based on method and headers.
def to_cloudformation(self, **kwargs):
    """Expand this Api event into CloudFormation resources.

    With an explicit RestApi only the Lambda Permission resources are
    returned; an implicit RestApi additionally gets its swagger body patched.

    :param dict kwargs: must include 'function' and 'explicit_api'.
    :returns: list of vanilla CloudFormation resources.
    """
    function = kwargs.get('function')
    if not function:
        raise TypeError("Missing required keyword argument: function")
    if self.Method is not None:
        # Swagger uses lower-case HTTP method keys.
        self.Method = self.Method.lower()
    resources = []
    resources.extend(self._get_permissions(kwargs))
    explicit_api = kwargs['explicit_api']
    if explicit_api.get("__MANAGE_SWAGGER"):
        self._add_swagger_integration(explicit_api, function)
    return resources
If the Api event source has a RestApi property, then simply return the Lambda Permission resource allowing API Gateway to call the function. If no RestApi is provided, then additionally inject the path, method, and the x-amazon-apigateway-integration into the Swagger body for a provided implicit API. :param dict kwargs: a dict containing the implicit RestApi to be modified, should no explicit RestApi \ be provided. :returns: a list of vanilla CloudFormation Resources, to which this Api event expands :rtype: list
def read(self):
    """Fetch the robots.txt URL and feed it to the parser.

    401/403 responses disallow everything; other 4xx+ errors allow everything.
    """
    try:
        resp = urllib.request.urlopen(self.url)
    except urllib.error.HTTPError as err:
        if err.code in (401, 403):
            self.disallow_all = True
        elif err.code >= 400:
            self.allow_all = True
    else:
        self.parse(resp.read().decode("utf-8").splitlines())
Reads the robots.txt URL and feeds it to the parser.
def predict(self, dataset, new_observation_data=None, new_user_data=None,
            new_item_data=None):
    """Return score predictions for the user/item ids in *dataset*.

    Optional SFrames supply extra observation/user/item side data, in the
    same formats used at training time; new side data takes precedence over
    overlapping training-time side data.
    """
    if new_observation_data is None:
        new_observation_data = _SFrame()
    if new_user_data is None:
        new_user_data = _SFrame()
    if new_item_data is None:
        new_item_data = _SFrame()
    dataset = self.__prepare_dataset_parameter(dataset)

    def check_type(arg, arg_name, required_type, allowed_types):
        # All side-data inputs must be SFrames.
        if not isinstance(arg, required_type):
            raise TypeError("Parameter " + arg_name + " must be of type(s) "
                            + (", ".join(allowed_types))
                            + "; Type '" + str(type(arg)) + "' not recognized.")

    check_type(new_observation_data, "new_observation_data", _SFrame, ["SFrame"])
    check_type(new_user_data, "new_user_data", _SFrame, ["SFrame"])
    check_type(new_item_data, "new_item_data", _SFrame, ["SFrame"])
    response = self.__proxy__.predict(dataset, new_user_data, new_item_data)
    return response['prediction']
Return a score prediction for the user ids and item ids in the provided data set. Parameters ---------- dataset : SFrame Dataset in the same form used for training. new_observation_data : SFrame, optional ``new_observation_data`` gives additional observation data to the model, which may be used by the models to improve score accuracy. Must be in the same format as the observation data passed to ``create``. How this data is used varies by model. new_user_data : SFrame, optional ``new_user_data`` may give additional user data to the model. If present, scoring is done with reference to this new information. If there is any overlap with the side information present at training time, then this new side data is preferred. Must be in the same format as the user data passed to ``create``. new_item_data : SFrame, optional ``new_item_data`` may give additional item data to the model. If present, scoring is done with reference to this new information. If there is any overlap with the side information present at training time, then this new side data is preferred. Must be in the same format as the item data passed to ``create``. Returns ------- out : SArray An SArray with predicted scores for each given observation predicted by the model. See Also -------- recommend, evaluate
def get_log_entry_log_session(self, proxy):
    """Return a LogEntryLogSession for retrieving log-entry-to-log mappings.

    Raises errors.Unimplemented unless ``supports_log_entry_log()`` is true.
    """
    if not self.supports_log_entry_log():
        raise errors.Unimplemented()
    return sessions.LogEntryLogSession(proxy=proxy, runtime=self._runtime)
Gets the session for retrieving log entry to log mappings. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.logging.LogEntryLogSession) - a ``LogEntryLogSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_log_entry_log()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_log_entry_log()`` is ``true``.*
def get_property(self, property_key: str) -> str:
    """Return the named property of this scheduling object from the DB hash."""
    self._check_object_exists()
    return DB.get_hash_value(self.key, property_key)
Get a scheduling object property.
def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile): self.name = name self.fileExtension = extension with open(path, 'r') as f: self.text = f.read()
Generic File Read from File Method
def to_frequencies(self, fill=np.nan):
    """Compute per-variant allele frequencies.

    :param fill: value used where the total allele count is 0.
    :return: float ndarray of shape (n_variants, n_alleles).
    """
    totals = np.sum(self, axis=1)[:, None]
    with ignore_invalid():
        # Division by a zero total is masked off via the fill value.
        return np.where(totals > 0, self / totals, fill)
Compute allele frequencies. Parameters ---------- fill : float, optional Value to use when number of allele calls is 0. Returns ------- af : ndarray, float, shape (n_variants, n_alleles) Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> ac = g.count_alleles() >>> ac.to_frequencies() array([[0.75, 0.25, 0. ], [0.25, 0.5 , 0.25], [0. , 0. , 1. ]])
def LessThan(self, value):
    """Set this WHERE clause to 'less than' *value*; return the linked query builder."""
    self._awql = self._CreateSingleValueCondition(value, '<')
    return self._query_builder
Sets the type of the WHERE clause as "less than". Args: value: The value to be used in the WHERE condition. Returns: The query builder that this WHERE builder links to.
def set_output(self, outfile):
    """Redirect stdout through a _LineWriter over *outfile* (experimental)."""
    if self._orig_stdout:
        # Restore the real stdout before re-wrapping.
        sys.stdout = self._orig_stdout
    self._stream = outfile
    sys.stdout = _LineWriter(self, self._stream, self.default)
Set's the output file, currently only useful with context-managers. Note: This function is experimental and may not last.
def register_scope(self, scope):
    """Register a scope object under its id.

    :param scope: a Scope instance.
    :raises TypeError: if *scope* is not a Scope.
    :raises ValueError: if a scope with the same id is already registered.
    """
    if not isinstance(scope, Scope):
        raise TypeError("Invalid scope type.")
    # FIX: was a bare ``assert`` (silently stripped under ``python -O``);
    # validate the duplicate-id case explicitly.
    if scope.id in self.scopes:
        raise ValueError("Scope with id {0!r} is already registered.".format(scope.id))
    self.scopes[scope.id] = scope
Register a scope. :param scope: A :class:`invenio_oauth2server.models.Scope` instance.
def spkuds(descr):
    """Unpack the contents of an SPK segment descriptor.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkuds_c.html

    :param descr: 5-element SPK segment descriptor (floats).
    :return: (body, center, frame, type, first, last, begin, end).
    """
    # FIX: ``len(descr) is 5`` relied on CPython small-int identity; use ``==``.
    assert len(descr) == 5
    descr = stypes.toDoubleVector(descr)
    body = ctypes.c_int()
    center = ctypes.c_int()
    framenum = ctypes.c_int()
    typenum = ctypes.c_int()
    first = ctypes.c_double()
    last = ctypes.c_double()
    begin = ctypes.c_int()
    end = ctypes.c_int()
    libspice.spkuds_c(descr, ctypes.byref(body), ctypes.byref(center),
                      ctypes.byref(framenum), ctypes.byref(typenum),
                      ctypes.byref(first), ctypes.byref(last),
                      ctypes.byref(begin), ctypes.byref(end))
    return (body.value, center.value, framenum.value, typenum.value,
            first.value, last.value, begin.value, end.value)
Unpack the contents of an SPK segment descriptor. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkuds_c.html :param descr: An SPK segment descriptor. :type descr: 5-Element Array of floats :return: The NAIF ID code for the body of the segment, The center of motion for body, The ID code for the frame of this segment, The type of SPK segment, The first epoch for which the segment is valid, The last epoch for which the segment is valid, Beginning DAF address of the segment, Ending DAF address of the segment. :rtype: tuple
def add_nio(self, nio, port_number):
    """Add a NIO as a new port on this Ethernet switch.

    :param nio: NIO instance to bind
    :param port_number: port number to allocate for the NIO
    :raises DynamipsError: if the port is already in use
    """
    if port_number in self._nios:
        raise DynamipsError("Port {} isn't free".format(port_number))
    yield from self._hypervisor.send('ethsw add_nio "{name}" {nio}'.format(name=self._name, nio=nio))
    log.info('Ethernet switch "{name}" [{id}]: NIO {nio} bound to port {port}'.format(name=self._name, id=self._id, nio=nio, port=port_number))
    self._nios[port_number] = nio
    # Re-apply any saved settings for this port now that a NIO is bound.
    for port_settings in self._ports:
        if port_settings["port_number"] == port_number:
            yield from self.set_port_settings(port_number, port_settings)
            break
Adds a NIO as new port on Ethernet switch. :param nio: NIO instance to add :param port_number: port to allocate for the NIO
def monkey_patch():
    """Monkey patch the ``time`` module to use our fake-time versions.

    Resets the fake clock, then rebinds the wall-clock functions on
    ``time_mod`` to the replacement implementations in scope here.
    """
    reset()
    time_mod.time = time
    time_mod.sleep = sleep
    time_mod.gmtime = gmtime
    time_mod.localtime = localtime
    time_mod.ctime = ctime
    time_mod.asctime = asctime
    time_mod.strftime = strftime
Monkey patch the `time` module to use our versions.
def get_function_for_aws_event(self, record):
    """Resolve which function should handle a triggered AWS event record.

    Supports S3 (configurationId suffix, or bucket ARN via the mapping),
    SNS (an explicit ``command`` in the JSON message, or the topic ARN),
    and DynamoDB/Kinesis/SQS streams (event source ARN). Returns the
    mapped function name, or None when nothing matches.
    """
    if 's3' in record and ':' in record['s3']['configurationId']:
        # S3 notification whose configuration id encodes the target function.
        return record['s3']['configurationId'].split(':')[-1]

    source_arn = None
    if 'Sns' in record:
        sns = record['Sns']
        try:
            payload = json.loads(sns['Message'])
            if payload.get('command'):
                return payload['command']
        except ValueError:
            # Message was not JSON; fall back to the topic ARN mapping.
            pass
        source_arn = sns.get('TopicArn')
    elif 'dynamodb' in record or 'kinesis' in record:
        source_arn = record.get('eventSourceARN')
    elif record.get('eventSource') == 'aws:sqs':
        source_arn = record.get('eventSourceARN')
    elif 's3' in record:
        source_arn = record['s3']['bucket']['arn']

    if source_arn:
        return self.settings.AWS_EVENT_MAPPING.get(source_arn)
    return None
Get the associated function to execute for a triggered AWS event Support S3, SNS, DynamoDB, kinesis and SQS events
def run_script(pycode):
    """Run the Python in `pycode`, and return a dict of the resulting globals.

    The source is dedented before execution so that indented
    triple-quoted snippets can be run as-is.

    Args:
        pycode: Python source as a string; one leading newline (as left
            by a triple-quoted literal) is stripped.

    Returns:
        dict: the global namespace produced by executing the code.
    """
    # startswith also handles the empty string — the original
    # `pycode[0]` raised IndexError on "".
    if pycode.startswith("\n"):
        pycode = pycode[1:]
    # Bug fix: the result of rstrip() was previously discarded
    # (strings are immutable, so the bare call was a no-op).
    pycode = pycode.rstrip()
    pycode = textwrap.dedent(pycode)
    globs = {}
    # exec(code, globs, globs) is valid syntax on both Python 2 and 3,
    # so the six.exec_ shim is unnecessary here.
    exec(pycode, globs, globs)
    return globs
Run the Python in `pycode`, and return a dict of the resulting globals.
def embedding(self, sentences, oov_way='avg'):
    """Compute token embeddings for *sentences* with the wrapped BERT model.

    Parameters
    ----------
    sentences : List[str]
        Sentences to encode.
    oov_way : str, default 'avg'
        How sub-token vectors are combined for out-of-vocabulary words
        by ``self.oov``: 'avg', 'sum' or 'last'.

    Returns
    -------
    List[(List[str], List[ndarray])]
        Tokens and their embeddings, as produced by ``self.oov``.
    """
    data_iter = self.data_loader(sentences=sentences)
    batches = []
    for token_ids, valid_length, token_types in data_iter:
        # Move each batch onto the model's compute context (CPU/GPU).
        token_ids = token_ids.as_in_context(self.ctx)
        valid_length = valid_length.as_in_context(self.ctx)
        token_types = token_types.as_in_context(self.ctx)
        sequence_outputs = self.bert(token_ids, token_types, valid_length.astype(self.dtype))
        # Collect per-sentence (ids, hidden states) pairs on the host.
        for token_id, sequence_output in zip(token_ids.asnumpy(), sequence_outputs.asnumpy()):
            batches.append((token_id, sequence_output))
    return self.oov(batches, oov_way)
Get tokens, tokens embedding Parameters ---------- sentences : List[str] sentences for encoding. oov_way : str, default avg. use **avg**, **sum** or **last** to get token embedding for those out of vocabulary words Returns ------- List[(List[str], List[ndarray])] List of tokens, and tokens embedding
def extract(self, name):
    """Get the contents of an archive entry.

    :param name: an entry name, or an integer index into the TOC.
    :returns: tuple ``(ispkg, contents)``, or None if the name is not
        found. For non-Python resources ispkg is meaningless (and 0).
        Used by the import mechanism.
    """
    # Accept either an entry name or a direct TOC index.
    if type(name) == type(''):
        ndx = self.toc.find(name)
        if ndx == -1:
            return None
    else:
        ndx = name
    (dpos, dlen, ulen, flag, typcd, nm) = self.toc.get(ndx)
    self.lib.seek(self.pkgstart+dpos)
    rslt = self.lib.read(dlen)
    # flag == 2: entry is AES-encrypted; the first 32 bytes hold the key.
    if flag == 2:
        global AES
        import AES
        key = rslt[:32]
        rslt = AES.new(key, AES.MODE_CFB, "\0"*AES.block_size).decrypt(rslt[32:])
    # flags 1 and 2 are zlib-compressed (encrypted entries too).
    if flag == 1 or flag == 2:
        rslt = zlib.decompress(rslt)
    # typcd 'M' marks a package (__init__) module.
    if typcd == 'M':
        return (1, rslt)
    return (0, rslt)
Get the contents of an entry. NAME is an entry name. Return the tuple (ispkg, contents). For non-Python resources, ispkg is meaningless (and 0). Used by the import mechanism.
def isDone(self):
    """Return True if the underlying DAQ task has completed."""
    task_done = pydaq.bool32()
    # IsTaskDone writes the result through the pointer argument.
    self.IsTaskDone(ctypes.byref(task_done))
    return task_done.value
Returns true if task is done.
def rh45(msg):
    """Radio height.

    Args:
        msg (String): 28 bytes hexadecimal message string

    Returns:
        int: radio height in ft, or None if the status bit is not set
    """
    bits = hex2bin(data(msg))
    # Bit 38 is the status bit; '0' means the field is not available.
    if bits[38] == '0':
        return None
    # 12-bit field, 16 ft resolution.
    return bin2int(bits[39:51]) * 16
Radio height. Args: msg (String): 28 bytes hexadecimal message string Returns: int: radio height in ft
def _original_path(self, path):
    """Return a normalized-case version of *path* for case-insensitive
    file systems; on case-sensitive file systems (or empty path) return
    *path* unchanged.

    Each existing path component is replaced by the casing stored in the
    fake file system; components with no existing match keep their given
    casing.
    """
    def components_to_path():
        # Re-join what has been matched so far, appending any remaining
        # (unmatched) components with their original casing.
        if len(path_components) > len(normalized_components):
            normalized_components.extend(
                path_components[len(normalized_components):])
        sep = self._path_separator(path)
        normalized_path = sep.join(normalized_components)
        # Preserve a leading separator (absolute path) if the join lost it.
        if path.startswith(sep) and not normalized_path.startswith(sep):
            normalized_path = sep + normalized_path
        return normalized_path

    if self.is_case_sensitive or not path:
        return path
    path_components = self._path_components(path)
    normalized_components = []
    current_dir = self.root
    for component in path_components:
        # Can't descend further than a directory node.
        if not isinstance(current_dir, FakeDirectory):
            return components_to_path()
        dir_name, current_dir = self._directory_content(
            current_dir, component)
        # Stop when the component does not exist (or is an empty
        # placeholder entry with no contents and zero size).
        if current_dir is None or (
                isinstance(current_dir, FakeDirectory) and
                current_dir._byte_contents is None and
                current_dir.st_size == 0):
            return components_to_path()
        normalized_components.append(dir_name)
    return components_to_path()
Return a normalized case version of the given path for case-insensitive file systems. For case-sensitive file systems, return path unchanged. Args: path: the file path to be transformed Returns: A version of path matching the case of existing path elements.
def build(self, stmts=None, set_check_var=True, invert=False):
    """Construct code that performs the match, then executes *stmts*.

    :param stmts: statements to run when the check variable test passes
    :param set_check_var: prepend initialization of the check variable
    :param invert: negate the check variable in the guard condition
    """
    pieces = []
    if set_check_var:
        pieces.append(self.check_var + " = False\n")
    pieces.append(self.out())
    if stmts is not None:
        guard = ("not " if invert else "") + self.check_var
        pieces.append("if " + guard + ":" + "\n" + openindent + "".join(stmts) + closeindent)
    return "".join(pieces)
Construct code for performing the match then executing stmts.
def copy(self):
    """Return an independent copy of this channel."""
    # Re-create from the string form, then copy the remaining state over.
    duplicate = type(self)(str(self))
    duplicate._init_from_channel(self)
    return duplicate
Returns a copy of this channel
def canonical_stylename(font):
    """Return the canonical stylename of a given font, or None if the
    filename suffix is not a recognized style for its kind (static vs
    variable font).
    """
    from fontbakery.constants import (STATIC_STYLE_NAMES,
                                      VARFONT_SUFFIXES)
    from fontbakery.profiles.shared_conditions import is_variable_font
    from fontTools.ttLib import TTFont

    # "Bold Italic" -> "BoldItalic", etc.
    valid_style_suffixes = [name.replace(' ', '') for name in STATIC_STYLE_NAMES]

    filename = os.path.basename(font)
    basename = os.path.splitext(filename)[0]
    s = suffix(font)
    varfont = os.path.exists(font) and is_variable_font(TTFont(font))
    # NOTE(review): `and` binds tighter than `or`, so this reads as
    # (('-' in basename and varfont-case) or static-case); confirm whether
    # the '-' check was meant to guard both branches.
    if ('-' in basename and (s in VARFONT_SUFFIXES and varfont)
       or (s in valid_style_suffixes and not varfont)):
        return s
Returns the canonical stylename of a given font.
def process_parameters(parameters):
    """Convert Pythonic underscore parameter names to server camelCase.

    Nested dict values are converted recursively. A new dict is
    returned; the input is not modified.

    :param parameters: The parameters object.
    :type parameters: dict
    :return: The processed parameters.
    :rtype: dict
    """
    if not parameters:
        return {}
    converted = copy.copy(parameters)
    for original_name, value in parameters.items():
        # e.g. max_results -> maxResults
        camel_name = re.sub(r'_(\w)', lambda match: match.group(1).upper(), original_name)
        if isinstance(value, dict):
            value = process_parameters(value)
        converted[camel_name] = value
        if camel_name != original_name:
            del converted[original_name]
    return converted
Allows the use of Pythonic-style parameters with underscores instead of camel-case. :param parameters: The parameters object. :type parameters: dict :return: The processed parameters. :rtype: dict
def fill_n_todo(self):
    """Calculate and record the number of edge pixels left to do on each
    tile.

    For every chunk, sums the outstanding counts of its four edge
    structures (left, right, top, bottom) into the flat ``n_todo`` array.
    """
    left = self.left
    right = self.right
    top = self.top
    bottom = self.bottom
    # NOTE(review): xrange implies this module targets Python 2; use
    # range if it must run on Python 3 — confirm.
    for i in xrange(self.n_chunks):
        self.n_todo.ravel()[i] = np.sum([left.ravel()[i].n_todo, right.ravel()[i].n_todo, top.ravel()[i].n_todo, bottom.ravel()[i].n_todo])
Calculate and record the number of edge pixels left to do on each tile
def primary_key_field(self):
    """Return the primary key field.

    Is `id` in most cases; `history_id` for Historical models.
    """
    pk_fields = [field for field in self.instance._meta.fields
                 if field.primary_key]
    # Exactly one primary key is expected; IndexError if none exists.
    return pk_fields[0]
Return the primary key field. Is `id` in most cases. Is `history_id` for Historical models.
def available_domains(self):
    """Return (and lazily cache) the list of domains available for use
    in an email address."""
    if not hasattr(self, '_available_domains'):
        # First access: fetch the domain list from the API and cache it.
        url = 'http://{0}/request/domains/format/json/'.format(
            self.api_domain)
        response = requests.get(url)
        self._available_domains = response.json()
    return self._available_domains
Return list of available domains for use in email address.
async def update(self, fields=''):
    """Reload this object's info from the Emby server.

    |coro|

    Parameters
    ----------
    fields : str
        additional comma-separated fields to request alongside
        Path and Overview.

    Returns
    -------
    this object (self), after its info dict has been refreshed.
    """
    path = 'Users/{{UserId}}/Items/{}'.format(self.id)
    info = await self.connector.getJson(path, remote=False, Fields='Path,Overview,'+fields )
    self.object_dict.update(info)
    # Drop cached extras; they may be stale after the refresh.
    self.extras = {}
    return self
reload object info from emby |coro| Parameters ---------- fields : str additional fields to request when updating See Also -------- refresh : same thing send : post :
def guess_project_dir():
    """Find the top-level Django project directory.

    Looks at the package containing the active settings module and
    checks it (pre-1.4 layout) and then its parent (post-1.4 layout)
    for a manage.py file.

    Raises:
        RuntimeError: if neither candidate contains manage.py.
    """
    project_name = settings.SETTINGS_MODULE.split(".", 1)[0]
    project_module = import_module(project_name)
    candidate = os.path.dirname(project_module.__file__)
    # Try the settings package itself, then its parent directory.
    for _ in range(2):
        if os.path.isfile(os.path.join(candidate, "manage.py")):
            return candidate
        candidate = os.path.abspath(os.path.join(candidate, os.path.pardir))
    msg = "Unable to determine the Django project directory;"\
          " use --project-dir to specify it"
    raise RuntimeError(msg)
Find the top-level Django project directory. This function guesses the top-level Django project directory based on the current environment. It looks for the module containing the currently-active settings module, in both pre-1.4 and post-1.4 layouts.
def collect(self):
    """Collect the analytics report into ``self.info``.

    Records the DVC version, whether this is a binary build, an
    anonymized user id, and system info; when run inside a DVC repo the
    SCM class name is recorded as well.
    """
    from dvc.scm import SCM
    from dvc.utils import is_binary
    from dvc.repo import Repo
    from dvc.exceptions import NotDvcRepoError
    self.info[self.PARAM_DVC_VERSION] = __version__
    self.info[self.PARAM_IS_BINARY] = is_binary()
    self.info[self.PARAM_USER_ID] = self._get_user_id()
    self.info[self.PARAM_SYSTEM_INFO] = self._collect_system_info()
    try:
        scm = SCM(root_dir=Repo.find_root())
        self.info[self.PARAM_SCM_CLASS] = type(scm).__name__
    except NotDvcRepoError:
        # Not inside a repo: simply omit the SCM field.
        pass
Collect analytics report.
def remote_delete(self, remote_path, r_st):
    """Recursively remove the remote node at *remote_path*.

    :param remote_path: path of the remote file or directory to delete.
    :param r_st: the stat result for *remote_path* (its mode decides
        whether to recurse).
    """
    if S_ISDIR(r_st.st_mode):
        # Depth-first: a directory must be emptied before rmdir succeeds.
        for item in self.sftp.listdir_attr(remote_path):
            full_path = path_join(remote_path, item.filename)
            self.remote_delete(full_path, item)
        self.sftp.rmdir(remote_path)
    else:
        try:
            self.sftp.remove(remote_path)
        except FileNotFoundError as e:
            # Best effort: a file that is already gone is logged, not fatal.
            self.logger.error(
                "error while removing {}. trace: {}".format(remote_path, e)
            )
Remove the remote directory node.
def _generate(self):
    """Generate a particle using the creator function.

    Position components are uniformly seeded in [-1, 1]; speed
    components in [-max_speed, max_speed]. Speed limits (smin/smax)
    come from the instance's max_speed; ident and neighbours start
    unset.

    Returns
    -------
    part : particle object
        A particle used during optimisation.
    """
    part = creator.Particle(
        [random.uniform(-1, 1) for _ in range(len(self.value_means))])
    part.speed = [
        random.uniform(-self.max_speed, self.max_speed)
        for _ in range(len(self.value_means))]
    part.smin = -self.max_speed
    part.smax = self.max_speed
    part.ident = None
    part.neighbours = None
    return part
Generates a particle using the creator function. Notes ----- Position and speed are uniformly randomly seeded within allowed bounds. The particle also has speed limit settings taken from global values. Returns ------- part : particle object A particle used during optimisation.
def _process_assignments(self, anexec, contents, mode="insert"): for assign in self.RE_ASSIGN.finditer(contents): assignee = assign.group("assignee").strip() target = re.split(r"[(%\s]", assignee)[0].lower() if target in self._intrinsic: continue if target in anexec.members or \ target in anexec.parameters or \ (isinstance(anexec, Function) and target.lower() == anexec.name.lower()): if mode == "insert": anexec.add_assignment(re.split(r"[(\s]", assignee)[0]) elif mode == "delete": try: index = element.assignments.index(assign) del element.assignments[index] except ValueError: pass
Extracts all variable assignments from the body of the executable. :arg mode: for real-time update; either 'insert', 'delete' or 'replace'.
def delete(self, doc_id: str) -> bool:
    """Delete the document with the given id.

    Returns True on success, False if the request failed (the error is
    logged).
    """
    try:
        self.instance.delete(self.index, self.doc_type, doc_id)
    except RequestError as err:
        logging.error(err)
        return False
    return True
Delete a document with id.
def parse_game_event(self, ge):
    """Process one game event, recording hero kills and creep kills
    from the combat log.

    Only "dota_combatlog" events are handled here.
    """
    if ge.name == "dota_combatlog":
        # NOTE(review): type 4 looks like a death entry — confirm
        # against the combat log type enum.
        if ge.keys["type"] == 4:
            try:
                # Resolve the interned name indices to readable names.
                source = self.dp.combat_log_names.get(ge.keys["sourcename"], "unknown")
                target = self.dp.combat_log_names.get(ge.keys["targetname"], "unknown")
                target_illusion = ge.keys["targetillusion"]
                timestamp = ge.keys["timestamp"]
                # A real (non-illusion) hero dying counts as a kill.
                if (target.startswith("npc_dota_hero") and not target_illusion):
                    self.kills.append({ "target": target, "source": source, "timestamp": timestamp, "tick": self.tick, })
                elif source.startswith("npc_dota_hero"):
                    # A hero killing a non-hero unit: credit a creep kill.
                    self.heroes[source].creep_kill(target, timestamp)
            except KeyError:
                # Malformed/partial combat log entry: skip it.
                pass
Game events contain the combat log as well as 'chase_hero' events which could be interesting
async def stop(self, _task=None):
    """Stop the device adapter wrapper.

    See :meth:`AbstractDeviceAdapter.stop`. Subtasks are stopped before
    the underlying adapter; returns immediately if already stopped.
    """
    self._logger.info("Stopping adapter wrapper")
    if self._task.stopped:
        return
    for task in self._task.subtasks:
        await task.stop()
    self._logger.debug("Stopping underlying adapter %s", self._adapter.__class__.__name__)
    # NOTE(review): _execute presumably runs the blocking stop_sync off
    # the event loop — confirm.
    await self._execute(self._adapter.stop_sync)
Stop the device adapter. See :meth:`AbstractDeviceAdapter.stop`.
def join(self, distbase, location):
    """Join 'distbase' and 'location' so that the result is a valid scp
    destination.

    No separator is inserted after a trailing ':' or '/' (or when
    distbase is empty).
    """
    needs_sep = bool(distbase) and distbase[-1] not in (':', '/')
    separator = '/' if needs_sep else ''
    return distbase + separator + location
Join 'distbase' and 'location' in such a way that the result is a valid scp destination.
def Transfer(self, wallet, from_addr, to_addr, amount, tx_attributes=None):
    """Transfer a specified amount of the NEP5Token to another address.

    Args:
        wallet (neo.Wallets.Wallet): a wallet instance.
        from_addr (str): public address of the account to transfer from.
        to_addr (str): public address of the account to transfer to.
        amount (int): quantity to send.
        tx_attributes (list): a list of TransactionAttribute objects.

    Returns:
        tuple: (InvocationTransaction, fee, VM evaluation stack results)
    """
    if not tx_attributes:
        tx_attributes = []
    # Build a script invoking the token contract's `transfer` operation.
    sb = ScriptBuilder()
    sb.EmitAppCallWithOperationAndArgs(self.ScriptHash, 'transfer', [PromptUtils.parse_param(from_addr, wallet), PromptUtils.parse_param(to_addr, wallet), PromptUtils.parse_param(amount)])
    # Test-invoke only: the transaction is built and priced, not relayed.
    tx, fee, results, num_ops, engine_success = test_invoke(sb.ToArray(), wallet, [], from_addr=from_addr, invoke_attrs=tx_attributes)
    return tx, fee, results
Transfer a specified amount of the NEP5Token to another address. Args: wallet (neo.Wallets.Wallet): a wallet instance. from_addr (str): public address of the account to transfer the given amount from. to_addr (str): public address of the account to transfer the given amount to. amount (int): quantity to send. tx_attributes (list): a list of TransactionAtribute objects. Returns: tuple: InvocationTransaction: the transaction. int: the transaction fee. list: the neo VM evaluationstack results.
def has_ext(path_name, *, multiple=None, if_all_ext=False):
    """Determine whether *path_name*'s basename has a file extension.

    multiple=None  -> True if there is at least one extension.
    multiple=True  -> True only if there is more than one.
    multiple=False -> True only if there is exactly one.
    The leading dot of a hidden file is not counted unless
    *if_all_ext* is set.
    """
    base = os.path.basename(path_name)
    ext_count = base.count(EXT)
    # Discount the hidden-file leading dot unless explicitly requested.
    if not if_all_ext and base[0] == EXT and ext_count != 0:
        ext_count -= 1
    if multiple is None:
        return ext_count >= 1
    return ext_count > 1 if multiple else ext_count == 1
Determine if the given path name has an extension
def sample_binned(self, wavelengths=None, flux_unit=None, **kwargs):
    """Sample the binned observation without interpolation.

    Parameters
    ----------
    wavelengths : array-like, Quantity, or None
        Wavelength values for sampling; must lie exactly on ``binset``.
        If None, ``binset`` itself is used (via validation).
    flux_unit : str, Unit, or None
        Convert the flux to this unit; internal unit when None.
    kwargs : dict
        Passed through to :func:`synphot.units.convert_flux`.

    Raises
    ------
    synphot.exceptions.InterpolationNotAllowed
        If any requested wavelength is not in ``binset``.
    """
    x = self._validate_binned_wavelengths(wavelengths)
    # Locate each requested wavelength in binset; exact matches only.
    i = np.searchsorted(self.binset, x)
    if not np.allclose(self.binset[i].value, x.value):
        raise exceptions.InterpolationNotAllowed(
            'Some or all wavelength values are not in binset.')
    y = self.binflux[i]
    if flux_unit is None:
        flux = y
    else:
        flux = units.convert_flux(x, y, flux_unit, **kwargs)
    return flux
Sample binned observation without interpolation. To sample unbinned data, use ``__call__``. Parameters ---------- wavelengths : array-like, `~astropy.units.quantity.Quantity`, or `None` Wavelength values for sampling. If not a Quantity, assumed to be in Angstrom. If `None`, `binset` is used. flux_unit : str or `~astropy.units.core.Unit` or `None` Flux is converted to this unit. If not given, internal unit is used. kwargs : dict Keywords acceptable by :func:`~synphot.units.convert_flux`. Returns ------- flux : `~astropy.units.quantity.Quantity` Binned flux in given unit. Raises ------ synphot.exceptions.InterpolationNotAllowed Interpolation of binned data is not allowed.
def docs_client(self):
    """A DocsClient singleton, logged in with the stored Google
    credentials; created lazily on first access."""
    try:
        return self._docs_client
    except AttributeError:
        client = DocsClient()
        client.ClientLogin(self.google_user, self.google_password, SOURCE_NAME)
        self._docs_client = client
        return self._docs_client
A DocsClient singleton, used to look up spreadsheets by name.
def unpack_rpc_response(status, response=None, rpc_id=0, address=0):
    """Unpack an RPC status back into the payload, or raise the matching
    exception.

    :param status: status byte returned by the RPC call.
    :param response: raw response payload (b'' is returned when None).
    :param rpc_id: RPC id, used in error messages only.
    :param address: tile address, used in error messages and special-casing.
    :raises BusyRPCResponse: status 0.
    :raises RPCNotFoundError: status 2.
    :raises RPCErrorCode: status 3, or any nonzero low-6-bit error code.
    :raises TileNotFoundError: status 0xFF.
    """
    # The low 6 bits of the status carry the error code.
    status_code = status & ((1 << 6) - 1)
    if address == 8:
        # NOTE(review): clearing bit 7 of a value already masked to 6
        # bits is a no-op; this was probably meant to operate on
        # `status` before masking — confirm against the status format.
        status_code &= ~(1 << 7)
    if status == 0:
        raise BusyRPCResponse()
    elif status == 2:
        raise RPCNotFoundError("rpc %d:%04X not found" % (address, rpc_id))
    elif status == 3:
        raise RPCErrorCode(status_code)
    elif status == 0xFF:
        raise TileNotFoundError("tile %d not found" % address)
    elif status_code != 0:
        raise RPCErrorCode(status_code)
    if response is None:
        response = b''
    return response
Unpack an RPC status back in to payload or exception.
def render( self, tag, single, between, kwargs ):
    """Render one tag with its attributes and append it to the parent's
    content (or return it when there is no parent).

    :param tag: tag name.
    :param single: if true and *between* is None, emit a self-closing tag.
    :param between: inner text/markup; when not None the tag is closed
        inline after it.
    :param kwargs: attribute name/value pairs. Leading/trailing
        underscores are stripped (e.g. ``class_``), ``http_equiv`` and
        ``accept_charset`` are translated to their hyphenated HTML
        names, and a None value renders a bare (valueless) attribute.
    """
    out = "<%s" % tag
    for key, value in list( kwargs.items( ) ):
        if value is not None:
            # Allow Python-keyword-safe names like class_ / for_.
            key = key.strip('_')
            if key == 'http_equiv':
                key = 'http-equiv'
            elif key == 'accept_charset':
                key = 'accept-charset'
            out = "%s %s=\"%s\"" % ( out, key, escape( value ) )
        else:
            # Boolean/bare attribute (e.g. <option selected>).
            out = "%s %s" % ( out, key )
    if between is not None:
        out = "%s>%s</%s>" % ( out, between, tag )
    else:
        if single:
            out = "%s />" % out
        else:
            out = "%s>" % out
    if self.parent is not None:
        self.parent.content.append( out )
    else:
        return out
Append the actual tags to content.