code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def usn_v4_record(header, record):
    """Extract USN V4 record information (currently unimplemented).

    Unpacks the fixed V4 fields following the common record header, then
    raises because full V4 support has not been written yet.
    """
    length, major_version, minor_version = header
    fields = V4_RECORD.unpack_from(record, RECORD_HEADER.size)
    raise NotImplementedError('Not implemented')
Extracts USN V4 record information.
def urlencode(txt):
    """URL-encode a path, UTF-8 encoding unicode input first (Python 2)."""
    if isinstance(txt, unicode):
        txt = txt.encode('utf-8')
    return urllib.quote_plus(txt)
Url encode a path.
def saveComicStrip(self, strip):
    """Save a comic strip, which may consist of several images.

    Returns True only if every image was skipped (nothing was saved).
    """
    all_skipped = True
    for image in strip.getImages():
        try:
            if self.options.dry_run:
                filename, saved = "", False
            else:
                filename, saved = image.save(self.options.basepath)
            if saved:
                all_skipped = False
            if self.stopped:
                break
        except Exception as msg:
            out.exception('Could not save image at %s to %s: %r' % (
                image.referrer, image.filename, msg))
            self.errors += 1
    return all_skipped
Save a comic strip which can consist of multiple images.
def _get_ssl_sock(self):
    """Return the raw SSL socket, connecting it first if necessary."""
    assert self.scheme == u"https", self
    raw_connection = self.url_connection.raw._connection
    # A lazily created connection may not have an open socket yet.
    if raw_connection.sock is None:
        raw_connection.connect()
    return raw_connection.sock
Get raw SSL socket.
def _get_formatting_template(self, number_pattern, number_format):
    """Build a template for formatting a partial number digit by digit.

    Returns U_EMPTY_STRING when the pattern cannot cover the digits
    entered so far.
    """
    # A phone number consisting only of the digit 9, long enough to
    # match any pattern; the pattern match extracts a suitable prefix.
    longest_phone_number = unicod("999999999999999")
    number_re = re.compile(number_pattern)
    m = number_re.search(longest_phone_number)
    a_phone_number = m.group(0)
    # No template if the pattern matches fewer digits than already entered.
    if len(a_phone_number) < len(self._national_number):
        return U_EMPTY_STRING
    # Apply the format, then replace every digit with the placeholder.
    template = re.sub(number_pattern, number_format, a_phone_number)
    template = re.sub("9", _DIGIT_PLACEHOLDER, template)
    return template
Gets a formatting template which can be used to efficiently format a partial number where digits are added one by one.
def _build_zmat(self, construction_table):
    """Create a Zmat from a construction table.

    Args:
        construction_table (pd.DataFrame): defines atom ordering and
            the bond/angle/dihedral reference atoms (columns b, a, d).

    Returns:
        Zmat: a new Zmat carrying over metadata and any optional columns.
    """
    c_table = construction_table
    default_cols = ['atom', 'b', 'bond', 'a', 'angle', 'd', 'dihedral']
    optional_cols = list(set(self.columns) - {'atom', 'x', 'y', 'z'})

    zmat_frame = pd.DataFrame(columns=default_cols + optional_cols,
                              dtype='float', index=c_table.index)
    zmat_frame.loc[:, optional_cols] = self.loc[c_table.index, optional_cols]
    zmat_frame.loc[:, 'atom'] = self.loc[c_table.index, 'atom']
    zmat_frame.loc[:, ['b', 'a', 'd']] = c_table
    zmat_frame.loc[:, ['bond', 'angle', 'dihedral']] = \
        self._calculate_zmat_values(c_table)

    # Keep a snapshot of the cartesian so the Zmat can round-trip back.
    return Zmat(zmat_frame, metadata=self.metadata,
                _metadata={'last_valid_cartesian': self.copy()})
Create the Zmatrix from a construction table. Args: Construction table (pd.DataFrame): Returns: Zmat: A new instance of :class:`Zmat`.
def cached_property(func):
    """Property that computes its value once and caches it on the instance.

    The first access runs *func* and stores the result in the instance's
    ``__dict__`` under the function's name; later accesses return the
    cached value without recomputing.
    """
    name = func.__name__
    doc = func.__doc__

    def getter(self, name=name):
        # EAFP: hit the instance cache first, compute and store on a miss.
        try:
            return self.__dict__[name]
        except KeyError:
            value = func(self)
            self.__dict__[name] = value
            return value

    getter.func_name = name  # kept for Python 2 introspection compatibility
    return property(getter, doc=doc)
Special property decorator that caches the computed property value in the object's instance dict the first time it is accessed.
def ints(l, ifilter=lambda x: x, idescr=None):
    """Parse a comma-separated list of ints.

    Accepts an iterable of int-convertible values or a string such as
    "1,2,3" or "[1, 2, 3]". Each parsed int is passed through ``ifilter``.

    Args:
        l: input list or comma-separated string.
        ifilter: callable applied to every parsed int.
        idescr: optional noun inserted in the error message.

    Raises:
        ValueError: if any element cannot be converted to int.
    """
    if isinstance(l, string_types):
        # Strip optional surrounding brackets, then split on commas.
        if l[0] == '[' and l[-1] == ']':
            l = l[1:-1]
        l = [x.strip() for x in l.split(',')]
    try:
        l = [ifilter(x) for x in map(int, l)]
    except (ValueError, TypeError):
        # Narrowed from a bare except so unrelated errors (KeyboardInterrupt,
        # SystemExit, programming bugs) are no longer swallowed.
        raise ValueError("Bad list of {}integers"
                         .format("" if idescr is None else idescr + " "))
    return l
Parses a comma-separated list of ints.
def triangle_plots(self, basename=None, format='png', **kwargs):
    """Return two triangle plots: physical parameters and observed properties.

    If *basename* is given, the figures are also saved as
    ``<basename>_physical.<format>`` and ``<basename>_observed.<format>``.
    """
    # Distance/AV are only meaningful when the fit includes distance.
    if self.fit_for_distance:
        physical_params = ['mass', 'radius', 'Teff', 'logg', 'feh', 'age',
                           'distance', 'AV']
    else:
        physical_params = ['mass', 'radius', 'Teff', 'feh', 'age']
    fig1 = self.triangle(plot_datapoints=False, params=physical_params,
                         **kwargs)
    if basename is not None:
        plt.savefig('{}_physical.{}'.format(basename, format))
        plt.close()
    fig2 = self.prop_triangle(**kwargs)
    if basename is not None:
        plt.savefig('{}_observed.{}'.format(basename, format))
        plt.close()
    return fig1, fig2
Returns two triangle plots, one with physical params, one observational :param basename: If basename is provided, then plots will be saved as "[basename]_physical.[format]" and "[basename]_observed.[format]" :param format: Format in which to save figures (e.g., 'png' or 'pdf') :param **kwargs: Additional keyword arguments passed to :func:`StarModel.triangle` and :func:`StarModel.prop_triangle` :return: * Physical parameters triangle plot (mass, radius, Teff, feh, age, distance) * Observed properties triangle plot.
def get_descendants(self, include_self=False, depth=None):
    """Return a queryset of all descendants of this object, ordered by depth.

    Args:
        include_self: keep this object itself in the result.
        depth: if given, limit to descendants at most this deep.
    """
    childref = self._closure_childref()
    params = {"%s__parent" % childref: self.pk}
    if depth is not None:
        params["%s__depth__lte" % childref] = depth
    descendants = self._toplevel().objects.filter(**params)
    if not include_self:
        descendants = descendants.exclude(pk=self.pk)
    return descendants.order_by("%s__depth" % childref)
Return all the descendants of this object.
def _handle_wikilink_separator(self):
    """Handle the '|' between a wikilink's title and its display text."""
    # Flip out of TITLE context and into TEXT context.
    self._context ^= contexts.WIKILINK_TITLE
    self._context |= contexts.WIKILINK_TEXT
    self._emit(tokens.WikilinkSeparator())
Handle the separator between a wikilink's title and its text.
def on_binlog(event, stream):
    """Process a binlog event: convert it to a dict and fire the matching signal.

    Args:
        event (pymysqlreplication.row_event.RowsEvent): the event.
        stream: the replication stream the event came from.

    Raises:
        RuntimeError: if the event action is not insert/update/delete.
    """
    rows, meta = _rows_event_to_dict(event, stream)
    table_name = '%s.%s' % (meta['schema'], meta['table'])
    dispatch = {
        'insert': signals.rows_inserted,
        'update': signals.rows_updated,
        'delete': signals.rows_deleted,
    }
    try:
        sig = dispatch[meta['action']]
    except KeyError:
        raise RuntimeError('Invalid action "%s"' % meta['action'])
    sig.send(table_name, rows=rows, meta=meta)
Process on a binlog event 1. Convert event instance into a dict 2. Send corresponding schema/table/signals Args: event (pymysqlreplication.row_event.RowsEvent): the event
def exception(self, *exceptions):
    """Decorator registering a function as a handler for exception types.

    Each positional argument may be a single exception class or a
    tuple/list of exception classes.
    """
    def decorator(handler):
        for exc in exceptions:
            if isinstance(exc, (tuple, list)):
                for member in exc:
                    self.error_handler.add(member, handler)
            else:
                self.error_handler.add(exc, handler)
        return handler
    return decorator
Decorate a function to be registered as a handler for exceptions :param exceptions: exceptions :return: decorated function
def dump(obj, file_path, prettify=False):
    """Serialize *obj* as TOML and write it to *file_path*.

    NOTE(review): ``prettify`` is accepted but never used — confirm
    whether it should be forwarded to ``dumps``.
    """
    with open(file_path, 'w') as fp:
        fp.write(dumps(obj))
Dumps a data structure to the filesystem as TOML. The given value must be either a dict of dict values, a dict, or a TOML file constructed by this module.
def detect_stream_mode(stream):
    """Detect whether *stream* operates on bytes or str.

    Prefers the explicit ``mode`` attribute; otherwise probes with a
    zero-length ``read``/``recv``. Falls back to bytes when the mode
    cannot be determined.

    @return <type> - the ``bytes`` type or the ``str`` type
    """
    if hasattr(stream, 'mode'):
        if 'b' in stream.mode:
            return bytes
        elif 't' in stream.mode:
            return str
    # No usable mode attribute: probe with an empty read/recv and
    # inspect the type of the returned value.
    if hasattr(stream, 'read'):
        return str if type(stream.read(0)) is str else bytes
    elif hasattr(stream, 'recv'):
        return str if type(stream.recv(0)) is str else bytes
    return bytes
detect_stream_mode - Detect the mode on a given stream @param stream <object> - A stream object If "mode" is present, that will be used. @return <type> - "Bytes" type or "str" type
def counts(args):
    """%prog counts vcffile

    Collect allele counts from RO and AO fields.
    """
    p = OptionParser(counts.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    vcffile, = args
    vcf_reader = vcf.Reader(open(vcffile))
    for rec in vcf_reader:
        variant = CPRA(rec)
        if not variant.is_valid:
            continue
        for sample in rec.samples:
            ro = sample["RO"]
            ao = sample["AO"]
            print("\t".join(str(x) for x in (variant, ro, ao)))
%prog counts vcffile Collect allele counts from RO and AO fields.
def endpoint_get(auth=None, **kwargs):
    """Get a single keystone endpoint.

    CLI Example:

    .. code-block:: bash

        salt '*' keystoneng.endpoint_get id=02cffaa173b2460f98e40eda3748dae5
    """
    cloud = get_operator_cloud(auth)
    kwargs = _clean_kwargs(**kwargs)
    return cloud.get_endpoint(**kwargs)
Get a single endpoint CLI Example: .. code-block:: bash salt '*' keystoneng.endpoint_get id=02cffaa173b2460f98e40eda3748dae5
def set_time(time):
    """Set the current macOS system time (24-hour format).

    :param str time: time to set, e.g. '"17:46"'.
    :return: True if successful.
    :raises: CommandExecutionError on failure.
    """
    # Parse with whatever format the string uses, then normalize to HH:MM:SS.
    time_format = _get_date_time_format(time)
    dt_obj = datetime.strptime(time, time_format)
    cmd = 'systemsetup -settime {0}'.format(dt_obj.strftime('%H:%M:%S'))
    return salt.utils.mac_utils.execute_return_success(cmd)
Sets the current time. Must be in 24 hour format. :param str time: The time to set in 24 hour format. The value must be double quoted. ie: '"17:46"' :return: True if successful, False if not :rtype: bool :raises: SaltInvocationError on Invalid Time format :raises: CommandExecutionError on failure CLI Example: .. code-block:: bash salt '*' timezone.set_time '"17:34"'
def user_delete(users, **kwargs):
    """Delete zabbix users.

    :param users: userid or list of userids to delete.
    :return: list of deleted userids on success, empty dict on failure.
    """
    conn_args = _login(**kwargs)
    ret = {}
    try:
        if not conn_args:
            # Mirror the connection-failure path: fall through to the
            # except clause and return the empty dict.
            raise KeyError
        method = 'user.delete'
        params = users if isinstance(users, list) else [users]
        ret = _query(method, params, conn_args['url'], conn_args['auth'])
        return ret['result']['userids']
    except KeyError:
        return ret
Delete zabbix users. .. versionadded:: 2016.3.0 :param users: array of users (userids) to delete :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) :return: On success array with userids of deleted users. CLI Example: .. code-block:: bash salt '*' zabbix.user_delete 15
def zip_namedtuple(nt_list):
    """Zip a namedtuple, or a list of namedtuples of one type, into a dict
    mapping each field name to the list of that field's values."""
    if not nt_list:
        return dict()
    if not isinstance(nt_list, list):
        nt_list = [nt_list]
    # All entries must be the same namedtuple type.
    first_type = type(nt_list[0])
    for nt in nt_list:
        assert type(nt) == first_type
    ret = {k: [v] for k, v in nt_list[0]._asdict().items()}
    for nt in nt_list[1:]:
        for k, v in nt._asdict().items():
            ret[k].append(v)
    return ret
accept list of namedtuple, return a dict of zipped fields
def get_context_data(self, *args, **kwargs):
    """Inject ``is_plans_plural``, ``customer`` and ``subscription`` into
    the template context."""
    context = super().get_context_data(**kwargs)
    context["is_plans_plural"] = Plan.objects.count() > 1
    customer, _created = Customer.get_or_create(
        subscriber=djstripe_settings.subscriber_request_callback(self.request)
    )
    context["customer"] = customer
    context["subscription"] = customer.subscription
    return context
Inject is_plans_plural and customer into context_data.
def rotate_scale(im, angle, scale, borderValue=0, interp=cv2.INTER_CUBIC):
    """Rotate by *angle* (radians) and scale the image about its center.

    The output has the same size as the input, so content may be cropped.
    Pixels outside the border are filled with *borderValue*.
    """
    im = np.asarray(im, dtype=np.float32)
    rows, cols = im.shape
    # cv2 expects degrees; the scale is inverted because warpAffine maps
    # destination to source.
    M = cv2.getRotationMatrix2D(
        (cols / 2, rows / 2), -angle * 180 / np.pi, 1 / scale)
    return cv2.warpAffine(im, M, (cols, rows),
                          borderMode=cv2.BORDER_CONSTANT,
                          flags=interp,
                          borderValue=borderValue)
Rotates and scales the image Parameters ---------- im: 2d array The image angle: number The angle, in radians, to rotate scale: positive number The scale factor borderValue: number, default 0 The value for the pixels outside the border (default 0) Returns ------- im: 2d array the rotated and scaled image Notes ----- The output image has the same size as the input. Therefore the image may be cropped in the process.
def removeByIndex(self, index):
    """Remove the invitee at the given position from the invitation list.

    Out-of-range indexes (including negative ones) are silently ignored.

    Fixes two defects in the previous version: ``list.remove(index)``
    searched for *index* as a value instead of deleting by position, and
    the bound ``index < len - 1`` wrongly excluded the last element.
    """
    if 0 <= index < len(self._invites):
        del self._invites[index]
removes a user from the invitation list by position
def vars_to_array(self):
    """Convert ``self.vars`` to a numpy array (deprecated).

    Returns
    -------
    numpy.array or None
        None when ``self.vars`` is empty.
    """
    logger.warn('This function is deprecated. You can inspect `self.np_vars` directly as NumPy arrays '
                'without conversion.')
    if not self.vars:
        return None
    # Stack the variable vectors as rows: one row per entry of self.vars.
    vars_matrix = matrix(self.vars,
                         size=(self.vars[0].size[0], len(self.vars))).trans()
    self.vars_array = np.array(vars_matrix)
    return self.vars_array
Convert `self.vars` to a numpy array Returns ------- numpy.array
def get_channels_by_sln_year_quarter(
        self, channel_type, sln, year, quarter):
    """Search for all channels matching the given sln, year and quarter."""
    return self.search_channels(type=channel_type,
                                tag_sln=sln,
                                tag_year=year,
                                tag_quarter=quarter)
Search for all channels by sln, year and quarter
async def queryone(self, stmt, *args):
    """Run a query that must return exactly one row, and return that row.

    Raises:
        NoResultError: if the query returned no rows.
        ValueError: if the query returned more than one row.
    """
    results = await self.query(stmt, *args)
    if len(results) == 0:
        raise NoResultError()
    if len(results) > 1:
        raise ValueError("Expected 1 result, got %d" % len(results))
    return results[0]
Query for exactly one result. Raises NoResultError if there are no results, or ValueError if there are more than one.
def get_all_guild_roles(self, guild_id: int) -> List[Dict[str, Any]]:
    """Fetch every role of the given guild via the HTTP API.

    Args:
        guild_id: snowflake id of the guild.

    Returns:
        List of role dictionaries as returned by the API.
    """
    return self._query(f'guilds/{guild_id}/roles', 'GET')
Gets all the roles for the specified guild Args: guild_id: snowflake id of the guild Returns: List of dictionary objects of roles in the guild. Example: [ { "id": "41771983423143936", "name": "WE DEM BOYZZ!!!!!!", "color": 3447003, "hoist": true, "position": 1, "permissions": 66321471, "managed": false, "mentionable": false }, { "hoist": false, "name": "Admin", "mentionable": false, "color": 15158332, "position": 2, "id": "151107620239966208", "managed": false, "permissions": 66583679 }, { "hoist": false, "name": "@everyone", "mentionable": false, "color": 0, "position": 0, "id": "151106790233210882", "managed": false, "permissions": 37215297 } ]
def block_icmp(zone, icmp, permanent=True):
    """Block a specific ICMP type on a firewalld zone.

    CLI Example:

    .. code-block:: bash

        salt '*' firewalld.block_icmp zone echo-reply
    """
    if icmp not in get_icmp_types(permanent):
        log.error('Invalid ICMP type')
        return False
    if icmp in list_icmp_block(zone, permanent):
        # Already blocked: nothing to do.
        log.info('ICMP block already exists')
        return 'success'
    cmd = '--zone={0} --add-icmp-block={1}'.format(zone, icmp)
    if permanent:
        cmd += ' --permanent'
    return __firewall_cmd(cmd)
Block a specific ICMP type on a zone .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' firewalld.block_icmp zone echo-reply
def update_single_grading_period(self, id, course_id,
                                 grading_periods_end_date,
                                 grading_periods_start_date,
                                 grading_periods_weight=None):
    """Update an existing grading period on a course via the Canvas API."""
    path = {"course_id": course_id, "id": id}
    params = {}
    data = {
        "grading_periods[start_date]": grading_periods_start_date,
        "grading_periods[end_date]": grading_periods_end_date,
    }
    if grading_periods_weight is not None:
        data["grading_periods[weight]"] = grading_periods_weight
    self.logger.debug(
        "PUT /api/v1/courses/{course_id}/grading_periods/{id} with query params: {params} and form data: {data}".format(
            params=params, data=data, **path))
    return self.generic_request(
        "PUT",
        "/api/v1/courses/{course_id}/grading_periods/{id}".format(**path),
        data=data, params=params, no_data=True)
Update a single grading period. Update an existing grading period.
def _get_key_file_path():
    """Return the key file path: the user's home if writable, else the CWD."""
    home = os.getenv(USER_HOME)
    if home is not None and os.access(home, os.W_OK):
        return os.path.join(home, KEY_FILE_NAME)
    return os.path.join(os.getcwd(), KEY_FILE_NAME)
Return the key file path.
def print_time(self, message="Time is now: ", print_frame_info=True):
    """Print the elapsed time since ``self.start_time``.

    Kwargs:
        message (str): prefix for the printed timestamp.
        print_frame_info (bool): also print the caller's frame info.
    """
    elapsed = datetime.now() - self.start_time
    if print_frame_info:
        frame_info = inspect.getouterframes(inspect.currentframe())[1]
        print(message, elapsed, frame_info)
    else:
        print(message, elapsed)
Print the current elapsed time. Kwargs: message (str) : Message to prefix the time stamp. print_frame_info (bool) : Add frame info to the print message.
def create_event(service_key=None, description=None, details=None,
                 incident_key=None, profile=None):
    """Create a trigger event in PagerDuty. Designed for use in states.

    Required: ``service_key`` (see pagerduty.list_services),
    ``description``, ``details`` and ``profile``.
    """
    trigger_url = 'https://events.pagerduty.com/generic/2010-04-15/create_event.json'

    # details may arrive as a YAML string; parse it, and if it is still a
    # plain string afterwards, wrap it into a dict.
    if isinstance(details, six.string_types):
        details = salt.utils.yaml.safe_load(details)
    if isinstance(details, six.string_types):
        details = {'details': details}

    ret = salt.utils.json.loads(salt.utils.pagerduty.query(
        method='POST',
        profile_dict=__salt__['config.option'](profile),
        api_key=service_key,
        data={
            'service_key': service_key,
            'incident_key': incident_key,
            'event_type': 'trigger',
            'description': description,
            'details': details,
        },
        url=trigger_url,
        opts=__opts__
    ))
    return ret
Create an event in PagerDuty. Designed for use in states. CLI Example: .. code-block:: yaml salt myminion pagerduty.create_event <service_key> <description> <details> \ profile=my-pagerduty-account The following parameters are required: service_key This key can be found by using pagerduty.list_services. description This is a short description of the event. details This can be a more detailed description of the event. profile This refers to the configuration profile to use to connect to the PagerDuty service.
def add(self, error):
    """Add a validation error to the tree.

    Errors without a resolvable path are kept in a sorted flat list;
    everything else is delegated to the base class.

    :param error: :class:`~cerberus.errors.ValidationError`
    """
    if self._path_of_(error):
        super(ErrorTree, self).add(error)
    else:
        self.errors.append(error)
        self.errors.sort()
Add an error to the tree. :param error: :class:`~cerberus.errors.ValidationError`
def command_load(ctx, config, socket_name, socket_path, answer_yes,
                 detached, colors):
    """Load a tmux workspace from each CONFIG.

    If several CONFIGs are given, all but the last are loaded detached
    and the last one is attached.
    """
    util.oh_my_zsh_auto_title()
    tmux_options = {
        'socket_name': socket_name,
        'socket_path': socket_path,
        'answer_yes': answer_yes,
        'colors': colors,
        'detached': detached,
    }
    if not config:
        click.echo("Enter at least one CONFIG")
        click.echo(ctx.get_help(), color=ctx.color)
        ctx.exit()
    if isinstance(config, string_types):
        load_workspace(config, **tmux_options)
    elif isinstance(config, tuple):
        config = list(config)
        # Load all but the last workspace detached...
        for cfg in config[:-1]:
            opt = tmux_options.copy()
            opt.update({'detached': True})
            load_workspace(cfg, **opt)
        # ...then the last one with the requested attach behavior.
        load_workspace(config[-1], **tmux_options)
Load a tmux workspace from each CONFIG. CONFIG is a specifier for a configuration file. If CONFIG is a path to a directory, tmuxp will search it for ".tmuxp.{yaml,yml,json}". If CONFIG is has no directory component and only a filename, e.g. "myconfig.yaml", tmuxp will search the users's config directory for that file. If CONFIG has no directory component, and only a name with no extension, e.g. "myconfig", tmuxp will search the users's config directory for any file with the extension ".yaml", ".yml", or ".json" that matches that name. If multiple configuration files that match a given CONFIG are found, tmuxp will warn and pick the first one found. If multiple CONFIGs are provided, workspaces will be created for all of them. The last one provided will be attached. The others will be created in detached mode.
def make_dataset(self, dataset, raise_if_exists=False, body=None):
    """Create a new BigQuery dataset with the default permissions.

    :param dataset: BQDataset to create.
    :param raise_if_exists: raise if the dataset already exists.
    :raises luigi.target.FileAlreadyExists: if raise_if_exists=True and
        the dataset exists.
    """
    if body is None:
        body = {}
    try:
        body['datasetReference'] = {
            'projectId': dataset.project_id,
            'datasetId': dataset.dataset_id,
        }
        if dataset.location is not None:
            body['location'] = dataset.location
        self.client.datasets().insert(projectId=dataset.project_id,
                                      body=body).execute()
    except http.HttpError as ex:
        # 409 Conflict means the dataset already exists; that is only an
        # error when the caller asked for one.
        if ex.resp.status != 409:
            raise
        if raise_if_exists:
            raise luigi.target.FileAlreadyExists()
Creates a new dataset with the default permissions. :param dataset: :type dataset: BQDataset :param raise_if_exists: whether to raise an exception if the dataset already exists. :raises luigi.target.FileAlreadyExists: if raise_if_exists=True and the dataset exists
def legal_edge_coords():
    """Return the set of all legal edge coordinates on the grid."""
    edges = {edge
             for tile_id in legal_tile_ids()
             for edge in edges_touching_tile(tile_id)}
    logging.debug('Legal edge coords({})={}'.format(len(edges), edges))
    return edges
Return all legal edge coordinates on the grid.
def get_data(self):
    """Get SNMP alarm and metric values from the host.

    Returns a (alarm_values, metric_values) tuple, split out of a single
    combined SNMP GET.
    """
    alarm_oids = [netsnmp.Varbind(alarms[alarm_id]['oid'])
                  for alarm_id in self.models[self.modem_type]['alarms']]
    metric_oids = [netsnmp.Varbind(metrics[metric_id]['oid'])
                   for metric_id in self.models[self.modem_type]['metrics']]
    response = self.snmp_session.get(
        netsnmp.VarList(*alarm_oids + metric_oids))
    # The response preserves request order: alarms first, then metrics.
    split = len(alarm_oids)
    return response[:split], response[split:]
Get SNMP values from host
def run_per_switch_cmds(self, switch_cmds):
    """Run each switch's own command list on that switch only.

    ``switch_cmds`` maps switch IP -> list of commands. Used for
    applying/removing per-interface ACL config that varies per switch.

    NOTE(review): ``.get()`` yields None for unknown IPs — confirm
    run_openstack_sg_cmds tolerates a None switch.
    """
    for switch_ip, cmds in switch_cmds.items():
        switch = self._switches.get(switch_ip)
        self.run_openstack_sg_cmds(cmds, switch)
Applies cmds to appropriate switches This takes in a switch->cmds mapping and runs only the set of cmds specified for a switch on that switch. This helper is used for applying/removing ACLs to/from interfaces as this config will vary from switch to switch.
def recover_all_handler(self):
    """Re-attach every cached handler to the logger and clear the cache."""
    for handler in self._handler_cache:
        self.logger.addHandler(handler)
    self._handler_cache = list()
Relink the file handler association you just removed.
def location(hexgrid_type, coord):
    """Return a display string for the coordinate.

    Tiles look like ``12``; nodes and edges look like ``(12 SE)``.
    Returns None (with a warning) for unsupported hexgrid types.
    """
    if hexgrid_type == TILE:
        return str(coord)
    if hexgrid_type == NODE:
        tile_id = nearest_tile_to_node(coord)
        dirn = tile_node_offset_to_direction(coord - tile_id_to_coord(tile_id))
        return '({} {})'.format(tile_id, dirn)
    if hexgrid_type == EDGE:
        tile_id = nearest_tile_to_edge(coord)
        dirn = tile_edge_offset_to_direction(coord - tile_id_to_coord(tile_id))
        return '({} {})'.format(tile_id, dirn)
    logging.warning('unsupported hexgrid_type={}'.format(hexgrid_type))
    return None
Returns a formatted string representing the coordinate. The format depends on the coordinate type. Tiles look like: 1, 12 Nodes look like: (1 NW), (12 S) Edges look like: (1 NW), (12 SE) :param hexgrid_type: hexgrid.TILE, hexgrid.NODE, hexgrid.EDGE :param coord: integer coordinate in this module's hexadecimal coordinate system :return: formatted string for display
def GetArtifactParserDependencies(rdf_artifact):
    """Return the knowledgebase path dependencies required by the parsers.

    Args:
        rdf_artifact: RDF artifact object.

    Returns:
        A set of strings naming the required kb objects,
        e.g. {"users.appdata", "systemroot"}.
    """
    deps = set()
    for processor in parser.Parser.GetClassesByArtifact(rdf_artifact.name):
        deps.update(processor.knowledgebase_dependencies)
    return deps
Return the set of knowledgebase path dependencies required by the parser. Args: rdf_artifact: RDF artifact object. Returns: A set of strings for the required kb objects e.g. ["users.appdata", "systemroot"]
def get_scenenode(self, nodes):
    """Return the scene node contained in *nodes*.

    :raises: AssertionError if no scene node is found.
    """
    scenenodes = cmds.ls(nodes, type='jb_sceneNode')
    assert scenenodes, "Found no scene nodes!"
    # There should only be one; take the first in sorted order for
    # determinism.
    return sorted(scenenodes)[0]
Get the scenenode in the given nodes There should only be one scenenode in nodes! :param nodes: :type nodes: :returns: None :rtype: None :raises: AssertionError
def _can_connect(ip, port): cs = socket.socket() try: cs.connect((ip, port)) cs.close() return True except socket.error: return False
Checks if a TCP port at IP address is possible to connect to
def raise_figure_window(f=0):
    """Raise the supplied figure number or figure window."""
    # A number selects (or creates) the figure; otherwise f is assumed
    # to already be a figure object.
    if _fun.is_a_number(f):
        f = _pylab.figure(f)
    f.canvas.manager.window.raise_()
Raises the supplied figure number or figure window.
def experiments_predictions_update_state_success(self, experiment_id,
                                                 run_id, result_file):
    """Mark a prediction model run as successful.

    Creates a function data resource for *result_file* and attaches it
    to the run.

    Returns the updated model run handle, or None if the run is unknown.
    """
    model_run = self.experiments_predictions_get(experiment_id, run_id)
    if model_run is None:
        return None
    funcdata = self.funcdata.create_object(result_file)
    return self.predictions.update_state(
        run_id,
        modelrun.ModelRunSuccess(funcdata.identifier)
    )
Update state of given prediction to success. Create a function data resource for the given result file and associate it with the model run. Parameters ---------- experiment_id : string Unique experiment identifier run_id : string Unique model run identifier result_file : string Path to model run result file Returns ------- ModelRunHandle Handle for updated model run or None is prediction is undefined
def _build_deployments_object(
    contract_type: str,
    deployment_bytecode: Dict[str, Any],
    runtime_bytecode: Dict[str, Any],
    compiler: Dict[str, Any],
    address: HexStr,
    tx: HexStr,
    block: HexStr,
    manifest: Dict[str, Any],
) -> Iterable[Tuple[str, Any]]:
    """Yield (key, value) pairs of properly formatted deployment data.

    Falsy optional fields are omitted. The yield order matters: it
    determines key order in the resulting manifest dict.
    """
    yield "contract_type", contract_type
    yield "address", to_hex(address)
    if deployment_bytecode:
        yield "deployment_bytecode", deployment_bytecode
    if compiler:
        yield "compiler", compiler
    if tx:
        yield "transaction", tx
    if block:
        yield "block", block
    if runtime_bytecode:
        yield "runtime_bytecode", runtime_bytecode
Returns a dict with properly formatted deployment data.
def serialize(self):
    """Serialize the CaseWrapper for collection.

    Only grabs currently available information, so call this after all
    specs and tests have completed. Empty entries are stripped.
    """
    converted = {
        'id': self.id,
        'name': self.pretty_name,
        'raw_name': self.name,
        'doc': self.doc,
        'error': self.error,
        'skipped': self.skipped,
        'skip_reason': self.skip_reason,
        'execute_kwargs': self.safe_execute_kwargs,
        'metadata': self.metadata,
        'start': self.start_time,
        'end': self.end_time,
        'expects': [exp.serialize() for exp in self.expects],
        'success': self.success,
    }
    return remove_empty_entries_from_dict(converted)
Serializes the CaseWrapper object for collection. Warning: this will only grab the currently available information. It is strongly recommended that you only call this once all specs and tests have completed.
def docker(gandi, vm, args):
    """Manage a docker instance.

    NOTE(review): the ``gandi.echo()`` calls below carry no message in
    the available source — the original message literals appear to have
    been stripped; confirm against upstream before relying on them.
    """
    # Require a docker binary somewhere on PATH.
    if not any(os.path.exists('%s/docker' % basedir)
               for basedir in os.getenv('PATH', '.:/usr/bin').split(':')):
        gandi.echo(
        )
        return
    if vm:
        gandi.configure(True, 'dockervm', vm)
    else:
        vm = gandi.get('dockervm')
        if not vm:
            gandi.echo(
            )
            return
    return gandi.docker.handle(vm, args)
Manage docker instance
def set_element_dt(self, el_name, dt, tz=None, el_idx=0):
    """Set the selected element's text to an ISO 8601 formatted datetime.

    Args:
        el_name: name of the element to update.
        dt: datetime to set; if naive, it is assigned timezone ``tz``
            (an aware dt ignores ``tz``).
        tz: timezone applied to a naive ``dt``; None leaves it naive.
        el_idx: which sibling element to use when names collide.
    """
    dt = d1_common.date_time.cast_naive_datetime_to_tz(dt, tz)
    self.get_element_by_name(el_name, el_idx).text = dt.isoformat()
Set the text of the selected element to an ISO8601 formatted datetime. Args: el_name : str Name of element to update. dt : datetime.datetime Date and time to set tz : datetime.tzinfo Timezone to set - Without a timezone, other contextual information is required in order to determine the exact represented time. - If dt has timezone: The ``tz`` parameter is ignored. - If dt is naive (without timezone): The timezone is set to ``tz``. - ``tz=None``: Prevent naive dt from being set to a timezone. Without a timezone, other contextual information is required in order to determine the exact represented time. - ``tz=d1_common.date_time.UTC()``: Set naive dt to UTC. el_idx : int Index of element to use in the event that there are multiple sibling elements with the same name.
def generate_age(issue_time):
    """Generate an ``age`` parameter (whole seconds since *issue_time*)
    for MAC authentication draft 00."""
    td = datetime.datetime.now() - issue_time
    # Integer total seconds, computed from the timedelta components.
    age = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / 10 ** 6
    return unicode_type(age)
Generate a age parameter for MAC authentication draft 00.
def generate(basename, xml_list):
    """Generate the complete MAVLink Objective-C implementation."""
    generate_shared(basename, xml_list)
    for xml in xml_list:
        generate_message_definitions(basename, xml)
Generate the complete MAVLink Objective-C implementation.
def __CheckAndUnifyQueryFormat(self, query_body):
    """Check and unify the format of the query body.

    :raises TypeError: if query_body is not of the expected type for the
        current query compatibility mode.
    :raises ValueError: if query_body is a dict without valid query text.
    :raises SystemError: if the query compatibility mode is undefined.
    :return: the formatted query body (dict, or string in SqlQuery mode).

    NOTE(review): structure reconstructed from flattened source — in
    SqlQuery mode a plain string appears to fall through to the
    SystemError branch; confirm against upstream.
    """
    if (self._query_compatibility_mode == CosmosClient._QueryCompatibilityMode.Default
            or self._query_compatibility_mode == CosmosClient._QueryCompatibilityMode.Query):
        if not isinstance(query_body, dict) and not isinstance(query_body, six.string_types):
            raise TypeError('query body must be a dict or string.')
        if isinstance(query_body, dict) and not query_body.get('query'):
            raise ValueError('query body must have valid query text with key "query".')
        if isinstance(query_body, six.string_types):
            return {'query': query_body}
    elif (self._query_compatibility_mode == CosmosClient._QueryCompatibilityMode.SqlQuery
            and not isinstance(query_body, six.string_types)):
        raise TypeError('query body must be a string.')
    else:
        raise SystemError('Unexpected query compatibility mode.')
    return query_body
Checks and unifies the format of the query body. :raises TypeError: If query_body is not of expected type (depending on the query compatibility mode). :raises ValueError: If query_body is a dict but doesn\'t have valid query text. :raises SystemError: If the query compatibility mode is undefined. :param (str or dict) query_body: :return: The formatted query body. :rtype: dict or string
def _fracRoiSparse(self):
    """Approximate the combined per-pixel coverage fraction of the two masks.

    Uses the upper bound min(frac1, frac2) per pixel, assuming the
    fracdet was effectively computed for the combined masks (the true
    lower bound would be max(frac1 + frac2 - 1, 0)).
    """
    self.frac_roi_sparse = np.min(
        [self.mask_1.frac_roi_sparse, self.mask_2.frac_roi_sparse], axis=0)
    return self.frac_roi_sparse
Calculate an approximate pixel coverage fraction from the two masks. We have no way to know a priori how much the coverage of the two masks overlap in a give pixel. For example, masks that each have frac = 0.5 could have a combined frac = [0.0 to 0.5]. The limits will be: max: min(frac1,frac2) min: max((frac1+frac2)-1, 0.0) Sometimes we are lucky and our fracdet is actually already calculated for the two masks combined, so that the max condition is satisfied. That is what we will assume...
def solve(graph, debug=False, anim=None):
    """Do MV routing for the nodes in *graph*.

    Runs a Clarke-Wright savings pass followed by local search, then
    translates the solution back onto the ding0 graph.

    Args:
        graph: NetworkX graph object with nodes.
        debug: if True, print intermediate solutions and timings.
        anim: AnimationDing0 object (optional).

    Returns:
        NetworkX graph object with nodes and edges.
    """
    specs = ding0_graph_to_routing_specs(graph)
    RoutingGraph = Graph(specs)

    timeout = 30000
    start = time.time()

    savings_solution = savings.ClarkeWrightSolver().solve(
        RoutingGraph, timeout, debug, anim)
    if debug:
        logger.debug('ClarkeWrightSolver solution:')
        util.print_solution(savings_solution)
        logger.debug('Elapsed time (seconds): {}'.format(time.time() - start))

    local_search_solution = local_search.LocalSearchSolver().solve(
        RoutingGraph, savings_solution, timeout, debug, anim)
    if debug:
        logger.debug('Local Search solution:')
        util.print_solution(local_search_solution)
        logger.debug('Elapsed time (seconds): {}'.format(time.time() - start))

    return routing_solution_to_ding0_graph(graph, local_search_solution)
Do MV routing for given nodes in `graph`. Translate data from node objects to appropriate format before. Args ---- graph: :networkx:`NetworkX Graph Obj< >` NetworkX graph object with nodes debug: bool, defaults to False If True, information is printed while routing anim: AnimationDing0 AnimationDing0 object Returns ------- :networkx:`NetworkX Graph Obj< >` NetworkX graph object with nodes and edges See Also -------- ding0.tools.animation.AnimationDing0 : for a more detailed description on anim parameter.
def softmax_cross_entropy_one_hot(logits, labels, weights_fn=None):
    """Softmax cross entropy for one-hot labels.

    Args:
        logits: Tensor of size [batch-size, o=1, p=1, num-classes].
        labels: Tensor of size [batch-size, o=1, p=1, num-classes].
        weights_fn: unused; accepted for interface compatibility.

    Returns:
        (cross-entropy scalar, constant weight 1.0)
    """
    with tf.variable_scope("softmax_cross_entropy_one_hot",
                           values=[logits, labels]):
        del weights_fn
        cross_entropy = tf.losses.softmax_cross_entropy(
            onehot_labels=labels, logits=logits)
        return cross_entropy, tf.constant(1.0)
Calculate softmax cross entropy given one-hot labels and logits. Args: logits: Tensor of size [batch-size, o=1, p=1, num-classes] labels: Tensor of size [batch-size, o=1, p=1, num-classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: cross-entropy (scalar), weights
def cleanup(first_I, first_Z):
    """Clean up unbalanced steps between the I and Z step lists.

    A mismatch can come from an unbalanced final step or a missing step;
    the lower-valued entry is dropped and the function returns early.

    Returns (first_I, first_Z, cont) where cont is 1 if a step was dropped.

    NOTE(review): structure reconstructed from flattened source; the
    early return after the first mismatch matches the upstream pmagpy
    version but should be confirmed.
    """
    cont = 0
    n_common = min(len(first_I), len(first_Z))
    for kk in range(n_common):
        if first_I[kk][0] != first_Z[kk][0]:
            print("\n WARNING: ")
            if first_I[kk] < first_Z[kk]:
                del first_I[kk]
            else:
                del first_Z[kk]
            print("Unmatched step number: ", kk + 1, ' ignored')
            cont = 1
        if cont == 1:
            return first_I, first_Z, cont
    return first_I, first_Z, cont
cleans up unbalanced steps failure can be from unbalanced final step, or from missing steps, this takes care of missing steps
def coerce_to_list(val):
    """Normalize a single-or-many parameter.

    Falsy values (None, "", 0, empty sequence) become []; a list or
    tuple passes through unchanged; anything else is wrapped in a list.
    """
    if not val:
        return []
    if isinstance(val, (list, tuple)):
        return val
    return [val]
For parameters that can take either a single string or a list of strings, this function will ensure that the result is a list containing the passed values.
def deletegitlabciservice(self, project_id, token, project_url):
    """Delete the GitLab CI service settings for a project.

    :param project_id: Project ID.
    :param token: unused; kept for interface compatibility.
    :param project_url: unused; kept for interface compatibility.
    :return: True on HTTP 200, False otherwise.
    """
    request = requests.delete(
        '{0}/{1}/services/gitlab-ci'.format(self.projects_url, project_id),
        headers=self.headers,
        verify=self.verify_ssl,
        auth=self.auth,
        timeout=self.timeout)
    return request.status_code == 200
Delete GitLab CI service settings :param project_id: Project ID :param token: Token :param project_url: Project URL :return: true if success, false if not
def _get_socket(self, sid): try: s = self.sockets[sid] except KeyError: raise KeyError('Session not found') if s.closed: del self.sockets[sid] raise KeyError('Session is disconnected') return s
Return the socket object for a given session.
def get_class(classname):
    """Return the class object for a dot-notation classname.

    Based on http://stackoverflow.com/a/452981

    :param classname: the fully qualified classname, e.g. "pkg.mod.Cls"
    :type classname: str
    :return: the class object
    :rtype: object
    """
    parts = classname.split('.')
    # __import__ on "a.b.c" returns the top-level package "a"; walk the
    # remaining attribute chain from there.
    obj = __import__(".".join(parts[:-1]))
    for attr in parts[1:]:
        obj = getattr(obj, attr)
    return obj
Returns the class object associated with the dot-notation classname. Taken from here: http://stackoverflow.com/a/452981 :param classname: the classname :type classname: str :return: the class object :rtype: object
def from_soup(self, soup):
    """Factory: build an Author from the given soup, or None if empty."""
    if soup is None or soup is '':
        return None
    author_name = soup.find('em').contents[0].strip() if soup.find('em') else ''
    author_image = soup.find('img').get('src') if soup.find('img') else ''
    author_contact = Contact.from_soup(self, soup)
    return Author(author_name, author_image, author_contact)
Factory Pattern. Fetches author data from given soup and builds the object
def minimum(attrs, inputs, proto_obj):
    """Elementwise minimum of arrays.

    MXNet's minimum takes exactly two operands, so more inputs are
    folded pairwise.
    """
    if len(inputs) > 1:
        mxnet_op = symbol.minimum(inputs[0], inputs[1])
        for op_input in inputs[2:]:
            mxnet_op = symbol.minimum(mxnet_op, op_input)
    else:
        # Single input: min(x, x) == x keeps the op graph uniform.
        mxnet_op = symbol.minimum(inputs[0], inputs[0])
    return mxnet_op, attrs, inputs
Elementwise minimum of arrays.
def cliques(self, reordered=True):
    """Return a list of cliques, one list of vertex indices per supernode.

    With ``reordered=False`` indices are mapped back through the
    permutation ``self.__p`` to the original ordering.
    """
    result = []
    for k in range(self.Nsn):
        members = self.snrowidx[self.sncolptr[k]:self.sncolptr[k + 1]]
        if reordered:
            result.append(list(members))
        else:
            result.append(list(self.__p[members]))
    return result
Returns a list of cliques
def overlap(listA, listB):
    """Return a list of the objects shared by listA and listB.

    Either argument being None yields an empty list. The result's order
    is unspecified (set intersection).
    """
    if listA is None or listB is None:
        return []
    return list(set(listA) & set(listB))
Return list of objects shared by listA, listB.
def cursor_to_line(self, line=None):
    """Move the cursor to a specific line in the current column.

    :param int line: 1-based line number (defaults to 1). In origin mode
        (DECOM) the line is relative to the top margin.
    """
    self.cursor.y = (line or 1) - 1
    if mo.DECOM in self.mode:
        self.cursor.y += self.margins.top
    self.ensure_vbounds()
Move cursor to a specific line in the current column. :param int line: line number to move the cursor to.
def create_notification_channel(self, callback_url, calendar_ids=()):
    """Create a push-notification channel.

    :param string callback_url: HTTPS url that will receive notifications.
    :param tuple calendar_ids: optional calendar ids to filter on.
    :return: the created channel description
    :rtype: ``dict``
    """
    payload = {'callback_url': callback_url}
    if calendar_ids:
        payload['filters'] = {'calendar_ids': calendar_ids}
    response = self.request_handler.post('channels', data=payload)
    return response.json()['channel']
Create a new channel for receiving push notifications. :param string callback_url: The url that will receive push notifications. Must not be longer than 128 characters and should be HTTPS. :param tuple calendar_ids: List of calendar ids to create notification channels for. (Optional. Default empty tuple) :return: Channel id and channel callback :rtype: ``dict``
def execute_r(prog, quiet):
    """Run the R code *prog* in an R subprocess.

    :param prog: R source to execute
    :param quiet: when True, suppress the subprocess's output
    :raises ValueError: if R exits with a non-zero status
    """
    # FIX: the original spawned an extra `echo` process (never waited on,
    # stdout pipe never closed) and used shell=True plus a manual devnull
    # handle. Feed the program on stdin directly instead.
    result = subprocess.run(
        ["R", "--no-save", "--quiet"],
        input=(prog + "\n").encode(),  # echo appended a trailing newline
        stdout=subprocess.DEVNULL if quiet else None,
        stderr=subprocess.STDOUT,
    )
    if result.returncode != 0:
        raise ValueError("ggplot2 bridge failed for program: {}."
                         " Check for an error".format(prog))
Run the R code prog an R subprocess @raises ValueError if the subprocess exits with non-zero status
def bin_number(datapoint, intervals):
    """One-hot encode which bin of *intervals* contains *datapoint*.

    With ``len(intervals)`` boundary values there are ``len(intervals)+1``
    bins; the occupied bin is 1, all others 0.
    """
    bin_idx = numpy.searchsorted(intervals, datapoint)
    encoded = [0] * (len(intervals) + 1)
    encoded[bin_idx] = 1
    return encoded
Given a datapoint and intervals representing bins, returns the number represented in binned form, where the bin including the value is set to 1 and all others are 0.
def save(self):
    """Serialize the in-memory configuration to disk."""
    serialized = self._serializer('dumps', self._storage)
    with open(self.configuration_file, 'w') as handle:
        handle.write(serialized)
Save the current configuration to disk.
def get_intercom_data(self):
    """Build the per-user payload sent to the Intercom API."""
    # Users who never logged in get an empty last_request_at.
    last_request = self.last_login.strftime("%s") if self.last_login else ""
    payload = {
        "user_id": self.intercom_id,
        "email": self.email,
        "name": self.get_full_name(),
        "last_request_at": last_request,
        "created_at": self.date_joined.strftime("%s"),
        "custom_attributes": {
            "is_admin": self.is_superuser
        }
    }
    return payload
Specify the user data sent to Intercom API
def search_phenotype(
        self, phenotype_association_set_id=None, phenotype_id=None,
        description=None, type_=None, age_of_onset=None):
    """Return an iterator over the Phenotypes from the server.

    Builds a SearchPhenotypesRequest from the provided filters and pages
    through the results via the generic search-request helper.

    :param phenotype_association_set_id: association set to search.
        NOTE(review): assigned unconditionally -- presumably must not be
        None for a valid request; confirm against the protocol.
    :param phenotype_id: optional phenotype id filter (maps to request.id)
    :param description: optional description filter
    :param type_: optional ontology term; merged into request.type
    :param age_of_onset: optional age-of-onset filter
    """
    request = protocol.SearchPhenotypesRequest()
    request.phenotype_association_set_id = phenotype_association_set_id
    if phenotype_id:
        request.id = phenotype_id
    if description:
        request.description = description
    if type_:
        # Protobuf message field: copy the term in rather than assign.
        request.type.mergeFrom(type_)
    if age_of_onset:
        request.age_of_onset = age_of_onset
    request.page_size = pb.int(self._page_size)
    return self._run_search_request(
        request, "phenotypes", protocol.SearchPhenotypesResponse)
Returns an iterator over the Phenotypes from the server
def texture_map_to_plane(dataset, origin=None, point_u=None, point_v=None,
                         inplace=False, name='Texture Coordinates'):
    """Texture map this dataset to a user defined plane.

    Parameters
    ----------
    dataset : vtk dataset
        The dataset to generate texture coordinates for.
    origin, point_u, point_v : tuple(float), optional
        XYZ corners of the plane (bottom-left, bottom-right, top-left).
        If any is None, VTK generates the plane automatically.
    inplace : bool, optional
        If True, attach the new texture coordinates to *dataset*;
        if False (default), return a new output dataset.
    name : str, optional
        Name for the new texture coordinates array.
        NOTE(review): only applied on the inplace path -- the early
        return below hands back the output with VTK's default array
        name; confirm whether that is intended.
    """
    alg = vtk.vtkTextureMapToPlane()
    if origin is None or point_u is None or point_v is None:
        # Any missing corner => let VTK pick the plane.
        alg.SetAutomaticPlaneGeneration(True)
    else:
        alg.SetOrigin(origin)
        alg.SetPoint1(point_u)
        alg.SetPoint2(point_v)
    alg.SetInputDataObject(dataset)
    alg.Update()
    output = _get_output(alg)
    if not inplace:
        return output
    # Inplace: rename the generated coordinates and graft them onto the
    # original dataset, keeping the old coordinates as a plain array.
    t_coords = output.GetPointData().GetTCoords()
    t_coords.SetName(name)
    otc = dataset.GetPointData().GetTCoords()
    dataset.GetPointData().SetTCoords(t_coords)
    dataset.GetPointData().AddArray(t_coords)
    dataset.GetPointData().AddArray(otc)
    return
Texture map this dataset to a user defined plane. This is often used to define a plane to texture map an image to this dataset. The plane defines the spatial reference and extent of that image. Parameters ---------- origin : tuple(float) Length 3 iterable of floats defining the XYZ coordinates of the BOTTOM LEFT CORNER of the plane point_u : tuple(float) Length 3 iterable of floats defining the XYZ coordinates of the BOTTOM RIGHT CORNER of the plane point_v : tuple(float) Length 3 iterable of floats defining the XYZ coordinates of the TOP LEFT CORNER of the plane inplace : bool, optional If True, the new texture coordinates will be added to the dataset inplace. If False (default), a new dataset is returned with the textures coordinates name : str, optional The string name to give the new texture coordinates if applying the filter inplace.
def construct_parameter_pattern(parameter):
    """Build a named-group regex for one path-parameter definition.

    Integer parameters match digit runs; everything else matches any
    run of non-slash characters.
    """
    name = parameter['name']
    # FIX: renamed local (was shadowing builtin `type`) and made the
    # digit class a raw string (`'\d'` is an invalid escape sequence
    # warning on modern Python).
    param_type = parameter['type']
    repeated = r'\d' if param_type == 'integer' else '[^/]'
    return "(?P<{name}>{repeated}+)".format(name=name, repeated=repeated)
Given a parameter definition returns a regex pattern that will match that part of the path.
def moothedata(data, key=None):
    """Render an amusing cow picture showing one item from *data*.

    Parameters
    ----------
    data: mapping
        A mapping such as a raster dataset's ``meta`` or ``profile``.
    key:
        A key of *data*; a random one is chosen when falsy.
    """
    if not key:
        key = choice(list(data.keys()))
        logger.debug("Using randomly chosen key: %s", key)
    text = "{0}: {1}".format(key.capitalize(), data[key])
    return cow.Moose().milk(text)
Return an amusing picture containing an item from a dict. Parameters ---------- data: mapping A mapping, such as a raster dataset's ``meta`` or ``profile`` property. key: A key of the ``data`` mapping.
def get_logging_tensor_hook(every_n_iter=100, tensors_to_log=None, **kwargs):
    """Function to get LoggingTensorHook.

    Args:
      every_n_iter: `int`, print the values of `tensors` once every N local
        steps taken on the current worker.
      tensors_to_log: List of tensor names or dictionary mapping labels to
        tensor names. If not set, log _TENSORS_TO_LOG by default.
      **kwargs: a dictionary of arguments to LoggingTensorHook.

    Returns:
      Returns a LoggingTensorHook with a standard set of tensors that will be
      printed to stdout.
    """
    if tensors_to_log is None:
        tensors_to_log = _TENSORS_TO_LOG
    # FIX: **kwargs was documented but silently dropped; forward it.
    return tf.train.LoggingTensorHook(
        tensors=tensors_to_log,
        every_n_iter=every_n_iter,
        **kwargs)
Function to get LoggingTensorHook. Args: every_n_iter: `int`, print the values of `tensors` once every N local steps taken on the current worker. tensors_to_log: List of tensor names or dictionary mapping labels to tensor names. If not set, log _TENSORS_TO_LOG by default. **kwargs: a dictionary of arguments to LoggingTensorHook. Returns: Returns a LoggingTensorHook with a standard set of tensors that will be printed to stdout.
def textContent(self, text: str) -> None:
    """Set the text content locally and, if connected, on the browser node."""
    self._set_text_content(text)
    if not self.connected:
        return
    self._set_text_content_web(text)
Set textContent both on this node and related browser node.
def update_module_file(redbaron_tree, module_path, show_diff=False, dry_run=False):
    """Regenerate *module_path* from ``redbaron_tree``.

    Set show_diff to False to overwrite module_path with a new file
    generated from the tree; dry_run skips the actual write.

    Returns:
        False when the generated source equals the file on disk,
        True when it differs (the diff/write is delegated to
        ``diff_update_file``).
    """
    with tempfile.NamedTemporaryFile() as tmp_file:
        tmp_file.write(redbaron_tree_to_module_str(redbaron_tree))
        tmp_file.seek(0)
        if are_files_equal(module_path, tmp_file.name):
            logging.debug('Source unchanged')
            return False
        logging.debug('Source modified')
        tmp_file.seek(0)
        diff_update_file(module_path, tmp_file.read(), show_diff, dry_run)
        # FIX: the documented contract is to return True when the tree
        # differs from the source; the original fell through to None.
        return True
Set show_diff to False to overwrite module_path with a new file generated from ``redbaron_tree``. Returns True if tree is different from source.
def intervals_to_samples(intervals, labels, offset=0, sample_size=0.1,
                         fill_value=None):
    """Convert labeled time intervals to a regular grid of annotated samples.

    Produces sample times every *sample_size* seconds (shifted by
    *offset*) up to the latest interval boundary, and interpolates the
    interval labels onto that grid; out-of-range points get *fill_value*.

    Returns (sample_times, sampled_labels).
    """
    num_samples = int(np.floor(intervals.max() / sample_size))
    # Keep the float32 grid arithmetic of the original implementation so
    # the emitted sample times are bit-identical.
    grid = np.arange(num_samples, dtype=np.float32) * sample_size + offset
    sample_times = grid.tolist()
    sampled_labels = interpolate_intervals(
        intervals, labels, sample_times, fill_value)
    return sample_times, sampled_labels
Convert an array of labeled time intervals to annotated samples. Parameters ---------- intervals : np.ndarray, shape=(n, d) An array of time intervals, as returned by :func:`mir_eval.io.load_intervals()` or :func:`mir_eval.io.load_labeled_intervals()`. The ``i`` th interval spans time ``intervals[i, 0]`` to ``intervals[i, 1]``. labels : list, shape=(n,) The annotation for each interval offset : float > 0 Phase offset of the sampled time grid (in seconds) (Default value = 0) sample_size : float > 0 duration of each sample to be generated (in seconds) (Default value = 0.1) fill_value : type(labels[0]) Object to use for the label with out-of-range time points. (Default value = None) Returns ------- sample_times : list list of sample times sample_labels : list array of labels for each generated sample Notes ----- Intervals will be rounded down to the nearest multiple of ``sample_size``.
def option(self, opt):
    """Return the value of *opt*, merged from config and pillar.

    Prefers the loaded ``config.merge`` function when available; falls
    back to a plain lookup in ``self.opts``.
    """
    merge = self.functions.get('config.merge')
    if merge is not None:
        return merge(opt, {}, omit_master=True)
    return self.opts.get(opt, {})
Return options merged from config and pillar
def extensionResponse(self, namespace_uri, require_signed):
    """Return response arguments in *namespace_uri*.

    With ``require_signed`` True, only signed arguments are returned
    (None when the namespace is not among the signed fields); otherwise
    all arguments in the namespace are returned.
    """
    if not require_signed:
        return self.message.getArgs(namespace_uri)
    return self.getSignedNS(namespace_uri)
Return response arguments in the specified namespace. @param namespace_uri: The namespace URI of the arguments to be returned. @param require_signed: True if the arguments should be among those signed in the response, False if you don't care. If require_signed is True and the arguments are not signed, return None.
def _render_with_context(self, parsed_node, config):
    """Render the node's raw SQL with macro capture enabled.

    No return value: rendering is done for its side effects -- *config*
    is mutated when ``config()`` calls are encountered in the SQL.
    """
    context = dbt.context.parser.generate(
        parsed_node, self.root_project_config,
        self.macro_manifest, config)
    dbt.clients.jinja.get_rendered(
        parsed_node.raw_sql, context, parsed_node.to_shallow_dict(),
        capture_macros=True)
Given the parsed node and a SourceConfig to use during parsing, render the node's sql with macro capture enabled. Note: this mutates the config object when config() calls are rendered.
def submit(cls, job_config, in_xg_transaction=False):
    """Submit the job to run.

    Args:
      job_config: an instance of map_job.MapJobConfig.
      in_xg_transaction: when True, join an already-open cross-group
        transaction (one entity group is consumed from it); when False,
        start the job in an independent transaction regardless of any
        surrounding transaction scope.

    Returns:
      a Job instance representing the submitted job.
    """
    cls.__validate_job_config(job_config)
    mapper_spec = job_config._get_mapper_spec()

    # Build the full mapreduce spec from the validated config.
    mapreduce_params = job_config._get_mr_params()
    mapreduce_spec = model.MapreduceSpec(
        job_config.job_name,
        job_config.job_id,
        mapper_spec.to_json(),
        mapreduce_params,
        util._obj_to_path(job_config._hooks_cls))

    # MANDATORY requires an enclosing transaction; INDEPENDENT always
    # opens a fresh one.
    if in_xg_transaction:
        propagation = db.MANDATORY
    else:
        propagation = db.INDEPENDENT

    state = None

    @db.transactional(propagation=propagation)
    def _txn():
        # Persist job state and enqueue the kickoff task atomically.
        state = cls.__create_and_save_state(job_config, mapreduce_spec)
        cls.__add_kickoff_task(job_config, mapreduce_spec)
        return state

    state = _txn()
    return cls(state)
Submit the job to run. Args: job_config: an instance of map_job.MapJobConfig. in_xg_transaction: controls what transaction scope to use to start this MR job. If True, there has to be an already opened cross-group transaction scope. MR will use one entity group from it. If False, MR will create an independent transaction to start the job regardless of any existing transaction scopes. Returns: a Job instance representing the submitted job.
def reverse_reference(self):
    """Flip the hit coordinates as if the reference were reverse complemented."""
    last_index = self.ref_length - 1
    self.ref_start = last_index - self.ref_start
    self.ref_end = last_index - self.ref_end
Changes the coordinates as if the reference sequence has been reverse complemented
def add(self, name, arcname=None, **kwargs):
    """Add a file or directory to the context tarball.

    For directories with exclusion rules, a filter function is injected
    (unless the caller already supplied one) so excluded paths are
    skipped.

    :param name: File or directory path.
    :type name: unicode | str
    :param kwargs: Additional kwargs for :meth:`tarfile.TarFile.add`.
    """
    if os.path.isdir(name):
        excluded = get_exclusions(name)
        if excluded:
            prefix = os.path.abspath(arcname or name)
            kwargs.setdefault('filter', get_filter_func(excluded, prefix))
    self.tarfile.add(name, arcname=arcname, **kwargs)
Add a file or directory to the context tarball. :param name: File or directory path. :type name: unicode | str :param args: Additional args for :meth:`tarfile.TarFile.add`. :param kwargs: Additional kwargs for :meth:`tarfile.TarFile.add`.
def CallMethod(self, method, controller, request, response_class, done):
    """Call the RPC method and return the parsed response.

    Naming doesn't conform to PEP8 because this is invoked by protobuf.
    Validation errors (RequestError) propagate untouched; any other
    failure tears down the socket before re-raising so the next call
    reconnects cleanly.
    """
    try:
        self.validate_request(request)
        # Lazily (re)connect before sending.
        if not self.sock:
            self.get_connection(self.host, self.port)
        self.send_rpc_message(method, request)
        byte_stream = self.recv_rpc_message()
        return self.parse_response(byte_stream, response_class)
    except RequestError:
        # Client-side validation problem; connection is still usable.
        raise
    except Exception:
        # Unknown failure: drop the connection so stale state isn't reused.
        self.close_socket()
        raise
Call the RPC method. The naming doesn't confirm PEP8, since it's a method called by protobuf
def apply_to(self, A):
    """Apply the stored homogeneous transform ``self.T`` to points in *A*.

    A 1-D input is treated as a single point (promoted to one row).
    Returns the transformed points with the homogeneous column dropped.
    """
    points = np.atleast_2d(A)
    n_rows, n_cols = points.shape
    # Append the homogeneous coordinate, transform, then strip it again.
    homogeneous = np.hstack([points, np.ones((n_rows, 1))])
    transformed = self.T.dot(homogeneous.T).T
    return transformed[:, :n_cols]
Apply the coordinate transformation to points in A.
def unlock_area(self, code, index):
    """Unlock a previously locked shared memory area."""
    logger.debug("unlocking area code %s index %s" % (code, index))
    result = self.library.Srv_UnlockArea(self.pointer, code, index)
    return result
Unlocks a previously locked shared memory area.
def get_authors(repo_path, from_commit):
    """Return the sorted, newline-joined set of commit authors.

    Walks first-parent history oldest-to-newest; when *from_commit* is
    given (sha prefix or ref name), authors are only collected from that
    commit onward. *from_commit* None means include everything.
    """
    repo = dulwich.repo.Repo(repo_path)
    refs = get_refs(repo)
    start_including = False
    authors = set()

    if from_commit is None:
        start_including = True

    # Iterate first-parent commits from oldest to newest.
    for commit_sha, children in reversed(
        get_children_per_first_parent(repo_path).items()
    ):
        commit = get_repo_object(repo, commit_sha)
        # Start (and keep) including once the boundary commit is matched
        # by sha prefix or by any of its refs.
        if (
            start_including
            or commit_sha.startswith(from_commit)
            or fuzzy_matches_refs(from_commit, refs.get(commit_sha, []))
        ):
            authors.add(commit.author.decode())
            # Also credit authors of the side-branch children.
            for child in children:
                authors.add(child.author.decode())
            start_including = True

    return '\n'.join(sorted(authors))
Given a repo and optionally a base revision to start from, will return the list of authors.
async def identify(self, duration_s: int = 5):
    """Blink the button light to identify the robot, ending with it lit.

    :param int duration_s: how long to blink for, in seconds (4 Hz toggle).
    """
    flashes = duration_s * 4
    lit = False
    for _ in range(flashes):
        started = self._loop.time()
        self.set_lights(button=lit)
        lit = not lit
        # Sleep out the remainder of the 0.25 s period.
        elapsed = self._loop.time() - started
        await asyncio.sleep(max(0, 0.25 - elapsed))
    self.set_lights(button=True)
Blink the button light to identify the robot. :param int duration_s: The duration to blink for, in seconds.
def _needs_base64_encoding(self, attr_type, attr_value):
    """Return True when *attr_value* must be base64-encoded.

    True when the attribute type is registered in ``self._base64_attrs``,
    the value is raw bytes, or the string contains unsafe characters.
    """
    if attr_type.lower() in self._base64_attrs:
        return True
    if isinstance(attr_value, bytes):
        return True
    return UNSAFE_STRING_RE.search(attr_value) is not None
Return True if attr_value has to be base-64 encoded. This is the case because of special chars or because attr_type is in self._base64_attrs
def zeroed_observation(observation):
    """Return a zero-filled structure mirroring *observation*'s shape.

    Arrays (anything with ``.shape``) become ``np.zeros`` of that shape;
    iterables are recursed element-wise; scalars become ``0.``.
    """
    if hasattr(observation, 'shape'):
        return np.zeros(observation.shape)
    if hasattr(observation, '__iter__'):
        return [zeroed_observation(item) for item in observation]
    return 0.
Return an array of zeros with same shape as given observation # Argument observation (list): List of observation # Return A np.ndarray of zeros with observation.shape
def EXTRA_LOGGING(self):
    """Parse the EXTRA_LOGGING setting into [[module, level], ...] pairs.

    Example configuration::

        [Logs]
        EXTRA_LOGGING = oscar.paypal:DEBUG, django.db:INFO
    """
    input_text = get('EXTRA_LOGGING', '')
    # FIX: removed the dead `modules = input_text.split(',')` that was
    # computed before the branch and immediately overwritten.
    if not input_text:
        return []
    return [entry.split(':') for entry in input_text.split(',')]
List of modules to log, each with its own logging level. For example: [Logs] EXTRA_LOGGING = oscar.paypal:DEBUG, django.db:INFO
def _add_single_session_to_to_ordered_dict(self, d, dataset_index, recommended_only):
    """Append this session's models to the ordered-dict accumulator *d*.

    With *recommended_only* True, only the recommended model is emitted;
    when no recommendation exists, the first model is emitted as a null
    placeholder (show_null) so the dataset still appears in the output.
    """
    for model_index, model in enumerate(self.models):
        show_null = False
        if recommended_only:
            if self.recommendation_enabled:
                if self.recommended_model is None:
                    # Recommendation ran but picked nothing: emit only a
                    # null row for the first model.
                    if model_index == 0:
                        show_null = True
                    else:
                        continue
                elif self.recommended_model == model:
                    # This is the recommended model; emit it normally.
                    pass
                else:
                    continue
            else:
                # Recommendation disabled: emit a single null row.
                if model_index == 0:
                    show_null = True
                else:
                    continue
        d["dataset_index"].append(dataset_index)
        d["doses_dropped"].append(self.doses_dropped)
        model._to_df(d, model_index, show_null)
Save a single session to an ordered dictionary.
def merge(file, feature_layers):
    """Merge *feature_layers* into one OSciMap4 tile and write it to *file*.

    The serialized tile is written with a big-endian uint32 length prefix.
    """
    tile = VectorTile(extents)
    for layer in feature_layers:
        tile.addFeatures(layer['features'], layer['name'])
    tile.complete()
    payload = tile.out.SerializeToString()
    file.write(struct.pack(">I", len(payload)))
    file.write(payload)
Retrieve a list of OSciMap4 tile responses and merge them into one. get_tiles() retrieves data and performs basic integrity checks.
def format_name(subject):
    """Render *subject* as a '/'-joined distinguished-name string.

    Accepts either an ``x509.Name`` or an iterable of (key, value)
    pairs; no reordering of the subject is performed.

    Examples::

        >>> format_name([('CN', 'example.com'), ])
        '/CN=example.com'
    """
    if isinstance(subject, x509.Name):
        subject = [(OID_NAME_MAPPINGS[attr.oid], attr.value) for attr in subject]
    parts = ['%s=%s' % (force_text(key), force_text(value))
             for key, value in subject]
    return '/%s' % ('/'.join(parts))
Convert a subject into the canonical form for distinguished names. This function does not take care of sorting the subject in any meaningful order. Examples:: >>> format_name([('CN', 'example.com'), ]) '/CN=example.com' >>> format_name([('CN', 'example.com'), ('O', "My Organization"), ]) '/CN=example.com/O=My Organization'
def draw(self):
    """Blit the rendered text to the window, if currently visible."""
    if self.visible:
        self.window.blit(self.textImage, self.loc)
Draws the current text in the window
def _parse_args(func, variables, annotations=None):
    """Return (Argument, variable) pairs for each annotated parameter.

    Falls back to ``func.__annotations__`` when *annotations* is falsy;
    the ``return`` annotation is skipped. Multiple arguments may read
    the same variable.
    """
    pairs = []
    for arg_name, anno in (annotations or func.__annotations__).items():
        if arg_name == 'return':
            continue
        var, read = _parse_arg(func, variables, arg_name, anno)
        pairs.append((Argument(name=arg_name, read=read), var))
    return pairs
Return a list of arguments with the variable it reads. NOTE: Multiple arguments may read the same variable.
def fix_reference_url(url):
    """Apply common URL repairs and validate the result.

    Runs the bar/slash, missing-http and tilde fixers in sequence; the
    fixed URL is returned only if it parses as a valid URI, otherwise
    the original *url* is returned unchanged.
    """
    candidate = fix_url_bars_instead_of_slashes(url)
    candidate = fix_url_add_http_if_missing(candidate)
    candidate = fix_url_replace_tilde(candidate)
    try:
        rfc3987.parse(candidate, rule="URI")
    except ValueError:
        return url
    return candidate
Used to parse an incorrect url and try to fix it with the most common occurrences of errors. If the fixed url is still invalid, the original url is returned. Returns: String containing the fixed url, or the original one if it could not be fixed.
def proxy(self, request, original_target_route, presenter_name, **kwargs):
    """Execute *presenter_name* as the target for the given client request.

    :param request: original client request
    :param original_target_route: previous target route
    :param presenter_name: target presenter name
    :param kwargs: presenter arguments ('action' selects the action,
        defaulting to 'index'; the rest are forwarded)
    :return: WWebResponseProto
    """
    action_kwargs = dict(kwargs)
    action_name = action_kwargs.pop('action', 'index')
    target_route = WWebTargetRoute(
        presenter_name,
        action_name,
        original_target_route.route(),
        original_target_route.route_map(),
        **action_kwargs
    )
    return self.execute(request, target_route)
Execute the given presenter as a target for the given client request :param request: original client request :param original_target_route: previous target route :param presenter_name: target presenter name :param kwargs: presenter arguments :return: WWebResponseProto
def get_list_columns(self):
    """Fetch and construct the sharepoint list columns."""
    url = self.build_url(self._endpoints.get('get_list_columns'))
    response = self.con.get(url)
    if not response:
        return []
    columns = response.json().get('value', [])
    return [
        self.list_column_constructor(parent=self, **{self._cloud_data_key: column})
        for column in columns
    ]
Returns the sharepoint list columns