Dataset preview. Columns:
  Unnamed: 0: int64, values 0 to 389k
  code: string, lengths 26 to 79.6k
  docstring: string, lengths 1 to 46.9k
374,200
def add_job(self, queue_name, job, timeout=200, replicate=None, delay=None,
            retry=None, ttl=None, maxlen=None, asynchronous=None):
    command = ['ADDJOB', queue_name, job, timeout]
    if replicate:
        command += ['REPLICATE', replicate]
    if delay:
        command += ['DELAY', delay]
    if retry is not None:
        command += ['RETRY', retry]
    if ttl:
        command += ['TTL', ttl]
    if maxlen:
        command += ['MAXLEN', maxlen]
    if asynchronous:
        command += ['ASYNC']
    logger.debug("sending job - %s", command)
    job_id = self.execute_command(*command)
    logger.debug("sent job - %s", command)
    logger.debug("job_id: %s", job_id)
    return job_id
Add a job to a queue.

ADDJOB queue_name job <ms-timeout> [REPLICATE <count>] [DELAY <sec>] [RETRY <sec>] [TTL <sec>] [MAXLEN <count>] [ASYNC]

:param queue_name: the name of the queue; any string, basically.
:param job: a string representing the job.
:param timeout: the command timeout in milliseconds.
:param replicate: count is the number of nodes the job should be replicated to.
:param delay: sec is the number of seconds that should elapse before the job is queued by any server.
:param retry: sec period after which, if no ACK is received, the job is put again into the queue for delivery. If RETRY is 0, the job has at-most-once delivery semantics.
:param ttl: sec is the max job life in seconds. After this time, the job is deleted even if it was not successfully delivered.
:param maxlen: count specifies that if there are already count messages queued for the specified queue name, the message is refused and an error reported to the client.
:param asynchronous: asks the server to let the command return ASAP and replicate the job to other nodes in the background. The job gets queued ASAP, while normally the job is put into the queue only when the client gets a positive reply. This argument is named ``asynchronous`` because ``async`` is a reserved keyword in Python 3.7.
:returns: job_id
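A minimal usage sketch, assuming a connected Disque client object named disque that exposes this method:

job_id = disque.add_job('emails', '{"to": "user@example.com"}',
                        timeout=100, retry=60, ttl=3600)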
374,201
def check_lines(self, lines, i):
    max_chars = self.config.max_line_length
    ignore_long_line = self.config.ignore_long_lines

    def check_line(line, i):
        if not line.endswith("\n"):
            self.add_message("missing-final-newline", line=i)
        else:
            stripped_line = line.rstrip("\t\n\r\v ")
            if not stripped_line and _EMPTY_LINE in self.config.no_space_check:
                # allow empty lines when the empty-line check is disabled
                pass
            elif line[len(stripped_line):] not in ("\n", "\r\n"):
                self.add_message(
                    "trailing-whitespace", line=i, col_offset=len(stripped_line)
                )
            # don't count excess whitespace in the line length
            line = stripped_line
        mobj = OPTION_RGX.search(line)
        if mobj and "=" in line:
            front_of_equal, _, back_of_equal = mobj.group(1).partition("=")
            if front_of_equal.strip() == "disable":
                if "line-too-long" in {
                    _msg_id.strip() for _msg_id in back_of_equal.split(",")
                }:
                    return None
                # a disable pragma for another check: drop the comment part
                # before measuring the length
                line = line.rsplit("#", 1)[0].rstrip()
        if len(line) > max_chars and not ignore_long_line.search(line):
            self.add_message("line-too-long", line=i, args=(len(line), max_chars))
        return i + 1

    unsplit_ends = {
        "\v", "\x0b", "\f", "\x0c", "\x1c", "\x1d", "\x1e", "\x85",
        "\u2028", "\u2029",
    }
    unsplit = []
    for line in lines.splitlines(True):
        if line[-1] in unsplit_ends:
            unsplit.append(line)
            continue
        if unsplit:
            unsplit.append(line)
            line = "".join(unsplit)
            unsplit = []
        i = check_line(line, i)
        if i is None:
            break
    if unsplit:
        check_line("".join(unsplit), i)
Check that lines have fewer than a maximum number of characters.
374,202
def serialize(self) -> dict:
    data = {**self}
    if "attachments" in self:
        data["attachments"] = json.dumps(self["attachments"])
    return data
Serialize the message for sending to the Slack API.

Returns: serialized message
374,203
def _construct_request(self):
    if self.parsed_endpoint.scheme == "https":
        conn = httplib.HTTPSConnection(self.parsed_endpoint.netloc)
    else:
        conn = httplib.HTTPConnection(self.parsed_endpoint.netloc)
    head = {
        "Accept": "application/json",
        "User-Agent": USER_AGENT,
        API_TOKEN_HEADER_NAME: self.api_token,
    }
    # the accepted version strings were lost in extraction; placeholders below
    if self.api_version in ["v1", "v2"]:
        head[API_VERSION_HEADER_NAME] = self.api_version
    return conn, head
Utility for constructing the request headers and connection.
374,204
def readAsync(self, fileName, callback, **kwargs):
    if self._langext is not None:
        with open(fileName, "r") as fin:
            newmodel = self._langext.translate(fin.read(), **kwargs)
        # the translated-file suffix literal was lost in extraction;
        # ".translated" is a placeholder
        with open(fileName + ".translated", "w") as fout:
            fout.write(newmodel)
        fileName += ".translated"

    def async_call():
        self._lock.acquire()
        try:
            self._impl.read(fileName)
            self._errorhandler_wrapper.check()
        except Exception:
            self._lock.release()
            raise
        else:
            self._lock.release()
            callback.run()

    Thread(target=async_call).start()
Interprets the specified file asynchronously, interpreting it as a model or a script file. As a side effect, it invalidates all entities (as the passed file can contain any arbitrary command); the lists of entities will be re-populated lazily (at first access). Args: fileName: Path to the file (Relative to the current working directory or absolute). callback: Callback to be executed when the file has been interpreted.
374,205
def top_stories(self, limit=5, first=None, last=None, json=False):
    story_ids = requests.get(TOP_STORIES_URL).json()
    story_urls = []
    for story_id in story_ids:
        url = API_BASE + "item/" + str(story_id) + ".json"
        story_urls.append(url)
    if first and last:
        story_urls = story_urls[first:last]
    if limit != 5:
        # the original discarded this slice; assign it so limit takes effect
        story_urls = story_urls[:limit]
    else:
        story_urls = story_urls[:limit]
    response_list = fetch_event(story_urls)
    if json:
        yield response_list
    else:
        for response in response_list:
            yield story_parser(response)
Get the top story objects.

params:
    limit = (default 5) number of story objects needed
    json = (default False) yield the raw response list instead of parsed stories

The method fetches asynchronously using grequests (gevent).
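A minimal usage sketch, assuming a client object named hn that exposes this generator:

for story in hn.top_stories(limit=3):
    print(story)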
374,206
def _replay_index(replay_dir):
    run_config = run_configs.get()
    replay_dir = run_config.abs_replay_path(replay_dir)
    print("Checking: ", replay_dir)
    with run_config.start(want_rgb=False) as controller:
        print("-" * 60)
        print(",".join((
            "filename", "build", "map_name", "game_duration_loops", "players",
            "P1-outcome", "P1-race", "P1-apm", "P2-race", "P2-apm",
        )))
        try:
            bad_replays = []
            for file_path in run_config.replay_paths(replay_dir):
                file_name = os.path.basename(file_path)
                try:
                    info = controller.replay_info(run_config.replay_data(file_path))
                except remote_controller.RequestError as e:
                    bad_replays.append("%s: %s" % (file_name, e))
                    continue
                if info.HasField("error"):
                    print("failed:", file_name, info.error, info.error_details)
                    bad_replays.append(file_name)
                else:
                    out = [
                        file_name,
                        info.base_build,
                        info.map_name,
                        info.game_duration_loops,
                        len(info.player_info),
                        sc_pb.Result.Name(info.player_info[0].player_result.result),
                        sc_common.Race.Name(info.player_info[0].player_info.race_actual),
                        info.player_info[0].player_apm,
                    ]
                    if len(info.player_info) >= 2:
                        out += [
                            sc_common.Race.Name(
                                info.player_info[1].player_info.race_actual),
                            info.player_info[1].player_apm,
                        ]
                    print(u",".join(str(s) for s in out))
        except KeyboardInterrupt:
            pass
        finally:
            if bad_replays:
                print("\n")
                print("Replays with errors:")
                print("\n".join(bad_replays))
Output information for a directory of replays.
374,207
def run(self):
    if self.auto_clear:
        # clear-screen literals were lost in extraction; the usual pair is
        # 'cls' on Windows ('nt') and 'clear' elsewhere
        os.system('cls' if os.name == 'nt' else 'clear')
    else:
        print()
    print()
    if self.auto_clear:
        print()
    # the test-command literal was lost in extraction; nose is assumed
    # from the docstring
    subprocess.call(['nosetests'], cwd=self.directory)
Called when a file is changed to re-run the tests with nose.
374,208
def get_meta(cls):
    merged_attributes = Struct()
    for class_ in reversed(cls.mro()):
        if hasattr(class_, 'Meta'):
            for key, value in class_.Meta.__dict__.items():
                merged_attributes[key] = value
    return merged_attributes
Collect all members of any contained :code:`Meta` class declarations from the given class or any of its base classes. (Subclass values take precedence.)

:type cls: class
:rtype: Struct
374,209
def initialize(self):
    if self.collname not in self.current_kv_names():
        # header and form-field literals were lost in extraction;
        # the names below follow the Splunk KV store REST API
        r = self.request('POST', self.url + "storage/collections/config",
                         headers={'Content-Type': 'application/x-www-form-urlencoded'},
                         data={'name': self.collname})
        r.raise_for_status()
        re = self.request('POST',
                          self.url + "storage/collections/config/" + self.collname,
                          headers={'Content-Type': 'application/x-www-form-urlencoded'},
                          data=self.schema)
        re.raise_for_status()
        logger.info("initialized Splunk Key Value Collection %s with schema %s"
                    % (self.collname, str(self.schema)))
    if self.collname not in self.current_kv_names():
        # message literal lost in extraction; placeholder
        raise EnvironmentError(
            "failed to initialize collection %s; existing collections: %s"
            % (self.collname, str(self.current_kv_names())))
Instantiates the cache area to be ready for updates
374,210
def UploadFile(self, fd, offset=0, amount=None):
    return self._UploadChunkStream(
        self._streamer.StreamFile(fd, offset=offset, amount=amount))
Uploads chunks of a given file descriptor to the transfer store flow.

Args:
    fd: A file descriptor to upload.
    offset: An integer offset at which the file upload should start.
    amount: An upper bound on number of bytes to stream. If it is `None` then the whole file is uploaded.

Returns:
    A `BlobImageDescriptor` object.
374,211
def wrap_value(value, include_empty=False):
    if value is None:
        return [None] if include_empty else []
    elif hasattr(value, '__len__') and len(value) == 0:
        return [value] if include_empty else []
    elif isinstance(value, _wrap_types):
        return [value]
    elif not hasattr(value, '__iter__'):
        return [value]
    return value if include_empty else filter_empty(value, [])
:return: the value wrapped in a list unless it is already iterable (and not a dict); if so, empty values will be filtered out by default, and an empty list is returned.
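A sketch of the intended behavior, assuming _wrap_types includes dict and filter_empty drops empty members:

wrap_value(None)            # -> []
wrap_value('a')             # -> ['a']
wrap_value({'k': 1})        # -> [{'k': 1}]
wrap_value(['a', '', 'b'])  # -> ['a', 'b']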
374,212
def recv_result_from_workers(self):
    info = MPI.Status()
    result = self.comm.recv(source=MPI.ANY_SOURCE, tag=RESULT_TAG, status=info)
    logger.debug("Received result from workers: {}".format(result))
    return result
Receives a result from the MPI worker pool and sends it out via 0mq.

Returns:
--------
    result: task result from the workers
374,213
def register_reference(self, dispatcher, node):
    self.identifiers[node] = self.current_scope
    self.current_scope.reference(node.value)
Register this identifier to the current scope, and mark it as referenced in the current scope.
374,214
def imagetransformer_base_10l_16h_big_dr01_imgnet():
    hparams = imagetransformer_base_14l_8h_big_dr01()
    hparams.num_decoder_layers = 10
    hparams.num_heads = 16
    hparams.hidden_size = 1024
    hparams.filter_size = 4096
    hparams.batch_size = 1
    hparams.unconditional = False
    hparams.layer_prepostprocess_dropout = 0.1
    return hparams
big 1d model for conditional image generation.
374,215
def spin_px(self):
    return conversions.primary_spin(self.mass1, self.mass2,
                                    self.spin1x, self.spin2x)
Returns the x-component of the spin of the primary mass.
374,216
def update(self, event):
    event = event.copy()
    reactor.callInThread(self._update_deferred, event)
All messages from the Protocol get passed through this method, which allows the client to keep an up-to-date state. However, this method doesn't actually update right away. Instead, the actual update happens in another thread, potentially later, in order to let user code handle the event faster.
374,217
def detect_xid_devices(self):
    self.__xid_cons = []
    for c in self.__com_ports:
        device_found = False
        for b in [115200, 19200, 9600, 57600, 38400]:
            con = XidConnection(c, b)
            try:
                con.open()
            except SerialException:
                continue
            con.flush_input()
            con.flush_output()
            returnval = con.send_xid_command("_c1", 5).decode()
            if returnval.startswith('_xid'):
                device_found = True
                self.__xid_cons.append(con)
                if returnval != '_xid0':
                    # command literals below were lost in extraction;
                    # reconstructed following the pyxid sources: put the
                    # device in XID mode and reset its timers
                    con.send_xid_command('c10')
                    con.flush_input()
                    con.flush_output()
                    con.send_xid_command('e1')
                    con.send_xid_command('e5')
            con.close()
            if device_found:
                break
For all of the com ports connected to the computer, send an XID command '_c1'. If the device responds with '_xid', it is an XID device.
374,218
def parse(filename_url_or_file, guess_charset=True, parser=None):
    if parser is None:
        parser = html_parser
    if not isinstance(filename_url_or_file, _strings):
        fp = filename_url_or_file
    elif _looks_like_url(filename_url_or_file):
        fp = urlopen(filename_url_or_file)
    else:
        fp = open(filename_url_or_file, 'rb')
    return parser.parse(fp, useChardet=guess_charset)
Parse a filename, URL, or file-like object into an HTML document tree. Note: this returns a tree, not an element. Use ``parse(...).getroot()`` to get the document root.
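A minimal usage sketch, following the note in the docstring:

tree = parse('page.html')
root = tree.getroot()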
374,219
def get_packet(self, generation_time, sequence_number):
    # the URL template literal was lost in extraction; reconstructed
    # from the Yamcs archive REST layout
    url = '/archive/{}/packets/{}/{}'.format(
        self._instance, to_isostring(generation_time), sequence_number)
    response = self._client.get_proto(url)
    message = yamcs_pb2.TmPacketData()
    message.ParseFromString(response.content)
    return Packet(message)
Gets a single packet by its identifying key (gentime, seqNum). :param ~datetime.datetime generation_time: When the packet was generated (packet time) :param int sequence_number: Sequence number of the packet :rtype: .Packet
374,220
def create_long_form_weights(model_obj, wide_weights, rows_to_obs=None):
    check_validity_of_long_form_args(model_obj, wide_weights, rows_to_obs)
    if rows_to_obs is None:
        rows_to_obs = model_obj.get_mappings_for_fit()['rows_to_obs']
    wide_weights_2d = (
        wide_weights if wide_weights.ndim == 2 else wide_weights[:, None])
    long_weights = rows_to_obs.dot(wide_weights_2d)
    if wide_weights.ndim == 1:
        long_weights = long_weights.sum(axis=1)
    return long_weights
Converts an array of weights with one element per observation (wide-format) to an array of weights with one element per observation per available alternative (long-format).

Parameters
----------
model_obj : an instance or subclass of the MNDC class.
    Should be the model object that corresponds to the model we are constructing the bootstrap confidence intervals for.
wide_weights : 1D or 2D ndarray.
    Should contain one element or one column per observation in `model_obj.data`, depending on whether `wide_weights` is 1D or 2D respectively. These elements should be the weights for optimizing the model's objective function for estimation.
rows_to_obs : 2D scipy sparse array.
    A mapping matrix of zeros and ones, where `rows_to_obs[i, j]` is one if row `i` of the long-format data belongs to observation `j` and zero otherwise.

Returns
-------
long_weights : 1D or 2D ndarray.
    Should contain one element or one column per row of the long-format data, depending on whether `wide_weights` is 1D or 2D respectively. These elements are the weights from `wide_weights`, simply mapping each observation's weight to the corresponding rows in the long-format data.
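A toy sketch of the underlying mapping, with hypothetical numbers: two observations with two and three available alternatives respectively.

import numpy as np
from scipy import sparse

rows_to_obs = sparse.csr_matrix(
    np.array([[1, 0], [1, 0], [0, 1], [0, 1], [0, 1]]))
wide_weights = np.array([0.5, 2.0])
long_weights = rows_to_obs.dot(wide_weights[:, None]).sum(axis=1)
# -> [0.5, 0.5, 2.0, 2.0, 2.0]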
374,221
def remove_extra_packages(self, packages, dry_run=False):
    removal_list = self.determine_extra_packages(packages)
    if not removal_list:
        print("No packages to be removed")
    else:
        if dry_run:
            print("The following packages would be removed:\n  %s\n"
                  % "\n  ".join(removal_list))
        else:
            print("Removing packages\n")
            args = ["pip", "uninstall", "-y"]
            args.extend(list(removal_list))
            subprocess.check_call(args)
Remove all installed packages that are missing from the given list.
374,222
def login_required(fn):
    @wraps(fn)
    def login_wrapper(ctx, *args, **kwargs):
        base_url = os.environ.get("ONE_CODEX_API_BASE", "https://app.onecodex.com")
        api_kwargs = {"telemetry": ctx.obj["TELEMETRY"]}
        api_key_prior_login = ctx.obj.get("API_KEY")
        bearer_token_env = os.environ.get("ONE_CODEX_BEARER_TOKEN")
        api_key_env = os.environ.get("ONE_CODEX_API_KEY")
        api_key_creds_file = _login(base_url, silent=True)
        if api_key_prior_login is not None:
            api_kwargs["api_key"] = api_key_prior_login
        elif bearer_token_env is not None:
            api_kwargs["bearer_token"] = bearer_token_env
        elif api_key_env is not None:
            api_kwargs["api_key"] = api_key_env
        elif api_key_creds_file is not None:
            api_kwargs["api_key"] = api_key_creds_file
        else:
            click.echo(
                "The command you specified requires authentication. Please login first.\n",
                err=True
            )
            ctx.exit()
        ctx.obj["API"] = Api(**api_kwargs)
        return fn(ctx, *args, **kwargs)
    return login_wrapper
Requires login before proceeding, but does not prompt the user to login. Decorator should be used only on Click CLI commands. Notes ----- Different means of authentication will be attempted in this order: 1. An API key present in the Click context object from a previous successful authentication. 2. A bearer token (ONE_CODEX_BEARER_TOKEN) in the environment. 3. An API key (ONE_CODEX_API_KEY) in the environment. 4. An API key in the credentials file (~/.onecodex).
374,223
def isoformat(self, sep='T'):
    s = ("%04d-%02d-%02d%c" % (self._year, self._month, self._day, sep) +
         _format_time(self._hour, self._minute,
                      self._second, self._microsecond))
    off = self.utcoffset()
    if off is not None:
        if off.days < 0:
            sign = "-"
            off = -off
        else:
            sign = "+"
        hh, mm = divmod(off, timedelta(hours=1))
        assert not mm % timedelta(minutes=1), "whole minute"
        mm //= timedelta(minutes=1)
        s += "%s%02d:%02d" % (sign, hh, mm)
    return s
Return the time formatted according to ISO. This is 'YYYY-MM-DD HH:MM:SS.mmmmmm', or 'YYYY-MM-DD HH:MM:SS' if self.microsecond == 0. If self.tzinfo is not None, the UTC offset is also attached, giving 'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM' or 'YYYY-MM-DD HH:MM:SS+HH:MM'. Optional argument sep specifies the separator between date and time, default 'T'.
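The behavior matches the standard library, so a quick check can be run against datetime itself:

from datetime import datetime, timezone
print(datetime(2020, 1, 2, 3, 4, 5, tzinfo=timezone.utc).isoformat(' '))
# 2020-01-02 03:04:05+00:00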
374,224
def put_skeleton_files_on_disk(metadata_type, where, github_template=None, params={}):
    api_name = params["api_name"]
    file_name = github_template["file_name"]
    # the plugin-setting names and defaults were lost in extraction;
    # the strings below are placeholders
    template_source = config.connection.get_plugin_client_setting(
        'mm_template_source', 'SublimeText/MavensMate-Templates/master')
    template_location = config.connection.get_plugin_client_setting(
        'mm_template_location', 'remote')
    try:
        if template_location == 'remote':
            if 'linux' in sys.platform:
                template_body = os.popen(
                    "wget https://raw.githubusercontent.com/{0}/{1}/{2} -q -O -"
                    .format(template_source, metadata_type, file_name)).read()
            else:
                template_body = urllib2.urlopen(
                    "https://raw.githubusercontent.com/{0}/{1}/{2}"
                    .format(template_source, metadata_type, file_name)).read()
        else:
            template_body = get_file_as_string(
                os.path.join(template_source, metadata_type, file_name))
    except:
        template_body = get_file_as_string(
            os.path.join(config.base_path, config.support_dir, "templates",
                         "github-local", metadata_type, file_name))
    template = env.from_string(template_body)
    file_body = template.render(params)
    metadata_type = get_meta_type_by_name(metadata_type)
    # the describe-dict keys were lost in extraction; 'directoryName' and
    # 'suffix' are the standard Salesforce metadata describe keys
    os.makedirs("{0}/{1}".format(where, metadata_type['directoryName']))
    f = open("{0}/{1}/{2}".format(
        where, metadata_type['directoryName'],
        api_name + "." + metadata_type['suffix']), 'w')
    f.write(file_body)
    f.close()
    # the meta.xml template name was lost in extraction; placeholder
    template = env.get_template('meta.xml')
    file_body = template.render(api_name=api_name,
                                sfdc_api_version=SFDC_API_VERSION,
                                meta_type=metadata_type['xmlName'])
    f = open("{0}/{1}/{2}".format(
        where, metadata_type['directoryName'],
        api_name + "." + metadata_type['suffix']) + "-meta.xml", 'w')
    f.write(file_body)
    f.close()
Generates files based on jinja2 templates.
374,225
def _input_as_lines(self, data):
    # the tmp-file suffix literal was lost in extraction; ".txt" is a placeholder
    self._input_filename = self.getTmpFilename(self.WorkingDir, suffix='.txt')
    with open(self._input_filename, 'w') as f:
        for line in data:
            f.write(str(line).strip())
            f.write('\n')
    return self._input_filename
Write sequence of lines to temp file, return filename data: a sequence to be written to a file, each element of the sequence will compose a line in the file * Note: '\n' will be stripped off the end of each sequence element before writing to a file in order to avoid multiple new lines accidentally be written to a file
374,226
def xml(self, text=TEXT):
    def convert(line):
        xml = "  <item>\n"
        for f in line.index:
            xml += "    <field name=\"%s\">%s</field>\n" % (f, line[f])
        xml += "  </item>\n"
        return xml
    return ("<items>\n" +
            "".join(self._data.apply(convert, axis=1)) +
            "</items>")
Generate an XML output from the report data.
374,227
def update(self, scopes=[], add_scopes=[], rm_scopes=[], note='', note_url=''):
    success = False
    json = None
    # payload keys follow the GitHub OAuth authorizations API
    if scopes:
        d = {'scopes': scopes}
        json = self._json(self._post(self._api, data=d), 200)
    if add_scopes:
        d = {'add_scopes': add_scopes}
        json = self._json(self._post(self._api, data=d), 200)
    if rm_scopes:
        d = {'remove_scopes': rm_scopes}
        json = self._json(self._post(self._api, data=d), 200)
    if note or note_url:
        d = {'note': note, 'note_url': note_url}
        json = self._json(self._post(self._api, data=d), 200)
    if json:
        self._update_(json)
        success = True
    return success
Update this authorization. :param list scopes: (optional), replaces the authorization scopes with these :param list add_scopes: (optional), scopes to be added :param list rm_scopes: (optional), scopes to be removed :param str note: (optional), new note about authorization :param str note_url: (optional), new note URL about this authorization :returns: bool
374,228
def _state(self):
    state = {}
    # the required-key literals were lost in extraction; the names below
    # are placeholders for the expected response sections
    required_keys = ('thermostatInfo', 'gasUsage', 'powerUsage',
                     'thermostatStates', 'deviceStatusInfo')
    try:
        for _ in range(self._state_retries):
            state.update(self._get_data())
    except TypeError:
        self._logger.exception('could not get data from the api')
    message = 'accumulated state: {state}'.format(state=state)
    self._logger.debug(message)
    self._state_.update(state)
    if not all([key in self._state_.keys() for key in required_keys]):
        raise IncompleteResponse(state)
    return self._state_
The internal state of the object. The api responses are not consistent, so a retry is performed on every call, with the returned information updating the internally saved state and refreshing the data. The info is cached for STATE_CACHING_SECONDS.

:return: The current state of the toon's information.
374,229
def dl_file(url, dest, chunk_size=6553):
    import urllib3
    http = urllib3.PoolManager()
    r = http.request('GET', url, preload_content=False)
    with dest.open('wb') as out:
        while True:
            data = r.read(chunk_size)
            if data is None or len(data) == 0:
                break
            out.write(data)
    r.release_conn()
Download `url` to `dest`
374,230
def read_source_models(fnames, converter, monitor):
    for fname in fnames:
        if fname.endswith(('.xml', '.nrml')):
            sm = to_python(fname, converter)
        elif fname.endswith('.hdf5'):
            sm = sourceconverter.to_python(fname, converter)
        else:
            raise ValueError('Unrecognized extension in %s' % fname)
        sm.fname = fname
        yield sm
:param fnames: list of source model files :param converter: a SourceConverter instance :param monitor: a :class:`openquake.performance.Monitor` instance :yields: SourceModel instances
374,231
def make_config_data(*, guided):
    config_data = {}
    config_data[INCLUDE_DIRS_KEY] = _make_include_dirs(guided=guided)
    config_data[RUNTIME_DIRS_KEY] = _make_runtime_dirs(guided=guided)
    config_data[RUNTIME_KEY] = _make_runtime()
    return config_data
Makes the data necessary to construct a functional config file
374,232
def _open_connection(self):
    if self._mode == PROP_MODE_SERIAL:
        self._serial = serial.Serial(self._serial_device, self._serial_speed)
    elif self._mode == PROP_MODE_TCP:
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._socket.connect((self._ip, self._port))
    elif self._mode == PROP_MODE_FILE:
        self._file = open(self._file_path, "r")
Open a connection to the easyfire unit.
374,233
def add_task_status(self, name, **attrs):
    return TaskStatuses(self.requester).create(self.id, name, **attrs)
Add a Task status to the project and returns a :class:`TaskStatus` object. :param name: name of the :class:`TaskStatus` :param attrs: optional attributes for :class:`TaskStatus`
374,234
def decorator_with_args(func, return_original=False, target_pos=0):
    if sys.version_info[0] >= 3:
        target_name = inspect.getfullargspec(func).args[target_pos]
    else:
        target_name = inspect.getargspec(func).args[target_pos]

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if len(args) > target_pos:
            res = func(*args, **kwargs)
            return args[target_pos] if return_original else res
        elif len(args) <= 0 and target_name in kwargs:
            res = func(*args, **kwargs)
            return kwargs[target_name] if return_original else res
        else:
            return wrap_with_args(*args, **kwargs)

    def wrap_with_args(*args, **kwargs):
        def wrapped_with_args(target):
            kwargs2 = dict()
            kwargs2[target_name] = target
            kwargs2.update(kwargs)
            res = func(*args, **kwargs2)
            return target if return_original else res
        return wrapped_with_args

    return wrapper
Enable a function to work with a decorator with arguments Args: func (callable): The input function. return_original (bool): Whether the resultant decorator returns the decorating target unchanged. If True, will return the target unchanged. Otherwise, return the returned value from *func*. Default to False. This is useful for converting a non-decorator function to a decorator. See examples below. Return: callable: a decorator with arguments. Examples: >>> @decorator_with_args ... def register_plugin(plugin, arg1=1): ... print('Registering '+plugin.__name__+' with arg1='+str(arg1)) ... return plugin # note register_plugin is an ordinary decorator >>> @register_plugin(arg1=10) ... def plugin1(): pass Registering plugin1 with arg1=10 >>> @decorator_with_args(return_original=True) ... def register_plugin_xx(plugin, arg1=1): ... print('Registering '+plugin.__name__+' with arg1='+str(arg1)) ... # Note register_plugin_xxx does not return plugin, so it cannot ... # be used as a decorator directly before applying ... # decorator_with_args. >>> @register_plugin_xx(arg1=10) ... def plugin1(): pass Registering plugin1 with arg1=10 >>> plugin1() >>> @decorator_with_args(return_original=True) ... def register_plugin_xxx(plugin, arg1=1): pass >>> # use result decorator as a function >>> register_plugin_xxx(plugin=plugin1, arg1=10) <function plugin1...> >>> @decorator_with_args(return_original=True, target_pos=1) ... def register_plugin_xxxx(arg1, plugin, arg2=10): ... print('Registering '+plugin.__name__+' with arg1='+str(arg1)) >>> @register_plugin_xxxx(100) ... def plugin2(): pass Registering plugin2 with arg1=100
374,235
def initialize(self, length=None):
    if length is None:
        length = len(self.bounds)
    max_i = min((len(self.bounds) - 1, length - 1))
    self._lb = array([
        self.bounds[min((i, max_i))][0]
        if self.bounds[min((i, max_i))][0] is not None else -np.Inf
        for i in xrange(length)], copy=False)
    self._ub = array([
        self.bounds[min((i, max_i))][1]
        if self.bounds[min((i, max_i))][1] is not None else np.Inf
        for i in xrange(length)], copy=False)
    lb = self._lb
    ub = self._ub
    self._al = array([
        min([(ub[i] - lb[i]) / 2, (1 + np.abs(lb[i])) / 20])
        if isfinite(lb[i]) else 1 for i in rglen(lb)], copy=False)
    self._au = array([
        min([(ub[i] - lb[i]) / 2, (1 + np.abs(ub[i])) / 20])
        if isfinite(ub[i]) else 1 for i in rglen(ub)], copy=False)
see ``__init__``
374,236
def GetFormatStringAttributeNames(self):
    if self._format_string_attribute_names is None:
        self._format_string_attribute_names = []
        for format_string_piece in self.FORMAT_STRING_PIECES:
            attribute_names = self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall(
                format_string_piece)
            if attribute_names:
                self._format_string_attribute_names.extend(attribute_names)
    return set(self._format_string_attribute_names)
Retrieves the attribute names in the format string. Returns: set(str): attribute names.
374,237
def generate_address(self):
    binary_pubkey = binascii.unhexlify(self.public_key)
    binary_digest_sha256 = hashlib.sha256(binary_pubkey).digest()
    binary_digest_ripemd160 = hashlib.new('ripemd160',
                                          binary_digest_sha256).digest()
    binary_version_byte = bytes([0])
    binary_with_version_key = binary_version_byte + binary_digest_ripemd160
    checksum_intermed = hashlib.sha256(binary_with_version_key).digest()
    checksum_intermed = hashlib.sha256(checksum_intermed).digest()
    checksum = checksum_intermed[:4]
    binary_address = binary_with_version_key + checksum
    # each leading zero byte becomes a leading '1' in Base58Check
    leading_zero_bytes = 0
    for char in binary_address:
        if char != 0:
            break
        leading_zero_bytes += 1
    # interpret the address bytes as a big-endian integer ...
    inp = binary_address
    result = 0
    while len(inp) > 0:
        result *= 256
        result += inp[0]
        inp = inp[1:]
    # ... and convert it to Base58 digits (the alphabet literal was lost
    # in extraction; this is the standard Bitcoin Base58 alphabet)
    code_string = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
    result_string = ''
    while result > 0:
        result_string = code_string[result % 58] + result_string
        result //= 58
    address = '1' * leading_zero_bytes + result_string
    return address
Creates a Bitcoin address from the public key. Details of the steps for creating the address are outlined in this link: https://en.bitcoin.it/wiki/Technical_background_of_version_1_Bitcoin_addresses The last step is Base58Check encoding, which is similar to Base64 encoding but slightly different, to create a more human-readable string where '1' and 'l' won't get confused. More on Base58Check encoding here: https://en.bitcoin.it/wiki/Base58Check_encoding
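A tiny worked sketch of the Base58 digit loop on hypothetical bytes:

alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
value = int.from_bytes(b'\x01\x00', 'big')  # 256
digits = ''
while value > 0:
    digits = alphabet[value % 58] + digits
    value //= 58
# 256 = 4 * 58 + 24, so digits == alphabet[4] + alphabet[24] == '5R'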
374,238
def cli(env, zone):
    manager = SoftLayer.DNSManager(env.client)
    zone_id = helpers.resolve_id(manager.resolve_ids, zone, name='zone')
    if not (env.skip_confirmations or formatting.no_going_back(zone)):
        raise exceptions.CLIAbort("Aborted.")
    manager.delete_zone(zone_id)
Delete zone.
374,239
def structure(cls):
    if cls.signature is NotImplemented:
        raise NotImplementedError("no signature defined")
    up = cls.cutter.elucidate()
    down = str(Seq(up).reverse_complement())
    ovhg = cls.cutter.ovhgseq
    upsig, downsig = cls.signature
    if cls.cutter.is_5overhang():
        upsite = "^{}_".format(ovhg)
        downsite = "_{}^".format(Seq(ovhg).reverse_complement())
    else:
        upsite = "_{}^".format(ovhg)
        downsite = "^{}_".format(Seq(ovhg).reverse_complement())
    if issubclass(cls, AbstractModule):
        return "".join([
            up.replace(upsite, "({})(".format(upsig)),
            "N*",
            down.replace(downsite, ")({})".format(downsig)),
        ])
    elif issubclass(cls, AbstractVector):
        return "".join([
            down.replace(downsite, "({})(".format(downsig)),
            "N*",
            up.replace(upsite, ")({})".format(upsig)),
        ])
    else:
        raise RuntimeError("Part must be either a module or a vector!")
Get the part structure, as a DNA regex pattern. The structure of most parts can be obtained automatically from the part signature and the restriction enzyme used in the Golden Gate assembly. Warning: If overloading this method, the returned pattern must include 3 capture groups to capture the following features: 1. The upstream (5') overhang sequence 2. The vector placeholder sequence 3. The downstream (3') overhang sequence
374,240
def map_address(self, session, map_space, map_base, map_size,
                access=False, suggested=None):
    raise NotImplementedError
Maps the specified memory space into the process's address space. Corresponds to viMapAddress function of the VISA library. :param session: Unique logical identifier to a session. :param map_space: Specifies the address space to map. (Constants.*SPACE*) :param map_base: Offset (in bytes) of the memory to be mapped. :param map_size: Amount of memory to map (in bytes). :param access: :param suggested: If not Constants.VI_NULL (0), the operating system attempts to map the memory to the address specified in suggested. There is no guarantee, however, that the memory will be mapped to that address. This operation may map the memory into an address region different from suggested. :return: address in your process space where the memory was mapped, return value of the library call. :rtype: address, :class:`pyvisa.constants.StatusCode`
374,241
def save(self, fname):
    try:
        with open(fname, "w") as f:
            f.write(str(self))
    except Exception as ex:
        # the message prefix literal was lost in extraction; placeholder
        print("could not save grid to " + fname + ": " + str(ex))
saves a grid to file as ASCII text
374,242
def main(args=None, vc=None, cwd=None, apply_config=False):
    import signal
    try:
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    except AttributeError:
        # SIGPIPE is not available on Windows
        pass
    try:
        if args is None:
            args = []
        # the --list-fixes handling was mangled in extraction;
        # reconstructed around the surviving fragment
        try:
            args_set = set(args)
        except TypeError:
            args_set = set(args.split())  # args are a string
        if '--list-fixes' in args_set:
            from autopep8 import supported_fixes
            for code, description in sorted(supported_fixes()):
                print('{code} - {description}'.format(
                    code=code, description=description))
            return 0
        try:
            try:
                args = parse_args(args, apply_config=apply_config)
            except TypeError:
                pass  # args is already a Namespace
            if args.from_diff:
                r = Radius.from_diff(args.from_diff.read(),
                                     options=args, cwd=cwd)
            else:
                r = Radius(rev=args.rev, options=args, vc=vc, cwd=cwd)
        except NotImplementedError as e:
            print(e)
            return 1
        except CalledProcessError as c:
            output = c.output.splitlines()[0]
            print(output)
            return c.returncode
        any_changes = r.fix()
        if any_changes and args.error_status:
            return 1
        return 0
    except KeyboardInterrupt:
        return 1
PEP8 clean only the parts of the files touched since the last commit, a previous commit or branch.
374,243
def _precedence_parens(self, node, child, is_left=True):
    if self._should_wrap(node, child, is_left):
        return "(%s)" % child.accept(self)
    return child.accept(self)
Wrap child in parens only if required to keep same semantics
374,244
def _PrintWarningsDetails(self, storage):
    if not storage.HasWarnings():
        # message literal lost in extraction; placeholder
        self._output_writer.Write('No warnings stored.\n')
        return
    for index, warning in enumerate(storage.GetWarnings()):
        title = 'Warning: {0:d}'.format(index)
        table_view = views.ViewsFactory.GetTableView(
            self._views_format_type, title=title)
        table_view.AddRow(['Message', warning.message])
        table_view.AddRow(['Parser chain', warning.parser_chain])
        path_specification = warning.path_spec.comparable
        for path_index, line in enumerate(path_specification.split('\n')):
            if not line:
                continue
            if path_index == 0:
                table_view.AddRow(['Path specification', line])
            else:
                table_view.AddRow(['', line])
        table_view.Write(self._output_writer)
Prints the details of the warnings. Args: storage (BaseStore): storage.
374,245
def getInterfaceInAllSpeeds(interface, endpoint_list, class_descriptor_list=()):
    interface = getDescriptor(
        USBInterfaceDescriptor,
        bNumEndpoints=len(endpoint_list),
        **interface
    )
    class_descriptor_list = list(class_descriptor_list)
    fs_list = [interface] + class_descriptor_list
    hs_list = [interface] + class_descriptor_list
    ss_list = [interface] + class_descriptor_list
    # dict-key literals below were lost in extraction; reconstructed from
    # the key names given in the docstring
    need_address = (
        endpoint_list[0]['endpoint'].get(
            'bEndpointAddress', 0,
        ) & ~ch9.USB_DIR_IN == 0
    )
    for index, endpoint in enumerate(endpoint_list, 1):
        endpoint_kw = endpoint['endpoint'].copy()
        transfer_type = endpoint_kw[
            'bmAttributes'] & ch9.USB_ENDPOINT_XFERTYPE_MASK
        fs_max, hs_max, ss_max = _MAX_PACKET_SIZE_DICT[transfer_type]
        if need_address:
            endpoint_kw['bEndpointAddress'] = index | (
                endpoint_kw.get('bEndpointAddress', 0) & ch9.USB_DIR_IN
            )
        klass = (
            USBEndpointDescriptor
            if 'bRefresh' in endpoint_kw or 'bSynchAddress' in endpoint_kw
            else USBEndpointDescriptorNoAudio
        )
        interval = endpoint_kw.pop('bInterval', _MARKER)
        if interval is _MARKER:
            fs_interval = hs_interval = 0
        else:
            if transfer_type == ch9.USB_ENDPOINT_XFER_BULK:
                fs_interval = 0
                hs_interval = interval
            else:
                # convert milliseconds to the per-speed interval encodings
                fs_interval = max(1, min(255, round(interval)))
                hs_interval = max(
                    1,
                    min(16, int(round(1 + math.log(interval * 8, 2)))),
                )
        packet_size = endpoint_kw.pop('wMaxPacketSize', _MARKER)
        if packet_size is _MARKER:
            fs_packet_size = fs_max
            hs_packet_size = hs_max
            ss_packet_size = ss_max
        else:
            fs_packet_size = min(fs_max, packet_size)
            hs_packet_size = min(hs_max, packet_size)
            ss_packet_size = min(ss_max, packet_size)
        fs_list.append(getDescriptor(
            klass,
            wMaxPacketSize=fs_packet_size,
            bInterval=fs_interval,
            **endpoint_kw
        ))
        hs_list.append(getDescriptor(
            klass,
            wMaxPacketSize=hs_packet_size,
            bInterval=hs_interval,
            **endpoint_kw
        ))
        ss_list.append(getDescriptor(
            klass,
            wMaxPacketSize=ss_packet_size,
            bInterval=hs_interval,
            **endpoint_kw
        ))
        ss_companion_kw = endpoint.get('superspeed', _EMPTY_DICT)
        ss_list.append(getDescriptor(
            USBSSEPCompDescriptor,
            **ss_companion_kw
        ))
        ssp_iso_kw = endpoint.get('superspeed_iso', _EMPTY_DICT)
        if bool(ssp_iso_kw) != (
            endpoint_kw.get('bmAttributes', 0) &
            ch9.USB_ENDPOINT_XFERTYPE_MASK == ch9.USB_ENDPOINT_XFER_ISOC
            and bool(ch9.USB_SS_SSP_ISOC_COMP(
                ss_companion_kw.get('bmAttributes', 0),
            ))
        ):
            raise ValueError('inconsistent "superspeed_iso" usage')
        if ssp_iso_kw:
            ss_list.append(getDescriptor(
                USBSSPIsocEndpointDescriptor,
                **ssp_iso_kw
            ))
    return (fs_list, hs_list, ss_list)
Produce similar fs, hs and ss interface and endpoints descriptors. Should be useful for devices desiring to work in all 3 speeds with maximum endpoint wMaxPacketSize. Reduces data duplication from descriptor declarations. Not intended to cover fancy combinations.

interface (dict):
    Keyword arguments for getDescriptor(USBInterfaceDescriptor, ...) in all speeds. bNumEndpoints must not be provided.
endpoint_list (list of dicts):
    Each dict represents an endpoint, and may contain the following items:
    - "endpoint": required, contains keyword arguments for getDescriptor(USBEndpointDescriptorNoAudio, ...) or getDescriptor(USBEndpointDescriptor, ...). The with-audio variant is picked when its extra fields are assigned a value. wMaxPacketSize may be missing, in which case it will be set to the maximum size for the given speed and endpoint type. bmAttributes must be provided. If bEndpointAddress is zero (excluding direction bit) on the first endpoint, endpoints will be assigned their rank in this list, starting at 1. Their direction bit is preserved. If bInterval is present on an INT or ISO endpoint, it must be in millisecond units (but may not be an integer), and will be converted to the nearest integer millisecond for the full-speed descriptor, and the nearest possible interval for high- and super-speed descriptors. If bInterval is present on a BULK endpoint, it is set to zero on the full-speed descriptor and used as provided on high- and super-speed descriptors.
    - "superspeed": optional, contains keyword arguments for getDescriptor(USBSSEPCompDescriptor, ...)
    - "superspeed_iso": optional, contains keyword arguments for getDescriptor(USBSSPIsocEndpointDescriptor, ...). Must be provided and non-empty only when the endpoint is isochronous and the "superspeed" dict has "bmAttributes" bit 7 set.
class_descriptor_list (list of descriptors of any type):
    Descriptors to insert, in all speeds, between the interface descriptor and endpoint descriptors.

Returns a 3-tuple of lists:
- fs descriptors
- hs descriptors
- ss descriptors
374,246
def _multi_blockify(tuples, dtype=None):
    # group the (placement, array) tuples by dtype
    grouper = itertools.groupby(tuples, lambda x: x[2].dtype)
    new_blocks = []
    for dtype, tup_block in grouper:
        values, placement = _stack_arrays(list(tup_block), dtype)
        block = make_block(values, placement=placement)
        new_blocks.append(block)
    return new_blocks
return an array of blocks that potentially have different dtypes
374,247
def __get_keys(self, name='master', passphrase=None):
    path = os.path.join(self.opts['pki_dir'], name + '.pem')
    if not os.path.exists(path):
        log.info('Generating %s keys: %s', name, self.opts['pki_dir'])
        gen_keys(self.opts['pki_dir'], name, self.opts['keysize'],
                 self.opts.get('user'), passphrase)
    if HAS_M2:
        key_error = RSA.RSAError
    else:
        key_error = ValueError
    try:
        key = get_rsa_key(path, passphrase)
    except key_error as e:
        # message literal lost in extraction; placeholder
        message = 'Unable to read key: {0}; error: {1}'.format(path, e)
        log.error(message)
        raise MasterExit(message)
    log.debug('Loaded %s key: %s', name, path)
    return key
Returns a key object for a key in the pki-dir
374,248
def list_provincie_adapter(obj, request):
    # key literals were lost in extraction; reconstructed from the
    # attribute names
    return {
        'niscode': obj.niscode,
        'naam': obj.naam,
        'gewest': {
            'id': obj.gewest.id,
            'naam': obj.gewest.naam,
        },
    }
Adapter for rendering a list of :class:`crabpy.gateway.crab.Provincie` to json.
374,249
def _get_available_placements(D, tt):
    _options = []
    try:
        for _pc in ["paleoData", "chronData"]:
            if _pc in D:
                for section_name, section_data in D[_pc].items():
                    if tt == "measurement":
                        if "measurementTable" in section_data:
                            _options.append(_get_available_placements_1(
                                section_data["measurementTable"],
                                section_name, "measurement"))
                    else:
                        if "model" in section_data:
                            if tt == "model":
                                _options.append(_get_available_placements_1(
                                    section_data["model"], section_name,
                                    "model"))
                            else:
                                for _k, _v in section_data["model"].items():
                                    _tt_table = "{}Table".format(tt)
                                    if _tt_table in _v:
                                        _options.append(
                                            _get_available_placements_1(
                                                _v[_tt_table], _k, tt))
    except Exception:
        # the original error handling was mangled in extraction;
        # fall through to the emptiness check below
        pass
    if not _options:
        sys.exit("Error: No available positions found to place new data. "
                 "Something went wrong.")
    return _options
Called from: _prompt_placement() Get a list of possible places that we can put the new model data into. If no model exists yet, we'll use something like chron0model0. If other models exist, we'll go for the n+1 entry. ex: chron0model0 already exists, so we'll look to chron0model1 next. :param dict D: Metadata :param str tt: Table Type :return list _options: Possible placements
374,250
def matrix_to_gl(matrix):
    matrix = np.asanyarray(matrix, dtype=np.float64)
    if matrix.shape != (4, 4):
        raise ValueError('matrix must be (4, 4)!')
    # flatten in column-major order and pack into GLfloats
    column = matrix.T.flatten()
    glmatrix = (gl.GLfloat * 16)(*column)
    return glmatrix
Convert a numpy row-major homogeneous transformation matrix to a flat column-major GLfloat transformation.

Parameters
-------------
matrix : (4,4) float
    Row-major homogeneous transform

Returns
-------------
glmatrix : (16,) gl.GLfloat
    Transform in pyglet format
374,251
def last(self, values, axis=0):
    values = np.asarray(values)
    return self.unique, np.take(
        values, self.index.sorter[self.index.stop - 1], axis)
return values at the last occurrence of its associated key

Parameters
----------
values : array_like, [keys, ...]
    values to pick the last value of per group
axis : int, optional
    alternative reduction axis for values

Returns
-------
unique : ndarray, [groups]
    unique keys
reduced : ndarray, [groups, ...]
    value array, reduced over groups
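A hedged usage sketch, assuming the numpy_indexed-style GroupBy API this method appears to belong to:

import numpy as np
import numpy_indexed as npi

keys = np.array([0, 0, 1, 1])
vals = np.array([10, 20, 30, 40])
unique, lasts = npi.group_by(keys).last(vals)
# unique -> [0, 1], lasts -> [20, 40]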
374,252
def isLoopback(self, ifname):
    # the name-prefix literal was lost in extraction; interfaces named
    # lo* are conventionally loopback devices
    if ifname.startswith('lo'):
        return True
    return (self.getFlags(ifname) & self.IFF_LOOPBACK) != 0
Check whether interface is a loopback device. @param ifname: interface name @type ifname: string
374,253
def fftconvolve(in1, in2, mode="full", axis=None):
    s1 = np.array(in1.shape)
    s2 = np.array(in2.shape)
    complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
                      np.issubdtype(in2.dtype, np.complexfloating))
    if axis is None:
        size = s1 + s2 - 1
        fslice = tuple([slice(0, int(sz)) for sz in size])
    else:
        equal_shapes = s1 == s2
        equal_shapes[axis] = True
        # the assert message literal was lost in extraction; placeholder
        assert equal_shapes.all(), 'Shape mismatch on non-convolution axes.'
        size = s1[axis] + s2[axis] - 1
        fslice = [slice(l) for l in s1]
        fslice[axis] = slice(0, int(size))
        fslice = tuple(fslice)
    fsize = 2 ** int(np.ceil(np.log2(size)))
    if axis is None:
        IN1 = fftpack.fftn(in1, fsize)
        IN1 *= fftpack.fftn(in2, fsize)
        ret = fftpack.ifftn(IN1)[fslice].copy()
    else:
        IN1 = fftpack.fft(in1, fsize, axis=axis)
        IN1 *= fftpack.fft(in2, fsize, axis=axis)
        ret = fftpack.ifft(IN1, axis=axis)[fslice].copy()
    del IN1
    if not complex_result:
        ret = ret.real
    if mode == "full":
        return ret
    elif mode == "same":
        if np.product(s1, axis=0) > np.product(s2, axis=0):
            osize = s1
        else:
            osize = s2
        return signaltools._centered(ret, osize)
    elif mode == "valid":
        return signaltools._centered(ret, abs(s2 - s1) + 1)
Convolve two N-dimensional arrays using FFT. See convolve. This is a fix of scipy.signal.fftconvolve, adding an axis argument and importing locally the stuff only needed for this function
374,254
def add_port_profile_to_delete_table(self, profile_name, device_id):
    if not self.has_port_profile_to_delete(profile_name, device_id):
        port_profile = ucsm_model.PortProfileDelete(
            profile_id=profile_name, device_id=device_id)
        with self.session.begin(subtransactions=True):
            self.session.add(port_profile)
        return port_profile
Adds a port profile to the delete table.
374,255
def _find_line_start_index(self, index):
    indexes = self._line_start_indexes
    pos = bisect.bisect_right(indexes, index) - 1
    return pos, indexes[pos]
For the index of a character at a certain line, calculate the index of the first character on that line. Return (row, index) tuple.
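A quick illustration of the bisect trick, with hypothetical line-start offsets:

import bisect
line_starts = [0, 10, 25, 40]
pos = bisect.bisect_right(line_starts, 27) - 1
# pos == 2: character 27 sits on the third line, which starts at offset 25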
374,256
def submitted_projects(raw_df):
    # column-name literals were lost in extraction; the names below are
    # placeholders for the cast columns, the grouping key, the grouped
    # column, the aggregations, and the output column names
    df = raw_df.astype({'project_id': str, 'user_id': str})
    submitted_projects = df.groupby('project_id')['user_id'].agg(
        ['count', 'nunique'])
    submitted_projects.columns = ['submissions', 'unique_users']
    return submitted_projects
Return all submitted projects.
374,257
def _get_errors(self):
    # key literals lost in extraction; placeholders follow a common
    # {"response": {"errors": ...}} payload shape
    errors = self.json.get('response', {}).get('errors')
    if errors:
        logger.error(errors)
    return errors
Gets errors from HTTP response
374,258
def list(region=None, key=None, keyid=None, profile=None):
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        trails = conn.describe_trails()
        if not bool(trails.get('trailList')):
            log.warning('No trails found')
        return {'trails': trails.get('trailList', [])}
    except ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
List all trails

Returns list of trails

CLI Example:

.. code-block:: yaml

    policies:
      - {...}
      - {...}
374,259
def wrapper__unignore(self, type_):
    if type_ in self.__exclusion_list:
        self.__exclusion_list.remove(type_)
    return self.__exclusion_list
Stop selectively ignoring certain types when wrapping attributes. :param class type: The class/type definition to stop ignoring. :rtype list(type): The current list of ignored types
374,260
def find_substring_edge(self, substring, suffix_tree_id):
    suffix_tree = self.suffix_tree_repo[suffix_tree_id]
    started = datetime.datetime.now()
    edge, ln = find_substring_edge(substring=substring,
                                   suffix_tree=suffix_tree,
                                   edge_repo=self.edge_repo)
    print(" - searched for edge in {} for substring: {}".format(
        datetime.datetime.now() - started, substring))
    return edge, ln
Returns an edge that matches the given substring.
374,261
def get_series(self, series):
    if series == "acs1":
        return self.census.acs1dp
    elif series == "acs5":
        return self.census.acs5
    elif series == "sf1":
        return self.census.sf1
    elif series == "sf3":
        return self.census.sf3
    else:
        return None
Returns a census series API handler.
374,262
def parts(xs, number=None, length=None):
    if number is not None and type(number) is not int:
        raise PartsError("Number of parts must be an integer.")
    if length is not None:
        if type(length) is not int:
            if type(length) is not list or\
                    (not all([type(l) is int for l in length])):
                raise PartsError(
                    "Length parameter must be an integer or list of integers.")
    if number is not None and length is None:
        number = max(1, min(len(xs), number))
        length = len(xs) // number
        i = 0
        while number > 0 and i < len(xs):
            number -= 1
            if number == 0:
                yield xs[i:]
                break
            else:
                yield xs[i:i + length]
                i += length
                length = (len(xs) - i) // number
    elif number is None and length is not None:
        if type(length) is int:
            length = max(1, length)
            for i in range(0, len(xs), length):
                yield xs[i:i + length]
        else:
            xs_index = 0
            len_index = 0
            while xs_index < len(xs):
                if xs_index + length[len_index] <= len(xs):
                    yield xs[xs_index:xs_index + length[len_index]]
                    xs_index += length[len_index]
                    len_index += 1
                else:
                    raise PartsError(
                        "Cannot return part of requested length; list too short.")
    elif number is not None and length is not None:
        if type(length) is int:
            if length * number != len(xs):
                raise PartsError(
                    "List cannot be split into " + str(number) +
                    " parts each of length " + str(length) + ".")
            length = max(1, length)
            for i in range(0, len(xs), length):
                yield xs[i:i + length]
        else:
            if len(length) == number:
                xs_index = 0
                len_index = 0
                while xs_index < len(xs):
                    if xs_index + length[len_index] <= len(xs):
                        yield xs[xs_index:xs_index + length[len_index]]
                        xs_index += length[len_index]
                        len_index += 1
                    else:
                        raise PartsError(
                            "Cannot return part of requested length; list too short.")
            else:
                raise PartsError(
                    "Number of parts does not match number of part lengths specified in input.")
    else:
        raise PartsError("Must specify number of parts or length of each part.")
Split a list into either the specified number of parts or a number of parts each of the specified length. The elements are distributed somewhat evenly among the parts if possible. >>> list(parts([1,2,3,4,5,6,7], length=1)) [[1], [2], [3], [4], [5], [6], [7]] >>> list(parts([1,2,3,4,5,6,7], length=2)) [[1, 2], [3, 4], [5, 6], [7]] >>> list(parts([1,2,3,4,5,6,7], length=3)) [[1, 2, 3], [4, 5, 6], [7]] >>> list(parts([1,2,3,4,5,6,7], length=4)) [[1, 2, 3, 4], [5, 6, 7]] >>> list(parts([1,2,3,4,5,6,7], length=5)) [[1, 2, 3, 4, 5], [6, 7]] >>> list(parts([1,2,3,4,5,6,7], length=6)) [[1, 2, 3, 4, 5, 6], [7]] >>> list(parts([1,2,3,4,5,6,7], length=7)) [[1, 2, 3, 4, 5, 6, 7]] >>> list(parts([1,2,3,4,5,6,7], 1)) [[1, 2, 3, 4, 5, 6, 7]] >>> list(parts([1,2,3,4,5,6,7], 2)) [[1, 2, 3], [4, 5, 6, 7]] >>> list(parts([1,2,3,4,5,6,7], 3)) [[1, 2], [3, 4], [5, 6, 7]] >>> list(parts([1,2,3,4,5,6,7], 4)) [[1], [2, 3], [4, 5], [6, 7]] >>> list(parts([1,2,3,4,5,6,7], 5)) [[1], [2], [3], [4, 5], [6, 7]] >>> list(parts([1,2,3,4,5,6,7], 6)) [[1], [2], [3], [4], [5], [6, 7]] >>> list(parts([1,2,3,4,5,6,7], 7)) [[1], [2], [3], [4], [5], [6], [7]] >>> list(parts([1,2,3,4,5,6,7], 7, [1,1,1,1,1,1,1])) [[1], [2], [3], [4], [5], [6], [7]] >>> list(parts([1,2,3,4,5,6,7], length=[1,1,1,1,1,1,1])) [[1], [2], [3], [4], [5], [6], [7]] >>> list(parts([1,2,3,4,5,6], length=[2,2,2])) [[1, 2], [3, 4], [5, 6]] >>> list(parts([1,2,3,4,5,6], length=[1,2,3])) [[1], [2, 3], [4, 5, 6]] >>> list(parts([1,2,3,4,5,6], 2, 3)) [[1, 2, 3], [4, 5, 6]] >>> list(parts([1,2,3,4,5,6], number=3, length=2)) [[1, 2], [3, 4], [5, 6]] >>> list(parts([1,2,3,4,5,6], 2, length=[1,2,3])) Traceback (most recent call last): ... PartsError: 'Number of parts does not match number of part lengths specified in input.' >>> list(parts([1,2,3,4,5,6,7], number=3, length=2)) Traceback (most recent call last): ... PartsError: 'List cannot be split into 3 parts each of length 2.'
374,263
def unfinished_objects(self):
    mask = self._end_isnull
    if self._rbound is not None:
        mask = mask | (self._end > self._rbound)
    oids = set(self[mask]._oid.tolist())
    return self[self._oid.apply(lambda oid: oid in oids)]
Leaves only versions of those objects that have some version with `_end == None` or with `_end > right cutoff`.
374,264
def _extract_battery_info_from_acpi(self):
    def _parse_battery_info(acpi_battery_lines):
        battery = {}
        battery["percent_charged"] = int(
            findall("(?<= )(\d+)(?=%)", acpi_battery_lines[0])[0]
        )
        battery["charging"] = "Charging" in acpi_battery_lines[0]
        battery["capacity"] = int(
            findall("(?<= )(\d+)(?= mAh)", acpi_battery_lines[1])[1]
        )
        try:
            battery["time_remaining"] = "".join(
                findall(
                    "(?<=, )(\d+:\d+:\d+)(?= remaining)|"
                    "(?<=, )(\d+:\d+:\d+)(?= until)",
                    acpi_battery_lines[0],
                )[0]
            )
        except IndexError:
            battery["time_remaining"] = FULLY_CHARGED
        return battery

    acpi_list = self.py3.command_output(["acpi", "-b", "-i"]).splitlines()
    acpi_list = [acpi_list[i : i + 2] for i in range(0, len(acpi_list) - 1, 2)]
    return [_parse_battery_info(battery) for battery in acpi_list]
Get the battery info from acpi

# Example acpi -bi raw output (Discharging):
Battery 0: Discharging, 94%, 09:23:28 remaining
Battery 0: design capacity 5703 mAh, last full capacity 5283 mAh = 92%
Battery 1: Unknown, 98%
Battery 1: design capacity 1880 mAh, last full capacity 1370 mAh = 72%

# Example Charging
Battery 0: Charging, 96%, 00:20:40 until charged
Battery 0: design capacity 5566 mAh, last full capacity 5156 mAh = 92%
Battery 1: Unknown, 98%
Battery 1: design capacity 1879 mAh, last full capacity 1370 mAh = 72%
374,265
def assert_close(a, b, rtol=1e-07, atol=0, context=None):
    if isinstance(a, float) or isinstance(a, numpy.ndarray) and a.shape:
        numpy.testing.assert_allclose(a, b, rtol, atol)
        return
    if isinstance(a, (str, bytes, int)):
        assert a == b, (a, b)
        return
    if hasattr(a, '_slots_'):
        assert a._slots_ == b._slots_
        for x in a._slots_:
            assert_close(getattr(a, x), getattr(b, x), rtol, atol, x)
        return
    if hasattr(a, 'keys'):  # dict-like objects
        assert a.keys() == b.keys()
        for x in a:
            # the skipped-key literal was lost in extraction; placeholder
            if x != '__skip__':
                assert_close(a[x], b[x], rtol, atol, x)
        return
    if hasattr(a, '__dict__'):
        assert_close(vars(a), vars(b), context=a)
        return
    if hasattr(a, '__iter__'):
        xs, ys = list(a), list(b)
        assert len(xs) == len(ys), (
            'Lists of different lengths: %d != %d' % (len(xs), len(ys)))
        for x, y in zip(xs, ys):
            assert_close(x, y, rtol, atol, x)
        return
    if a == b:
        return
    ctx = '' if context is None else 'in context ' + repr(context)
    raise AssertionError('%r != %r %s' % (a, b, ctx))
Compare for equality up to a given precision two composite objects which may contain floats. NB: if the objects are or contain generators, they are exhausted. :param a: an object :param b: another object :param rtol: relative tolerance :param atol: absolute tolerance
374,266
def _parse_sigmak(line, lines):
    split_line = line.split()
    energy = float(split_line[0])
    re_sigma_xx = float(split_line[1])
    im_sigma_xx = float(split_line[2])
    re_sigma_zz = float(split_line[3])
    im_sigma_zz = float(split_line[4])
    return {"energy": energy, "re_sigma_xx": re_sigma_xx,
            "im_sigma_xx": im_sigma_xx, "re_sigma_zz": re_sigma_zz,
            "im_sigma_zz": im_sigma_zz}
Parse Energy, Re sigma xx, Im sigma xx, Re sigma zz, Im sigma zz
374,267
def build_specfile_filesection(spec, files):
    # the string literals in this function were lost in extraction; they
    # are reconstructed here following the SCons rpm packaging tool
    str = '%%files\n'
    if 'X_RPM_DEFATTR' not in spec:
        spec['X_RPM_DEFATTR'] = '(-,root,root)'
    str = str + '%%defattr %s\n' % spec['X_RPM_DEFATTR']
    supported_tags = {
        'PACKAGING_CONFIG': '%%config %s',
        'PACKAGING_CONFIG_NOREPLACE': '%%config(noreplace) %s',
        'PACKAGING_DOC': '%%doc %s',
        'PACKAGING_UNIX_ATTR': '%%attr %s',
        'PACKAGING_LANG_': '%%lang(%s) %s',
        'PACKAGING_X_RPM_VERIFY': '%%verify %s',
        'PACKAGING_X_RPM_DIR': '%%dir %s',
        'PACKAGING_X_RPM_DOCDIR': '%%docdir %s',
        'PACKAGING_X_RPM_GHOST': '%%ghost %s',
    }
    for file in files:
        tags = {}
        for k in list(supported_tags.keys()):
            try:
                v = file.GetTag(k)
                if v:
                    tags[k] = v
            except AttributeError:
                pass
        str = str + SimpleTagCompiler(supported_tags, mandatory=0).compile(
            tags
        )
        str = str + ' '
        str = str + file.GetTag('PACKAGING_INSTALL_LOCATION')
        str = str + '\n\n'
    return str
builds the %file section of the specfile
374,268
def get_content_type(self):
    ctype = self.part.get_content_type()
    # the list of under-specified content types was lost in extraction;
    # these are the usual suspects
    if ctype in ['octet/stream', 'application/octet-stream', 'text/plain']:
        ctype = guess_mimetype(self.get_data())
    return ctype
mime type of the attachment part
374,269
def compress(func):
    def wrapper(*args, **kwargs):
        ret = func(*args, **kwargs)
        logger.debug('{} {} {}'.format(
            request.method, request.url,
            ['{}: {}'.format(h, request.headers.get(h))
             for h in request.headers.keys()]
        ))
        if 'deflate' in request.headers.get('Accept-Encoding', ''):
            response.headers['Content-Encoding'] = 'deflate'
            ret = deflate_compress(ret)
        else:
            response.headers['Content-Encoding'] = 'identity'
        return ret

    def deflate_compress(data, compress_level=6):
        zobj = zlib.compressobj(compress_level, zlib.DEFLATED, zlib.MAX_WBITS,
                                zlib.DEF_MEM_LEVEL, zlib.Z_DEFAULT_STRATEGY)
        return zobj.compress(b(data)) + zobj.flush()

    return wrapper
Compress the result with the deflate algorithm if the client asks for it.
374,270
def href(*args, **kw):
    result = [(request.script_root if request else "") + "/"]
    for idx, arg in enumerate(args):
        result.append(("/" if idx else "") + url_quote(arg))
    if kw:
        result.append("?" + url_encode(kw))
    return "".join(result)
Simple function for URL generation. Position arguments are used for the URL path and keyword arguments are used for the url parameters.
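A quick sketch of the intended output, assuming werkzeug-style url_quote and url_encode helpers (outside a request, script_root is empty):

href('users', '42', page=2)
# -> '/users/42?page=2'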
374,271
def audits(self, ticket=None, include=None, **kwargs):
    # the object-type literal was lost in extraction; 'ticket_audit'
    # follows the Zendesk object naming
    if ticket is not None:
        return self._query_zendesk(self.endpoint.audits, 'ticket_audit',
                                   id=ticket, include=include)
    else:
        return self._query_zendesk(self.endpoint.audits.cursor,
                                   'ticket_audit', include=include, **kwargs)
Retrieve TicketAudits. If a ticket is passed, return the audits for that specific ticket. If ticket is None, a TicketAuditGenerator is returned to handle pagination. This generator works a little differently from the other Zenpy generators, as it is cursor based, allowing you to change the direction in which you consume objects. This is done with the reversed() python method. For example:

.. code-block:: python

    for audit in reversed(zenpy_client.tickets.audits()):
        print(audit)

See the `Zendesk docs <https://developer.zendesk.com/rest_api/docs/core/ticket_audits#pagination>`__ for information on additional parameters.

:param include: list of objects to sideload. `Side-loading API Docs <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__.
:param ticket: Ticket object or id
374,272
def get_gpd_line(self, transcript_name=None, gene_name=None, direction=None):
    return transcript_to_gpd_line(self, transcript_name=transcript_name,
                                  gene_name=gene_name, direction=direction)
Get the genpred format string representation of the mapping
374,273
def load_umatrix(self, filename):
    # the comment-character literal was lost in extraction; '%' is the
    # marker used by somoclu output files
    self.umatrix = np.loadtxt(filename, comments='%')
    if self.umatrix.shape != (self._n_rows, self._n_columns):
        raise Exception("The dimensions of the U-matrix do not "
                        "match that of the map")
Load the umatrix from a file to the Somoclu object. :param filename: The name of the file. :type filename: str.
374,274
def better_print(self, printer=None):
    printer = printer or pprint.pprint
    printer(self.value)
Print the value using a *printer*. :param printer: Callable used to print the value, by default: :func:`pprint.pprint`
374,275
def _send_reliable_message(self, msg):
    result = False
    max_retries = 15
    trans_id = next(LWLink.transaction_id)
    msg = "%d,%s" % (trans_id, msg)
    try:
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) \
                as write_sock, \
                socket.socket(socket.AF_INET, socket.SOCK_DGRAM) \
                as read_sock:
            write_sock.setsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            read_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
            read_sock.settimeout(self.SOCKET_TIMEOUT)
            # bind-address and encoding literals were lost in extraction;
            # listen on all interfaces, UTF-8 assumed
            read_sock.bind(('0.0.0.0', self.RX_PORT))
            while max_retries:
                max_retries -= 1
                write_sock.sendto(msg.encode('UTF-8'),
                                  (LWLink.link_ip, self.TX_PORT))
                result = False
                while True:
                    response, dummy = read_sock.recvfrom(1024)
                    response = response.decode('UTF-8')
                    if "Not yet registered." in response:
                        _LOGGER.error("Not yet registered")
                        self.register()
                        result = True
                        break
                    if response.startswith("%d,OK" % trans_id):
                        result = True
                        break
                    if response.startswith("%d,ERR" % trans_id):
                        _LOGGER.error(response)
                        break
                    _LOGGER.info(response)
                if result:
                    break
                time.sleep(0.25)
    except socket.timeout:
        _LOGGER.error("LW broker timeout!")
        return result
    except Exception as ex:
        _LOGGER.error(ex)
        raise
    if result:
        _LOGGER.info("LW broker OK!")
    else:
        _LOGGER.error("LW broker fail!")
    return result
Send msg to LightwaveRF hub.
374,276
def delete_node_1ton(node_list, begin, node, end):
    if end is None:
        # the original guard was mangled in extraction; fall back to the
        # node's successors when no explicit end is given
        end = node.successor
        assert end is not None
    elif not isinstance(end, list):
        end = [end]
    if any(e_.in_or_out for e_ in end):
        begin.out_redirect(node.single_input, node.single_output)
    else:
        for ne_ in end:
            target_var_name = node.single_input
            assert target_var_name in begin.output.values()
            ne_.in_redirect(node.single_output, target_var_name)
    begin.successor = [v_ for v_ in begin.successor
                       if v_ != node] + node.successor
    for ne_ in end:
        ne_.precedence = [begin if v_ == node else v_
                          for v_ in ne_.precedence]
    node_list.remove(node)
    return node_list
delete the node which has 1-input and n-output
374,277
def ticket_fields(self):
    if self.api and self.ticket_field_ids:
        return self.api._get_ticket_fields(self.ticket_field_ids)
| Comment: ids of all ticket fields which are in this ticket form
374,278
def delete_cookie(self, key, path='/', domain=None):
    self.set_cookie(key, expires=0, max_age=0, path=path, domain=domain)
Delete a cookie. Fails silently if key doesn't exist. :param key: the key (name) of the cookie to be deleted. :param path: if the cookie that should be deleted was limited to a path, the path has to be defined here. :param domain: if the cookie that should be deleted was limited to a domain, that domain has to be defined here.
374,279
def tf_import_demo_experience(self, states, internals, actions, terminal, reward):
    return self.demo_memory.store(
        states=states,
        internals=internals,
        actions=actions,
        terminal=terminal,
        reward=reward
    )
Imports a single experience to memory.
374,280
def matrixfromDicts(dicts):
    # the type-check literal was lost in extraction; 'numpy' is assumed,
    # so already-structured arrays pass through unchanged
    if 'numpy' in str(type(dicts)):
        return dicts
    names = set([])
    dicts = dictFlat(dicts)
    for item in dicts:
        names = names.union(list(item.keys()))
    names = sorted(list(names))
    data = np.empty((len(dicts), len(names)), dtype=float) * np.nan
    for y in range(len(dicts)):
        for key in dicts[y].keys():
            for x in range(len(names)):
                if names[x] in dicts[y]:
                    data[y, x] = dicts[y][names[x]]
    if len(dicts):
        data = np.core.records.fromarrays(data.transpose(), names=names)
    return data
Given a list of dicts (or a list of lists of dicts), return a structured array. Headings are sorted in alphabetical order.
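A minimal worked example of the same construction, inlined so it runs without the module's `dictFlat` helper (the sample `rows` data is invented for illustration):

import numpy as np

# Already-flat input; dictFlat would reduce nested lists to this form.
rows = [{"a": 1.0, "b": 2.0}, {"a": 3.0}]

names = sorted({k for d in rows for k in d})       # ['a', 'b']
data = np.empty((len(rows), len(names))) * np.nan  # missing cells stay NaN
for y, d in enumerate(rows):
    for x, name in enumerate(names):
        if name in d:
            data[y, x] = d[name]
rec = np.core.records.fromarrays(data.transpose(), names=names)
print(rec.a)   # [1. 3.]
print(rec.b)   # [2. nan]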
374,281
def _schema_to_json_file_object(self, schema_list, file_obj):
    json.dump(schema_list, file_obj, indent=2, sort_keys=True)
Helper function for schema_to_json that takes a schema list and file object and writes the schema list to the file object with json.dump
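A quick sketch of the helper's effect, using an in-memory file object so nothing touches disk (the `schema` sample is invented):

import io
import json

schema = [{"name": "id", "type": "INTEGER"}, {"name": "body", "type": "STRING"}]
buf = io.StringIO()
json.dump(schema, buf, indent=2, sort_keys=True)  # exactly what the helper does
print(buf.getvalue())  # pretty-printed JSON with keys in sorted order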
374,282
def initFilter(input, filterInfo=None):
    # Dict keys below were elided in extraction; they are reconstructed from
    # the structure documented in the docstring ('min', 'max', 'type',
    # 'category', 'number', 'acceptValues') and should be treated as such.
    if filterInfo is None:
        return None

    filterList = []
    for i, fieldName in enumerate(input.getFieldNames()):
        fieldFilter = filterInfo.get(fieldName, None)
        if fieldFilter is None:
            continue
        var = dict()
        var['value'] = None
        min = fieldFilter.get('min', None)
        max = fieldFilter.get('max', None)
        var['min'] = min
        var['max'] = max
        if fieldFilter['type'] == 'category':
            var['acceptValues'] = fieldFilter['acceptValues']
            fp = lambda x: (x['value'] != SENTINEL_VALUE_FOR_MISSING_DATA and
                            x['value'] in x['acceptValues'])
        elif fieldFilter['type'] == 'number':
            if min is not None and max is not None:
                fp = lambda x: (x['value'] != SENTINEL_VALUE_FOR_MISSING_DATA and
                                x['value'] >= x['min'] and x['value'] <= x['max'])
            elif min is not None:
                fp = lambda x: (x['value'] != SENTINEL_VALUE_FOR_MISSING_DATA and
                                x['value'] >= x['min'])
            else:
                fp = lambda x: (x['value'] != SENTINEL_VALUE_FOR_MISSING_DATA and
                                x['value'] <= x['max'])
        filterList.append((i, fp, var))

    return (_filterRecord, filterList)
Initializes internal filter variables for further processing. Returns a tuple (function to call, parameters for the filter call). The filterInfo is a dict. Here is an example structure:

{fieldName: {'min': x,
             'max': y,
             'type': 'category',  # or 'number'
             'acceptValues': ['foo', 'bar'],
            }
}

This returns the following: (filterFunc, ((fieldIdx, fieldFilterFunc, filterDict), ...)) where fieldIdx is the index of the field within each record, fieldFilterFunc returns True if the value is "OK" (within min, max or part of acceptValues), and filterDict is a dict containing 'type', 'min', 'max', 'acceptValues'.
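A self-contained sketch of how the returned pieces compose, with an invented record layout and a stand-in sentinel (the real module defines `SENTINEL_VALUE_FOR_MISSING_DATA` and `_filterRecord`; the behavior of `_filterRecord` here is an assumption based on the docstring):

SENTINEL = -9999  # stand-in for SENTINEL_VALUE_FOR_MISSING_DATA

# One (fieldIdx, fieldFilterFunc, filterDict) triple for a numeric field
# at record index 2, filtered to the range [0, 10].
var = {'value': None, 'min': 0.0, 'max': 10.0}
fp = lambda x: (x['value'] != SENTINEL and
                x['value'] >= x['min'] and x['value'] <= x['max'])
filterList = [(2, fp, var)]

def filter_record(record):
    # Mirrors what _filterRecord presumably does: every field must pass.
    for idx, func, v in filterList:
        v['value'] = record[idx]
        if not func(v):
            return False
    return True

print(filter_record(['a', 'b', 5.0]))    # True  (within [0, 10])
print(filter_record(['a', 'b', 42.0]))   # False (above max)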
374,283
def get(self, date, page_no=1, page_size=40, fields=[]):
    # Request keys were elided in extraction; the names below follow the
    # documented parameters of taobao.taobaoke.report.get and the response
    # key is reconstructed from the API name -- both are assumptions.
    request = TOPRequest('taobao.taobaoke.report.get')
    request['date'] = date
    request['page_no'] = page_no
    request['page_size'] = page_size
    if not fields:
        fields = self.fields
    request['fields'] = fields
    self.create(self.execute(request)['taobaoke_report'])
    return self
taobao.taobaoke.report.get — Taobaoke report query (queries the report for the Taobao affiliate program).
374,284
def orientation(point_p, point_q, point_r):
    r = ((point_q.y - point_p.y) * (point_r.x - point_q.x) -
         (point_q.x - point_p.x) * (point_r.y - point_q.y))
    if r == 0:
        return 0
    return 1 if r > 0 else 2
To find orientation of ordered triplet (p, q, r).

:param point_p:
:type point_p: models.Point
:param point_q:
:type point_q: models.Point
:param point_r:
:type point_r: models.Point
:return: 0: p, q and r are collinear
         1: clockwise
         2: counterclockwise
:rtype: int
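A tiny worked check of the cross-product sign, using a namedtuple in place of the project's `models.Point` (an assumption about its interface — only `.x` and `.y` are needed):

from collections import namedtuple

Point = namedtuple("Point", ["x", "y"])

def orientation(p, q, r):
    # Sign of the z-component of (q - p) x (r - q).
    val = (q.y - p.y) * (r.x - q.x) - (q.x - p.x) * (r.y - q.y)
    if val == 0:
        return 0
    return 1 if val > 0 else 2

print(orientation(Point(0, 0), Point(1, 1), Point(2, 2)))  # 0: collinear
print(orientation(Point(0, 0), Point(1, 1), Point(2, 0)))  # 1: clockwise (right turn)
print(orientation(Point(0, 0), Point(1, 0), Point(2, 2)))  # 2: counterclockwise (left turn)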
374,285
def yield_module_imports(root, checks=string_imports()):
    if not isinstance(root, asttypes.Node):
        raise TypeError()  # error message elided in extraction
    for child in yield_function(root, deep_filter):
        for f, condition in checks:
            if condition(child):
                for name in f(child):
                    yield name
                continue
Gather all require and define calls from unbundled JavaScript source files and yield all module names. The imports can either be of the CommonJS or AMD syntax.
374,286
def tree(self):
    bs = self.model.branchScale
    for node in self._tree.find_clades():
        if node != self._tree.root:
            node.branch_length = self.t[self.name_to_nodeindex[node]] * bs
    return self._tree
Tree with branch lengths in codon substitutions per site. The tree is a `Bio.Phylo.BaseTree.Tree` object. This is the current tree after whatever optimizations have been performed so far.
374,287
def expand(self, line, do_expand, force=False, vislevels=0, level=-1):
    lastchild = self.GetLastChild(line, level)
    line += 1
    while line <= lastchild:
        if force:
            if vislevels > 0:
                self.ShowLines(line, line)
            else:
                self.HideLines(line, line)
        elif do_expand:
            self.ShowLines(line, line)
        if level == -1:
            level = self.GetFoldLevel(line)
        if level & stc.STC_FOLDLEVELHEADERFLAG:
            if force:
                self.SetFoldExpanded(line, vislevels - 1)
                line = self.expand(line, do_expand, force, vislevels - 1)
            else:
                expandsub = do_expand and self.GetFoldExpanded(line)
                line = self.expand(line, expandsub, force, vislevels - 1)
        else:
            line += 1
    return line
Multi-purpose expand method from original STC class
374,288
def _GetMessage(self, message_file_key, lcid, message_identifier):
    # Format and key strings were elided in extraction; the values below are
    # reconstructed from the per-(file, LCID) table layout the docstring
    # describes, and should be treated as assumptions.
    table_name = 'message_table_{0:d}_0x{1:08x}'.format(message_file_key, lcid)
    has_table = self._database_file.HasTable(table_name)
    if not has_table:
        return None
    column_names = ['message_string']
    condition = 'message_identifier == "0x{0:08x}"'.format(message_identifier)
    values = list(self._database_file.GetValues(
        [table_name], column_names, condition))
    number_of_values = len(values)
    if number_of_values == 0:
        return None
    if number_of_values == 1:
        return values[0]['message_string']
    raise RuntimeError('More than one value found in database.')
Retrieves a specific message from a specific message table. Args: message_file_key (int): message file key. lcid (int): language code identifier (LCID). message_identifier (int): message identifier. Returns: str: message string or None if not available. Raises: RuntimeError: if more than one value is found in the database.
374,289
def police_priority_map_exceed_map_pri2_exceed(self, **kwargs):
    config = ET.Element("config")
    police_priority_map = ET.SubElement(
        config, "police-priority-map",
        xmlns="urn:brocade.com:mgmt:brocade-policer")
    name_key = ET.SubElement(police_priority_map, "name")
    # kwargs keys were elided in extraction; in this auto-generated NETCONF
    # binding code they conventionally match the element names.
    name_key.text = kwargs.pop('name')
    exceed = ET.SubElement(police_priority_map, "exceed")
    map_pri2_exceed = ET.SubElement(exceed, "map-pri2-exceed")
    map_pri2_exceed.text = kwargs.pop('map_pri2_exceed')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
374,290
def scale_rows_by_largest_entry(S):
    if not isspmatrix_csr(S):
        raise TypeError('expected csr_matrix')  # message elided in extraction
    largest_row_entry = np.zeros((S.shape[0],), dtype=S.dtype)
    pyamg.amg_core.maximum_row_value(S.shape[0], largest_row_entry,
                                     S.indptr, S.indices, S.data)
    largest_row_entry[largest_row_entry != 0] = \
        1.0 / largest_row_entry[largest_row_entry != 0]
    S = scale_rows(S, largest_row_entry, copy=True)
    return S
Scale each row in S by its largest-in-magnitude entry.

Parameters
----------
S : csr_matrix

Returns
-------
S : csr_matrix
    Each row has been scaled by its largest-in-magnitude entry.

Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import scale_rows_by_largest_entry
>>> A = poisson((4,), format='csr')
>>> A.data[1] = 5.0
>>> A = scale_rows_by_largest_entry(A)
>>> A.todense()
matrix([[ 0.4,  1. ,  0. ,  0. ],
        [-0.5,  1. , -0.5,  0. ],
        [ 0. , -0.5,  1. , -0.5],
        [ 0. ,  0. , -0.5,  1. ]])
374,291
def group(self, base_dn, samaccountname, attributes=(),
          explicit_membership_only=False):
    groups = self.groups(base_dn, samaccountnames=[samaccountname],
                         attributes=attributes,
                         explicit_membership_only=explicit_membership_only)
    try:
        return groups[0]
    except IndexError:
        logging.info("%s - unable to retrieve object from AD by sAMAccountName",
                     samaccountname)
Produces a single, populated ADGroup object through the object factory. Does not populate attributes for the caller instance. sAMAccountName may not be present in group objects in modern AD schemas. Searching by common name and object class (group) may be an alternative approach if required in the future. :param str base_dn: The base DN to search within :param str samaccountname: The group's sAMAccountName :param list attributes: Object attributes to populate, defaults to all :return: A populated ADGroup object :rtype: ADGroup
374,292
def to_cloudformation(self, **kwargs):
    # The kwargs key was elided in extraction; 'function' follows from the
    # error message below.
    function = kwargs.get('function')
    if not function:
        raise TypeError("Missing required keyword argument: function")
    return [self._construct_permission(function, source_arn=self.Topic),
            self._inject_subscription(function, self.Topic, self.FilterPolicy)]
Returns the Lambda Permission resource allowing SNS to invoke the function this event source triggers. :param dict kwargs: no existing resources need to be modified :returns: a list of vanilla CloudFormation Resources, to which this SNS event expands :rtype: list
374,293
import numpy as np

def normalize_layout(layout, min_percentile=1, max_percentile=99,
                     relative_margin=0.1):
    # Clip to the given percentiles (expanded by a relative margin),
    # then rescale each axis to [0, 1].
    mins = np.percentile(layout, min_percentile, axis=(0))
    maxs = np.percentile(layout, max_percentile, axis=(0))
    mins -= relative_margin * (maxs - mins)
    maxs += relative_margin * (maxs - mins)
    clipped = np.clip(layout, mins, maxs)
    clipped -= clipped.min(axis=0)
    clipped /= clipped.max(axis=0)
    return clipped
Removes outliers and scales layout to between [0,1].
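A quick numeric sketch of the effect, assuming `normalize_layout` from the entry above is in scope (the random data and the planted outlier are invented for illustration):

import numpy as np

rng = np.random.default_rng(0)
layout = rng.normal(size=(500, 2))
layout[0] = [50.0, -50.0]                  # an extreme outlier

norm = normalize_layout(layout)
print(norm.min(axis=0), norm.max(axis=0))  # exactly [0 0] and [1 1] per axis
# The outlier is clipped to the 99th-percentile bound instead of
# stretching the whole layout's scale.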
374,294
def is_friend(self):
    r = self.relationship
    if r is None:
        return False
    return r.type is RelationshipType.friend
:class:`bool`: Checks if the user is your friend. .. note:: This only applies to non-bot accounts.
374,295
def num2tamilstr_american(*args):
    number = args[0]
    if not any(filter(lambda T: isinstance(number, T),
                      [int, str, unicode, long, float])) \
            or isinstance(number, complex):
        raise Exception()  # message elided in extraction
    if float(number) >= long(1e15):
        raise Exception()  # message elided in extraction
    if float(number) < 0:
        return u"- " + num2tamilstr_american(-float(number))

    # NOTE: the Tamil numeral string literals in the tuples and prefixes
    # below were lost in extraction; empty strings stand in for them here
    # so the structure still parses. They are NOT the original values.
    units = (u'', u'', u'', u'', u'', u'', u'', u'', u'', u'', u'')  # 0..10
    hundreds = (u'', u'', u'', u'', u'', u'', u'', u'', u'')        # 100..900
    one_thousand_prefix = u''
    thousands = (u'', u'')
    one_prefix = u''
    mil = u''
    million = (mil, mil)
    bil = u''
    billion = (bil, bil)
    tril = u''
    trillion = (tril, tril)

    n_one = 1
    n_ten = 10
    n_hundred = 100
    n_thousand = 1000
    n_million = 1000 * n_thousand
    n_billion = long(1000 * n_million)
    n_trillion = long(1000 * n_billion)

    suffix_base = {n_trillion: trillion, n_billion: billion,
                   n_million: million, n_thousand: thousands}
    num_map = {n_trillion: [one_prefix, trillion[0]],
               n_billion: [one_prefix, billion[0]],
               n_million: [one_prefix, million[0]],
               n_thousand: [one_thousand_prefix, thousands[0]],
               n_hundred: [hundreds[0]],
               n_ten: [units[10]],
               n_one: [units[1]]}

    all_bases = [n_trillion, n_billion, n_million, n_thousand,
                 n_hundred, n_ten, n_one]
    allowed_bases = list(filter(lambda base: float(number) >= base, all_bases))

    if float(number) > 0.0 and float(number) <= 1000.0:
        return num2tamilstr(number)

    if isinstance(number, str) or isinstance(number, unicode):
        result = u""
        number = number.strip()
        assert len(args) == 1
        assert len(number) > 0
        is_negative = number[0] == "-"
        if is_negative:
            number = number[1:]
        frac_part = u""
        if number.find(".") >= 0:
            rat_part, frac_part = number.split(".")
            frac_part = num2tamilstr_american(u"0." + frac_part)
        else:
            rat_part = number
        if len(rat_part) > 0:
            result = num2tamilstr_american(float(rat_part))
        result = result + u" " + frac_part
        return result.strip()

    if len(allowed_bases) >= 1:
        n_base = allowed_bases[0]
        if number == n_base:
            return u" ".join(num_map[n_base])
        quotient_number = long(number / n_base)
        residue_number = number - n_base * quotient_number
        if n_base < n_thousand:
            raise Exception("This can never happen")
        else:
            if quotient_number == 1:
                # Joining literals elided in extraction; placeholders below.
                if n_base == n_thousand:
                    numeral = one_thousand_prefix + u''
                else:
                    numeral = one_prefix + u''
            else:
                numeral = num2tamilstr(quotient_number)
            if n_base >= n_thousand:
                suffix = suffix_base[n_base][long(residue_number >= 1)]
                if residue_number == 0:
                    return numeral + u'' + suffix
                numeral = numeral + u'' + suffix
            residue_numeral = num2tamilstr_american(residue_number)
            return numeral + u'' + residue_numeral

    return units[0]
Turn a number into a Tamil numeral, American style. Works up to 1000 trillion - 1, i.e. 1e12*1e3 - 1; fractions down to 1e-30.
374,296
def login(request, template_name='registration/login.html',
          redirect_field_name=REDIRECT_FIELD_NAME,
          authentication_form=AuthenticationForm):
    # NOTE: the template path, session key names and response keys below were
    # elided in extraction; they are reconstructed from the values they hold
    # and from Django conventions, and should be treated as assumptions.
    redirect_to = request.POST.get(redirect_field_name,
                                   request.GET.get(redirect_field_name, ''))
    if request.method == "POST":
        form = authentication_form(request, data=request.POST)
        if form.is_valid():
            if not is_safe_url(url=redirect_to, host=request.get_host()):
                redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL)
            user = form.get_user()
            request.session['user_token'] = user["token"]
            request.session['email'] = user["email"]
            request.session['user_permissions'] = user["permissions"]
            request.session['user_id'] = user["id"]
            request.session['user_list'] = user["user_list"]
            if not settings.HIDE_DASHBOARDS:
                dashboards = ciApi.get_user_dashboards(user["id"])
                dashboard_list = list(dashboards['results'])
                if len(dashboard_list) > 0:
                    request.session['user_dashboards'] = \
                        dashboard_list[0]["dashboards"]
                    request.session['default_dashboard'] = \
                        dashboard_list[0]["default_dashboard"]["id"]
                else:
                    request.session['user_dashboards'] = []
                    request.session['default_dashboard'] = None
            tokens = ciApi.get_user_service_tokens(
                params={"user_id": user["id"]})
            token_list = list(tokens['results'])
            user_tokens = {}
            if len(token_list) > 0:
                for token in token_list:
                    user_tokens[token["service"]["name"]] = {
                        "token": token["token"],
                        "url": token["service"]["url"] + "/api/v1",
                    }
            request.session['user_tokens'] = user_tokens
            return HttpResponseRedirect(redirect_to)
    else:
        form = authentication_form(request)
    current_site = get_current_site(request)
    context = {
        'form': form,
        redirect_field_name: redirect_to,
        'site': current_site,
        'site_name': current_site.name,
    }
    return TemplateResponse(request, template_name, context)
Displays the login form and handles the login action.
374,297
def collapse(dataframe, groupe, var):
    grouped = dataframe.groupby([groupe])
    var_weighted_grouped = grouped.apply(lambda x: wavg(groupe=x, var=var))
    return var_weighted_grouped
For a given variable, computes the weighted mean within each group.
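The `wavg` helper is not shown in this entry; a minimal sketch of what it presumably does, assuming each group carries a weight column (the weight column name 'wprm' and the sample frame are invented for illustration):

import pandas as pd

def wavg(groupe, var):
    # Weighted mean of column `var` within one group, weighted by 'wprm'.
    d = groupe[var]
    w = groupe['wprm']
    return (d * w).sum() / w.sum()

df = pd.DataFrame({
    'decile': [1, 1, 2, 2],
    'revenu': [100.0, 200.0, 300.0, 500.0],
    'wprm':   [1.0, 3.0, 2.0, 2.0],
})
print(df.groupby(['decile']).apply(lambda x: wavg(groupe=x, var='revenu')))
# decile 1 -> 175.0, decile 2 -> 400.0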
374,298
def __get_rectangle_description(block, pair):
    max_corner, min_corner = block.get_spatial_block().get_corners()
    max_corner = [max_corner[pair[0]], max_corner[pair[1]]]
    min_corner = [min_corner[pair[0]], min_corner[pair[1]]]
    if pair == (0, 0):
        max_corner[1], min_corner[1] = 1.0, -1.0
    return max_corner, min_corner
! @brief Create rectangle description for block in specific dimension. @param[in] pair (tuple): Pair of coordinate indices that should be displayed. @param[in] block (bang_block): BANG-block that should be displayed. @return (tuple) Pair of corners that describe the rectangle.
374,299
def lookup(self, allowed_types, **kwargs):
    # The tool name and kwargs key were elided in extraction; 'archetype_tool'
    # and 'portal_type' are the conventional Plone/Archetypes values and are
    # assumptions here.
    at = getToolByName(self, 'archetype_tool')
    for portal_type in allowed_types:
        catalog = at.catalog_map.get(portal_type, [None])[0]
        catalog = getToolByName(self, catalog)
        kwargs['portal_type'] = portal_type
        brains = catalog(**kwargs)
        if brains:
            return brains
Lookup an object of type (allowed_types). kwargs is sent directly to the catalog.