code
stringlengths
51
2.34k
docstring
stringlengths
11
171
def add_x10_device(self, housecode, unitcode, dev_type): device = None try: device = self.plm.devices.add_x10_device(self.plm, housecode, unitcode, dev_type) except ValueError: pass return device
Add an X10 device to the PLM.
def update_policy(self, defaultHeaders):
    """Merge default headers into ``self.inputs``.

    Missing keys are copied from ``defaultHeaders``; for the ``'pins'``
    key, default pins are APPENDED to any existing pins instead of
    overwriting them, and are never added twice.

    :param defaultHeaders: mapping of default policy values.
    :return: the (possibly updated) ``self.inputs`` mapping, unchanged
        (``None``) when no inputs are configured.
    """
    if self.inputs is None:
        return self.inputs
    for key, value in defaultHeaders.items():
        if key not in self.inputs:
            # Absent key: take the default as-is (covers 'pins' too, so
            # the elif below cannot double-add freshly-copied pins).
            self.inputs[key] = value
        elif key == 'pins':
            # Existing pins: extend with the defaults instead of clobbering.
            self.inputs[key] = self.inputs[key] + defaultHeaders[key]
    return self.inputs
rewrite update policy so that additional pins are added and not overwritten
def to_time(value, ctx):
    """Try to convert *value* to a ``datetime.time``.

    Accepts a time (returned as-is), a datetime (converted to the
    context timezone first), or a string (parsed via the context's
    date parser).  Raises EvaluationError for anything else.
    """
    if isinstance(value, datetime.time):
        return value
    if isinstance(value, datetime.datetime):
        return value.astimezone(ctx.timezone).time()
    if isinstance(value, str):
        parsed = ctx.get_date_parser().time(value)
        if parsed is not None:
            return parsed
    raise EvaluationError("Can't convert '%s' to a time" % str(value))
Tries conversion of any value to a time
def convert_sed_cols(tab):
    """Cast SED column names to lowercase (renaming 'dfde' to 'dnde')."""
    for original_name in list(tab.columns.keys()):
        lowered = original_name.lower().replace('dfde', 'dnde')
        if tab.columns[original_name].name != lowered:
            tab.columns[original_name].name = lowered
    return tab
Cast SED column names to lowercase.
def _clean_files_only(self, files): try: if not (os.path.exists(self.origin_path)): self.logger.info("Creating Origin Path - %s" % self.origin_path) os.makedirs(self.origin_path) if not (os.path.exists(self.dir_path)): self.logger.info("Creating Directory Path - %s" % self.dir_path) os.makedirs(self.dir_path) self._add_extra_files(files) except OSError as e: if e.errno == errno.EEXIST: pass else: self.logger.exception(e) raise e except Exception as e: self.logger.exception(e) raise Exception("CleanFilesOnlyError: unable to process")
if a user only wants to process one or more specific files, instead of a full sosreport
def cosine_sim(vec1, vec2):
    """Return the cosine similarity of two sparse vectors.

    Both vectors are dicts; only the values are used, so the two dicts
    are assumed to be keyed identically and in the same insertion
    order -- TODO confirm with callers.

    :return: dot(v1, v2) / (|v1| * |v2|), or 0.0 when either vector has
        zero magnitude (instead of raising ZeroDivisionError).
    """
    values1 = list(vec1.values())
    values2 = list(vec2.values())
    dot_prod = sum(a * b for a, b in zip(values1, values2))
    mag_1 = math.sqrt(sum(x ** 2 for x in values1))
    mag_2 = math.sqrt(sum(x ** 2 for x in values2))
    if mag_1 == 0 or mag_2 == 0:
        # Degenerate zero vector: similarity is undefined; report 0.
        return 0.0
    return dot_prod / (mag_1 * mag_2)
Since our vectors are dictionaries, let's convert them to lists to make the math easier.
def scaleY(self, y):
    """Map a visible-box y coordinate to a plotter y coordinate."""
    offset = (y - self.visibleBox.ymin) * self.yScaler
    return round(self.plotviewBox.ymin + offset)
returns plotter y coordinate
def resume_all(self):
    """Resumes all service instances.

    Each service's resume() runs inside an expect_no_raises guard so
    one failing service does not stop the others.
    """
    for alias, service in self._service_objects.items():
        # The message previously said "pause", but this method resumes.
        with expects.expect_no_raises(
                'Failed to resume service "%s".' % alias):
            service.resume()
Resumes all service instances.
def _create_wcs (fitsheader): wcsmodule = _load_wcs_module () is_pywcs = hasattr (wcsmodule, 'UnitConverter') wcs = wcsmodule.WCS (fitsheader) wcs.wcs.set () wcs.wcs.fix () if hasattr (wcs, 'wcs_pix2sky'): wcs.wcs_pix2world = wcs.wcs_pix2sky wcs.wcs_world2pix = wcs.wcs_sky2pix return wcs
For compatibility between astropy and pywcs.
def cancel(self): self.event.clear() if self.__timer is not None: self.__timer.cancel()
Stops the timer; the callback function is not called.
def _close(self): if hasattr(self, 'aiohttp'): if not self.aiohttp.closed: self.aiohttp.close() if hasattr(self, 'file_descriptors'): for fd in self.file_descriptors.values(): if not fd.closed: fd.close()
Closes aiohttp session and all open file descriptors
# Sync hook: called when the remote resource should be copied to local.
# Logs a "copy" action in the remote->local direction ("<"), using the
# pair's local classification as the status.
def on_copy_remote(self, pair): status = pair.local_classification self._log_action("copy", status, "<", pair.remote)
Called when the remote resource should be copied to local.
def _get_slice_axis(self, slice_obj, axis=None): if axis is None: axis = self.axis or 0 obj = self.obj if not need_slice(slice_obj): return obj.copy(deep=False) labels = obj._get_axis(axis) indexer = labels.slice_indexer(slice_obj.start, slice_obj.stop, slice_obj.step, kind=self.name) if isinstance(indexer, slice): return self._slice(indexer, axis=axis, kind='iloc') else: return self.obj._take(indexer, axis=axis)
this is pretty simple as we just have to deal with labels
def _infer_binop(self, context): left = self.left right = self.right context = context or contextmod.InferenceContext() lhs_context = contextmod.copy_context(context) rhs_context = contextmod.copy_context(context) lhs_iter = left.infer(context=lhs_context) rhs_iter = right.infer(context=rhs_context) for lhs, rhs in itertools.product(lhs_iter, rhs_iter): if any(value is util.Uninferable for value in (rhs, lhs)): yield util.Uninferable return try: yield from _infer_binary_operation(lhs, rhs, self, context, _get_binop_flow) except exceptions._NonDeducibleTypeHierarchy: yield util.Uninferable
Binary operation inference logic.
def read_exif_ifd(fh, byteorder, dtype, count, offsetsize): exif = read_tags(fh, byteorder, offsetsize, TIFF.EXIF_TAGS, maxifds=1) for name in ('ExifVersion', 'FlashpixVersion'): try: exif[name] = bytes2str(exif[name]) except Exception: pass if 'UserComment' in exif: idcode = exif['UserComment'][:8] try: if idcode == b'ASCII\x00\x00\x00': exif['UserComment'] = bytes2str(exif['UserComment'][8:]) elif idcode == b'UNICODE\x00': exif['UserComment'] = exif['UserComment'][8:].decode('utf-16') except Exception: pass return exif
Read EXIF tags from file and return as dict.
def _generateInitialModel(self, output_model_type): logger().info("Generating initial model for BHMM using MLHMM...") from bhmm.estimators.maximum_likelihood import MaximumLikelihoodEstimator mlhmm = MaximumLikelihoodEstimator(self.observations, self.nstates, reversible=self.reversible, output=output_model_type) model = mlhmm.fit() return model
Initialize using an MLHMM.
def visit_decorators(self, node, parent): newnode = nodes.Decorators(node.lineno, node.col_offset, parent) newnode.postinit([self.visit(child, newnode) for child in node.decorator_list]) return newnode
visit a Decorators node by returning a fresh instance of it
def reset(self): for shard_id in self._shards: if self._shards[shard_id].get('isReplicaSet'): singleton = ReplicaSets() elif self._shards[shard_id].get('isServer'): singleton = Servers() singleton.command(self._shards[shard_id]['_id'], 'reset') for config_id in self._configsvrs: self.configdb_singleton.command(config_id, 'reset') for router_id in self._routers: Servers().command(router_id, 'reset') return self.info()
Ensure all shards, configs, and routers are running and available.
def _get_object_as_soft(self): soft = ["^%s = %s" % (self.geotype, self.name), self._get_metadata_as_string()] return "\n".join(soft)
Get the object as SOFT formatted string.
def xmoe_2d(): hparams = xmoe_top_2() hparams.decoder_layers = ["att", "hmoe"] * 4 hparams.mesh_shape = "b0:2;b1:4" hparams.outer_batch_size = 4 hparams.layout = "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0" hparams.moe_num_experts = [4, 4] return hparams
Two-dimensional hierarchical mixture of 16 experts.
def expectation_step(t_table, stanzas, schemes, rprobs): probs = numpy.zeros((len(stanzas), schemes.num_schemes)) for i, stanza in enumerate(stanzas): scheme_indices = schemes.get_schemes_for_len(len(stanza)) for scheme_index in scheme_indices: scheme = schemes.scheme_list[scheme_index] probs[i, scheme_index] = post_prob_scheme(t_table, stanza, scheme) probs = numpy.dot(probs, numpy.diag(rprobs)) scheme_sums = numpy.sum(probs, axis=1) for i, scheme_sum in enumerate(scheme_sums.tolist()): if scheme_sum > 0: probs[i, :] /= scheme_sum return probs
Compute posterior probability of schemes for each stanza
def on_server_shutdown(self): if not self._container: return self._container.stop() self._container.remove(v=True, force=True)
Stop the container before shutting down.
def take(self, indexer, axis=1, verify=True, convert=True): self._consolidate_inplace() indexer = (np.arange(indexer.start, indexer.stop, indexer.step, dtype='int64') if isinstance(indexer, slice) else np.asanyarray(indexer, dtype='int64')) n = self.shape[axis] if convert: indexer = maybe_convert_indices(indexer, n) if verify: if ((indexer == -1) | (indexer >= n)).any(): raise Exception('Indices must be nonzero and less than ' 'the axis length') new_labels = self.axes[axis].take(indexer) return self.reindex_indexer(new_axis=new_labels, indexer=indexer, axis=axis, allow_dups=True)
Take items along any axis.
def create_grupo_usuario(self): return GrupoUsuario( self.networkapi_url, self.user, self.password, self.user_ldap)
Get an instance of grupo_usuario services facade.
def decode_async_options(options): async_options = copy.deepcopy(options) eta = async_options.get('task_args', {}).get('eta') if eta: from datetime import datetime async_options['task_args']['eta'] = datetime.fromtimestamp(eta) callbacks = async_options.get('callbacks', {}) if callbacks: async_options['callbacks'] = decode_callbacks(callbacks) if '__context_checker' in options: _checker = options['__context_checker'] async_options['_context_checker'] = path_to_reference(_checker) if '__process_results' in options: _processor = options['__process_results'] async_options['_process_results'] = path_to_reference(_processor) return async_options
Decode Async options from JSON decoding.
def round(self):
    """Round `x` and `y` to integers and return them as a new Point."""
    x_int = int(round(self.x))
    y_int = int(round(self.y))
    return Point(x_int, y_int)
Round `x` and `y` to integers.
def _do_lumping(self): model = LandmarkAgglomerative(linkage='ward', n_clusters=self.n_macrostates, metric=self.metric, n_landmarks=self.n_landmarks, landmark_strategy=self.landmark_strategy, random_state=self.random_state) model.fit([self.transmat_]) if self.fit_only: microstate_mapping_ = model.landmark_labels_ else: microstate_mapping_ = model.transform([self.transmat_])[0] self.microstate_mapping_ = microstate_mapping_
Do the MVCA lumping.
def min_height(self) -> int:
    """Minimum height necessary to render the block's contents."""
    if self.content:
        content_lines = len(self.content.split('\n'))
    else:
        content_lines = 0
    # A left or right border forces at least one row.
    border_rows = 1 if (self.left or self.right) else 0
    return max(content_lines, border_rows)
Minimum height necessary to render the block's contents.
def to_glyphs_family_user_data_from_ufo(self, ufo): target_user_data = self.font.userData try: for key, value in ufo.lib[FONT_USER_DATA_KEY].items(): if key not in target_user_data.keys(): target_user_data[key] = value except KeyError: pass
Set the GSFont userData from the UFO family-wide lib data.
def _find_newest_ckpt(ckpt_dir): full_paths = [ os.path.join(ckpt_dir, fname) for fname in os.listdir(ckpt_dir) if fname.startswith("experiment_state") and fname.endswith(".json") ] return max(full_paths)
Returns path to most recently modified checkpoint.
def start(self): self._process = threading.Thread(target=self._background_runner) self._process.start()
Create a background thread for httpd and serve it forever.
def load_local_plugin(name):
    """Import a local plugin accessible through the Python path.

    *name* is a dotted path; everything up to the last dot is the
    module, the final component is the attribute to fetch from it.
    """
    module_path, _, attr_name = name.rpartition('.')
    try:
        module_obj = importlib.import_module(name=module_path)
        return getattr(module_obj, attr_name)
    except (ImportError, AttributeError, ValueError) as e:
        raise PluginNotFoundError(e)
Import a local plugin accessible through Python path.
def first_fit(self, train_x, train_y): train_x, train_y = np.array(train_x), np.array(train_y) self._x = np.copy(train_x) self._y = np.copy(train_y) self._distance_matrix = edit_distance_matrix(self._x) k_matrix = bourgain_embedding_matrix(self._distance_matrix) k_matrix[np.diag_indices_from(k_matrix)] += self.alpha self._l_matrix = cholesky(k_matrix, lower=True) self._alpha_vector = cho_solve((self._l_matrix, True), self._y) self._first_fitted = True return self
Fit the regressor for the first time.
def frequency_app(parser, cmd, args): parser.add_argument('value', help='the value to analyse, read from stdin if omitted', nargs='?') args = parser.parse_args(args) data = frequency(six.iterbytes(pwnypack.main.binary_value_or_stdin(args.value))) return '\n'.join( '0x%02x (%c): %d' % (key, chr(key), value) if key >= 32 and chr(key) in string.printable else '0x%02x ---: %d' % (key, value) for key, value in data.items() )
perform frequency analysis on a value.
def random_date(start_year=2000, end_year=2020): return date(random.randint(start_year, end_year), random.randint(1, 12), random.randint(1, 28))
Generates a random "sensible" date for use in things like issue dates and maturities
def _cursor_position(self, data): column, line = self._get_line_and_col(data) self._move_cursor_to_line(line) self._move_cursor_to_column(column) self._last_cursor_pos = self._cursor.position()
Moves the cursor position.
def flds_firstsort(d):
    """Perform a lexsort and return the sort indices and shape as a tuple."""
    shape = [np.unique(d[axis]).size for axis in ('xs', 'ys', 'zs')]
    sort_indices = np.lexsort((d['z'], d['y'], d['x']))
    return sort_indices, shape
Perform a lexsort and return the sort indices and shape as a tuple.
def cli(ctx, project_dir): exit_code = SCons(project_dir).sim() ctx.exit(exit_code)
Launch the verilog simulation.
def _format_explain(self): lines = [] for (command, kwargs) in self._call_list: lines.append(command + " " + pformat(kwargs)) return "\n".join(lines)
Format the results of an EXPLAIN
def _utf_strip_bom(self, encoding): if encoding is None: pass elif encoding.lower() == 'utf-8': encoding = 'utf-8-sig' elif encoding.lower().startswith('utf-16'): encoding = 'utf-16' elif encoding.lower().startswith('utf-32'): encoding = 'utf-32' return encoding
Return an encoding that will ignore the BOM.
# PUT to the project's quota endpoint (quota_path % project_id) with
# `body` as payload; returns whatever the REST client's put() returns.
def update_quota(self, project_id, body=None): return self.put(self.quota_path % (project_id), body=body)
Update a project's quotas.
def iter_module_paths(modules=None):
    """Yield absolute file paths of the given (or all imported) modules."""
    modules = modules or list(sys.modules.values())
    for module in modules:
        try:
            filename = module.__file__
        except (AttributeError, ImportError):
            # Builtins / namespace packages have no usable __file__.
            continue
        if filename is None:
            continue
        path = os.path.abspath(filename)
        if os.path.isfile(path):
            yield path
Yield paths of all imported modules.
def snake_to_pascal(snake_str):
    """Convert `snake_str` from snake_case to PascalCase.

    Each underscore-separated component is capitalised and the pieces
    are joined, e.g. ``foo_bar`` -> ``FooBar``.  A single component is
    capitalised too (``foo`` -> ``Foo``); the old code returned it
    unchanged, contradicting the documented conversion.
    """
    return ''.join(part.title() for part in snake_str.split('_'))
Convert `snake_str` from snake_case to PascalCase
def hosting_devices_unassigned_from_cfg_agent(self, context, payload): try: if payload['hosting_device_ids']: pass except KeyError as e: LOG.error("Invalid payload format for received RPC message " "`hosting_devices_unassigned_from_cfg_agent`. Error " "is %(error)s. Payload is %(payload)s", {'error': e, 'payload': payload})
Deal with hosting devices unassigned from this config agent.
def empty_tree(input_list):
    """Return True when the nested lists contain no non-list values."""
    return all(isinstance(item, list) and empty_tree(item)
               for item in input_list)
Recursively iterate through values in nested lists.
def getExtensions(self): extensions = [] if isinstance(self.supportedExtensions, list): for ext in self.supportedExtensions: extensionURL = self._url + "/exts/%s" % ext if ext == "SchematicsServer": extensions.append(SchematicsService(url=extensionURL, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)) return extensions else: extensionURL = self._url + "/exts/%s" % self.supportedExtensions if self.supportedExtensions == "SchematicsServer": extensions.append(SchematicsService(url=extensionURL, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)) return extensions
returns objects for all map service extensions
def hashes(self): hashes = set() if (self.resources is not None): for resource in self: if (resource.md5 is not None): hashes.add('md5') if (resource.sha1 is not None): hashes.add('sha-1') if (resource.sha256 is not None): hashes.add('sha-256') return(hashes)
Return set of hashes uses in this resource_list.
def _dump_query_timestamps(self, current_time: float): windows = [10, 11, 15, 20, 30, 60] print("GraphQL requests:", file=sys.stderr) for query_hash, times in self._graphql_query_timestamps.items(): print(" {}".format(query_hash), file=sys.stderr) for window in windows: reqs_in_sliding_window = sum(t > current_time - window * 60 for t in times) print(" last {} minutes: {} requests".format(window, reqs_in_sliding_window), file=sys.stderr)
Output the number of GraphQL queries grouped by their query_hash within several recent time windows.
def Module(EPIC, campaign=None): channel = Channel(EPIC, campaign=campaign) nums = {2: 1, 3: 5, 4: 9, 6: 13, 7: 17, 8: 21, 9: 25, 10: 29, 11: 33, 12: 37, 13: 41, 14: 45, 15: 49, 16: 53, 17: 57, 18: 61, 19: 65, 20: 69, 22: 73, 23: 77, 24: 81} for c in [channel, channel - 1, channel - 2, channel - 3]: if c in nums.values(): for mod, chan in nums.items(): if chan == c: return mod return None
Returns the module number for a given EPIC target.
def pyGeno_init() : global db, dbConf global pyGeno_SETTINGS_PATH global pyGeno_RABA_DBFILE global pyGeno_DATA_PATH if not checkPythonVersion() : raise PythonVersionError("==> FATAL: pyGeno only works with python 2.7 and above, please upgrade your python version") if not os.path.exists(pyGeno_SETTINGS_DIR) : os.makedirs(pyGeno_SETTINGS_DIR) pyGeno_SETTINGS_PATH = getSettingsPath() pyGeno_RABA_DBFILE = os.path.normpath( os.path.join(pyGeno_SETTINGS_PATH, "pyGenoRaba.db") ) pyGeno_DATA_PATH = os.path.normpath( os.path.join(pyGeno_SETTINGS_PATH, "data") ) if not os.path.exists(pyGeno_SETTINGS_PATH) : os.makedirs(pyGeno_SETTINGS_PATH) if not os.path.exists(pyGeno_DATA_PATH) : os.makedirs(pyGeno_DATA_PATH) rabaDB.rabaSetup.RabaConfiguration(pyGeno_RABA_NAMESPACE, pyGeno_RABA_DBFILE) db = rabaDB.rabaSetup.RabaConnection(pyGeno_RABA_NAMESPACE) dbConf = rabaDB.rabaSetup.RabaConfiguration(pyGeno_RABA_NAMESPACE)
This function is automatically called at launch
def _shutdown_cherrypy(self): if cherrypy.engine.state == cherrypy.engine.states.STARTED: threading.Timer(1, cherrypy.engine.exit).start()
Shutdown cherrypy in one second, if it's running
def check_bounding_rect(rect_pos): if not isinstance(rect_pos, Iterable): raise ValueError('rectangle spect must be a tuple of floats ' 'specifying (left, right, width, height)') left, bottom, width, height = rect_pos for val, name in zip((left, bottom, width, height), ('left', 'bottom', 'width', 'height')): if val < 0.0 or val > 1.0: raise ValueError("{}'s value must be >=0 and <= 1.0. " "It is now {}".format(name, val)) if left + width > 1.0: print('rect would extend beyond the width of figure/axis by {}'.format(left + width - 1.0)) if bottom + height > 1.0: print('rect would extend beyond the height of figure/axis by {}'.format( bottom + height - 1.0)) return rect_pos
Ensure the rect spec is valid.
def kill_pane(self, pane): assert isinstance(pane, Pane) if not pane.process.is_terminated: pane.process.kill() self.arrangement.remove_pane(pane)
Kill the given pane, and remove it from the arrangement.
def _extract_features(self): for parsed_line in self.parsed_lines: if parsed_line.get('program') == 'sshd': result = self._parse_auth_message(parsed_line['message']) if 'ip' in result: self.features['ips'].append(result['ip']) if result['ip'] not in self.ips_to_pids: self.ips_to_pids[result['ip']] = [parsed_line['processid']] else: if parsed_line['processid'] not in self.ips_to_pids[result['ip']]: self.ips_to_pids[result['ip']].append(parsed_line['processid'])
Extracts and sets the feature data from the log file necessary for a reduction
# Thin delegation: time `func(*args, **kwargs)` via the project-level
# `metrics` helper, emit `metric`, and return func's result unchanged.
def timeit(self, metric, func, *args, **kwargs): return metrics.timeit(metric, func, *args, **kwargs)
Time execution of callable and emit metric then return result.
def update(cls, args): kytos_api = KytosConfig().config.get('kytos', 'api') url = f"{kytos_api}api/kytos/core/web/update" version = args["<version>"] if version: url += f"/{version}" try: result = requests.post(url) except(HTTPError, URLError, requests.exceptions.ConnectionError): LOG.error("Can't connect to server: %s", kytos_api) return if result.status_code != 200: LOG.info("Error while updating web ui: %s", result.content) else: LOG.info("Web UI updated.")
Call the method to update the Web UI.
def add_params_to_uri(uri, params, fragment=False): sch, net, path, par, query, fra = urlparse.urlparse(uri) if fragment: fra = add_params_to_qs(fra, params) else: query = add_params_to_qs(query, params) return urlparse.urlunparse((sch, net, path, par, query, fra))
Add a list of two-tuples to the uri query components.
def _aws_encode_changebatch(o): change_idx = 0 while change_idx < len(o['Changes']): o['Changes'][change_idx]['ResourceRecordSet']['Name'] = aws_encode(o['Changes'][change_idx]['ResourceRecordSet']['Name']) if 'ResourceRecords' in o['Changes'][change_idx]['ResourceRecordSet']: rr_idx = 0 while rr_idx < len(o['Changes'][change_idx]['ResourceRecordSet']['ResourceRecords']): o['Changes'][change_idx]['ResourceRecordSet']['ResourceRecords'][rr_idx]['Value'] = aws_encode(o['Changes'][change_idx]['ResourceRecordSet']['ResourceRecords'][rr_idx]['Value']) rr_idx += 1 if 'AliasTarget' in o['Changes'][change_idx]['ResourceRecordSet']: o['Changes'][change_idx]['ResourceRecordSet']['AliasTarget']['DNSName'] = aws_encode(o['Changes'][change_idx]['ResourceRecordSet']['AliasTarget']['DNSName']) change_idx += 1 return o
helper method to process a change batch & encode the bits which need encoding.
def _options_to_dict(df): kolums = ["k1", "k2", "value"] d = df[kolums].values.tolist() dc = {} for x in d: dc.setdefault(x[0], {}) dc[x[0]][x[1]] = x[2] return dc
Make a dictionary to print.
def _init_go2bordercolor(objcolors, **kws): go2bordercolor_ret = objcolors.get_bordercolor() if 'go2bordercolor' not in kws: return go2bordercolor_ret go2bordercolor_usr = kws['go2bordercolor'] goids = set(go2bordercolor_ret).intersection(go2bordercolor_usr) for goid in goids: go2bordercolor_usr[goid] = go2bordercolor_ret[goid] return go2bordercolor_usr
Initialize go2bordercolor with default to make hdrgos bright blue.
def format_repr(obj, attributes) -> str:
    """Format an object's repr from the given attribute names."""
    parts = ['{}={}'.format(attr, repr(getattr(obj, attr)))
             for attr in attributes]
    return "{0}({1})".format(obj.__class__.__qualname__, ', '.join(parts))
Format an object's repr method with specific attributes.
def iterate_specific_packet_range(): now = datetime.utcnow() start = now - timedelta(hours=1) total = 0 for packet in archive.list_packets(start=start, stop=now): total += 1 print('Found', total, 'packets in range')
Count the number of packets in a specific range.
def find(self, pattern):
    """Search for *pattern* in the current memory segment.

    Returns the absolute position of the first match, or -1.
    """
    offset = self.current_segment.data.find(pattern)
    return -1 if offset == -1 else offset + self.current_position
Searches for a pattern in the current memory segment
def analyse_text(text):
    """Check whether the code has a REBOL header (so it is probably not R).

    Returns 1.0 for a leading ``REBOL [`` header, 0.5 when the header
    appears later in the text, and None otherwise.
    """
    if re.match(r'^\s*REBOL\s*\[', text, re.IGNORECASE):
        return 1.0
    elif re.search(r'\s*REBOL\s*\[', text, re.IGNORECASE):
        # The '[' was previously unescaped, which made the pattern an
        # unterminated character set and raised re.error at runtime.
        return 0.5
Check whether the code contains a REBOL header, in which case it is probably not R code.
# Build the first-derivative B-spline for the given coefficients/knots by
# delegating to the class's basis-spline factory with nu=1 (first deriv).
# NOTE(review): extra **kwargs are accepted but silently ignored -- confirm.
def derivatives_factory(cls, coef, degree, knots, ext, **kwargs): return cls._basis_spline_factory(coef, degree, knots, 1, ext)
Given some coefficients, return a the derivative of a B-spline.
def format_endpoint_argument_doc(argument): doc = argument.doc_dict() doc['description'] = clean_description(py_doc_trim(doc['description'])) details = doc.get('detailed_description', None) if details is not None: doc['detailed_description'] = clean_description(py_doc_trim(details)) return doc
Return documentation about the argument that an endpoint accepts.
def _parse_content(response): if response.status_code != 200: raise ApiError(f'unknown error: {response.content.decode()}') result = json.loads(response.content) if not result['ok']: raise ApiError(f'{result["error"]}: {result.get("detail")}') return result
parse the response body as JSON, raise on errors
def config(env=DEFAULT_ENV, default=None, **overrides):
    """Returns configured REDIS dictionary from REDIS_URL.

    Reads the URL from environment variable *env* (falling back to
    *default*), parses it, then applies keyword overrides with their
    keys upper-cased.
    """
    raw_url = os.environ.get(env, default)
    settings = parse(raw_url) if raw_url else {}
    settings.update((key.upper(), val) for key, val in overrides.items())
    return settings
Returns configured REDIS dictionary from REDIS_URL.
def move(self, d, add_tile=True): if d == Board.LEFT or d == Board.RIGHT: chg, get = self.setLine, self.getLine elif d == Board.UP or d == Board.DOWN: chg, get = self.setCol, self.getCol else: return 0 moved = False score = 0 for i in self.__size_range: origin = get(i) line = self.__moveLineOrCol(origin, d) collapsed, pts = self.__collapseLineOrCol(line, d) new = self.__moveLineOrCol(collapsed, d) chg(i, new) if origin != new: moved = True score += pts if moved and add_tile: self.addTile() return score
move and return the move score
def png(self): use_plugin('freeimage') with TemporaryFilePath(suffix='.png') as tmp: safe_range_im = 255 * normalise(self) imsave(tmp.fpath, safe_range_im.astype(np.uint8)) with open(tmp.fpath, 'rb') as fh: return fh.read()
Return png string of image.
def read(filename): fname = os.path.join(here, filename) with codecs.open(fname, encoding='utf-8') as f: return f.read()
Get the long description from a file.
def _convert_pooling_param(param): param_string = "pooling_convention='full', " if param.global_pooling: param_string += "global_pool=True, kernel=(1,1)" else: param_string += "pad=(%d,%d), kernel=(%d,%d), stride=(%d,%d)" % ( param.pad, param.pad, param.kernel_size, param.kernel_size, param.stride, param.stride) if param.pool == 0: param_string += ", pool_type='max'" elif param.pool == 1: param_string += ", pool_type='avg'" else: raise ValueError("Unknown Pooling Method!") return param_string
Convert the pooling layer parameter
def update_fw_local_cache(self, net, direc, start): fw_dict = self.get_fw_dict() if direc == 'in': fw_dict.update({'in_network_id': net, 'in_service_ip': start}) else: fw_dict.update({'out_network_id': net, 'out_service_ip': start}) self.update_fw_dict(fw_dict)
Update the fw dict with Net ID and service IP.
def push(self, value: Union[int, bytes]) -> None: if len(self.values) > 1023: raise FullStack('Stack limit reached') validate_stack_item(value) self.values.append(value)
Push an item onto the stack.
def _EntryToEvent(entry, handlers, transformers): event = rdf_events.AuditEvent( timestamp=entry.timestamp, user=entry.username, action=handlers[entry.router_method_name]) for fn in transformers: fn(entry, event) return event
Converts an APIAuditEntry to a legacy AuditEvent.
# Return a copy of this record with `self.name` formatted against kwargs;
# the original record is untouched (CopyRecord performs the copy).
def format_strings(self, **kwargs): return mutablerecords.CopyRecord( self, name=util.format_string(self.name, kwargs))
String substitution of name.
def gridrange(start, end, step): for x in frange(start.real, end.real, step.real): for y in frange(start.imag, end.imag, step.imag): yield x + y * 1j
Generate a grid of complex numbers
def entities(self): start = 0 end = 0 prev_tag = u'O' chunks = [] for i, (w, tag) in enumerate(self.ne_chunker.annotate(self.words)): if tag != prev_tag: if prev_tag == u'O': start = i else: chunks.append(Chunk(self.words[start: i], start, i, tag=prev_tag, parent=self)) prev_tag = tag if tag != u'O': chunks.append(Chunk(self.words[start: i+1], start, i+1, tag=tag, parent=self)) return chunks
Returns a list of entities for this blob.
def send_email(self, message): msg = MIMEMultipart() msg['From'] = self.from_address msg['To'] = self.to_address msg['Subject'] = self.title msg.attach(MIMEText('<pre>' + cgi.escape(message) + '</pre>', 'html')) smtp = smtplib.SMTP(self.server, self.port, timeout=self.timeout) if self.tls_auth: smtp.starttls() smtp.login(self.user, self.password) smtp.sendmail(self.from_address, self.to_address, msg.as_string()) smtp.quit()
Initiate a SMTP session and send an email.
def show_header(**header):
    """Display a HTTP-style header on the command-line."""
    fields = [('Now', header['now']),
              ('Stop-Name', header['name']),
              ('Stop-ID', header.get('id', None))]
    for label, value in fields:
        print('%s: %s' % (label, value))
    print('')
Display a HTTP-style header on the command-line.
def count_header_blanks(lines, count): blanks = 0 for i in range(2, count + 2): pair = _extract_header_value(lines[i]) if not pair: blanks += 1 return blanks
Count the number of blank lines in the header
def _element(cls): if not cls.__is_selector(): raise Exception("Invalid selector[%s]." %cls.__control["by"]) driver = Web.driver try: elements = WebDriverWait(driver, cls.__control["timeout"]).until(lambda driver: getattr(driver,"find_elements")(cls.__control["by"], cls.__control["value"])) except: raise Exception("Timeout at %d seconds.Element(%s) not found." %(cls.__control["timeout"],cls.__control["by"])) if len(elements) < cls.__control["index"] + 1: raise Exception("Element [%s]: Element Index Issue! There are [%s] Elements! Index=[%s]" % (cls.__name__, len(elements), cls.__control["index"])) if len(elements) > 1: print("Element [%s]: There are [%d] elements, choosed index=%d" %(cls.__name__,len(elements),cls.__control["index"])) elm = elements[cls.__control["index"]] cls.__control["index"] = 0 return elm
find the element with controls
def save(self, filename, wildcard='*', verbose=False):
    """Save parameters matching *wildcard* to *filename*, sorted by name."""
    pattern = wildcard.upper()
    count = 0
    with open(filename, mode='w') as f:
        for p in sorted(self.keys()):
            if p and fnmatch.fnmatch(str(p).upper(), pattern):
                f.write("%-16.16s %f\n" % (p, self.__getitem__(p)))
                count += 1
    if verbose:
        print("Saved %u parameters to %s" % (count, filename))
save parameters to a file
def write_pkg_to_file(self, name, objects, path='.', filename=None): pkg_objs = [] for _, obj in iteritems(objects): pkg_objs.append(obj) sorted_pkg = sorted(pkg_objs, key=lambda k: k['_id']) output = self.json_dumps(sorted_pkg) + '\n' if filename is None: filename = self.safe_filename('Pkg', name) filename = os.path.join(path, filename) self.pr_inf("Writing to file: " + filename) with open(filename, 'w') as f: f.write(output) return filename
Write a list of related objs to file
def list_sdbs(self):
    """Return the names of all safe deposit boxes."""
    return [entry['name'] for entry in self.get_sdbs()]
Return sdbs by Name
def type_name(self):
    """Returns the full type identifier of the field."""
    module = self.type.__module__
    name = self.type.__name__
    if module in ('__builtin__', 'builtins'):
        return name
    return module + '.' + name
Returns the full type identifier of the field.
def _from_hex_digest(digest): return "".join( [chr(int(digest[x : x + 2], 16)) for x in range(0, len(digest), 2)] )
Convert hex digest to sequence of bytes.
def fcast(value: float) -> TensorLike: newvalue = tf.cast(value, FTYPE) if DEVICE == 'gpu': newvalue = newvalue.gpu() return newvalue
Cast to float tensor
def _delete(url, profile): request_url = "{0}/api/dashboards/{1}".format(profile.get('grafana_url'), url) response = requests.delete( request_url, headers={ "Accept": "application/json", "Authorization": "Bearer {0}".format(profile.get('grafana_token')) }, timeout=profile.get('grafana_timeout'), ) data = response.json() return data
Delete a specific dashboard.
def stop_workers(self):
    """Synchronously stop all workers and mark the pool as stopped."""
    self._started = False
    for active_worker in self._workers:
        active_worker.stop()
Synchronously stop any potential workers.
def parametrized_bottleneck(x, hparams): if hparams.bottleneck_kind == "tanh_discrete": d, _ = tanh_discrete_bottleneck( x, hparams.bottleneck_bits, hparams.bottleneck_noise * 0.5, hparams.discretize_warmup_steps, hparams.mode) return d, 0.0 if hparams.bottleneck_kind == "isemhash": return isemhash_bottleneck( x, hparams.bottleneck_bits, hparams.bottleneck_noise * 0.5, hparams.discretize_warmup_steps, hparams.mode, hparams.isemhash_noise_dev, hparams.isemhash_mix_prob) if hparams.bottleneck_kind == "vq": return vq_discrete_bottleneck(x, hparams.bottleneck_bits, hparams.vq_beta, hparams.vq_decay, hparams.vq_epsilon) if hparams.bottleneck_kind == "em": return vq_discrete_bottleneck( x, hparams.bottleneck_bits, hparams.vq_beta, hparams.vq_decay, hparams.vq_epsilon, soft_em=True, num_samples=hparams.vq_num_samples) if hparams.bottleneck_kind == "gumbel_softmax": return gumbel_softmax_discrete_bottleneck( x, hparams.bottleneck_bits, hparams.vq_beta, hparams.vq_decay, hparams.vq_epsilon, hparams.temperature_warmup_steps, hard=False, summary=True) raise ValueError( "Unsupported hparams.bottleneck_kind %s" % hparams.bottleneck_kind)
Meta-function calling all the above bottlenecks with hparams.
def generate_public_ssh_key(ssh_private_key_file): try: with open(ssh_private_key_file, "rb") as key_file: key = key_file.read() except FileNotFoundError: raise IpaUtilsException( 'SSH private key file: %s cannot be found.' % ssh_private_key_file ) try: private_key = serialization.load_pem_private_key( key, password=None, backend=default_backend() ) except ValueError: raise IpaUtilsException( 'SSH private key file: %s is not a valid key file.' % ssh_private_key_file ) return private_key.public_key().public_bytes( serialization.Encoding.OpenSSH, serialization.PublicFormat.OpenSSH )
Generate SSH public key from private key file.
def concatenate_fields(fields, dim): 'Create an INstanceAttribute from a list of InstnaceFields' if len(fields) == 0: raise ValueError('fields cannot be an empty list') if len(set((f.name, f.shape, f.dtype) for f in fields)) != 1: raise ValueError('fields should have homogeneous name, shape and dtype') tpl = fields[0] attr = InstanceAttribute(tpl.name, shape=tpl.shape, dtype=tpl.dtype, dim=dim, alias=None) attr.value = np.array([f.value for f in fields], dtype=tpl.dtype) return attr
Create an InstanceAttribute from a list of InstanceFields.
def parse_descedant_elements(self, element): for descendant in element.iterdescendants(): self.parsers[descendant.tag](descendant)
parses all descendants of an etree element
def cart_db(): config = _config_file() _config_test(config) juicer.utils.Log.log_debug("Establishing cart connection:") cart_con = MongoClient(dict(config.items(config.sections()[0]))['cart_host']) cart_db = cart_con.carts return cart_db
return a pymongo db connection for interacting with cart objects
def remove_and_append(self, index):
    """Remove previous entrances of a tab, and add it as the latest."""
    # Purge every duplicate first so the appended entry is the only one.
    while index in self:
        self.remove(index)
    self.append(index)
Remove previous entrances of a tab, and add it as the latest.
# JSON view hook: serialise `context` (optionally pretty-printed via
# `indent`) and wrap it in the mixin's JSON HTTP response.
def render_to_response(self, context, indent=None): "Returns a JSON response containing 'context' as payload" return self.get_json_response(self.convert_context_to_json(context, indent=indent))
Returns a JSON response containing 'context' as payload
def _rename_with_content_disposition(self, response: HTTPResponse): if not self._filename: return if response.request.url_info.scheme not in ('http', 'https'): return header_value = response.fields.get('Content-Disposition') if not header_value: return filename = parse_content_disposition(header_value) if filename: dir_path = os.path.dirname(self._filename) new_filename = self._path_namer.safe_filename(filename) self._filename = os.path.join(dir_path, new_filename)
Rename using the Content-Disposition header.
def parse_input_file(text, variables=None): text = find_includes(text) lines = text.splitlines() tasks, linenumbers = find_tasks(lines) preamble = [line for line in lines[:linenumbers[0]]] logging.debug("Preamble:\n{}".format("\n".join(preamble))) if variables is not None: preamble += "\n" + "\n".join(variables) environment = create_environment(preamble) code_sections = [] for n in range(len(linenumbers) - 1): code_sections.append((linenumbers[n], linenumbers[n+1])) for n, task in zip(code_sections, tasks): task["code"] = lines[n[0]: n[1]] task["environment"] = environment clean_tasks = [] for task in tasks: clean_tasks.append(Task(**task)) return clean_tasks
Parser for a file with syntax somewhat similar to Drake.
def faucet(self):
    """Show current linked faucet."""
    try:
        faucets = self.faucets
    except AttributeError:
        raise AttributeError("There is no faucet assigned.")
    if len(faucets) > 1:
        raise TypeError("Only one faucet per account.")
    return faucets[0]
Show current linked faucet.