code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _print_fields(self, fields): longest_name = max(fields, key=lambda f: len(f[1]))[1] longest_type = max(fields, key=lambda f: len(f[2]))[2] field_format = '%s%-{}s %-{}s %s'.format( len(longest_name) + self._padding_after_name, len(longest_type) + self._padding_after_type) for field in fields: self._print(field_format % field)
Print the fields, padding the names as necessary to align them.
def mark_fit_good(self, fit, spec=None):
    """Mark a fit good so it is used in high level means.

    Parameters
    ----------
    fit : fit to mark good
    spec : specimen of the fit to mark good (optional; runtime increases
        if not provided because all specimens are searched)

    Returns
    -------
    bool
        True if the fit was marked good, False if its sample orientation
        is flagged bad.
    """
    # Bug fix: identity check with `is None` instead of `== None`.
    if spec is None:
        # Search every specimen for the fit to recover its specimen.
        for spec, fits in list(self.pmag_results_data['specimens'].items()):
            if fit in fits:
                break
    samp = self.Data_hierarchy['sample_of_specimen'][spec]
    # A missing orientation flag defaults to good ('g').
    if 'sample_orientation_flag' not in self.Data_info['er_samples'][samp]:
        self.Data_info['er_samples'][samp]['sample_orientation_flag'] = 'g'
    samp_flag = self.Data_info['er_samples'][samp]['sample_orientation_flag']
    if samp_flag == 'g':
        self.bad_fits.remove(fit)
        return True
    else:
        self.user_warning(
            "Cannot mark this interpretation good its sample orientation has been marked bad")
        return False
Mark a fit good so that it is used in high-level means. Parameters ---------- fit : fit to mark good spec : specimen of the fit to mark good (optional; runtime will increase if not provided)
def mean_by_panel(self, length):
    """Compute the mean across fixed-size panels of each record.

    Splits each record into panels of size `length`, then averages
    across panels. Panel length must subdivide the record exactly
    (enforced by ``_check_panel``).

    Parameters
    ----------
    length : int
        Fixed length with which to subdivide.
    """
    self._check_panel(length)

    def panel_mean(values):
        return values.reshape(-1, length).mean(axis=0)

    return self.map(panel_mean, index=arange(length))
Compute the mean across fixed sized panels of each record. Splits each record into panels of size `length`, and then computes the mean across panels. Panel length must subdivide record exactly. Parameters ---------- length : int Fixed length with which to subdivide.
def process_object(obj):
    """Hook to process the object currently being displayed.

    Returns the invalid-options value when option processing fails,
    otherwise records output info and returns None.
    """
    errors = OptsMagic.process_element(obj)
    if errors:
        return errors
    OutputMagic.info(obj)
Hook to process the object currently being displayed.
def creditusage(cls):
    """Get credit usage per hour (0 when no rating entries exist)."""
    rating = cls.call('hosting.rating.list')
    if not rating:
        return 0
    latest = rating.pop()
    # Only dict-valued entries are resources; other values are metadata.
    return sum(
        sum(resource.values())
        for resource in latest.values()
        if isinstance(resource, dict)
    )
Get credit usage per hour
def update_subject_identifier_on_save(self):
    """Set the subject identifier on save only when it is not already set."""
    if not self.subject_identifier:
        # Fall back to the hex form of the UUID primary key.
        self.subject_identifier = self.subject_identifier_as_pk.hex
    elif re.match(UUID_PATTERN, self.subject_identifier):
        # Identifier is still the raw UUID fallback; intentionally left as-is.
        pass
    return self.subject_identifier
Overridden so that on save the subject identifier is only filled in (from the UUID primary key) when it is not already set.
def main():
    """Entrypoint for compare_layers: parse CLI args and run the comparison."""
    parser = argparse.ArgumentParser(
        description='Tool for testing caffe to mxnet conversion layer by layer')
    parser.add_argument('--image_url', type=str,
                        default='https://github.com/dmlc/web-data/raw/master/mxnet/doc/'
                                'tutorials/python/predict_image/cat.jpg',
                        help='input image to test inference, can be either file path or url')
    parser.add_argument('--caffe_prototxt_path', type=str,
                        default='./model.prototxt',
                        help='path to caffe prototxt')
    parser.add_argument('--caffe_model_path', type=str,
                        default='./model.caffemodel',
                        help='path to caffe weights')
    parser.add_argument('--caffe_mean', type=str,
                        default='./model_mean.binaryproto',
                        help='path to caffe mean file')
    # Bug fix: these thresholds are fractional (defaults 1e-03 / 1e-01),
    # so the CLI type must be float; type=int would reject "0.001".
    parser.add_argument('--mean_diff_allowed', type=float, default=1e-03,
                        help='mean difference allowed between caffe blob and mxnet blob')
    parser.add_argument('--max_diff_allowed', type=float, default=1e-01,
                        help='max difference allowed between caffe blob and mxnet blob')
    parser.add_argument('--gpu', type=int, default=-1,
                        help='the gpu id used for predict')
    args = parser.parse_args()
    convert_and_compare_caffe_to_mxnet(args.image_url, args.gpu,
                                       args.caffe_prototxt_path,
                                       args.caffe_model_path,
                                       args.caffe_mean,
                                       args.mean_diff_allowed,
                                       args.max_diff_allowed)
Entrypoint for compare_layers
def export_to_directory_crtomo(self, directory, norrec='norrec'):
    """Export the sEIT data into data files that can be read by CRTomo.

    Parameters
    ----------
    directory : string
        Output directory; will be created if required.
    norrec : string (nor|rec|norrec)
        Which data to export. Default: norrec
    """
    exporter_crtomo.write_files_to_directory(self.data, directory, norrec=norrec)
Export the sEIT data into data files that can be read by CRTomo. Parameters ---------- directory : string output directory. will be created if required norrec : string (nor|rec|norrec) Which data to export. Default: norrec
def _validateSetting(value, policy):
    """Validate that `value` is appropriate for `policy`.

    - list 'Settings': the value must be a member of the list
    - dict 'Settings': the named validator function is called with the
      value and the extra 'Args'
    - empty or absent 'Settings': no validation, returns True
    """
    log.debug('validating %s for policy %s', value, policy)
    if 'Settings' not in policy:
        return True
    settings = policy['Settings']
    if not settings:
        return True
    if isinstance(settings, list):
        return value in settings
    if isinstance(settings, dict):
        _policydata = _policy_info()
        validator = getattr(_policydata, settings['Function'])
        if not validator(value, **settings['Args']):
            return False
    return True
helper function to validate specified value is appropriate for the policy if the 'Settings' key is a list, the value will check that it is in the list if the 'Settings' key is a dict we will try to execute the function name from the 'Function' key, passing the value and additional arguments from the 'Args' dict if the 'Settings' key is None, we won't do any validation and just return True if the Policy has 'Children', we'll validate their settings too
def _maybe_add_conditions_to_implicit_api_paths(self, template):
    """Add conditions to implicit API paths if necessary.

    Implicit API resource methods come from API events on serverless
    functions, which can carry conditions. If ALL methods under a
    resource path are conditional, the path itself must be conditional:
    a single condition is reused directly, while multiple conditions are
    combined into a composite condition added to the template's
    Conditions section.
    """
    for api_id, api in template.iterate(SamResourceType.Api.value):
        # Only touch APIs whose swagger this transform manages.
        if not api.properties.get('__MANAGE_SWAGGER'):
            continue
        swagger = api.properties.get("DefinitionBody")
        editor = SwaggerEditor(swagger)
        for path in editor.iter_on_path():
            all_method_conditions = set(
                [condition for method, condition in self.api_conditions[api_id][path].items()]
            )
            at_least_one_method = len(all_method_conditions) > 0
            # A None entry means some method is unconditional, so the
            # path must stay unconditional too.
            all_methods_contain_conditions = None not in all_method_conditions
            if at_least_one_method and all_methods_contain_conditions:
                if len(all_method_conditions) == 1:
                    editor.make_path_conditional(path, all_method_conditions.pop())
                else:
                    path_condition_name = self._path_condition_name(api_id, path)
                    self._add_combined_condition_to_template(
                        template.template_dict, path_condition_name, all_method_conditions)
                    editor.make_path_conditional(path, path_condition_name)
        api.properties["DefinitionBody"] = editor.swagger
        template.set(api_id, api)
Add conditions to implicit API paths if necessary. Implicit API resource methods are constructed from API events on individual serverless functions within the SAM template. Since serverless functions can have conditions on them, it's possible to have a case where all methods under a resource path have conditions on them. If all of these conditions evaluate to false, the entire resource path should not be defined either. This method checks all resource paths' methods and if all methods under a given path contain a condition, a composite condition is added to the overall template Conditions section and that composite condition is added to the resource path.
def salt_run():
    """Execute a salt convenience routine."""
    import salt.cli.run
    # Drop the CWD from sys.path before running for safety.
    if '' in sys.path:
        sys.path.remove('')
    runner = salt.cli.run.SaltRun()
    _install_signal_handlers(runner)
    runner.run()
Execute a salt convenience routine.
def load_library_handle(libname, path):
    """Load the library at `path`, returning the ctypes handle (or None).

    Returns None when no usable path is configured or loading fails (a
    UserWarning is emitted in the failure case).
    """
    if path is None or path in ('None', 'none'):
        return None
    # Only the selected branch of the conditional is evaluated, so
    # ctypes.windll is never touched on non-Windows platforms.
    loader = ctypes.windll.LoadLibrary if os.name == 'nt' else ctypes.CDLL
    try:
        return loader(path)
    except (TypeError, OSError):
        warnings.warn(
            'The {libname} library at {path} could not be loaded.'.format(
                path=path, libname=libname),
            UserWarning)
        return None
Load the library, return the ctypes handle.
def _frame_generator(self, frame_duration_ms, audio, sample_rate): n = int(sample_rate * (frame_duration_ms / 1000.0) * 2) offset = 0 timestamp = 0.0 duration = (float(n) / sample_rate) / 2.0 while offset + n < len(audio): yield self.Frame(audio[offset:offset + n], timestamp, duration) timestamp += duration offset += n
Generates audio frames from PCM audio data. Takes the desired frame duration in milliseconds, the PCM data, and the sample rate. Yields Frames of the requested duration.
def round_to_nearest(number, nearest=1):
    """Round `number` to the nearest multiple of `nearest`.

    Whole results come back as int; otherwise the number of decimals is
    chosen from the granularity of `nearest`.

    Examples
    --------
    >>> round_to_nearest(6.8, nearest=2.5)
    7.5
    """
    result = nearest * round(number / nearest)
    if result % 1 == 0:
        return int(result)
    # Pick output precision from the granularity of `nearest`.
    for granularity, digits in ((1, 0), (0.1, 1), (0.01, 2)):
        if nearest % granularity == 0:
            # round() without ndigits returns int for whole granularity.
            return round(result) if digits == 0 else round(result, digits)
    return result
Round 'number' to the nearest multiple of 'nearest'. Parameters ---------- number A real number to round. nearest Number to round to closes multiple of. Returns ------- rounded A rounded number. Examples ------- >>> round_to_nearest(6.8, nearest = 2.5) 7.5
def read_pid_stat(pid):
    """Mock of read_pid_stat (a Linux-specific operation): random CPU times."""
    fields = ("utime", "stime", "cutime", "cstime")
    return {name: random.randint(0, 999999999) for name in fields}
Mocks read_pid_stat as this is a Linux-specific operation.
def run(self):
    """Run all linters and report results.

    Returns:
        bool: **True** if all checks were successful, **False** otherwise.
        An empty file set returns ``self.allow_empty``.
    """
    # Time the file collection phase.
    with util.timed_block() as t:
        files = self._collect_files()
    log.info("Collected <33>{} <32>files in <33>{}s".format(
        len(files), t.elapsed_s
    ))
    if self.verbose:
        for p in files:
            log.info(" <0>{}", p)
    if not files:
        return self.allow_empty
    # Time the actual checks.
    with util.timed_block() as t:
        results = self._run_checks(files)
    log.info("Code checked in <33>{}s", t.elapsed_s)
    success = True
    for name, retcodes in results.items():
        # Any non-zero return code from a checker marks the run failed.
        if any(x != 0 for x in retcodes):
            success = False
            log.err("<35>{} <31>failed with: <33>{}".format(
                name, retcodes
            ))
    return success
Run all linters and report results. Returns: bool: **True** if all checks were successful, **False** otherwise.
def download_file_with_progress_bar(url):
    """Download the file at `url`, displaying a progress bar.

    Returns an io.BytesIO object with the downloaded content. Exits the
    process on a 404 response.
    """
    response = requests.get(url, stream=True)
    if response.status_code == 404:
        msg = ('there was a 404 error trying to reach {} \nThis probably '
               'means the requested version does not exist.'.format(url))
        logger.error(msg)
        sys.exit()
    total_size = int(response.headers["Content-Length"])
    chunk_size = 1024
    buffer = io.BytesIO()
    progress = tqdm(response.iter_content(chunk_size=chunk_size),
                    total=int(total_size / chunk_size), unit="kb", leave=False)
    for chunk in progress:
        buffer.write(chunk)
    return buffer
Downloads a file from the given url, displays a progress bar. Returns a io.BytesIO object
def focus0(self):
    """First focus of the ellipse, as a Point.

    The focus lies along the major axis at linearEccentricity from the
    center.
    """
    focus = Point(self.center)
    axis = 'x' if self.xAxisIsMajor else 'y'
    setattr(focus, axis, getattr(focus, axis) - self.linearEccentricity)
    return focus
First focus of the ellipse, Point class.
def increment_name(self, name, i):
    """Return `name` with `i` appended before its extension.

    e.g. ('test.txt', 1) -> 'test1.txt'. When i == 0 the name is
    returned unchanged; names without a dot get the suffix at the end.
    """
    if i == 0:
        return name
    suffix = str(i)
    if '.' not in name:
        return name + suffix
    parts = name.split('.')
    parts[-2] += suffix
    return '.'.join(parts)
takes something like test.txt and returns test1.txt
def children(self) -> NodeList:
    """Return the element-type child nodes.

    Currently this is not a live object: the list is a snapshot.
    """
    elements = [node for node in self.childNodes
                if node.nodeType == Node.ELEMENT_NODE]
    return NodeList(elements)
Return list of child nodes. Currently this is not a live object.
def name_to_int(name):
    """Get a number for the given colour name; NaN when no name is given.

    CGA colour names map to their palette index; any other name falls
    through to the HTML small-ANSI lookup.
    """
    if not name:
        return float('nan')
    lower = name.lower()
    cga_names = {s: i for i, s in enumerate(colour_names.cga())}
    # Bug fix: the old `cga_names.get(lower) or fallback` treated index 0
    # (the first CGA colour) as a miss because 0 is falsy.
    if lower in cga_names:
        return cga_names[lower]
    return html_to_small_ansi(lower)
Get a number for the given colour name; returns NaN when no name is given.
def twitter_credential(name):
    """Grab a twitter credential from settings.

    Raises AttributeError naming the missing setting when absent.
    """
    credential_name = 'TWITTER_' + name.upper()
    try:
        return getattr(settings, credential_name)
    except AttributeError:
        raise AttributeError(
            'Missing twitter credential in settings: ' + credential_name)
Grab twitter credential from settings
def parse(self, filelike, filename):
    """Parse the given file-like object and return its Module object.

    Raises ParseError (chained from SyntaxError) when the source does
    not compile.
    """
    self.log = log
    self.source = filelike.readlines()
    src = ''.join(self.source)
    # Surface syntax errors up front, before tokenizing.
    try:
        compile(src, filename, 'exec')
    except SyntaxError as error:
        raise ParseError() from error
    self.stream = TokenStream(StringIO(src))
    self.filename = filename
    # Reset per-file parse state before walking the module.
    self.dunder_all = None
    self.dunder_all_error = None
    self.future_imports = set()
    self._accumulated_decorators = []
    return self.parse_module()
Parse the given file-like object and return its Module object.
def validate_link(link_data):
    """Check that the linked model instance exists.

    Raises ValidationError when the referenced object does not exist.
    """
    from django.apps import apps
    # Bug fix: resolve the model OUTSIDE the try block. Previously an
    # invalid 'model' string raised inside the try, and evaluating the
    # `except Model.DoesNotExist` clause then failed with a confusing
    # NameError because `Model` was never bound.
    Model = apps.get_model(*link_data['model'].split('.'))
    try:
        Model.objects.get(pk=link_data['pk'])
    except Model.DoesNotExist:
        raise ValidationError(_("Unable to link onto '{0}'.").format(Model.__name__))
Check if the given model exists, otherwise raise a Validation error
def list_build_records(page_size=200, page_index=0, sort="", q=""):
    """List all BuildRecords; returns None when the query yields nothing."""
    data = list_build_records_raw(page_size, page_index, sort, q)
    return utils.format_json_list(data) if data else None
List all BuildRecords
def vote(self, candidates):
    """Rank artifact candidates by this agent's evaluation, best first.

    Returns a list of (candidate, evaluation) tuples sorted by the
    evaluation in descending order, using
    :meth:`~creamas.core.agent.CreativeAgent.evaluate`.

    :param candidates: artifacts to be ranked
    :returns: ordered list of (candidate, evaluation) tuples
    """
    scored = [(candidate, self.evaluate(candidate)[0]) for candidate in candidates]
    return sorted(scored, key=lambda pair: pair[1], reverse=True)
Rank artifact candidates. The voting is needed for the agents living in societies using social decision making. The function should return a sorted list of (candidate, evaluation)-tuples. Depending on the social choice function used, the evaluation might be omitted from the actual decision making, or only a number of (the highest ranking) candidates may be used. This basic implementation ranks candidates based on :meth:`~creamas.core.agent.CreativeAgent.evaluate`. :param candidates: list of :py:class:`~creamas.core.artifact.Artifact` objects to be ranked :returns: Ordered list of (candidate, evaluation)-tuples
def service_response(body, headers, status_code):
    """Construct a Flask Response from body, headers and status_code.

    :param str body: response body as a string
    :param dict headers: headers for the response
    :param int status_code: status code for the response
    :return: Flask Response
    """
    resp = Response(body)
    resp.headers = headers
    resp.status_code = status_code
    return resp
Constructs a Flask Response from the body, headers, and status_code. :param str body: Response body as a string :param dict headers: headers for the response :param int status_code: status_code for response :return: Flask Response
def set_refresh_rate(self, fps):
    """Set the refresh rate for redrawing the canvas at a timed interval.

    Parameters
    ----------
    fps : float
        Desired rate in frames per second.
    """
    self.rf_fps = fps
    # Cache the period (seconds per frame) alongside the rate.
    self.rf_rate = 1.0 / self.rf_fps
    self.logger.info("set a refresh rate of %.2f fps" % (self.rf_fps))
Set the refresh rate for redrawing the canvas at a timed interval. Parameters ---------- fps : float Desired rate in frames per second.
def merge_from(self, other):
    """Merge information from another NumberFormat object into this one.

    Non-None scalar attributes from `other` overwrite ours; the
    leading-digits patterns are concatenated.
    """
    for attr in ('pattern',
                 'format',
                 'national_prefix_formatting_rule',
                 'national_prefix_optional_when_formatting',
                 'domestic_carrier_code_formatting_rule'):
        value = getattr(other, attr)
        if value is not None:
            setattr(self, attr, value)
    self.leading_digits_pattern.extend(other.leading_digits_pattern)
Merge information from another NumberFormat object into this one.
def _handle_tag_csmtextsettings(self):
    """Handle the CSMTextSettings tag."""
    obj = _make_object("CSMTextSettings")
    obj.TextId = unpack_ui16(self._src)
    # Bit-packed fields must be consumed in declaration order.
    bc = BitConsumer(self._src)
    obj.UseFlashType = bc.u_get(2)
    obj.GridFit = bc.u_get(3)
    obj.Reserved1 = bc.u_get(3)
    obj.Thickness = unpack_float(self._src)
    obj.Sharpness = unpack_float(self._src)
    obj.Reserved2 = unpack_ui8(self._src)
    return obj
Handle the CSMTextSettings tag.
def get_logs(self, jobs, log_file=None):
    """Download the jobs' logs to `log_file`, or log each job's log URL.

    Does nothing when there are no jobs or no log URL is configured.
    """
    if not (jobs and self.log_url):
        return
    for job in jobs:
        job_url = "{}?jobId={}".format(self.log_url, job.get("id"))
        if log_file:
            self._download_log("{}&download".format(job_url), log_file)
        else:
            logger.info("Submit log for job %s: %s", job.get("id"), job_url)
Get log or log url of the jobs.
def indicator_pivot(self, indicator_resource):
    """Pivot point on indicators for this resource.

    Returns a copy of this resource whose request URI is nested under
    the given indicator resource's URI, so subsequent requests return
    the resources associated with that indicator.
    """
    pivot = self.copy()
    pivot._request_uri = '{}/{}'.format(
        indicator_resource.request_uri, pivot._request_uri)
    return pivot
Pivot point on indicators for this resource. This method will return all *resources* (groups, tasks, victims, etc) for this resource that are associated with the provided resource id (indicator value). **Example Endpoints URI's** +--------+---------------------------------------------------------------------------------+ | Method | API Endpoint URI's | +========+=================================================================================+ | GET | /v2/indicators/{resourceType}/{resourceId}/groups/{resourceType} | +--------+---------------------------------------------------------------------------------+ | GET | /v2/indicators/{resourceType}/{resourceId}/groups/{resourceType}/{uniqueId} | +--------+---------------------------------------------------------------------------------+ | GET | /v2/indicators/{resourceType}/{resourceId}/tasks/ | +--------+---------------------------------------------------------------------------------+ | GET | /v2/indicators/{resourceType}/{resourceId}/tasks/{uniqueId} | +--------+---------------------------------------------------------------------------------+ | GET | /v2/indicators/{resourceType}/{resourceId}/victims/ | +--------+---------------------------------------------------------------------------------+ | GET | /v2/indicators/{resourceType}/{resourceId}/victims/{uniqueId} | +--------+---------------------------------------------------------------------------------+ Args: resource_type (string): The resource pivot resource type (indicator type). resource_id (integer): The resource pivot id (indicator value).
def travis(branch: str):
    """Check that this Travis build should create releases.

    :param branch: The branch the environment should be running against.
    """
    expected = {'TRAVIS_BRANCH': branch, 'TRAVIS_PULL_REQUEST': 'false'}
    for var, value in expected.items():
        assert os.environ.get(var) == value
Performs necessary checks to ensure that the travis build is one that should create releases. :param branch: The branch the environment should be running against.
def publish(self, message, tag=b''):
    """Publish `message` with the specified `tag`.

    The wire format is the tag and the payload separated by a NUL byte.

    :param message: message data (bytes)
    :param tag: message tag (bytes)
    """
    frame = tag + b'\0' + message
    self.send(frame)
Publish `message` with specified `tag`. :param message: message data :type message: str :param tag: message tag :type tag: str
def writeSampleIndex(self, fp):
    """Write sample indices and names to `fp`, one per line, sorted by index.

    @param fp: A file-like object, opened for writing.
    """
    pairs = sorted((index, name) for (name, index) in self._samples.items())
    lines = ['%d %s' % (index, name) for (index, name) in pairs]
    print('\n'.join(lines), file=fp)
Write a file of sample indices and names, sorted by index. @param fp: A file-like object, opened for writing.
def replayOne(self, r):
    'Replay the command in one given row.'
    # Expose the row being replayed so other code can detect replay mode.
    CommandLog.currentReplayRow = r
    longname = getattr(r, 'longname', None)
    if longname == 'set-option':
        # Option rows are applied directly rather than replayed as keystrokes.
        try:
            options.set(r.row, r.input, options._opts.getobj(r.col))
            escaped = False
        except Exception as e:
            exceptionCaught(e)
            escaped = True
    else:
        vs = self.moveToReplayContext(r)
        vd().keystrokes = r.keystrokes
        # exec_command appears to return truthy when the command escaped
        # (aborted) -- confirm against exec_command's contract.
        escaped = vs.exec_command(vs.getCommand(longname if longname else r.keystrokes), keystrokes=r.keystrokes)
    CommandLog.currentReplayRow = None
    if escaped:
        warning('replay aborted')
    return escaped
Replay the command in one given row.
def extract(self, pbf, output):
    """Extract POI nodes from an OSM pbf extract into `output`.

    Nodes carrying at least one tag from POI_TAGS are written to the
    output file as one JSON object per line. Returns the output path.
    """
    logging.info("Extracting POI nodes from {0} to {1}".format(pbf, output))
    with open(output, 'w') as out:
        def nodes_callback(nodes):
            for node_id, tags, coordinates in nodes:
                if any(t in tags for t in POI_TAGS):
                    out.write(json.dumps(dict(tags=tags, coordinates=coordinates)))
                    out.write('\n')
        parser = OSMParser(concurrency=4, nodes_callback=nodes_callback)
        parser.parse(pbf)
    return output
Extract POI nodes from an OSM PBF extract and write them to the output file, one JSON object per line.
def _read24(self, register): ret = 0.0 for b in self._read_register(register, 3): ret *= 256.0 ret += float(b & 0xFF) return ret
Read an unsigned 24-bit value as a floating point and return it.
def clearText(self, keepFocus=False):
    """Clear the text in the field, optionally keeping keyboard focus."""
    self.text = ''
    self.focus = keepFocus
    # Redraw so the cleared state becomes visible.
    self._updateImage()
Clear the text in the field
def page_revisions(request, page_id, template_name='wagtailrollbacks/edit_handlers/revisions.html'):
    """Return the GET response listing revisions for the given page.

    :param request: the request instance.
    :param page_id: the page ID.
    :param template_name: the template name.
    :rtype: django.http.HttpResponse
    """
    page = get_object_or_404(Page, pk=page_id)
    # Only users who may edit the page may browse its revisions.
    if not page.permissions_for_user(request.user).can_edit():
        raise PermissionDenied
    page_num = request.GET.get('p', 1)
    context = {
        'page': page,
        'revisions': get_revisions(page, page_num),
        'p': page_num,
    }
    return render(request, template_name, context)
Returns GET response for specified page revisions. :param request: the request instance. :param page_id: the page ID. :param template_name: the template name. :rtype: django.http.HttpResponse.
def get_model_home():
    """Return (creating it if needed) the root folder for downloaded models."""
    d = os.path.join(get_data_home(), 'nnp_models')
    # exist_ok avoids the check-then-create race of the old isdir() test.
    os.makedirs(d, exist_ok=True)
    return d
Returns a root folder path for downloading models.
def _reuse_pre_installed_setuptools(env, installer):
    """Return whether a pre-installed setuptools distribution should be reused.

    Returns True (and prints a notice) when configuration allows reusing
    the environment's setuptools; returns None otherwise.
    """
    if not env.setuptools_version:
        return
    reuse_old = config.reuse_old_setuptools
    reuse_best = config.reuse_best_setuptools
    reuse_future = config.reuse_future_setuptools
    reuse_comment = None
    if reuse_old or reuse_best or reuse_future:
        pv_old = parse_version(env.setuptools_version)
        pv_new = parse_version(installer.setuptools_version())
        if pv_old < pv_new:
            # Pre-installed is older than the bundled installer's version.
            if reuse_old:
                reuse_comment = "%s+ recommended" % (
                    installer.setuptools_version(),)
        elif pv_old > pv_new:
            # Pre-installed is newer than the bundled installer's version.
            if reuse_future:
                reuse_comment = "%s+ required" % (
                    installer.setuptools_version(),)
        elif reuse_best:
            # Versions match exactly; no extra comment needed.
            reuse_comment = ""
    # reuse_comment stays None when no reuse rule matched.
    if reuse_comment is None:
        return
    if reuse_comment:
        reuse_comment = " (%s)" % (reuse_comment,)
    print("Reusing pre-installed setuptools %s distribution%s." % (
        env.setuptools_version, reuse_comment))
    return True
Return whether a pre-installed setuptools distribution should be reused.
def stats(self):
    """Return a new raw REST interface to stats resources.

    :rtype: :py:class:`ns1.rest.stats.Stats`
    """
    import ns1.rest.stats
    return ns1.rest.stats.Stats(self.config)
Return a new raw REST interface to stats resources :rtype: :py:class:`ns1.rest.stats.Stats`
def _compress(self):
    """Prunes the cataloged observations.

    Walks the linked list of quantile observations, merging a node's
    successor into it whenever the combined rank band stays within the
    error invariant for the running rank.
    """
    rank = 0.0
    current = self._head
    while current and current._successor:
        # Merge the successor into `current` when the combined band is
        # still within the allowed invariant at this rank.
        if current._rank + current._successor._rank + current._successor._delta <= self._invariant(rank, self._observations):
            removed = current._successor
            current._value = removed._value
            current._rank += removed._rank
            current._delta = removed._delta
            current._successor = removed._successor
        rank += current._rank
        current = current._successor
Prunes the cataloged observations.
def short_repr(obj, max_len=40):
    """Return a short, term-friendly string representation of the object.

    Args:
        obj: An object for which to return a string representation.
        max_len: Maximum length of the returned string. Longer reprs are
            turned into a brief descriptive string giving the type and
            length of obj's repr.
    """
    text = repr(obj)
    if len(text) > max_len:
        return '<{} of length {}>'.format(type(obj).__name__, len(text))
    return text
Returns a short, term-friendly string representation of the object. Args: obj: An object for which to return a string representation. max_len: Maximum length of the returned string. Longer reprs will be turned into a brief descriptive string giving the type and length of obj.
def hdf5_storable(type_or_storable, *args, **kwargs):
    """Register a `Storable` instance in the global HDF5 service.

    A plain type is first wrapped with `default_storable`.
    """
    storable = type_or_storable
    if not isinstance(storable, Storable):
        storable = default_storable(storable)
    hdf5_service.registerStorable(storable, *args, **kwargs)
Registers a `Storable` instance in the global service.
def get_default_voices(self):
    """Build a js-menu-ready list of dicts from the django admin app list.

    Used when no custom menu is defined in settings.
    """
    return [
        {
            'type': 'app',
            'label': app.get('name', ''),
            'url': app.get('app_url', ''),
            'children': [
                {
                    'type': 'model',
                    'label': model.get('name', ''),
                    'url': model.get('admin_url', ''),
                }
                for model in app.get('models', [])
            ],
        }
        for app in self.app_list
    ]
When no custom menu is defined in settings Retrieves a js menu ready dict from the django admin app list
def prj_resolution_data(project, role):
    """Return '<resx> x <resy>' for DisplayRole; None for any other role.

    :param project: the project that holds the data
    :param role: item data role (QtCore.Qt.ItemDataRole)
    """
    if role != QtCore.Qt.DisplayRole:
        return None
    return '%s x %s' % (project.resx, project.resy)
Return the data for resolution :param project: the project that holds the data :type project: :class:`jukeboxcore.djadapter.models.Project` :param role: item data role :type role: QtCore.Qt.ItemDataRole :returns: data for the resolution :rtype: depending on role :raises: None
def create(self):
    """Create this column family in the owning table.

    Issues a ModifyColumnFamilies request containing a single `create`
    modification for this family.
    """
    column_family = self.to_pb()
    modification = table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification(
        id=self.column_family_id, create=column_family
    )
    # Reach the shared admin client through the owning table's instance.
    client = self._table._instance._client
    client.table_admin_client.modify_column_families(
        self._table.name, [modification]
    )
Create this column family. For example: .. literalinclude:: snippets_table.py :start-after: [START bigtable_create_column_family] :end-before: [END bigtable_create_column_family]
def check_ten_percent_voltage_deviation(network):
    """Raise ValueError if any voltage deviates more than 10% from 1 p.u.

    Parameters
    ----------
    network : :class:`~.grid.network.Network`
    """
    v_mag_pu = network.results.v_res()
    out_of_band = (v_mag_pu > 1.1) | (v_mag_pu < 0.9)
    if out_of_band.any().any():
        raise ValueError("Maximum allowed voltage deviation of 10% exceeded.")
Checks if 10% criteria is exceeded. Parameters ---------- network : :class:`~.grid.network.Network`
def get_inventory(self):
    """Retrieve inventory of the targeted system.

    Yields (name, info) tuples: first the overall "System" FRU, then
    each SDR-described FRU (info is None for absent items), then any
    OEM-provided components.
    """
    self.oem_init()
    yield ("System", self._get_zero_fru())
    self.init_sdr()
    for fruid in sorted(self._sdr.fru):
        fruinf = fru.FRU(
            ipmicmd=self, fruid=fruid, sdr=self._sdr.fru[fruid]).info
        if fruinf is not None:
            # Give the OEM layer a chance to post-process the FRU data.
            fruinf = self._oem.process_fru(fruinf, self._sdr.fru[fruid].fru_name)
        yield (self._sdr.fru[fruid].fru_name, fruinf)
    for componentpair in self._oem.get_oem_inventory():
        yield componentpair
Retrieve inventory of system Retrieve inventory of the targeted system. This frequently includes serial numbers, sometimes hardware addresses, sometimes memory modules This function will retrieve whatever the underlying platform provides and apply some structure. Iterating over the return yields tuples of a name for the inventoried item and dictionary of descriptions or None for items not present.
def name2rgb(name):
    """Convert the name of a color into its (r, g, b) tuple of 0-255 ints.

    Requires the third-party `colour` package.
    """
    try:
        import colour
    except ImportError:
        raise ImportError('You need colour to be installed: pip install colour')
    c = colour.Color(name)
    return tuple(int(channel * 255) for channel in (c.red, c.green, c.blue))
Convert the name of a color into its RGB value
def relabel(self, change):
    """Record a relabel: map the image's path to its new label.

    `change.new`/`change.old` are the new and old labels and
    `change.owner.file_path` locates the image. NOTE: only the CSV label
    mapping is updated here; despite the old docstring, no file is moved
    on disk.
    """
    fp = Path(change.owner.file_path)
    # Removed the unused `parent = fp.parents[1]` computation, which
    # could also raise IndexError for shallow paths.
    self._csv_dict[fp] = change.new
Relabel images by moving from parent dir with old label `class_old` to parent dir with new label `class_new`.
def write_training_data(self, features, targets):
    """Pickle the {feature: target} mapping to training.pkl in the repo path.

    Raises AssertionError when features and targets differ in length.
    """
    assert len(features) == len(targets)
    data = dict(zip(features, targets))
    # Bug fix: pickle writes bytes, so the file must be opened in binary
    # mode ('wb'); text mode ('w') raises TypeError under Python 3.
    with open(os.path.join(self.repopath, 'training.pkl'), 'wb') as fp:
        pickle.dump(data, fp)
Write the features/targets mapping as a pickled dictionary to 'training.pkl' in the repository path.
def app_authenticate(self, account=None, flush=True, bailout=False):
    """Attempt to perform application-level authentication.

    App-level authentication is prompting handled by the remote device
    (username/password requested after the connection is accepted),
    as opposed to protocol-level authentication handled by the client.
    Returns upon finding the first command line prompt.

    :type account: Account
    :param account: An account object, like login().
    :type flush: bool
    :param flush: Whether to flush the last prompt from the buffer.
    :type bailout: bool
    :param bailout: Whether to wait for a prompt after sending the password.
    """
    # The account context manager handles acquiring/releasing the account.
    with self._get_account(account) as account:
        user = account.get_name()
        password = account.get_password()
        self._dbg(1, "Attempting to app-authenticate %s." % user)
        self._app_authenticate(account, password, flush, bailout)
        self.app_authenticated = True
Attempt to perform application-level authentication. Application level authentication is needed on devices where the username and password are requested from the user after the connection was already accepted by the remote device. The difference between app-level authentication and protocol-level authentication is that in the latter case, the prompting is handled by the client, whereas app-level authentication is handled by the remote device. App-level authentication comes in a large variety of forms, and while this method tries hard to support them all, there is no guarantee that it will always work. We attempt to smartly recognize the user and password prompts; for a list of supported operating systems please check the Exscript.protocols.drivers module. Returns upon finding the first command line prompt. Depending on whether the flush argument is True, it also removes the prompt from the incoming buffer. :type account: Account :param account: An account object, like login(). :type flush: bool :param flush: Whether to flush the last prompt from the buffer. :type bailout: bool :param bailout: Whether to wait for a prompt after sending the password.
def copy_file(source, destination, unique=False, sort=False, case_sensitive=True, create_path=False):
    """Copy a file, optionally de-duplicating/sorting its lines.

    Args:
        source: absolute/relative path of the source file
        destination: absolute/relative path of the destination file; use
            the same as source to replace the content of an existing file
        unique: copy only unique lines from the file
        sort: sort the content of the file
        case_sensitive: perform unique/sort operations case-sensitively
        create_path: recursively create the destination directory if missing

    Returns:
        None
    """
    _File.copy(source, destination, unique, sort, case_sensitive, create_path)
Python utility to create file Args: source: absolute/relative path of source file destination: absolute/relative path of destination file. Use same as source for replacing the content of existing file. unique: Copy only unique lines from file sort: Sort the content of file case_sensitive: unique/sort operations to be performed case-sensitive string create_path: Recursively create the path to destination directory in case not found Returns: None
def selected(self, new):
    """Set the selection from a list (or single item) of objects or names.

    Overwrites any existing selection; string items are resolved through
    the options mapping.
    """
    def resolve(item):
        return self.options[item] if isinstance(item, str) else item
    self.widget.value = coerce_to_list(new, resolve)
Set selected from list or instance of object or name. Over-writes existing selection
def snapshot(self):
    """Capture this connection state, mainly the cipher state and seq_num.

    Returns a new connState mirroring this one, with a snapshot of the
    cipher object and (when present) the HMAC key.
    """
    snap = connState(connection_end=self.connection_end,
                     read_or_write=self.row,
                     seq_num=self.seq_num,
                     compression_alg=type(self.compression),
                     ciphersuite=type(self.ciphersuite),
                     tls_version=self.tls_version)
    # The cipher keeps internal state; copy it via its own snapshot.
    snap.cipher = self.cipher.snapshot()
    if self.hmac:
        snap.hmac.key = self.hmac.key
    return snap
This is used mostly as a way to keep the cipher state and the seq_num.
def summarizeResults(expName, suite):
    """Summarize last-iteration metric values for each experiment.

    Prints the 'totalCorrect' and 'testerror' values sorted descending,
    with the experiment name of each entry.
    """
    print("\n================", expName, "=====================")
    # The two metrics used to be two copy-pasted try blocks.
    for metric in ("totalCorrect", "testerror"):
        _print_sorted_metric(expName, suite, metric)


def _print_sorted_metric(expName, suite, metric):
    """Print one metric's last-iteration values, best first."""
    try:
        values, params = suite.get_values_fix_params(
            expName, 0, metric, "last")
        v = np.array(values)
        for i in v.argsort()[::-1]:
            print(v[i], params[i]["name"])
        print()
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # are no longer swallowed.
    except Exception:
        print("Couldn't analyze experiment", expName)
Summarize the totalCorrect value from the last iteration for each experiment in the directory tree.
def describe_guest(userid):
    """Print the basic information of a virtual machine.

    Input parameters:
    :userid: USERID of the guest, last 8 if length > 8

    Raises RuntimeError when the guest does not exist.
    """
    guest_list_info = client.send_request('guest_list')
    # Python 2 needs the id decoded to unicode for the membership test;
    # the `unicode` builtin is only evaluated on Python 2.
    userid_1 = (unicode(userid, "utf-8") if sys.version[0] == '2' else userid)
    if userid_1 not in guest_list_info['output']:
        raise RuntimeError("Guest %s does not exist!" % userid)
    guest_describe_info = client.send_request('guest_get_definition_info', userid)
    print("\nThe created guest %s's info are: \n%s\n" % (userid, guest_describe_info))
Get the basic information of virtual machine. Input parameters: :userid: USERID of the guest, last 8 if length > 8
def evalMetric(self, x, method=None):
    """Evaluate the horsetail matching metric at the given design point.

    :param iterable x: values of the design variables, passed as the
        first argument to the function fqoi
    :param str method: method used to evaluate the metric
        ('empirical' or 'kernel')
    :return: value of the metric at `x`
    :rtype: float
    """
    if self.verbose:
        print('----------')
        print('At design: ' + str(x))
    samples, grads = self.evalSamples(x)
    if self.verbose:
        print('Evaluating metric')
    return self.evalMetricFromSamples(samples, grads, method)
Evaluates the horsetail matching metric at given values of the design variables. :param iterable x: values of the design variables, this is passed as the first argument to the function fqoi :param str method: method to use to evaluate the metric ('empirical' or 'kernel') :return: metric_value - value of the metric evaluated at the design point given by x :rtype: float *Example Usage*:: >>> def myFunc(x, u): return x[0]*x[1] + u >>> u1 = UniformParameter() >>> theHM = HorsetailMatching(myFunc, u) >>> x0 = [1, 2] >>> theHM.evalMetric(x0)
def set_mode(self, mode):
    """Set the global mode of the rejester system.

    :param str mode: one of the TERMINATE, RUN or IDLE constants
    :raise rejester.exceptions.ProgrammerError: on invalid `mode`
    """
    valid_modes = (self.TERMINATE, self.RUN, self.IDLE)
    if mode not in valid_modes:
        raise ProgrammerError('mode=%r is not recognized' % mode)
    # Persist the mode under the registry lock so workers see it atomically.
    with self.registry.lock(identifier=self.worker_id) as session:
        session.set('modes', 'mode', mode)
    logger.info('set mode to %s', mode)
Set the global mode of the rejester system. This must be one of the constants :attr:`TERMINATE`, :attr:`RUN`, or :attr:`IDLE`. :attr:`TERMINATE` instructs any running workers to do an orderly shutdown, completing current jobs then exiting. :attr:`IDLE` instructs workers to stay running but not start new jobs. :attr:`RUN` tells workers to do actual work. :param str mode: new rejester mode :raise rejester.exceptions.ProgrammerError: on invalid `mode`
def LAMBDA(self, node):
    """Visit a lambda with annotation handling deferred.

    Likely very brittle; currently works for pyflakes 1.3.0. During
    LAMBDA, visiting the function's body is already deferred and the
    only eager calls to `handleNode` are for annotations, so swapping
    handleNode with deferHandleNode defers exactly the annotations.
    """
    self.handleNode, self.deferHandleNode = self.deferHandleNode, self.handleNode
    super().LAMBDA(node)
    # Restore the original handler bindings.
    self.handleNode, self.deferHandleNode = self.deferHandleNode, self.handleNode
This is likely very brittle, currently works for pyflakes 1.3.0. Deferring annotation handling depends on the fact that during calls to LAMBDA visiting the function's body is already deferred and the only eager calls to `handleNode` are for annotations.
def copy_directory(src, dest, force=False):
    """Copy an entire directory recursively.

    Args:
        src: source directory (a regular file is copied as a fallback)
        dest: destination path
        force: remove an existing `dest` first. Now accepts any truthy
            value; the old `force is True` test silently ignored
            truthy values like 1.
    """
    if os.path.exists(dest) and force:
        shutil.rmtree(dest)
    try:
        shutil.copytree(src, dest)
    except OSError as e:
        if e.errno == errno.ENOTDIR:
            # src is a regular file, not a directory.
            shutil.copy(src, dest)
        else:
            bot.error('Directory not copied. Error: %s' % e)
            sys.exit(1)
Copy an entire directory recursively
def resize(widthWindow, heightWindow):
    """Apply initial OpenGL state: blending, point smoothing, no depth test.

    NOTE(review): despite the name and parameters, the window dimensions
    are not used here -- confirm whether viewport/projection setup is
    handled elsewhere.
    """
    glEnable(GL_BLEND)
    glEnable(GL_POINT_SMOOTH)
    glShadeModel(GL_SMOOTH)
    # Additive alpha blending.
    glBlendFunc(GL_SRC_ALPHA,GL_ONE)
    glHint(GL_PERSPECTIVE_CORRECTION_HINT,GL_NICEST);
    glHint(GL_POINT_SMOOTH_HINT,GL_NICEST);
    glDisable(GL_DEPTH_TEST)
Initial settings for the OpenGL state machine, clear color, window size, etc
def mchirp_sampler_imf(**kwargs):
    """Draw chirp mass samples for the IMF model.

    Keyword arguments are forwarded to draw_imf_samples as model
    parameters and number of samples; returns the chirp mass array.
    """
    mass1, mass2 = draw_imf_samples(**kwargs)
    return mchirp_from_mass1_mass2(mass1, mass2)
Draw chirp mass samples for the IMF model

Parameters
----------
**kwargs: string
    Keyword arguments as model parameters and number of samples

Returns
-------
mchirp-astro: array
    The chirp mass samples for the population
def get (self, feature):
    """Returns all values of 'feature'.

    `feature` may be a Feature instance, a feature name, or a
    single-element list of either; the feature -> values index is built
    lazily on first use.
    """
    # Idiom fix: isinstance instead of comparing type objects.
    if isinstance(feature, list):
        feature = feature[0]
    if not isinstance(feature, b2.build.feature.Feature):
        feature = b2.build.feature.get(feature)
    assert isinstance(feature, b2.build.feature.Feature)

    if self.feature_map_ is None:
        # Build the feature -> [values] index once, over all properties.
        self.feature_map_ = {}
        for v in self.all_:
            self.feature_map_.setdefault(v.feature, []).append(v.value)

    return self.feature_map_.get(feature, [])
Returns all values of 'feature'.
def respond(self, result):
    """Create a success response for this request.

    Returns ``None`` for one-way requests and notifications (requests
    without a unique id), since those must not be answered.

    :param result: the result of the invoked method
    :returns: a :py:class:`JSONRPCSuccessResponse` or ``None``
    """
    if self.one_way or self.unique_id is None:
        return None

    reply = JSONRPCSuccessResponse()
    reply.result = result
    reply.unique_id = self.unique_id
    return reply
Create a response to this request. When processing the request completed successfully this method can be used to create a response object. :param result: The result of the invoked method. :type result: Anything that can be encoded by JSON. :returns: A response object that can be serialized and sent to the client. :rtype: :py:class:`JSONRPCSuccessResponse`
def request_examples(self, attack_config, criteria, run_counts, batch_size):
    """Return a numpy array of integer example indices for the next batch.

    Abstract: subclasses must override this method.
    """
    message = str(type(self)) + "needs to implement request_examples"
    raise NotImplementedError(message)
Returns a numpy array of integer example indices to run in the next batch.
def from_time(cls, source):
    """Build an instance from a datetime.time.

    Microseconds are truncated to milliseconds.
    """
    return cls(
        hours=source.hour,
        minutes=source.minute,
        seconds=source.second,
        milliseconds=source.microsecond // 1000,
    )
datetime.time -> SubRipTime corresponding to time object
def rollapply(data, window, fn):
    """Apply `fn` over a rolling window of size `window`.

    Positions before the first full window are left as NaN. If `window`
    exceeds the data length, an all-NaN copy is returned.

    Args:
        data (Series or DataFrame): input data
        window (int): window size
        fn (function): function applied to each window

    Returns:
        Object of the same dimensions as `data`.
    """
    out = data.copy()
    out[:] = np.nan

    total = len(data)
    if window > total:
        return out

    # `end` is the exclusive end of each window.
    for end in range(window, total + 1):
        out.iloc[end - 1] = fn(data.iloc[end - window:end])

    return out
Apply a function fn over a rolling window of size window.

Args:
    * data (Series or DataFrame): Series or DataFrame
    * window (int): Window size
    * fn (function): Function to apply over the rolling window.
        For a series, the return value is expected to be a single
        number. For a DataFrame, it should return a new row.

Returns:
    * Object of same dimensions as data
async def self_check(self):
    """Checks that the platforms configuration is all right.

    Yields a ``HealthCheckFail`` for each configuration problem found,
    then delegates to each platform class's own ``self_check``.
    """
    platforms = set()  # platform classes seen so far, for duplicate detection

    for platform in get_platform_settings():
        try:
            name = platform['class']
            cls: Type[Platform] = import_class(name)
        except KeyError:
            # The platform's settings have no `class` entry at all.
            yield HealthCheckFail(
                '00004',
                'Missing platform `class` name in configuration.'
            )
        except (AttributeError, ImportError, ValueError):
            # `name` is bound here: only import_class raises these.
            yield HealthCheckFail(
                '00003',
                f'Platform "{name}" cannot be imported.'
            )
        else:
            if cls in platforms:
                yield HealthCheckFail(
                    '00002',
                    f'Platform "{name}" is imported more than once.'
                )

            platforms.add(cls)

            # Forward any failures reported by the platform itself.
            async for check in cls.self_check():
                yield check
Checks that the platforms configuration is all right.
def _get_default_mr_params(cls):
    """Gets default values for old API.

    Builds a lenient config instance and returns its mapreduce params
    with the legacy api_version of 0.
    """
    lenient_config = cls(_lenient=True)
    mr_params = lenient_config._get_mr_params()
    mr_params["api_version"] = 0
    return mr_params
Gets default values for old API.
def config(self):
    """Load the YAML config file, applying any runtime overrides.

    Returns an empty dict when the file does not exist; keys present in
    ``self._override`` replace the values loaded from disk.
    """
    if not self.config_file.exists():
        return {}
    with open(self.config_file.as_posix(), 'rt') as f:
        loaded = yaml.safe_load(f)
    return {key: self._override.get(key, value)
            for key, value in loaded.items()}
Allows changing the config on the fly
def prerequisites():
    """Display information about obtaining the aeidon module."""
    url = "http://home.gna.org/gaupol/download.html"
    debian = "sudo apt-get install python3-aeidon"
    other = "python3 setup.py --user --without-gaupol clean install"
    message = (
        "The aeidon module is missing!\n\n"
        "Try '{0}' or the appropriate command for your package manager.\n\n"
        "You can also download the tarball for gaupol (which includes "
        "aeidon) at {1}. After downloading, unpack and run '{2}'."
    ).format(debian, url, other)
    LOGGER.error(message)
Display information about obtaining the aeidon module.
def parse_diff_filenames(diff_files):
    """Parse the output of filenames_diff_cmd.

    Returns the list of ``.py`` file names, skipping untracked entries
    (lines starting with '?').
    """
    files = []
    for line in diff_files.splitlines():
        line = line.strip()
        # Fix: raw string and escaped dot — the old pattern '.*.py' also
        # matched names that merely ended in "py" (e.g. "foo_xpy").
        fn = re.findall(r'[^ ]+\s+(.*\.py)', line)
        if fn and not line.startswith('?'):
            files.append(fn[0])
    return files
Parse the output of filenames_diff_cmd.
def compile_theme(theme_id=None):
    """Compiles a theme.

    Compiles the LESS stylesheet of the theme named `theme_id`, or of
    every registered theme when `theme_id` is None.
    """
    from engineer.processors import convert_less
    from engineer.themes import ThemeManager

    if theme_id is None:
        themes = ThemeManager.themes().values()
    else:
        themes = [ThemeManager.theme(theme_id)]

    with(indent(2)):
        puts(colored.yellow("Compiling %s themes." % len(themes)))
        for theme in themes:
            # Output goes next to the source: stylesheets/<id>_precompiled.css
            theme_output_path = (theme.static_root / ('stylesheets/%s_precompiled.css' % theme.id)).normpath()
            puts(colored.cyan("Compiling theme %s to %s" % (theme.id, theme_output_path)))
            with indent(4):
                puts("Compiling...")
                convert_less(theme.static_root / ('stylesheets/%s.less' % theme.id),
                             theme_output_path,
                             minify=True)
                puts(colored.green("Done.", bold=True))
Compiles a theme.
def alter(self, function):
    """Alters the currently stored value by applying a function on it.

    :param function: (Function), a stateful serializable object whose
        counterpart ``org.hazelcast.core.IFunction`` implementation must
        be registered on the server side.
    """
    check_not_none(function, "function can't be None")
    function_data = self._to_data(function)
    return self._encode_invoke(atomic_long_alter_codec,
                               function=function_data)
Alters the currently stored value by applying a function on it. :param function: (Function), A stateful serializable object which represents the Function defined on server side. This object must have a serializable Function counter part registered on server side with the actual ``org.hazelcast.core.IFunction`` implementation.
def parse_deckspawn_metainfo(protobuf: bytes, version: int) -> dict:
    """Decode a deck_spawn op_return protobuf message and validate it.

    :param protobuf: serialized DeckSpawnProto message
    :param version: expected deck version
    :raises InvalidDeckMetainfo: if the deck has no name
    :raises InvalidDeckVersion: if the deck version does not match
    """
    deck = DeckSpawnProto()
    deck.ParseFromString(protobuf)

    error = {"error": "Deck ({deck}) metainfo incomplete, deck must have a name.".format(deck=deck.name)}

    if deck.name == "":
        raise InvalidDeckMetainfo(error)

    if deck.version != version:
        # Fix: this used to raise with a *set* ({"error", "..."}) — a
        # comma/colon typo; make it a dict like the error payload above.
        raise InvalidDeckVersion({"error": "Deck version mismatch."})

    return {
        "version": deck.version,
        "name": deck.name,
        "issue_mode": deck.issue_mode,
        "number_of_decimals": deck.number_of_decimals,
        "asset_specific_data": deck.asset_specific_data
    }
Decode deck_spawn tx op_return protobuf message and validate it.
Raise an error if the deck_spawn metainfo is incomplete or the version mismatches.
def _check_read(self, fd, nbytes):
    """Read exactly `nbytes` from `fd`.

    Raises InvalidZoneinfoFile on a short read. Returns a bytearray on
    Python 2 and bytes on Python 3.
    """
    data = fd.read(nbytes)
    short_read = (not data and nbytes > 0) or len(data) != nbytes
    if short_read:
        actual = len(data) if data else 0
        raise InvalidZoneinfoFile(
            "Expected {} bytes reading {}, "
            "but got {}".format(nbytes, fd.name, actual)
        )

    if PY2:
        return bytearray(data)

    return data
Reads the given number of bytes from the given file and checks that the correct number of bytes could be read.
def pt_scale(pt=(0.0, 0.0), f=1.0):
    """Return given point scaled by factor f from the origin.

    `pt` must be a tuple of at least two floats and `f` a float;
    violations trip an AssertionError, as in the rest of this module.
    """
    assert isinstance(pt, tuple)
    assert len(pt) > 1
    for coord in pt:
        assert isinstance(coord, float)
    assert isinstance(f, float)
    return tuple(coord * f for coord in pt)
Return given point scaled by factor f from origin.
def fromdelta(args):
    """
    %prog fromdelta deltafile

    Convert deltafile to coordsfile.
    """
    # NOTE: the docstring above is consumed at runtime as the usage text.
    p = OptionParser(fromdelta.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    deltafile, = args
    # Swap the extension: foo.delta -> foo.coords
    coordsfile = deltafile.rsplit(".", 1)[0] + ".coords"
    cmd = "show-coords -rclH {0}".format(deltafile)
    sh(cmd, outfile=coordsfile)

    return coordsfile
%prog fromdelta deltafile Convert deltafile to coordsfile.
def __definitions_descriptor(self):
    """Describes the definitions section of the OpenAPI spec.

    Returns:
      Dictionary describing the definitions of the spec.
    """
    result = {}
    for def_key, def_value in self.__parser.schemas().iteritems():
        if 'properties' in def_value or 'type' in def_value:
            key_result = {}
            required_keys = set()
            if 'type' in def_value:
                key_result['type'] = def_value['type']
            if 'properties' in def_value:
                # Hoist per-property `required` flags into a schema-level
                # sorted list, stripping them from the property descriptors.
                for prop_key, prop_value in def_value['properties'].items():
                    if isinstance(prop_value, dict) and 'required' in prop_value:
                        required_keys.add(prop_key)
                        del prop_value['required']
                key_result['properties'] = def_value['properties']
            if required_keys:
                key_result['required'] = sorted(required_keys)
            result[def_key] = key_result

    # Post-process: mark $ref-bearing dicts as objects and rewrite their
    # reference paths.
    # NOTE(review): this iterates the *top-level* values of each schema
    # entry (type/properties/required), not the individual property
    # descriptors — confirm that _add_def_paths expects this shape.
    for def_value in result.itervalues():
        for prop_value in def_value.itervalues():
            if isinstance(prop_value, dict):
                if '$ref' in prop_value:
                    prop_value['type'] = 'object'
                self._add_def_paths(prop_value)

    return result
Describes the definitions section of the OpenAPI spec. Returns: Dictionary describing the definitions of the spec.
def tag_torsion_angles(self, force=False):
    """Tag each Monomer of the Polymer with its omega, phi and psi angles.

    Parameters
    ----------
    force : bool, optional
        If `True`, re-tag even when every Monomer already carries an
        'omega' tag.
    """
    already_tagged = all('omega' in monomer.tags.keys()
                         for monomer in self._monomers)
    if force or not already_tagged:
        torsion_angles = measure_torsion_angles(self._monomers)
        for monomer, angles in zip(self._monomers, torsion_angles):
            omega, phi, psi = angles
            monomer.tags['omega'] = omega
            monomer.tags['phi'] = phi
            monomer.tags['psi'] = psi
            monomer.tags['tas'] = (omega, phi, psi)
    return
Tags each Monomer of the Polymer with its omega, phi and psi torsion angle. Parameters ---------- force : bool, optional If `True` the tag will be run even if `Residues` are already tagged.
def __remove_pyc_pyo(fname):
    """Eventually remove .pyc and .pyo files associated to a Python script."""
    if osp.splitext(fname)[1] != '.py':
        return
    for suffix in ('c', 'o'):
        compiled = fname + suffix
        if osp.exists(compiled):
            os.remove(compiled)
Eventually remove .pyc and .pyo files associated to a Python script
def write_json(self, **kwargs):
    """Write a JSON object on a single line.

    The keyword arguments form a single JSON object; non-object values
    cannot be written with this method. The stream is flushed after the
    write.
    """
    line = json.dumps(kwargs) + "\n"
    self.stdout.write(line)
    self.stdout.flush()
Write an JSON object on a single line. The keyword arguments are interpreted as a single JSON object. It's not possible with this method to write non-objects.
def do_reload(module: types.ModuleType, newer_than: int) -> bool:
    """Reload `module` if its source file changed after `newer_than`.

    :param module: the module object to be reloaded
    :param newer_than: seconds-since-epoch threshold; the module is only
        reloaded when its source was modified at or after this time
    :return: whether the module was reloaded
    """
    source_path = getattr(module, '__file__')
    package_dir = getattr(module, '__path__', [None])[0]
    # Packages may expose no __file__; fall back to their __init__.py.
    if source_path is None and package_dir:
        source_path = os.path.join(package_dir, '__init__.py')

    if os.path.getmtime(source_path) < newer_than:
        return False

    try:
        importlib.reload(module)
    except ImportError:
        return False
    return True
Executes the reload of the specified module if the source file that it was loaded from was updated more recently than the specified time :param module: A module object to be reloaded :param newer_than: The time in seconds since epoch that should be used to determine if the module needs to be reloaded. If the module source was modified more recently than this time, the module will be refreshed. :return: Whether or not the module was reloaded
def GetRelativePath(self, path_spec):
    """Returns the relative path based on a resolved path specification.

    The relative path is the location of the upper most path specification.
    The location of the mount point is stripped off if relevant.

    Args:
        path_spec (PathSpec): path specification.

    Returns:
        str: corresponding relative path or None if the relative path
            could not be determined.

    Raises:
        PathSpecError: if the path specification is incorrect.
    """
    location = getattr(path_spec, 'location', None)
    if location is None:
        raise errors.PathSpecError('Path specification missing location.')

    # System-level file systems express the mount point as a location
    # prefix; other file systems express it as the parent path spec.
    if path_spec_factory.Factory.IsSystemLevelTypeIndicator(
        self._file_system.type_indicator):
        if not location.startswith(self._mount_point.location):
            raise errors.PathSpecError(
                'Path specification does not contain mount point.')
    else:
        if not hasattr(path_spec, 'parent'):
            raise errors.PathSpecError('Path specification missing parent.')

        if path_spec.parent != self._mount_point:
            raise errors.PathSpecError(
                'Path specification does not contain mount point.')

    path_segments = self._file_system.SplitPath(location)

    if path_spec_factory.Factory.IsSystemLevelTypeIndicator(
        self._file_system.type_indicator):
        # Strip the mount point's leading segments from the path.
        mount_point_path_segments = self._file_system.SplitPath(
            self._mount_point.location)
        path_segments = path_segments[len(mount_point_path_segments):]

    return '{0:s}{1:s}'.format(
        self._file_system.PATH_SEPARATOR,
        self._file_system.PATH_SEPARATOR.join(path_segments))
Returns the relative path based on a resolved path specification.

The relative path is the location of the uppermost path specification.
The location of the mount point is stripped off if relevant.

Args:
    path_spec (PathSpec): path specification.

Returns:
    str: corresponding relative path or None if the relative path could
        not be determined.

Raises:
    PathSpecError: if the path specification is incorrect.
def encode_request(self, fields, files):
    """Encode fields and files for posting to an HTTP server.

    :param fields: The fields to send as a list of (fieldname, value)
                   tuples.
    :param files: The files to send as a list of (fieldname, filename,
                  file_bytes) tuples.
    """
    parts = []
    boundary = self.boundary
    for k, values in fields:
        # A field may carry one value or a list of values; emit one
        # multipart part per value.
        if not isinstance(values, (list, tuple)):
            values = [values]
        for v in values:
            parts.extend((
                b'--' + boundary,
                ('Content-Disposition: form-data; name="%s"' %
                 k).encode('utf-8'),
                b'',
                v.encode('utf-8')))
    for key, filename, value in files:
        # File contents are already bytes; only headers are encoded.
        parts.extend((
            b'--' + boundary,
            ('Content-Disposition: form-data; name="%s"; filename="%s"' %
             (key, filename)).encode('utf-8'),
            b'',
            value))
    # Closing boundary, followed by a trailing CRLF from the join below.
    parts.extend((b'--' + boundary + b'--', b''))
    body = b'\r\n'.join(parts)
    ct = b'multipart/form-data; boundary=' + boundary
    # NOTE(review): header values are bytes here — confirm the Request
    # implementation in use accepts bytes header values.
    headers = {
        'Content-type': ct,
        'Content-length': str(len(body))
    }
    return Request(self.url, body, headers)
Encode fields and files for posting to an HTTP server. :param fields: The fields to send as a list of (fieldname, value) tuples. :param files: The files to send as a list of (fieldname, filename, file_bytes) tuple.
def validate_connector(self, connector):
    """Validate a given connector.

    Raises ValueError if the connector is missing a name or names an
    unknown connector type, then delegates to the RabbitMQ validator.

    Parameters
    ----------
    connector : dict
        Connection information
    """
    if 'connector' not in connector:
        raise ValueError('missing connector name')
    if connector['connector'] != CONNECTOR_RABBITMQ:
        raise ValueError('unknown connector: ' + str(connector['connector']))
    RabbitMQConnector.validate(connector)
Validate a given connector. Raises ValueError if the connector is not valid. Parameters ---------- connector : dict Connection information
def denormalize(x:TensorImage, mean:FloatTensor,std:FloatTensor, do_x:bool=True)->TensorImage:
    "Denormalize `x` with `mean` and `std`."
    if not do_x:
        return x.cpu()
    # Broadcast per-channel stats over the spatial dimensions.
    return x.cpu().float() * std[..., None, None] + mean[..., None, None]
Denormalize `x` with `mean` and `std`.
def newNodeEatName(self, name):
    """Creation of a new node element.

    NOTE(review): auto-generated libxml2 binding; per the C API name,
    `name` is "eaten" (ownership transferred to the library) rather than
    copied — confirm against libxml2's xmlNewNodeEatName docs.
    """
    ret = libxml2mod.xmlNewNodeEatName(self._o, name)
    if ret is None:raise treeError('xmlNewNodeEatName() failed')
    # Wrap the returned C object in the Python-level xmlNode proxy.
    __tmp = xmlNode(_obj=ret)
    return __tmp
Creation of a new node element. @ns is optional (None).
def form_valid(self, form):
    """Call the parent's form_valid, then flash a success message."""
    response = super(HostCreate, self).form_valid(form)
    messages.success(self.request,
                     'Host {} Successfully Created'.format(self.object))
    return response
First call the parent's form valid then let the user know it worked.
def clearAdvancedActions( self ):
    """Clears out the advanced action map and collapses the right margin."""
    self._advancedMap.clear()

    # Zero only the right margin (index 2), keeping the others intact.
    current = list(self.getContentsMargins())
    updated = current[:2] + [0] + current[3:]
    self.setContentsMargins(*updated)
Clears out the advanced action map.
def remove_read_more_sep(self, raw_content):
    """Removes the first read_more_sep that occurs in raw_content.

    Subclasses should call this method to preprocess raw_content. The
    content is returned unchanged when no separator is configured, when
    it does not occur, or when nothing precedes it.
    """
    if self._read_more_exp is None:
        return raw_content

    pieces = self._read_more_exp.split(raw_content, maxsplit=1)
    if len(pieces) != 2 or not pieces[0]:
        return raw_content

    return '\n\n'.join((pieces[0].rstrip(), pieces[1].lstrip()))
Removes the first read_more_sep that occurs in raw_content. Subclasses should call this method to preprocess raw_content.
def getVerifiers(self):
    """Returns the lab contacts that have verified at least one analysis
    from this Analysis Request.
    """
    verifier_contacts = []
    for verifier_id in self.getVerifiersIDs():
        user = api.get_user(verifier_id)
        contact = api.get_user_contact(user, ["LabContact"])
        if contact:
            verifier_contacts.append(contact)
    return verifier_contacts
Returns the list of lab contacts that have verified at least one analysis from this Analysis Request
def add_user(username,
             deployment_name,
             token_manager=None,
             app_url=defaults.APP_URL):
    """Add a user account to a deployment.

    Resolves the deployment and account ids from their names, then PUTs
    the association to the deployment service.

    Raises:
        JutException: if the service responds with anything but 204.
    """
    deployment_id = get_deployment_id(deployment_name,
                                      token_manager=token_manager,
                                      app_url=app_url)
    account_id = accounts.get_account_id(username,
                                         token_manager=token_manager,
                                         app_url=app_url)
    headers = token_manager.get_access_token_headers()
    deployment_url = environment.get_deployment_url(app_url=app_url)
    response = requests.put('%s/api/v1/deployments/%s/accounts/%s' %
                            (deployment_url, deployment_id, account_id),
                            headers=headers)

    if response.status_code == 204:
        # 204 No Content: success; the body is empty.
        return response.text
    else:
        raise JutException('Error %s: %s' % (response.status_code, response.text))
add user to deployment
def write_result(self, result):
    """Send back the result of this call.

    Only one of this and `write_exc_info` may be called.

    :param result: Return value of the call
    """
    assert not self.finished, "Already sent a response"

    spec_list = self.result.thrift_spec
    if not spec_list:
        # Void method with no exceptions declared: nothing to record.
        self.finished = True
        return

    success_spec = spec_list[0]
    if result is not None:
        assert success_spec, "Tried to return a result for a void method."
        setattr(self.result, success_spec[2], result)

    self.finished = True
Send back the result of this call. Only one of this and `write_exc_info` may be called. :param result: Return value of the call
def user_photo_url(user, size):
    """Return the photo url to use for this user."""
    endpoint, params = user_url_args(user, size)
    return url_for(endpoint, **params)
Return url to use for this user.
def serve_assets(path):
    """Serve Nikola assets.

    This is meant to be used ONLY by the internal dev server. Configure
    your web server to handle requests to this URL::

        /assets/ => output/assets
    """
    assets_root = os.path.join(app.config['NIKOLA_ROOT'],
                               _site.config["OUTPUT_FOLDER"],
                               'assets')
    return send_from_directory(assets_root, path)
Serve Nikola assets. This is meant to be used ONLY by the internal dev server. Please configure your web server to handle requests to this URL:: /assets/ => output/assets