text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def print_warning_results(results, level=0):
    """Emit every validation warning found in *results*.

    Each warning is logged through ``print_level`` at the given
    indentation ``level``, prefixed with a yellow ``[!]`` marker.
    """
    prefix = _YELLOW + "[!] "
    for item in results.warnings:
        print_level(logger.warning, prefix + "Warning: %s", level, item)
[ "def", "print_warning_results", "(", "results", ",", "level", "=", "0", ")", ":", "marker", "=", "_YELLOW", "+", "\"[!] \"", "for", "warning", "in", "results", ".", "warnings", ":", "print_level", "(", "logger", ".", "warning", ",", "marker", "+", "\"Warni...
35
13.428571
def resolve_domains(domains, disable_zone=False):
    """Resolve each domain to its A records and return the IPs.

    :param domains: iterable of domain names to resolve.
    :param disable_zone: when True, skip the zone-transfer attempt
        for each resolved address.
    :return: list of IP address strings.
    """
    resolver = dns.resolver.Resolver()
    addresses = []
    for name in domains:
        print_notification("Resolving {}".format(name))
        try:
            answer = resolver.query(name, 'A')
            # Only the first answer section is inspected, matching the
            # original behaviour.
            for record in answer.response.answer[0]:
                addresses.append(str(record))
                if not disable_zone:
                    addresses.extend(zone_transfer(str(record), name))
        except dns.resolver.NXDOMAIN as e:
            print_error(e)
    return addresses
[ "def", "resolve_domains", "(", "domains", ",", "disable_zone", "=", "False", ")", ":", "dnsresolver", "=", "dns", ".", "resolver", ".", "Resolver", "(", ")", "ips", "=", "[", "]", "for", "domain", "in", "domains", ":", "print_notification", "(", "\"Resolvi...
30.473684
15
def validate(self, model, checks=None):
    """Use a defined schema to validate the medium table format.

    :param model: the metabolic model whose reaction identifiers the
        medium entries are checked against.
    :param checks: optional list of extra checks to run; defaults to an
        empty list. (Changed from a mutable default argument ``[]`` —
        behaviour is identical, but the shared-default anti-pattern is
        removed.)
    """
    if checks is None:
        checks = []
    # Restrict the reaction-id check to identifiers that exist in the model.
    custom = [
        check_partial(reaction_id_check,
                      frozenset(r.id for r in model.reactions))
    ]
    super(Medium, self).validate(model=model, checks=checks + custom)
[ "def", "validate", "(", "self", ",", "model", ",", "checks", "=", "[", "]", ")", ":", "custom", "=", "[", "check_partial", "(", "reaction_id_check", ",", "frozenset", "(", "r", ".", "id", "for", "r", "in", "model", ".", "reactions", ")", ")", "]", ...
45.571429
17.142857
def _set_address(self, v, load=False):
    """
    Setter method for address, mapped from YANG variable
    /interface/management/ip/address (container).

    If this variable is read-only (config: false) in the source YANG
    file, then _set_address is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_address() directly.

    YANG Description: The IPv4 address configuration for this
    management interface.
    """
    # Auto-generated pyangbind setter: coerce, validate, then store.
    if hasattr(v, "_utype"):
        # Unwrap a previously-wrapped value back to its underlying type.
        v = v._utype(v)
    try:
        # Re-wrap the value in the YANG container class; raises on type
        # mismatch.
        t = YANGDynClass(v,base=address.address, is_container='container', presence=False, yang_name="address", rest_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The IPv4 address configuration for this \nmanagement interface.'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Surface a structured error describing the expected generated type.
        raise ValueError({
            'error-string': """address must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=address.address, is_container='container', presence=False, yang_name="address", rest_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The IPv4 address configuration for this \nmanagement interface.'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
        })
    # Store under the name-mangled attribute used by the generated class.
    self.__address = t
    # Notify the parent object of the change, if it supports it.
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_address", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base",...
69.36
34
def construct_request_uri(local_dir, base_path, **kwargs):
    """
    Constructs a special redirect_uri to be used when communicating with
    one OP. Each OP should get their own redirect_uris.

    :param local_dir: Local directory in which to place the file
    :param base_path: Base URL to start with
    :param kwargs: ignored; accepted for call-site compatibility
    :return: 2-tuple with (filename, url)
    """
    _filedir = local_dir
    if not os.path.isdir(_filedir):
        os.makedirs(_filedir)
    _webpath = base_path
    _name = rndstr(10) + ".jwt"
    filename = os.path.join(_filedir, _name)
    # Retry until an unused name is found. BUG FIX: the original retry
    # dropped the ".jwt" suffix (`_name = rndstr(10)`), producing an
    # inconsistent extension whenever a collision occurred.
    while os.path.exists(filename):
        _name = rndstr(10) + ".jwt"
        filename = os.path.join(_filedir, _name)
    _webname = "%s%s" % (_webpath, _name)
    return filename, _webname
[ "def", "construct_request_uri", "(", "local_dir", ",", "base_path", ",", "*", "*", "kwargs", ")", ":", "_filedir", "=", "local_dir", "if", "not", "os", ".", "path", ".", "isdir", "(", "_filedir", ")", ":", "os", ".", "makedirs", "(", "_filedir", ")", "...
35.142857
12
def rmarkdown_draft(filename, template, package):
    """Create a draft RMarkdown file from an installed template.

    Returns *filename* unchanged if it already exists; otherwise drives
    Rscript to instantiate the template, then post-processes the file.
    """
    if file_exists(filename):
        # Draft already created on a previous run; nothing to do.
        return filename
    # R expression that creates the draft from the installed package template.
    draft_template = Template(
        'rmarkdown::draft("$filename", template="$template", package="$package", edit=FALSE)'
    )
    draft_string = draft_template.substitute(
        filename=filename, template=template, package=package)
    report_dir = os.path.dirname(filename)
    rcmd = Rscript_cmd()
    # rmarkdown::draft writes relative to the working directory, so run
    # from the report directory.
    with chdir(report_dir):
        do.run([rcmd, "--no-environ", "-e", draft_string],
               "Creating bcbioRNASeq quality control template.")
        # Strip the date-prefixed path the template inserts.
        # NOTE(review): requires GNU sed (`-i` without suffix) — confirm
        # target platforms.
        do.run(["sed", "-i", "s/YYYY-MM-DD\///g", filename],
               "Editing bcbioRNAseq quality control template.")
    return filename
[ "def", "rmarkdown_draft", "(", "filename", ",", "template", ",", "package", ")", ":", "if", "file_exists", "(", "filename", ")", ":", "return", "filename", "draft_template", "=", "Template", "(", "'rmarkdown::draft(\"$filename\", template=\"$template\", package=\"$package...
43.470588
21.823529
def add_catalogue(self, catalogue, overlay=False):
    '''
    :param catalogue:
        Earthquake catalogue as instance of
        :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
    :param bool overlay:
        When False the figure is shown immediately via ``plt.show()``.
    :returns:
        Figure with the spatial distribution of the events.

    NOTE(review): the docstring also described a ``config`` dict
    (min_lat/max_lat/min_lon/max_lon) that is not a parameter of this
    method — presumably consumed elsewhere; confirm against callers.
    '''
    # Magnitudes bins and minimum marker size
    # min_mag = np.min(catalogue.data['magnitude'])
    # max_mag = np.max(catalogue.data['magnitude'])
    # Indices of symbology bins whose lower bound is below the smallest
    # magnitude, and whose upper bound is above the largest magnitude.
    con_min = np.where(np.array([symb[0] for symb in DEFAULT_SYMBOLOGY]) <
                       np.min(catalogue.data['magnitude']))[0]
    con_max = np.where(np.array([symb[1] for symb in DEFAULT_SYMBOLOGY]) >
                       np.max(catalogue.data['magnitude']))[0]
    if len(con_min) == 1:
        min_loc = con_min[0]
    else:
        min_loc = con_min[-1]
    if len(con_max) == 1:
        max_loc = con_max[0]
    else:
        # NOTE(review): `con_max[1]` (second element) is asymmetric with the
        # `con_min[-1]` branch above — looks like it may have been intended
        # as con_max[0] or con_max[-1]; confirm before changing.
        max_loc = con_max[1]
    # min_loc = np.where(np.array([symb[0] for symb in DEFAULT_SYMBOLOGY])
    #                    < np.min(catalogue.data['magnitude']))[0][-1]
    # max_loc = np.where(np.array([symb[1] for symb in DEFAULT_SYMBOLOGY])
    #                    > np.max(catalogue.data['magnitude']))[0][1]
    symbology = DEFAULT_SYMBOLOGY[min_loc:max_loc]
    for sym in symbology:
        # Create legend string for this magnitude bin; open-ended bins use
        # one-sided labels.
        if np.isinf(sym[0]):
            leg_str = 'M < %5.2f' % sym[1]
        elif np.isinf(sym[1]):
            leg_str = 'M >= %5.2f' % sym[0]
        else:
            leg_str = '%5.2f <= M < %5.2f' % (sym[0], sym[1])
        # Events falling inside this magnitude bin.
        idx = np.logical_and(catalogue.data['magnitude'] >= sym[0],
                             catalogue.data['magnitude'] < sym[1])
        # Marker size scales with the bin's representative magnitude.
        mag_size = 1.2 * np.min([sym[0] + 0.5, sym[1] - 0.5])
        # Project lon/lat to map coordinates via the basemap instance.
        x, y = self.m(catalogue.data['longitude'][idx],
                      catalogue.data['latitude'][idx])
        self.m.plot(x, y, sym[2], markersize=mag_size, label=leg_str)
    self.ax.legend(bbox_to_anchor=LEGEND_OFFSET)
    if self.title:
        self.ax.set_title(self.title, fontsize=16)
    if not overlay:
        plt.show()
[ "def", "add_catalogue", "(", "self", ",", "catalogue", ",", "overlay", "=", "False", ")", ":", "# Magnitudes bins and minimum marrker size", "# min_mag = np.min(catalogue.data['magnitude'])", "# max_mag = np.max(catalogue.data['magnitude'])", "con_min", "=", "np", ".", "where",...
46
21.220339
def pull_request(self, number):
    """Get the pull request indicated by ``number``.

    :param int number: (required), number of the pull request.
    :returns: :class:`PullRequest <github3.pulls.PullRequest>` or None
        when the number is not positive or the request yields no data.
    """
    if int(number) <= 0:
        return None
    url = self._build_url('pulls', str(number), base_url=self._api)
    payload = self._json(self._get(url), 200)
    if payload:
        return PullRequest(payload, self)
    return None
[ "def", "pull_request", "(", "self", ",", "number", ")", ":", "json", "=", "None", "if", "int", "(", "number", ")", ">", "0", ":", "url", "=", "self", ".", "_build_url", "(", "'pulls'", ",", "str", "(", "number", ")", ",", "base_url", "=", "self", ...
41.545455
17.818182
def target(self, project_module):
    """Returns the project target corresponding to the 'project-module'."""
    assert isinstance(project_module, basestring)
    # Build and cache the target on first request for this module.
    if project_module not in self.module2target:
        requirements = self.attribute(project_module, "requirements")
        self.module2target[project_module] = b2.build.targets.ProjectTarget(
            project_module, project_module, requirements)
    return self.module2target[project_module]
[ "def", "target", "(", "self", ",", "project_module", ")", ":", "assert", "isinstance", "(", "project_module", ",", "basestring", ")", "if", "project_module", "not", "in", "self", ".", "module2target", ":", "self", ".", "module2target", "[", "project_module", "...
52.333333
18.444444
def add_contributor(self, project_id, name, email, language_code):
    """Adds a contributor to a project language."""
    payload = {
        "url_path": "contributors/add",
        "id": project_id,
        "name": name,
        "email": email,
        "language": language_code,
    }
    self._run(**payload)
    return True
[ "def", "add_contributor", "(", "self", ",", "project_id", ",", "name", ",", "email", ",", "language_code", ")", ":", "self", ".", "_run", "(", "url_path", "=", "\"contributors/add\"", ",", "id", "=", "project_id", ",", "name", "=", "name", ",", "email", ...
27.333333
13.5
def do_diff(self, subcmd, opts, *args):
    """Display the differences between two paths.

    usage:
        1. diff [-r N[:M]] [TARGET[@REV]...]
        2. diff [-r N[:M]] --old=OLD-TGT[@OLDREV] [--new=NEW-TGT[@NEWREV]] \
               [PATH...]
        3. diff OLD-URL[@OLDREV] NEW-URL[@NEWREV]

    1. Display the changes made to TARGETs as they are seen in REV
       between two revisions. TARGETs may be working copy paths or URLs.

       N defaults to BASE if any TARGET is a working copy path,
       otherwise it must be specified. M defaults to the current working
       version if any TARGET is a working copy path, otherwise it
       defaults to HEAD.

    2. Display the differences between OLD-TGT as it was seen in OLDREV
       and NEW-TGT as it was seen in NEWREV. PATHs, if given, are
       relative to OLD-TGT and NEW-TGT and restrict the output to
       differences for those paths. OLD-TGT and NEW-TGT may be working
       copy paths or URL[@REV]. NEW-TGT defaults to OLD-TGT if not
       specified. -r N makes OLDREV default to N, -r N:M makes OLDREV
       default to N and NEWREV default to M.

    3. Shorthand for
       'svn diff --old=OLD-URL[@OLDREV] --new=NEW-URL[@NEWREV]'

    Use just 'svn diff' to display local modifications in a working copy.

    ${cmd_option_list}
    """
    # BUG FIX: the original used Python 2 `print` statements, which are a
    # SyntaxError under Python 3; output is unchanged.
    print("'svn %s' opts: %s" % (subcmd, opts))
    print("'svn %s' args: %s" % (subcmd, args))
[ "def", "do_diff", "(", "self", ",", "subcmd", ",", "opts", ",", "*", "args", ")", ":", "print", "\"'svn %s' opts: %s\"", "%", "(", "subcmd", ",", "opts", ")", "print", "\"'svn %s' args: %s\"", "%", "(", "subcmd", ",", "args", ")" ]
48.419355
27.774194
def assemble(self,roboset=None,color=None,format=None,bgset=None,sizex=300,sizey=300):
    """
    Build our Robot!
    Returns the robot image itself.
    """
    # Allow users to manually specify a robot 'set' that they like.
    # Ensure that this is one of the allowed choices, or allow all.
    # If they don't set one, take the first entry from sets above.
    if roboset == 'any':
        roboset = self.sets[self.hasharray[1] % len(self.sets) ]
    elif roboset in self.sets:
        roboset = roboset
    else:
        roboset = self.sets[0]
    # Only set1 is setup to be color-selectable. The others don't have
    # enough pieces in various colors.
    # This could/should probably be expanded at some point..
    # Right now, this feature is almost never used. ( It was < 44 requests
    # this year, out of 78M reqs )
    if roboset == 'set1':
        if color in self.colors:
            roboset = 'set1/' + color
        else:
            randomcolor = self.colors[self.hasharray[0] % len(self.colors) ]
            roboset = 'set1/' + randomcolor
    # If they specified a background, ensure it's legal, then give it to them.
    # NOTE(review): an unknown non-'any' bgset string passes through
    # unchanged and will fail at the listdir below — confirm callers
    # pre-validate.
    if bgset in self.bgsets:
        bgset = bgset
    elif bgset == 'any':
        bgset = self.bgsets[ self.hasharray[2] % len(self.bgsets) ]
    # If we set a format based on extension earlier, use that. Otherwise, PNG.
    if format is None:
        format = self.format
    # Each directory in our set represents one piece of the Robot, such as
    # the eyes, nose, mouth, etc.
    # Each directory is named with two numbers - The number before the #
    # is the sort order.
    # This ensures that they always go in the same order when choosing
    # pieces, regardless of OS.
    # The second number is the order in which to apply the pieces.
    # For instance, the head has to go down BEFORE the eyes, or the eyes
    # would be hidden.
    # First, we'll get a list of parts of our robot.
    roboparts = self._get_list_of_files(self.resourcedir + 'sets/' + roboset)
    # Now that we've sorted them by the first number, we need to sort each
    # sub-category by the second.
    roboparts.sort(key=lambda x: x.split("#")[1])
    if bgset is not None:
        # Pick a deterministic background image for this hash.
        bglist = []
        backgrounds = natsort.natsorted(os.listdir(self.resourcedir + 'backgrounds/' + bgset))
        backgrounds.sort()
        for ls in backgrounds:
            if not ls.startswith("."):
                bglist.append(self.resourcedir + 'backgrounds/' + bgset + "/" + ls)
        background = bglist[self.hasharray[3] % len(bglist)]
    # Paste in each piece of the Robot.
    roboimg = Image.open(roboparts[0])
    roboimg = roboimg.resize((1024,1024))
    for png in roboparts:
        img = Image.open(png)
        img = img.resize((1024,1024))
        # Third argument is the alpha mask so transparency is respected.
        roboimg.paste(img,(0,0),img)
    # If we're a BMP, flatten the image (BMP has no alpha channel).
    if format == 'bmp':
        #Flatten bmps
        r, g, b, a = roboimg.split()
        roboimg = Image.merge("RGB", (r, g, b))
    if bgset is not None:
        # Composite the robot over the chosen background.
        bg = Image.open(background)
        bg = bg.resize((1024,1024))
        bg.paste(roboimg,(0,0),roboimg)
        roboimg = bg
    # Final downscale to the requested output size.
    # NOTE(review): Image.ANTIALIAS is deprecated in recent Pillow
    # (use Image.LANCZOS) — confirm the pinned Pillow version.
    self.img = roboimg.resize((sizex,sizey),Image.ANTIALIAS)
    self.format = format
[ "def", "assemble", "(", "self", ",", "roboset", "=", "None", ",", "color", "=", "None", ",", "format", "=", "None", ",", "bgset", "=", "None", ",", "sizex", "=", "300", ",", "sizey", "=", "300", ")", ":", "# Allow users to manually specify a robot 'set' th...
41.261905
24.785714
def _sparse_or_dense_matmul_onehot(sparse_or_dense_matrix, col_index):
    """Returns a (dense) column of a Tensor or SparseTensor.

    Args:
      sparse_or_dense_matrix: matrix-shaped, `float` `Tensor` or
        `SparseTensor`.
      col_index: scalar, `int` `Tensor` representing the index of the
        desired column.

    Returns:
      column: vector-shaped, `float` `Tensor` with the same dtype as
        `sparse_or_dense_matrix`, representing the `col_index`th column of
        `sparse_or_dense_matrix`.
    """
    if isinstance(sparse_or_dense_matrix,
                  (tf.SparseTensor, tf.compat.v1.SparseTensorValue)):
        # TODO(b/111924846): Implement better (ideally in a way that allows
        # us to eliminate the `num_rows` arg, if possible).
        num_rows = _get_shape(sparse_or_dense_matrix)[-2]
        batch_shape = _get_shape(sparse_or_dense_matrix)[:-2]
        # Slice a single column: start at [0, col_index] within each batch
        # member, take all rows and exactly one column.
        slice_start = tf.concat([tf.zeros_like(batch_shape),
                                 [0, col_index]], axis=0)
        slice_size = tf.concat([batch_shape, [num_rows, 1]], axis=0)
        # We momentarily lose static shape information in tf.sparse_slice.
        # However we regain it in the following tf.reshape.
        sparse_slice = tf.sparse.slice(sparse_or_dense_matrix,
                                       tf.cast(slice_start, tf.int64),
                                       tf.cast(slice_size, tf.int64))
        output_shape = tf.concat([batch_shape, [num_rows]], axis=0)
        return tf.reshape(tf.sparse.to_dense(sparse_slice), output_shape)
    else:
        # Dense case: gathering on the last axis selects the column directly.
        return tf.gather(sparse_or_dense_matrix, col_index, axis=-1)
[ "def", "_sparse_or_dense_matmul_onehot", "(", "sparse_or_dense_matrix", ",", "col_index", ")", ":", "if", "isinstance", "(", "sparse_or_dense_matrix", ",", "(", "tf", ".", "SparseTensor", ",", "tf", ".", "compat", ".", "v1", ".", "SparseTensorValue", ")", ")", "...
47.375
24.03125
def uifile(self):
    """Return the path to this scaffold's ``properties.ui`` file.

    If the scaffold source is a zip archive containing ``properties.ui``,
    the entry is extracted to a temp file and that path is returned.
    Otherwise the sibling ``properties.ui`` next to the source is returned
    when it exists. Returns ``''`` when no ui file is found.

    :return <str>
    """
    output = ''
    # build from a zip file
    if zipfile.is_zipfile(self.source()):
        # Context manager guarantees the archive is closed on every path
        # (the original leaked the handle when the entry was missing).
        with zipfile.ZipFile(self.source(), 'r') as zfile:
            if 'properties.ui' in zfile.namelist():
                tempdir = tempfile.gettempdir()
                output = os.path.join(
                    tempdir, '{0}_properties.ui'.format(self.name()))
                # BUG FIX: ZipFile.read returns bytes, so the target must be
                # opened in binary mode — text mode ('w') raises TypeError
                # on Python 3.
                with open(output, 'wb') as f:
                    f.write(zfile.read('properties.ui'))
    else:
        uifile = os.path.join(os.path.dirname(self.source()),
                              'properties.ui')
        if os.path.exists(uifile):
            output = uifile
    return output
[ "def", "uifile", "(", "self", ")", ":", "output", "=", "''", "# build from a zip file", "if", "zipfile", ".", "is_zipfile", "(", "self", ".", "source", "(", ")", ")", ":", "zfile", "=", "zipfile", ".", "ZipFile", "(", "self", ".", "source", "(", ")", ...
30.214286
17.285714
def assert_equal(self, v1, v2, **kwargs):
    """Check that *v1* equals *v2*, recording the result as an assertion.

    Args:
        - desc (str): some description
        - safe (bool): will omit AssertionError if set to True
        - screenshot: can be type <None|True|False|PIL.Image>
    """
    equal = v1 == v2
    if equal:
        outcome = "assert equal success, %s == %s" %(v1, v2)
    else:
        outcome = '%s not equal %s' % (v1, v2)
    kwargs.update({
        'message': outcome,
        'success': equal,
    })
    self._add_assert(**kwargs)
[ "def", "assert_equal", "(", "self", ",", "v1", ",", "v2", ",", "*", "*", "kwargs", ")", ":", "#, desc=None, screenshot=False, safe=False):", "is_success", "=", "v1", "==", "v2", "if", "is_success", ":", "message", "=", "\"assert equal success, %s == %s\"", "%", ...
38.823529
16.647059
def forward(self, data_batch, is_train=None):
    """Forward computation.

    Parameters
    ----------
    data_batch : DataBatch
    is_train : bool
        Defaults to ``None``, in which case `is_train` is take as
        ``self.for_training``.
    """
    assert self.binded and self.params_initialized
    batch = data_batch
    # Switch to (or create) the module for this batch's bucket first.
    self.switch_bucket(batch.bucket_key, batch.provide_data,
                       batch.provide_label)
    self._curr_module.forward(batch, is_train=is_train)
[ "def", "forward", "(", "self", ",", "data_batch", ",", "is_train", "=", "None", ")", ":", "assert", "self", ".", "binded", "and", "self", ".", "params_initialized", "self", ".", "switch_bucket", "(", "data_batch", ".", "bucket_key", ",", "data_batch", ".", ...
39.384615
19.384615
def pin_variant(institute_id, case_name, variant_id):
    """Pin and unpin variants to/from the list of suspects."""
    institute_obj, case_obj = institute_and_case(store, institute_id,
                                                case_name)
    variant_obj = store.variant(variant_id)
    user_obj = store.user(current_user.email)
    link = url_for('variants.variant', institute_id=institute_id,
                   case_name=case_name, variant_id=variant_id)
    action = request.form['action']
    if action == 'ADD':
        store.pin_variant(institute_obj, case_obj, user_obj, link,
                          variant_obj)
    elif action == 'DELETE':
        store.unpin_variant(institute_obj, case_obj, user_obj, link,
                            variant_obj)
    # Send the user back where they came from, falling back to the variant.
    return redirect(request.referrer or link)
[ "def", "pin_variant", "(", "institute_id", ",", "case_name", ",", "variant_id", ")", ":", "institute_obj", ",", "case_obj", "=", "institute_and_case", "(", "store", ",", "institute_id", ",", "case_name", ")", "variant_obj", "=", "store", ".", "variant", "(", "...
58.166667
16.5
def add_filter(self, filter_or_string, *args, **kwargs):
    """Append a filter and return *self* so calls can be chained."""
    new_filter = build_filter(filter_or_string, *args, **kwargs)
    self.filters.append(new_filter)
    return self
[ "def", "add_filter", "(", "self", ",", "filter_or_string", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "filters", ".", "append", "(", "build_filter", "(", "filter_or_string", ",", "*", "args", ",", "*", "*", "kwargs", ")", ")", ...
28.285714
18.285714
def hbas(self):
    """
    :class:`~zhmcclient.HbaManager`: Access to the :term:`HBAs <HBA>` in
    this Partition.

    If the "dpm-storage-management" feature is enabled, this property is
    `None`.
    """
    # Lazily create the manager on first access.
    if not self._hbas:
        try:
            storage_mgmt = self.feature_enabled('dpm-storage-management')
        except ValueError:
            # Feature support cannot be determined; treat as disabled.
            storage_mgmt = False
        if not storage_mgmt:
            self._hbas = HbaManager(self)
    return self._hbas
[ "def", "hbas", "(", "self", ")", ":", "# We do here some lazy loading.", "if", "not", "self", ".", "_hbas", ":", "try", ":", "dpm_sm", "=", "self", ".", "feature_enabled", "(", "'dpm-storage-management'", ")", "except", "ValueError", ":", "dpm_sm", "=", "False...
31.470588
17.823529
def add_view(
    self,
    baseview,
    name,
    href="",
    icon="",
    label="",
    category="",
    category_icon="",
    category_label="",
):
    """
    Add your views associated with menus using this method.

    :param baseview:
        A BaseView type class instantiated or not. This method
        will instantiate the class for you if needed.
    :param name:
        The string name that identifies the menu.
    :param href:
        Override the generated href for the menu. You can use an
        url string or an endpoint name; if none provided, default_view
        from view will be set as href.
    :param icon:
        Font-Awesome icon name, optional.
    :param label:
        The label that will be displayed on the menu;
        if absent param name will be used.
    :param category:
        The menu category where the menu will be included;
        if none provided the view will be accessible as a top menu.
    :param category_icon:
        Font-Awesome icon name for the category, optional.
    :param category_label:
        The label that will be displayed on the menu;
        if absent param name will be used.

    Examples::

        appbuilder = AppBuilder(app, db)
        # Register a view, rendering a top menu without icon.
        appbuilder.add_view(MyModelView(), "My View")
        # or not instantiated
        appbuilder.add_view(MyModelView, "My View")
        # Register a view, a submenu "Other View" from "Other" with a phone icon.
        appbuilder.add_view(
            MyOtherModelView,
            "Other View",
            icon='fa-phone',
            category="Others"
        )
        # Register a view, with category icon and translation.
        appbuilder.add_view(
            YetOtherModelView,
            "Other View",
            icon='fa-phone',
            label=_('Other View'),
            category="Others",
            category_icon='fa-envelop',
            category_label=_('Other View')
        )
        # Add a link
        appbuilder.add_link("google", href="www.google.com", icon = "fa-google-plus")
    """
    # Instantiate the view class if a class (not an instance) was passed.
    baseview = self._check_and_init(baseview)
    log.info(LOGMSG_INF_FAB_ADD_VIEW.format(baseview.__class__.__name__, name))
    if not self._view_exists(baseview):
        baseview.appbuilder = self
        self.baseviews.append(baseview)
        self._process_inner_views()
        # Only wire into Flask when an app is already attached; otherwise
        # registration happens later.
        if self.app:
            self.register_blueprint(baseview)
            self._add_permission(baseview)
    # Always (re-)create the menu entry pointing at the view.
    self.add_link(
        name=name,
        href=href,
        icon=icon,
        label=label,
        category=category,
        category_icon=category_icon,
        category_label=category_label,
        baseview=baseview,
    )
    return baseview
[ "def", "add_view", "(", "self", ",", "baseview", ",", "name", ",", "href", "=", "\"\"", ",", "icon", "=", "\"\"", ",", "label", "=", "\"\"", ",", "category", "=", "\"\"", ",", "category_icon", "=", "\"\"", ",", "category_label", "=", "\"\"", ",", ")"...
34.858824
16.858824
def Hsub(T=298.15, P=101325, MW=None, AvailableMethods=False, Method=None,
         CASRN=''):  # pragma: no cover
    '''This function handles the calculation of a chemical's enthalpy of
    sublimation. Generally this, is used by the chemical class, as all
    parameters are passed.

    This API is considered experimental, and is expected to be removed in a
    future release in favor of a more complete object-oriented interface.

    Returns the mass-basis enthalpy of sublimation, the list of available
    methods when ``AvailableMethods`` is True, or None when no data or no
    molecular weight is available.
    '''
    def list_methods():
        # Enumerate applicable data sources for this CASRN, best first.
        methods = []
        if CASRN in GharagheiziHsub_data.index:
            methods.append('Ghazerati Appendix, at 298K')
        methods.append('None')
        return methods
    if AvailableMethods:
        return list_methods()
    if not Method:
        Method = list_methods()[0]
    # BUG FIX: the original evaluated `not _Hsub` in an elif chain before
    # `_Hsub` was ever assigned, so any unknown Method raised
    # UnboundLocalError instead of the intended Exception, and a missing MW
    # crashed property_molar_to_mass. The restructured flow preserves the
    # intended outcomes.
    if Method == 'Ghazerati Appendix, at 298K':
        _Hsub = float(GharagheiziHsub_data.at[CASRN, 'Hsub'])
    elif Method == 'None':
        return None
    else:
        raise Exception('Failure in in function')
    if not _Hsub or not MW:
        # No data point, or no molecular weight to convert to mass basis.
        return None
    return property_molar_to_mass(_Hsub, MW)
[ "def", "Hsub", "(", "T", "=", "298.15", ",", "P", "=", "101325", ",", "MW", "=", "None", ",", "AvailableMethods", "=", "False", ",", "Method", "=", "None", ",", "CASRN", "=", "''", ")", ":", "# pragma: no cover", "def", "list_methods", "(", ")", ":",...
39.5
21.055556
def prev_position(self, pos):
    """returns the previous position in depth-first order"""
    if pos is None:
        return None
    prev_sib = self.prev_sibling_position(pos)  # is None if first
    if prev_sib is not None:
        # Predecessor is the deepest descendant of the previous sibling.
        return self.last_decendant(prev_sib)
    # Otherwise it is the parent (or None at the root).
    return self.parent_position(pos)
[ "def", "prev_position", "(", "self", ",", "pos", ")", ":", "candidate", "=", "None", "if", "pos", "is", "not", "None", ":", "prevsib", "=", "self", ".", "prev_sibling_position", "(", "pos", ")", "# is None if first", "if", "prevsib", "is", "not", "None", ...
39.583333
12.25
def discover() -> List[Tuple[str, str]]:
    """ Scan for connected modules and instantiate handler classes """
    if IS_ROBOT and os.path.isdir('/dev/modules'):
        devices = os.listdir('/dev/modules')
    else:
        devices = []
    discovered_modules = []
    # One case-insensitive pattern matching any known module type name.
    pattern = re.compile('|'.join(MODULE_TYPES.keys()), re.I)
    for port in devices:
        hit = pattern.search(port)
        if not hit:
            continue
        name = hit.group().lower()
        if name not in MODULE_TYPES:
            log.warning("Unexpected module connected: {} on {}"
                        .format(name, port))
            continue
        discovered_modules.append(('/dev/modules/{}'.format(port), name))
    log.info('Discovered modules: {}'.format(discovered_modules))
    return discovered_modules
[ "def", "discover", "(", ")", "->", "List", "[", "Tuple", "[", "str", ",", "str", "]", "]", ":", "if", "IS_ROBOT", "and", "os", ".", "path", ".", "isdir", "(", "'/dev/modules'", ")", ":", "devices", "=", "os", ".", "listdir", "(", "'/dev/modules'", ...
35.5
16.625
def data_mod(self, *args, **kwargs):
    """Register a data-modification function on every member Instrument.

    The call is forwarded unchanged to each instrument's
    ``pysat.Custom`` queue via ``instrument.custom.add``; see that
    method for the accepted ``function``, ``kind``, and ``at_pos``
    arguments and the allowed return forms of 'add' functions.
    """
    for member in self.instruments:
        member.custom.add(*args, **kwargs)
[ "def", "data_mod", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "instrument", "in", "self", ".", "instruments", ":", "instrument", ".", "custom", ".", "add", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
35.265306
23.877551
def get_reference_section_beginning(fulltext):
    """Get start of reference section."""
    # Detectors ordered strongest-first; the matching detector's rank is
    # recorded in 'how_found_start'.
    detectors = (
        (1, find_reference_section),
        (2, find_reference_section_no_title_via_brackets),
        (3, find_reference_section_no_title_via_dots),
        (4, find_reference_section_no_title_via_numbers),
    )
    sect_start = None
    for rank, detector in detectors:
        sect_start = detector(fulltext)
        if sect_start is not None:
            sect_start['how_found_start'] = rank
            break
    if sect_start:
        current_app.logger.debug('* title %r' % sect_start['title_string'])
        current_app.logger.debug('* marker %r' % sect_start['marker'])
        current_app.logger.debug('* title_marker_same_line %s' %
                                 sect_start['title_marker_same_line'])
    else:
        current_app.logger.debug('* could not find references section')
    return sect_start
[ "def", "get_reference_section_beginning", "(", "fulltext", ")", ":", "sect_start", "=", "{", "'start_line'", ":", "None", ",", "'end_line'", ":", "None", ",", "'title_string'", ":", "None", ",", "'marker_pattern'", ":", "None", ",", "'marker'", ":", "None", ",...
40.116279
17.953488
def add_listener(self, listener):
    """Add the given listener to the wrapped client.

    The listener is wrapped so that it is invoked in the reactor
    thread, which makes it safe for it to use Twisted APIs.
    """
    wrapped = partial(self._call_in_reactor_thread, listener)
    # Remembered so the same listener can be removed later.
    self._internal_listeners[listener] = wrapped
    return self._client.add_listener(wrapped)
[ "def", "add_listener", "(", "self", ",", "listener", ")", ":", "internal_listener", "=", "partial", "(", "self", ".", "_call_in_reactor_thread", ",", "listener", ")", "self", ".", "_internal_listeners", "[", "listener", "]", "=", "internal_listener", "return", "...
47.888889
19.777778
def create_resource_object(self, type, id):
    """Create a resource object of type for the integer id.

    type should be one of the following strings: resource, drawable,
    window, pixmap, fontable, font, gc, colormap, cursor.

    This function can be used when a resource ID has been fetched e.g.
    from an resource or a command line argument. Resource objects should
    never be created by instantiating the appropriate class directly,
    since any X extensions dynamically added by the library will not be
    available.
    """
    resource_class = self.display.resource_classes[type]
    return resource_class(self.display, id)
[ "def", "create_resource_object", "(", "self", ",", "type", ",", "id", ")", ":", "return", "self", ".", "display", ".", "resource_classes", "[", "type", "]", "(", "self", ".", "display", ",", "id", ")" ]
32.428571
22.380952
def create_roc_plots(pwmfile, fgfa, background, outdir):
    """Make ROC plots for all motifs.

    Computes ROC values for every (motif, background) pair in parallel
    and writes one PNG per pair under ``outdir/images``.
    """
    motifs = read_motifs(pwmfile, fmt="pwm", as_dict=True)
    ncpus = int(MotifConfig().get_default_params()['ncpus'])
    pool = Pool(processes=ncpus)
    # One async job per (motif, background); keyed by "<motif>_<bg>" so the
    # collection loop below can find each result.
    jobs = {}
    for bg,fname in background.items():
        for m_id, m in motifs.items():
            k = "{}_{}".format(str(m), bg)
            jobs[k] = pool.apply_async(
                get_roc_values,
                (motifs[m_id], fgfa, fname,)
            )
    imgdir = os.path.join(outdir, "images")
    if not os.path.exists(imgdir):
        os.mkdir(imgdir)
    roc_img_file = os.path.join(outdir, "images", "{}_roc.{}.png")
    # Collect results in the same key order and render one plot per job.
    for motif in motifs.values():
        for bg in background:
            k = "{}_{}".format(str(motif), bg)
            error, x, y = jobs[k].get()
            if error:
                # A worker failure is fatal for the whole run.
                logger.error("Error in thread: %s", error)
                logger.error("Motif: %s", motif)
                sys.exit(1)
            roc_plot(roc_img_file.format(motif.id, bg), x, y)
[ "def", "create_roc_plots", "(", "pwmfile", ",", "fgfa", ",", "background", ",", "outdir", ")", ":", "motifs", "=", "read_motifs", "(", "pwmfile", ",", "fmt", "=", "\"pwm\"", ",", "as_dict", "=", "True", ")", "ncpus", "=", "int", "(", "MotifConfig", "(", ...
38.827586
14.551724
def create(self, mention, max_message_length): """ Create a message :param mention: JSON object containing mention details from Twitter (or an empty dict {}) :param max_message_length: Maximum allowable length for created message :return: A random message created using a Markov chain generator """ message = [] def message_len(): return sum([len(w) + 1 for w in message]) while message_len() < max_message_length: message.append(self.a_random_word(message[-1] if message else None)) return ' '.join(message[:-1])
[ "def", "create", "(", "self", ",", "mention", ",", "max_message_length", ")", ":", "message", "=", "[", "]", "def", "message_len", "(", ")", ":", "return", "sum", "(", "[", "len", "(", "w", ")", "+", "1", "for", "w", "in", "message", "]", ")", "w...
37.8125
23.0625
def v_type_extension(ctx, stmt): """verify that the extension matches the extension definition""" (modulename, identifier) = stmt.keyword revision = stmt.i_extension_revision module = modulename_to_module(stmt.i_module, modulename, revision) if module is None: return if identifier not in module.i_extensions: if module.i_modulename == stmt.i_orig_module.i_modulename: # extension defined in current submodule if identifier not in stmt.i_orig_module.i_extensions: err_add(ctx.errors, stmt.pos, 'EXTENSION_NOT_DEFINED', (identifier, module.arg)) return else: stmt.i_extension = stmt.i_orig_module.i_extensions[identifier] else: err_add(ctx.errors, stmt.pos, 'EXTENSION_NOT_DEFINED', (identifier, module.arg)) return else: stmt.i_extension = module.i_extensions[identifier] ext_arg = stmt.i_extension.search_one('argument') if stmt.arg is not None and ext_arg is None: err_add(ctx.errors, stmt.pos, 'EXTENSION_ARGUMENT_PRESENT', identifier) elif stmt.arg is None and ext_arg is not None: err_add(ctx.errors, stmt.pos, 'EXTENSION_NO_ARGUMENT_PRESENT', identifier)
[ "def", "v_type_extension", "(", "ctx", ",", "stmt", ")", ":", "(", "modulename", ",", "identifier", ")", "=", "stmt", ".", "keyword", "revision", "=", "stmt", ".", "i_extension_revision", "module", "=", "modulename_to_module", "(", "stmt", ".", "i_module", "...
44.965517
17.724138
def provide_session(self, start_new=False): """ Makes sure that session is still valid and provides session info :param start_new: If `True` it will always create a new session. Otherwise it will create a new session only if no session exists or the previous session timed out. :type start_new: bool :return: Current session info :rtype: dict """ if self.is_global: self._session_info = self._global_session_info self._session_start = self._global_session_start if self._session_info is None or start_new or \ datetime.datetime.now() > self._session_start + self.SESSION_DURATION: self._start_new_session() return self._session_info
[ "def", "provide_session", "(", "self", ",", "start_new", "=", "False", ")", ":", "if", "self", ".", "is_global", ":", "self", ".", "_session_info", "=", "self", ".", "_global_session_info", "self", ".", "_session_start", "=", "self", ".", "_global_session_star...
41.888889
21.277778
def main(): """ This is a Toil pipeline for the UNC best practice RNA-Seq analysis. RNA-seq fastqs are combined, aligned, sorted, filtered, and quantified. Please read the README.md located in the same directory. """ # Define Parser object and add to toil parser = build_parser() Job.Runner.addToilOptions(parser) args = parser.parse_args() # Store inputs from argparse inputs = {'config': args.config, 'config_fastq': args.config_fastq, 'input': args.input, 'unc.bed': args.unc, 'hg19.transcripts.fa': args.fasta, 'composite_exons.bed': args.composite_exons, 'normalize.pl': args.normalize, 'output_dir': args.output_dir, 'rsem_ref.zip': args.rsem_ref, 'chromosomes.zip': args.chromosomes, 'ebwt.zip': args.ebwt, 'ssec': args.ssec, 's3_dir': args.s3_dir, 'sudo': args.sudo, 'single_end_reads': args.single_end_reads, 'upload_bam_to_s3': args.upload_bam_to_s3, 'uuid': None, 'sample.tar': None, 'cpu_count': None} # Launch jobs Job.Runner.startToil(Job.wrapJobFn(download_shared_files, inputs), args)
[ "def", "main", "(", ")", ":", "# Define Parser object and add to toil", "parser", "=", "build_parser", "(", ")", "Job", ".", "Runner", ".", "addToilOptions", "(", "parser", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "# Store inputs from argparse", ...
37.441176
13.147059
def _append_distances(self, v, distance, candidates): """ Apply distance implementation if specified """ if distance: # Normalize vector (stored vectors are normalized) nv = unitvec(v) candidates = [(x[0], x[1], self.distance.distance(x[0], nv)) for x in candidates] return candidates
[ "def", "_append_distances", "(", "self", ",", "v", ",", "distance", ",", "candidates", ")", ":", "if", "distance", ":", "# Normalize vector (stored vectors are normalized)", "nv", "=", "unitvec", "(", "v", ")", "candidates", "=", "[", "(", "x", "[", "0", "]"...
40.555556
18.111111
def to_dict(self): """Return a dictionary representation of the error. :return: A dict with the keys: - attr: Attribute which contains the error, or "<root>" if it refers to the schema root. - errors: A list of dictionary representations of the errors. """ def exception_to_dict(e): try: return e.to_dict() except AttributeError: return { "type": e.__class__.__name__, "error": str(e), } result = { "errors": [exception_to_dict(e) for e in self.errors] } if self.index is not None: result["index"] = self.index else: result["attr"] = self.attr if self.attr is not None else "<root>" return result
[ "def", "to_dict", "(", "self", ")", ":", "def", "exception_to_dict", "(", "e", ")", ":", "try", ":", "return", "e", ".", "to_dict", "(", ")", "except", "AttributeError", ":", "return", "{", "\"type\"", ":", "e", ".", "__class__", ".", "__name__", ",", ...
34.125
19.125
def _ReadLabels(self, artifact_definition_values, artifact_definition, name): """Reads the optional artifact definition labels. Args: artifact_definition_values (dict[str, object]): artifact definition values. artifact_definition (ArtifactDefinition): an artifact definition. name (str): name of the artifact definition. Raises: FormatError: if there are undefined labels. """ labels = artifact_definition_values.get('labels', []) undefined_labels = set(labels).difference(self.labels) if undefined_labels: raise errors.FormatError( 'Artifact definition: {0:s} found undefined labels: {1:s}.'.format( name, ', '.join(undefined_labels))) artifact_definition.labels = labels
[ "def", "_ReadLabels", "(", "self", ",", "artifact_definition_values", ",", "artifact_definition", ",", "name", ")", ":", "labels", "=", "artifact_definition_values", ".", "get", "(", "'labels'", ",", "[", "]", ")", "undefined_labels", "=", "set", "(", "labels", ...
35.809524
22.380952
def sms(self, message, to=None, from_=None, action=None, method=None, status_callback=None, **kwargs): """ Create a <Sms> element :param message: Message body :param to: Number to send message to :param from: Number to send message from :param action: Action URL :param method: Action URL method :param status_callback: Status callback URL :param kwargs: additional attributes :returns: <Sms> element """ return self.nest(Sms( message, to=to, from_=from_, action=action, method=method, status_callback=status_callback, **kwargs ))
[ "def", "sms", "(", "self", ",", "message", ",", "to", "=", "None", ",", "from_", "=", "None", ",", "action", "=", "None", ",", "method", "=", "None", ",", "status_callback", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "...
29.541667
13.375
def coordinate_reproject(x, y, s_crs, t_crs): """ reproject a coordinate from one CRS to another Parameters ---------- x: int or float the X coordinate component y: int or float the Y coordinate component s_crs: int, str or :osgeo:class:`osr.SpatialReference` the source CRS. See :func:`~spatialist.auxil.crsConvert` for options. t_crs: int, str or :osgeo:class:`osr.SpatialReference` the target CRS. See :func:`~spatialist.auxil.crsConvert` for options. Returns ------- """ source = crsConvert(s_crs, 'osr') target = crsConvert(t_crs, 'osr') transform = osr.CoordinateTransformation(source, target) point = transform.TransformPoint(x, y)[:2] return point
[ "def", "coordinate_reproject", "(", "x", ",", "y", ",", "s_crs", ",", "t_crs", ")", ":", "source", "=", "crsConvert", "(", "s_crs", ",", "'osr'", ")", "target", "=", "crsConvert", "(", "t_crs", ",", "'osr'", ")", "transform", "=", "osr", ".", "Coordina...
30.625
19.208333
def check_lazy_load_terreinobject(f): ''' Decorator function to lazy load a :class:`Terreinobject`. ''' def wrapper(*args): terreinobject = args[0] if ( terreinobject._centroid is None or terreinobject._bounding_box is None or terreinobject._metadata is None ): log.debug('Lazy loading Terreinobject %s', terreinobject.id) terreinobject.check_gateway() t = terreinobject.gateway.get_terreinobject_by_id(terreinobject.id) terreinobject._centroid = t._centroid terreinobject._bounding_box = t._bounding_box terreinobject._metadata = t._metadata return f(*args) return wrapper
[ "def", "check_lazy_load_terreinobject", "(", "f", ")", ":", "def", "wrapper", "(", "*", "args", ")", ":", "terreinobject", "=", "args", "[", "0", "]", "if", "(", "terreinobject", ".", "_centroid", "is", "None", "or", "terreinobject", ".", "_bounding_box", ...
37.631579
17.842105
def feature_burstness(corpus, featureset_name, feature, k=5, normalize=True, s=1.1, gamma=1., **slice_kwargs): """ Estimate burstness profile for a feature over the ``'date'`` axis. Parameters ---------- corpus : :class:`.Corpus` feature : str Name of featureset in ``corpus``. E.g. ``'citations'``. findex : int Index of ``feature`` in ``corpus``. k : int (default: 5) Number of burst states. normalize : bool (default: True) If True, burstness is expressed relative to the hightest possible state (``k-1``). Otherwise, states themselves are returned. kwargs : kwargs Parameters for burstness automaton HMM. """ if featureset_name not in corpus.features: corpus.index_feature(featureset_name) if 'date' not in corpus.indices: corpus.index('date') # Get time-intervals between occurrences. dates = [min(corpus.indices['date'].keys()) - 1] # Pad start. X_ = [1.] years, values = corpus.feature_distribution(featureset_name, feature) for year, N in izip(years, values): if N == 0: continue if N > 1: if year == dates[-1] + 1: for n in xrange(int(N)): X_.append(1./N) dates.append(year) else: X_.append(float(year - dates[-1])) dates.append(year) for n in xrange(int(N) - 1): X_.append(1./(N - 1)) dates.append(year) else: X_.append(float(year - dates[-1])) dates.append(year) # Get optimum state sequence. st = _forward(map(lambda x: x*100, X_), s=s, gamma=gamma, k=k) # Bin by date. A = defaultdict(list) for i in xrange(len(X_)): A[dates[i]].append(st[i]) # Normalize. if normalize: A = {key: mean(values)/k for key, values in A.items()} else: A = {key: mean(values) for key, values in A.items()} D = sorted(A.keys()) return D[1:], [A[d] for d in D[1:]]
[ "def", "feature_burstness", "(", "corpus", ",", "featureset_name", ",", "feature", ",", "k", "=", "5", ",", "normalize", "=", "True", ",", "s", "=", "1.1", ",", "gamma", "=", "1.", ",", "*", "*", "slice_kwargs", ")", ":", "if", "featureset_name", "not"...
30.147059
19.470588
def dispatch(self, *args, **kwargs): ''' Entry point for this class, here we decide basic stuff ''' # Get if this class is working as only a base render and List funcionality shouldn't be enabled onlybase = getattr(self, "onlybase", False) # REST not available when onlybase is enabled if not onlybase: # Check if this is a REST query to pusth the answer to responde in JSON if bool(self.request.META.get('HTTP_X_REST', False)): self.json = True if self.request.GET.get('json', self.request.POST.get('json', None)) is None: newget = {} newget['json'] = "{}" for key in self.request.GET: newget[key] = self.request.GET[key] self.request.GET = QueryDict('').copy() self.request.GET.update(newget) # return HttpResponseBadRequest(_("The service requires you to set a GET argument named json={} which will contains all the filters you can apply to a list")) # Check if this is a REST query to add an element if self.request.method == 'POST': target = get_class(resolve("{}/add".format(self.request.META.get("REQUEST_URI"))).func) target.json = True return target.as_view()(self.request) # Set class internal variables self._setup(self.request) # Deprecations deprecated = [('retrictions', '2016061000')] for (depre, version) in deprecated: if hasattr(self, depre): raise IOError("The attribute '{}' has been deprecated in version '{}' and it is not available anymore".format(version)) # Build extracontext if not hasattr(self, 'extra_context'): self.extra_context = {} if not hasattr(self, 'client_context'): self.client_context = {} # Attach user to the extra_context self.extra_context['user'] = self.user # Attach WS entry point and STATIC entry point self.extra_context['ws_entry_point'] = self.BASE_URL + getattr(self, "ws_entry_point", "{0}/{1}".format(self._appname, "{0}s".format(self._modelname.lower()))) static_partial_row_path = getattr(self, 'static_partial_row', "{0}/{1}_rows.html".format(self._appname, "{0}s".format(self._modelname.lower()))) self.extra_context['static_partial_row'] = 
get_static(static_partial_row_path, self.user, self.language, self.DEFAULT_STATIC_PARTIAL_ROWS, 'html', relative=True) static_partial_header_path = getattr(self, 'static_partial_header', "{0}/{1}_header.html".format(self._appname, "{0}s".format(self._modelname.lower()))) self.extra_context['static_partial_header'] = get_static(static_partial_header_path, self.user, self.language, None, 'html', relative=True) static_partial_summary_path = getattr(self, 'static_partial_summary', "{0}/{1}_summary.html".format(self._appname, "{0}s".format(self._modelname.lower()))) self.extra_context['static_partial_summary'] = get_static(static_partial_summary_path, self.user, self.language, self.DEFAULT_STATIC_PARTIAL_SUMMARY, 'html', relative=True) static_app_row_path = getattr(self, 'static_app_row', "{0}/{1}_app.js".format(self._appname, "{0}s".format(self._modelname.lower()))) self.extra_context['static_app_row'] = get_static(static_app_row_path, self.user, self.language, os.path.join(settings.STATIC_URL, 'codenerix/js/app.js'), 'js', relative=True) static_controllers_row_path = getattr(self, 'static_controllers_row', "{0}/{1}_controllers.js".format(self._appname, "{0}s".format(self._modelname.lower()))) self.extra_context['static_controllers_row'] = get_static(static_controllers_row_path, self.user, self.language, None, 'js', relative=True) static_filters_row_path = getattr(self, 'static_filters_row', "{0}/{1}_filters.js".format(self._appname, "{0}s".format(self._modelname.lower()))) self.extra_context['static_filters_row'] = get_static(static_filters_row_path, self.user, self.language, os.path.join(settings.STATIC_URL, 'codenerix/js/rows.js'), 'js', relative=True) self.extra_context['field_delete'] = getattr(self, 'field_delete', False) self.extra_context['field_check'] = getattr(self, 'field_check', None) # Default value for extends_base if hasattr(self, 'extends_base'): self.extra_context['extends_base'] = self.extends_base elif hasattr(self, 'extends_base'): 
self.extra_context['extends_base'] = self.extends_base # Get if this is a template only answer self.__authtoken = (bool(getattr(self.request, "authtoken", False))) self.json_worker = (hasattr(self, 'json_builder')) or self.__authtoken or (self.json is True) if self.json_worker: # Check if the request has some json query, if not, just render the template if self.request.GET.get('json', self.request.POST.get('json', None)) is None: # Calculate tabs if getattr(self, 'show_details', False): self.extra_context['tabs_js'] = json.dumps(self.get_tabs_js()) # Silence the normal execution from this class self.get_queryset = lambda: None self.get_context_data = lambda **kwargs: self.extra_context self.render_to_response = lambda context, **response_kwargs: super(GenList, self).render_to_response(context, **response_kwargs) # Call the base implementation and finish execution here return super(GenList, self).dispatch(*args, **kwargs) # The systems is requesting a list, we are not allowed if onlybase: json_answer = {"error": True, "errortxt": _("Not allowed, this kind of requests has been prohibited for this view!")} return HttpResponse(json.dumps(json_answer), content_type='application/json') # Initialize a default context self.__kwargs = kwargs self.__context = {} # Force export list self.export = getattr(self, 'export', self.request.GET.get('export', self.request.POST.get('export', None))) # Call the base implementation return super(GenList, self).dispatch(*args, **kwargs)
[ "def", "dispatch", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Get if this class is working as only a base render and List funcionality shouldn't be enabled", "onlybase", "=", "getattr", "(", "self", ",", "\"onlybase\"", ",", "False", ")", "# ...
58.509259
39.861111
def print(*args, **kwargs): """ Normally print function in python prints values to a stream / stdout >> print(value1, value2, sep='', end='\n', file=sys.stdout) Current package usage: ====================== print(value1, value2, sep='', end='\n', file=sys.stdout, color=None, bg_color=None, text_format=None, log_type=None) :param args: Values (str) to print :param kwargs: Text formats like sep, end, color, background, text format, log type (ERROR, INFO, WARNING, DEBUG) :return: Colored text to stdout (Console) """ # Pop out color and background values from kwargs color_name = kwargs.pop('color', None) bg_color = kwargs.pop('bg_color', None) log_type = kwargs.pop('log_type', None) # Check formats, create a list of text formats txt_formats = kwargs.pop('text_format', []) if sys.version_info[0] == 2: str_type = basestring elif sys.version_info[0] == 3: str_type = str else: str_type = basestring if isinstance(txt_formats, str_type): txt_formats = [txt_formats] # Check for file keyword file_name = kwargs.get('file', sys.stdout) # Check for foreground and background colors if color_name or bg_color or log_type: # Pop out the 'end' argument end_ = kwargs.pop('end', "\n") kwargs['end'] = "" # If log type argument is provided if log_type: if log_type not in log_types.keys(): print('Log type not valid!', log_type='error') sys.exit(1) if log_type == 'info': __builtin__.print('\033[{}m[INF] '.format(foreground_colors[log_types[log_type]]), file=file_name, end='') __builtin__.print('\033[0m', file=file_name, end='') if log_type == 'warn': __builtin__.print('\033[{}m[WRN] '.format(foreground_colors[log_types[log_type]]), file=file_name, end='') __builtin__.print('\033[0m', file=file_name, end='') if log_type == 'error': __builtin__.print('\033[{}m[ERR] '.format(foreground_colors[log_types[log_type]]), file=file_name, end='') __builtin__.print('\033[0m', file=file_name, end='') if log_type == 'hint': __builtin__.print('\033[{}m[HNT] 
'.format(foreground_colors[log_types[log_type]]), file=file_name, end='') __builtin__.print('\033[0m', file=file_name, end='') if log_type == 'debug': __builtin__.print('\033[{}m[DBG] '.format(foreground_colors[log_types[log_type]]), file=file_name, end='') __builtin__.print('\033[0m', file=file_name, end='') # If foreground color argument is provided if color_name: if color_name not in foreground_colors.keys(): print('Invalid color code!', log_type='error') sys.exit(1) __builtin__.print('\033[{}m'.format(foreground_colors[color_name]), file=file_name, end='') # If background color argument is provided if bg_color: if bg_color not in background_colors.keys(): print('Invalid background color code!', log_type='error') sys.exit(1) __builtin__.print('\033[{}m'.format(background_colors[bg_color]), file=file_name, end='') # If text formats are provided for txt_format in txt_formats: __builtin__.print('\033[{}m'.format(text_formats[txt_format]), file=file_name, end='') # Print values __builtin__.print(*args, **kwargs) # Reset __builtin__.print('\033[0m', file=file_name, end=end_) else: __builtin__.print(*args, **kwargs)
[ "def", "print", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Pop out color and background values from kwargs", "color_name", "=", "kwargs", ".", "pop", "(", "'color'", ",", "None", ")", "bg_color", "=", "kwargs", ".", "pop", "(", "'bg_color'", ","...
43.583333
22.654762
def is_answer_available(self, assessment_section_id, item_id): """Tests if an answer is available for the given item. arg: assessment_section_id (osid.id.Id): ``Id`` of the ``AssessmentSection`` arg: item_id (osid.id.Id): ``Id`` of the ``Item`` return: (boolean) - ``true`` if an answer are available, ``false`` otherwise raise: NotFound - ``assessment_section_id or item_id is not found, or item_id not part of assessment_section_id`` raise: NullArgument - ``assessment_section_id or item_id is null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Note: we need more settings elsewhere to indicate answer available conditions # This makes the simple assumption that answers are available only when # a response has been submitted for an Item. try: response = self.get_response(assessment_section_id, item_id) # need to invoke something like .object_map before # a "null" response throws IllegalState response.object_map except errors.IllegalState: return False else: return True
[ "def", "is_answer_available", "(", "self", ",", "assessment_section_id", ",", "item_id", ")", ":", "# Note: we need more settings elsewhere to indicate answer available conditions", "# This makes the simple assumption that answers are available only when", "# a response has been submitted fo...
46.448276
21.034483
def extract_ast_species(ast): """Extract species from ast.species set of tuples (id, label)""" species_id = "None" species_label = "None" species = [ (species_id, species_label) for (species_id, species_label) in ast.species if species_id ] if len(species) == 1: (species_id, species_label) = species[0] if not species_id: species_id = "None" species_label = "None" log.debug(f"AST Species: {ast.species} Species: {species} SpeciesID: {species_id}") return (species_id, species_label)
[ "def", "extract_ast_species", "(", "ast", ")", ":", "species_id", "=", "\"None\"", "species_label", "=", "\"None\"", "species", "=", "[", "(", "species_id", ",", "species_label", ")", "for", "(", "species_id", ",", "species_label", ")", "in", "ast", ".", "sp...
28.473684
24.894737
def get_books_containing_page(cursor, uuid, version, context_uuid=None, context_version=None): """Return a list of book names and UUIDs that contain a given module UUID.""" with db_connect() as db_connection: # Uses a RealDictCursor instead of the regular cursor with db_connection.cursor( cursor_factory=psycopg2.extras.RealDictCursor ) as real_dict_cursor: # In the future the books-containing-page SQL might handle # all of these cases. For now we branch the code out in here. if context_uuid and context_version: return [get_book_info(cursor, real_dict_cursor, context_uuid, context_version, uuid, version)] else: portal_type = get_portal_type(cursor, uuid, version) if portal_type == 'Module': real_dict_cursor.execute(SQL['get-books-containing-page'], {'document_uuid': uuid, 'document_version': version}) return real_dict_cursor.fetchall() else: # Books are currently not in any other book return []
[ "def", "get_books_containing_page", "(", "cursor", ",", "uuid", ",", "version", ",", "context_uuid", "=", "None", ",", "context_version", "=", "None", ")", ":", "with", "db_connect", "(", ")", "as", "db_connection", ":", "# Uses a RealDictCursor instead of the regul...
54.083333
19.083333
def push(remote='origin', branch='master'): """git push commit""" print(cyan("Pulling changes from repo ( %s / %s)..." % (remote, branch))) local("git push %s %s" % (remote, branch))
[ "def", "push", "(", "remote", "=", "'origin'", ",", "branch", "=", "'master'", ")", ":", "print", "(", "cyan", "(", "\"Pulling changes from repo ( %s / %s)...\"", "%", "(", "remote", ",", "branch", ")", ")", ")", "local", "(", "\"git push %s %s\"", "%", "(",...
47.75
11.5
def quaternion_about_axis(angle, axis): """Return quaternion for rotation about axis. >>> q = quaternion_about_axis(0.123, [1, 0, 0]) >>> np.allclose(q, [0.99810947, 0.06146124, 0, 0]) True """ q = np.array([0.0, axis[0], axis[1], axis[2]]) qlen = vector_norm(q) if qlen > _EPS: q *= math.sin(angle / 2.0) / qlen q[0] = math.cos(angle / 2.0) return q
[ "def", "quaternion_about_axis", "(", "angle", ",", "axis", ")", ":", "q", "=", "np", ".", "array", "(", "[", "0.0", ",", "axis", "[", "0", "]", ",", "axis", "[", "1", "]", ",", "axis", "[", "2", "]", "]", ")", "qlen", "=", "vector_norm", "(", ...
27.642857
15.785714
def get_backlog_configurations(self, team_context): """GetBacklogConfigurations. Gets backlog configuration for a team :param :class:`<TeamContext> <azure.devops.v5_0.work.models.TeamContext>` team_context: The team context for the operation :rtype: :class:`<BacklogConfiguration> <azure.devops.v5_0.work.models.BacklogConfiguration>` """ project = None team = None if team_context is not None: if team_context.project_id: project = team_context.project_id else: project = team_context.project if team_context.team_id: team = team_context.team_id else: team = team_context.team route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'string') if team is not None: route_values['team'] = self._serialize.url('team', team, 'string') response = self._send(http_method='GET', location_id='7799f497-3cb5-4f16-ad4f-5cd06012db64', version='5.0', route_values=route_values) return self._deserialize('BacklogConfiguration', response)
[ "def", "get_backlog_configurations", "(", "self", ",", "team_context", ")", ":", "project", "=", "None", "team", "=", "None", "if", "team_context", "is", "not", "None", ":", "if", "team_context", ".", "project_id", ":", "project", "=", "team_context", ".", "...
45.75
19.071429
def form_valid(self, form): # lb = SalesLines.objects.filter(pk=self.__line_pk).first() # product_old = lb.product_final product_pk = self.request.POST.get("product_final", None) quantity = self.request.POST.get("quantity", None) product_final = ProductFinal.objects.filter(pk=product_pk).first() """ if product: is_pack = product.is_pack() else: is_pack = False """ if product_final and quantity: reason = form.data['reason'] if reason: reason_obj = ReasonModification.objects.filter(pk=reason).first() if reason_obj: try: with transaction.atomic(): result = super(LinesUpdateModalBasket, self).form_valid(form) reason_basket = ReasonModificationLineBasket() reason_basket.basket = self.object.basket reason_basket.reason = reason_obj reason_basket.line = self.object reason_basket.user = get_current_user() reason_basket.quantity = self.object.quantity reason_basket.save() return result except ValidationError as e: errors = form._errors.setdefault("product_final", ErrorList()) errors.append(e) return super(LinesUpdateModalBasket, self).form_invalid(form) else: errors = form._errors.setdefault("reason", ErrorList()) errors.append(_("Reason of modification invalid")) return super(LinesUpdatelOrder, self).form_invalid(form) else: errors = form._errors.setdefault("reason", ErrorList()) errors.append(_("Reason of modification invalid")) return super(LinesUpdatelOrder, self).form_invalid(form) """ if is_pack: options = product.productfinals_option.filter(active=True) options_pack = [] for option in options: field = 'packs[{}]'.format(option.pk) opt = self.request.POST.get(field, None) if opt: opt_product = ProductFinal.objects.filter(pk=opt).first() if opt_product: options_pack.append({ 'product_option': option, 'product_final': opt_product, 'quantity': quantity }) else: errors = form._errors.setdefault(field, ErrorList()) errors.append(_("Product Option invalid")) return super(LinesUpdateModalBasket, self).form_invalid(form) else: errors = form._errors.setdefault(field, 
ErrorList()) errors.append(_("Option invalid")) return super(LinesUpdateModalBasket, self).form_invalid(form) """ else: errors = form._errors.setdefault("product_final", ErrorList()) errors.append((_("Product invalid"), quantity, product_final)) return super(LinesUpdateModalBasket, self).form_invalid(form) """ ret = super(LinesUpdateModalBasket, self).form_valid(form) if product_old != self.object.product: self.object.remove_options() if is_pack: self.object.set_options(options_pack) return ret """
[ "def", "form_valid", "(", "self", ",", "form", ")", ":", "# lb = SalesLines.objects.filter(pk=self.__line_pk).first()", "# product_old = lb.product_final", "product_pk", "=", "self", ".", "request", ".", "POST", ".", "get", "(", "\"product_final\"", ",", "None", ")", ...
47.025
20.825
def _isub(self, other): """Discard the elements of other from self. if isinstance(it, _basebag): This runs in O(it.num_unique_elements()) else: This runs in O(len(it)) """ if isinstance(other, _basebag): for elem, other_count in other.counts(): try: self._increment_count(elem, -other_count) except ValueError: self._set_count(elem, 0) else: for elem in other: try: self._increment_count(elem, -1) except ValueError: pass return self
[ "def", "_isub", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "_basebag", ")", ":", "for", "elem", ",", "other_count", "in", "other", ".", "counts", "(", ")", ":", "try", ":", "self", ".", "_increment_count", "(", "elem"...
22.666667
17.047619
def stats(self, name, value): """ Calculates min/average/max statistics based on the current and previous values. :param name: a counter name of Statistics type :param value: a value to update statistics """ counter = self.get(name, CounterType.Statistics) self._calculate_stats(counter, value) self._update()
[ "def", "stats", "(", "self", ",", "name", ",", "value", ")", ":", "counter", "=", "self", ".", "get", "(", "name", ",", "CounterType", ".", "Statistics", ")", "self", ".", "_calculate_stats", "(", "counter", ",", "value", ")", "self", ".", "_update", ...
33.181818
18.272727
def hashes_above(path, line_number): """Yield hashes from contiguous comment lines before line ``line_number``. """ def hash_lists(path): """Yield lists of hashes appearing between non-comment lines. The lists will be in order of appearance and, for each non-empty list, their place in the results will coincide with that of the line number of the corresponding result from `parse_requirements` (which changed in pip 7.0 to not count comments). """ hashes = [] with open(path) as file: for lineno, line in enumerate(file, 1): match = HASH_COMMENT_RE.match(line) if match: # Accumulate this hash. hashes.append(match.groupdict()['hash']) if not IGNORED_LINE_RE.match(line): yield hashes # Report hashes seen so far. hashes = [] elif PIP_COUNTS_COMMENTS: # Comment: count as normal req but have no hashes. yield [] return next(islice(hash_lists(path), line_number - 1, None))
[ "def", "hashes_above", "(", "path", ",", "line_number", ")", ":", "def", "hash_lists", "(", "path", ")", ":", "\"\"\"Yield lists of hashes appearing between non-comment lines.\n\n The lists will be in order of appearance and, for each non-empty\n list, their place in the re...
41.074074
17.851852
def _ExtractGoogleDocsSearchQuery(self, url): """Extracts a search query from a Google docs URL. Google Docs: https://docs.google.com/.*/u/0/?q=query Args: url (str): URL. Returns: str: search query or None if no query was found. """ if 'q=' not in url: return None line = self._GetBetweenQEqualsAndAmpersand(url) if not line: return None return line.replace('+', ' ')
[ "def", "_ExtractGoogleDocsSearchQuery", "(", "self", ",", "url", ")", ":", "if", "'q='", "not", "in", "url", ":", "return", "None", "line", "=", "self", ".", "_GetBetweenQEqualsAndAmpersand", "(", "url", ")", "if", "not", "line", ":", "return", "None", "re...
21.894737
22.052632
def magic_api(word): """ This is our magic API that we're simulating. It'll return a random number and a cache timer. """ result = sum(ord(x)-65 + randint(1,50) for x in word) delta = timedelta(seconds=result) cached_until = datetime.now() + delta return result, cached_until
[ "def", "magic_api", "(", "word", ")", ":", "result", "=", "sum", "(", "ord", "(", "x", ")", "-", "65", "+", "randint", "(", "1", ",", "50", ")", "for", "x", "in", "word", ")", "delta", "=", "timedelta", "(", "seconds", "=", "result", ")", "cach...
30.8
11
def _make_stream_transport(self): """Create an AdbStreamTransport with a newly allocated local_id.""" msg_queue = queue.Queue() with self._stream_transport_map_lock: # Start one past the last id we used, and grab the first available one. # This mimics the ADB behavior of 'increment an unsigned and let it # overflow', but with a check to ensure we don't reuse an id in use, # even though that's unlikely with 2^32 - 1 of them available. We try # at most 64 id's, if we've wrapped around and there isn't one available # in the first 64, there's a problem, better to fail fast than hang for # a potentially very long time (STREAM_ID_LIMIT can be very large). self._last_id_used = (self._last_id_used % STREAM_ID_LIMIT) + 1 for local_id in itertools.islice( itertools.chain( range(self._last_id_used, STREAM_ID_LIMIT), range(1, self._last_id_used)), 64): if local_id not in list(self._stream_transport_map.keys()): self._last_id_used = local_id break else: raise usb_exceptions.AdbStreamUnavailableError('Ran out of local ids!') # Ignore this warning - the for loop will always have at least one # iteration, so local_id will always be set. # pylint: disable=undefined-loop-variable stream_transport = AdbStreamTransport(self, local_id, msg_queue) self._stream_transport_map[local_id] = stream_transport return stream_transport
[ "def", "_make_stream_transport", "(", "self", ")", ":", "msg_queue", "=", "queue", ".", "Queue", "(", ")", "with", "self", ".", "_stream_transport_map_lock", ":", "# Start one past the last id we used, and grab the first available one.", "# This mimics the ADB behavior of 'incr...
54.814815
21.148148
def inject(self, inst, **renames): """Injects dependencies and propagates dependency injector""" if renames: di = self.clone(**renames) else: di = self pro = di._provides inst.__injections_source__ = di deps = getattr(inst, '__injections__', None) if deps: for attr, dep in deps.items(): val = pro.get(dep.name) if val is None: raise MissingDependencyError(dep.name) if not isinstance(val, dep.type): raise TypeError("Wrong provider for {!r}".format(val)) setattr(inst, attr, val) meth = getattr(inst, '__injected__', None) if meth is not None: meth() return inst
[ "def", "inject", "(", "self", ",", "inst", ",", "*", "*", "renames", ")", ":", "if", "renames", ":", "di", "=", "self", ".", "clone", "(", "*", "*", "renames", ")", "else", ":", "di", "=", "self", "pro", "=", "di", ".", "_provides", "inst", "."...
35.227273
13.818182
def transform_regex_replace(source, pattern, rewrite, name=None): """Replace all substrings from `needle` to corresponding strings in `haystack` with source. Args: source: `Tensor` or `SparseTensor` of any shape, source strings for replacing. pattern: List of RE2 patterns to search in source rewrite: List of strings to replace with. Should have same length as `needle`. name: A name for the operation (optional). Returns: `Tensor` or `SparseTensor` of same shape and size as input. """ with ops.name_scope(name, "TransformRegexReplace", [source]): source = convert_to_tensor_or_sparse_tensor(source, dtype=tf.string) if isinstance(source, tf.SparseTensor): result = tf.SparseTensor( indices=source.indices, values=ops_module.transform_regex_replace(source.values, pattern, rewrite), dense_shape=source.dense_shape ) else: result = ops_module.transform_regex_replace(source, pattern, rewrite) return result
[ "def", "transform_regex_replace", "(", "source", ",", "pattern", ",", "rewrite", ",", "name", "=", "None", ")", ":", "with", "ops", ".", "name_scope", "(", "name", ",", "\"TransformRegexReplace\"", ",", "[", "source", "]", ")", ":", "source", "=", "convert...
44.291667
24.708333
def f1_score(df, col_true=None, col_pred='precision_result', pos_label=1, average=None): r""" Compute f-1 score of a predicted DataFrame. f-1 is defined as .. math:: \frac{2 \cdot precision \cdot recall}{precision + recall} :Parameters: - **df** - predicted data frame - **col_true** - column name of true label - **col_pred** - column name of predicted label, 'prediction_result' by default. - **pos_label** - denote the desired class label when ``average`` == `binary` - **average** - denote the method to compute average. :Returns: Recall score :Return type: float | numpy.array[float] The parameter ``average`` controls the behavior of the function. * When ``average`` == None (by default), f-1 of every class is given as a list. * When ``average`` == 'binary', f-1 of class specified in ``pos_label`` is given. * When ``average`` == 'micro', f-1 of overall precision and recall is given, where overall precision and recall are computed in micro-average mode. * When ``average`` == 'macro', average f-1 of all the class is given. * When ``average`` == `weighted`, average f-1 of all the class weighted by support of every true classes is given. :Example: Assume we have a table named 'predicted' as follows: ======== =================== label prediction_result ======== =================== 0 1 1 2 2 1 1 1 1 0 2 2 ======== =================== Different options of ``average`` parameter outputs different values: .. code-block:: python >>> f1_score(predicted, 'label', average=None) array([ 0. , 0.33333333, 0.5 ]) >>> f1_score(predicted, 'label', average='macro') 0.27 >>> f1_score(predicted, 'label', average='micro') 0.33 >>> f1_score(predicted, 'label', average='weighted') 0.33 """ if not col_pred: col_pred = get_field_name_by_role(df, FieldRole.PREDICTED_CLASS) return fbeta_score(df, col_true, col_pred, pos_label=pos_label, average=average)
[ "def", "f1_score", "(", "df", ",", "col_true", "=", "None", ",", "col_pred", "=", "'precision_result'", ",", "pos_label", "=", "1", ",", "average", "=", "None", ")", ":", "if", "not", "col_pred", ":", "col_pred", "=", "get_field_name_by_role", "(", "df", ...
32.53125
29.765625
def sliding_window(image, step_size, window_size, mask=None, only_whole=True): """ Creates generator of sliding windows. :param image: input image :param step_size: number of pixels we are going to skip in both the (x, y) direction :param window_size: the width and height of the window we are going to extract :param mask: region of interest, if None it will slide through the whole image :param only_whole: if True - produces only windows of the given window_size :return: generator that produce upper left corner of the window, center of the window and the sliding window itself """ if mask is None: mask = np.ones(image.shape, dtype=np.bool) # slide a window across the image for y in xrange(0, image.shape[0], step_size): # c_y = y + window_size[1] / 2. for x in xrange(0, image.shape[1], step_size): # c_x = x + window_size[0] / 2. # if c_x < mask.shape[1] and c_y < mask.shape[0] and mask[c_y, c_x]: # yield the current window end_x = x + window_size[0] end_y = y + window_size[1] if only_whole and (end_x > image.shape[1] or end_y > image.shape[0]): continue else: mask_out = np.zeros(image.shape, dtype=np.bool) mask_out[y:end_y, x:end_x] = True yield (x, y, mask_out, image[y:end_y, x:end_x])
[ "def", "sliding_window", "(", "image", ",", "step_size", ",", "window_size", ",", "mask", "=", "None", ",", "only_whole", "=", "True", ")", ":", "if", "mask", "is", "None", ":", "mask", "=", "np", ".", "ones", "(", "image", ".", "shape", ",", "dtype"...
51.703704
20.074074
def create_textview(self, wrap_mode=Gtk.WrapMode.WORD_CHAR, justify=Gtk.Justification.LEFT, visible=True, editable=True): """ Function creates a text view with wrap_mode and justification """ text_view = Gtk.TextView() text_view.set_wrap_mode(wrap_mode) text_view.set_editable(editable) if not editable: text_view.set_cursor_visible(False) else: text_view.set_cursor_visible(visible) text_view.set_justification(justify) return text_view
[ "def", "create_textview", "(", "self", ",", "wrap_mode", "=", "Gtk", ".", "WrapMode", ".", "WORD_CHAR", ",", "justify", "=", "Gtk", ".", "Justification", ".", "LEFT", ",", "visible", "=", "True", ",", "editable", "=", "True", ")", ":", "text_view", "=", ...
38.285714
13.857143
def json_worker(self, mask, cache_id=None, cache_method="string", cache_section="www"): """A function annotation that adds a worker request. A worker request is a POST request that is computed asynchronously. That is, the actual task is performed in a different thread and the network request returns immediately. The client side uses polling to fetch the result and can also cancel the task. The worker javascript client side must be linked and used for accessing the request. Parameters ---------- mask : string The URL that must be matched to perform this request. cache_id : function(args) or None Optional function for caching the result. If set the worker must be idempotent. Requires a `cache` object for the server. The function needs to return an object constructed from the function arguments to uniquely identify the result. Results are cached verbatim. cache_method : string or None Optional cache method string. Gets passed to get_hnd() of the cache. Defaults to "string" which requires a JSON serializable cache_id. cache_section : string or None Optional cache section string. Gets passed to get_hnd() of the cache. Defaults to "www". fun : function(args); (The annotated function) A function returning a (JSON-able) object. The function takes one argument which is the dictionary containing the payload from the client side. If the result is None a 404 error is sent. 
""" use_cache = cache_id is not None def wrapper(fun): lock = threading.RLock() tasks = {} cargo = {} cargo_cleaner = [None] def is_done(cur_key): with lock: if cur_key not in tasks: return True if "running" not in tasks[cur_key]: return False return not tasks[cur_key]["running"] def start_cargo_cleaner(): def get_next_cargo(): with lock: next_ttl = None for value in cargo.values(): ttl, _ = value if next_ttl is None or ttl < next_ttl: next_ttl = ttl return next_ttl def clean_for(timestamp): with lock: keys = [] for (key, value) in cargo.items(): ttl, _ = value if ttl > timestamp: continue keys.append(key) for k in keys: cargo.pop(k) msg("purged cargo that was never read ({0})", k) def remove_cleaner(): with lock: if get_next_cargo() is not None: return False cargo_cleaner[0] = None return True def clean(): while True: next_ttl = get_next_cargo() if next_ttl is None: if remove_cleaner(): break else: continue time_until = next_ttl - time.time() if time_until > 0: time.sleep(time_until) clean_for(time.time()) with lock: if cargo_cleaner[0] is not None: return cleaner = self._thread_factory( target=clean, name="{0}-Cargo-Cleaner".format(self.__class__)) cleaner.daemon = True cargo_cleaner[0] = cleaner cleaner.start() def add_cargo(content): with lock: mcs = self.max_chunk_size if mcs < 1: raise ValueError("invalid chunk size: {0}".format(mcs)) ttl = time.time() + 10 * 60 # 10 minutes chunks = [] while len(content) > 0: chunk = content[:mcs] content = content[mcs:] cur_key = get_key() cargo[cur_key] = (ttl, chunk) chunks.append(cur_key) start_cargo_cleaner() return chunks def remove_cargo(cur_key): with lock: _, result = cargo.pop(cur_key) return result def remove_worker(cur_key): with lock: task = tasks.pop(cur_key, None) if task is None: err_msg = "Task {0} not found!".format(cur_key) return None, (ValueError(err_msg), None) if task["running"]: th = task["thread"] if th.is_alive(): # kill the thread tid = None for tk, tobj in threading._active.items(): if tobj is th: 
tid = tk break if tid is not None: papi = ctypes.pythonapi pts_sae = papi.PyThreadState_SetAsyncExc res = pts_sae(ctypes.c_long(tid), ctypes.py_object(WorkerDeath)) if res == 0: # invalid thread id -- the thread might # be done already msg("invalid thread id for " + "killing worker {0}", cur_key) elif res != 1: # roll back pts_sae(ctypes.c_long(tid), None) msg("killed too many ({0}) workers? {1}", res, cur_key) else: if self.verbose_workers: msg("killed worker {0}", cur_key) err_msg = "Task {0} is still running!".format(cur_key) return None, (ValueError(err_msg), None) return task["result"], task["exception"] def start_worker(args, cur_key, get_thread): try: with lock: task = { "running": True, "result": None, "exception": None, "thread": get_thread(), } tasks[cur_key] = task if use_cache: cache_obj = cache_id(args) if cache_obj is not None and self.cache is not None: with self.cache.get_hnd( cache_obj, section=cache_section, method=cache_method) as hnd: if hnd.has(): result = hnd.read() else: result = hnd.write(json_dumps(fun(args))) else: result = json_dumps(fun(args)) else: result = json_dumps(fun(args)) with lock: task["running"] = False task["result"] = result except (KeyboardInterrupt, SystemExit): raise except Exception as e: with lock: task["running"] = False task["exception"] = (e, traceback.format_exc()) return # make sure the result does not get stored forever try: # remove 2 minutes after not reading the result time.sleep(120) finally: _result, err = remove_worker(cur_key) if err is not None: e, tb = err if tb is not None: msg("Error in purged worker for {0}: {1}\n{2}", cur_key, e, tb) return msg("purged result that was never read ({0})", cur_key) def get_key(): with lock: crc32 = zlib.crc32(repr(get_time()).encode('utf8')) cur_key = int(crc32 & 0xFFFFFFFF) while cur_key in tasks or cur_key in cargo: key = int(cur_key + 1) if key == cur_key: key = 0 cur_key = key return cur_key def reserve_worker(): with lock: cur_key = get_key() tasks[cur_key] = {} # 
put marker return cur_key def run_worker(req, args): post = args["post"] try: action = post["action"] cur_key = None if action == "stop": cur_key = post["token"] remove_worker(cur_key) # throw away the result return { "token": cur_key, "done": True, "result": None, "continue": False, } if action == "start": cur_key = reserve_worker() inner_post = post.get("payload", {}) th = [] wname = "{0}-Worker-{1}".format(self.__class__, cur_key) worker = self._thread_factory( target=start_worker, name=wname, args=(inner_post, cur_key, lambda: th[0])) th.append(worker) worker.start() # give fast tasks a way to immediately return results time.sleep(0.1) if action == "cargo": cur_key = post["token"] result = remove_cargo(cur_key) return { "token": cur_key, "result": result, } if action == "get": cur_key = post["token"] if cur_key is None: raise ValueError("invalid action: {0}".format(action)) if is_done(cur_key): result, exception = remove_worker(cur_key) if exception is not None: e, tb = exception if tb is None: # token does not exist anymore return { "token": cur_key, "done": False, "result": None, "continue": False, } if isinstance(e, PreventDefaultResponse): raise e msg("Error in worker for {0}: {1}\n{2}", cur_key, e, tb) raise PreventDefaultResponse(500, "worker error") if len(result) > self.max_chunk_size: cargo_keys = add_cargo(result) return { "token": cur_key, "done": True, "result": cargo_keys, "continue": True, } return { "token": cur_key, "done": True, "result": result, "continue": False, } return { "token": cur_key, "done": False, "result": None, "continue": True, } except: # nopep8 msg("Error processing worker command: {0}", post) raise self.add_json_post_mask(mask, run_worker) self.set_file_argc(mask, 0) return fun return wrapper
[ "def", "json_worker", "(", "self", ",", "mask", ",", "cache_id", "=", "None", ",", "cache_method", "=", "\"string\"", ",", "cache_section", "=", "\"www\"", ")", ":", "use_cache", "=", "cache_id", "is", "not", "None", "def", "wrapper", "(", "fun", ")", ":...
43.334405
14.434084
def kill_all(self, bIgnoreExceptions = False): """ Kills from all processes currently being debugged. @type bIgnoreExceptions: bool @param bIgnoreExceptions: C{True} to ignore any exceptions that may be raised when killing each process. C{False} to stop and raise an exception when encountering an error. @raise WindowsError: Raises an exception on error, unless C{bIgnoreExceptions} is C{True}. """ for pid in self.get_debugee_pids(): self.kill(pid, bIgnoreExceptions = bIgnoreExceptions)
[ "def", "kill_all", "(", "self", ",", "bIgnoreExceptions", "=", "False", ")", ":", "for", "pid", "in", "self", ".", "get_debugee_pids", "(", ")", ":", "self", ".", "kill", "(", "pid", ",", "bIgnoreExceptions", "=", "bIgnoreExceptions", ")" ]
41.642857
17.5
def _fetch_langs(): """Fetch (scrape) languages from Google Translate. Google Translate loads a JavaScript Array of 'languages codes' that can be spoken. We intersect this list with all the languages Google Translate provides to get the ones that support text-to-speech. Returns: dict: A dictionnary of languages from Google Translate """ # Load HTML page = requests.get(URL_BASE) soup = BeautifulSoup(page.content, 'html.parser') # JavaScript URL # The <script src=''> path can change, but not the file. # Ex: /zyx/abc/20180211/desktop_module_main.js js_path = soup.find(src=re.compile(JS_FILE))['src'] js_url = "{}/{}".format(URL_BASE, js_path) # Load JavaScript js_contents = requests.get(js_url).text # Approximately extract TTS-enabled language codes # RegEx pattern search because minified variables can change. # Extra garbage will be dealt with later as we keep languages only. # In: "[...]Fv={af:1,ar:1,[...],zh:1,"zh-cn":1,"zh-tw":1}[...]" # Out: ['is', '12', [...], 'af', 'ar', [...], 'zh', 'zh-cn', 'zh-tw'] pattern = r'[{,\"](\w{2}|\w{2}-\w{2,3})(?=:1|\":1)' tts_langs = re.findall(pattern, js_contents) # Build lang. dict. from main page (JavaScript object populating lang. menu) # Filtering with the TTS-enabled languages # In: "{code:'auto',name:'Detect language'},{code:'af',name:'Afrikaans'},[...]" # re.findall: [('auto', 'Detect language'), ('af', 'Afrikaans'), [...]] # Out: {'af': 'Afrikaans', [...]} trans_pattern = r"{code:'(?P<lang>.+?[^'])',name:'(?P<name>.+?[^'])'}" trans_langs = re.findall(trans_pattern, page.text) return {lang: name for lang, name in trans_langs if lang in tts_langs}
[ "def", "_fetch_langs", "(", ")", ":", "# Load HTML", "page", "=", "requests", ".", "get", "(", "URL_BASE", ")", "soup", "=", "BeautifulSoup", "(", "page", ".", "content", ",", "'html.parser'", ")", "# JavaScript URL", "# The <script src=''> path can change, but not ...
42.875
23.5
def clear(self): """ Clear the cache. """ not_removed = [] for fn in os.listdir(self.base): fn = os.path.join(self.base, fn) try: if os.path.islink(fn) or os.path.isfile(fn): os.remove(fn) elif os.path.isdir(fn): shutil.rmtree(fn) except Exception: not_removed.append(fn) return not_removed
[ "def", "clear", "(", "self", ")", ":", "not_removed", "=", "[", "]", "for", "fn", "in", "os", ".", "listdir", "(", "self", ".", "base", ")", ":", "fn", "=", "os", ".", "path", ".", "join", "(", "self", ".", "base", ",", "fn", ")", "try", ":",...
29.866667
9.466667
def _compose_range(pattern, rule, fill=2): """oc._compose_range('Week', 'Week04-Week09', fill=2) - hash a range. This takes apart a range of times and returns a dictionary of all intervening values appropriately set. The fill value is used to format the time numbers. """ keys = [] mask = len(pattern) for rule in str.split(rule, ","): if not '-' in rule: if rule[:mask] == pattern: keys.append(rule[mask:]) else: keys.append(rule) else: (start, end) = str.split(rule, '-') if rule[:mask] == pattern: start = int(start[mask:]) else: start = int(start) # Since I allow both "Week00-15" and "Week00-Week15", I need # to check for the second week. if end[0:mask] == pattern: end = int(end[mask:]) else: end = int(end) key = "%%0%ii" % fill for i in range(start, end + 1): keys.append(key % i) #print keys return keys
[ "def", "_compose_range", "(", "pattern", ",", "rule", ",", "fill", "=", "2", ")", ":", "keys", "=", "[", "]", "mask", "=", "len", "(", "pattern", ")", "for", "rule", "in", "str", ".", "split", "(", "rule", ",", "\",\"", ")", ":", "if", "not", "...
31.882353
13.823529
def get_new_driver(self, browser=None, headless=None, servername=None, port=None, proxy=None, agent=None, switch_to=True, cap_file=None, disable_csp=None): """ This method spins up an extra browser for tests that require more than one. The first browser is already provided by tests that import base_case.BaseCase from seleniumbase. If parameters aren't specified, the method uses the same as the default driver. @Params browser - the browser to use. (Ex: "chrome", "firefox") headless - the option to run webdriver in headless mode servername - if using a Selenium Grid, set the host address here port - if using a Selenium Grid, set the host port here proxy - if using a proxy server, specify the "host:port" combo here switch_to - the option to switch to the new driver (default = True) """ if self.browser == "remote" and self.servername == "localhost": raise Exception('Cannot use "remote" browser driver on localhost!' ' Did you mean to connect to a remote Grid server' ' such as BrowserStack or Sauce Labs? In that' ' case, you must specify the "server" and "port"' ' parameters on the command line! ' 'Example: ' '--server=user:key@hub.browserstack.com --port=80') browserstack_ref = ( 'https://browserstack.com/automate/capabilities') sauce_labs_ref = ( 'https://wiki.saucelabs.com/display/DOCS/Platform+Configurator#/') if self.browser == "remote" and not self.cap_file: raise Exception('Need to specify a desired capabilities file when ' 'using "--browser=remote". Add "--cap_file=FILE". 
' 'File should be in the Python format used by: ' '%s OR ' '%s ' 'See SeleniumBase/examples/sample_cap_file_BS.py ' 'and SeleniumBase/examples/sample_cap_file_SL.py' % (browserstack_ref, sauce_labs_ref)) if browser is None: browser = self.browser browser_name = browser if headless is None: headless = self.headless if servername is None: servername = self.servername if port is None: port = self.port use_grid = False if servername != "localhost": # Use Selenium Grid (Use "127.0.0.1" for localhost Grid) use_grid = True proxy_string = proxy if proxy_string is None: proxy_string = self.proxy_string user_agent = agent if user_agent is None: user_agent = self.user_agent if disable_csp is None: disable_csp = self.disable_csp if self.demo_mode or self.masterqa_mode: disable_csp = True if cap_file is None: cap_file = self.cap_file valid_browsers = constants.ValidBrowsers.valid_browsers if browser_name not in valid_browsers: raise Exception("Browser: {%s} is not a valid browser option. " "Valid options = {%s}" % (browser, valid_browsers)) # Launch a web browser from seleniumbase.core import browser_launcher new_driver = browser_launcher.get_driver(browser_name=browser_name, headless=headless, use_grid=use_grid, servername=servername, port=port, proxy_string=proxy_string, user_agent=user_agent, cap_file=cap_file, disable_csp=disable_csp) self._drivers_list.append(new_driver) if switch_to: self.driver = new_driver if self.headless: # Make sure the invisible browser window is big enough try: self.set_window_size(1440, 1080) self.wait_for_ready_state_complete() except Exception: # This shouldn't fail, but in case it does, # get safely through setUp() so that # WebDrivers can get closed during tearDown(). 
pass else: if self.browser == 'chrome' or self.browser == 'opera': try: self.driver.set_window_size(1250, 840) self.wait_for_ready_state_complete() except Exception: pass # Keep existing browser resolution elif self.browser == 'edge': try: self.driver.maximize_window() self.wait_for_ready_state_complete() except Exception: pass # Keep existing browser resolution return new_driver
[ "def", "get_new_driver", "(", "self", ",", "browser", "=", "None", ",", "headless", "=", "None", ",", "servername", "=", "None", ",", "port", "=", "None", ",", "proxy", "=", "None", ",", "agent", "=", "None", ",", "switch_to", "=", "True", ",", "cap_...
51.708738
19.757282
def request(self, method, url, params=None, data=None, headers=None, auth=None, timeout=None, allow_redirects=False): """ Make an HTTP Request with parameters provided. :param str method: The HTTP method to use :param str url: The URL to request :param dict params: Query parameters to append to the URL :param dict data: Parameters to go in the body of the HTTP request :param dict headers: HTTP Headers to send with the request :param tuple auth: Basic Auth arguments :param float timeout: Socket/Read timeout for the request :param boolean allow_redirects: Whether or not to allow redirects See the requests documentation for explanation of all these parameters :return: An http response :rtype: A :class:`Response <twilio.rest.http.response.Response>` object """ kwargs = { 'method': method.upper(), 'url': url, 'params': params, 'data': data, 'headers': headers, 'auth': auth, 'hooks': self.request_hooks } if params: _logger.info('{method} Request: {url}?{query}'.format(query=urlencode(params), **kwargs)) _logger.info('PARAMS: {params}'.format(**kwargs)) else: _logger.info('{method} Request: {url}'.format(**kwargs)) if data: _logger.info('PAYLOAD: {data}'.format(**kwargs)) self.last_response = None session = self.session or Session() request = Request(**kwargs) self.last_request = TwilioRequest(**kwargs) prepped_request = session.prepare_request(request) response = session.send( prepped_request, allow_redirects=allow_redirects, timeout=timeout, ) _logger.info('{method} Response: {status} {text}'.format(method=method, status=response.status_code, text=response.text)) self.last_response = Response(int(response.status_code), response.text) return self.last_response
[ "def", "request", "(", "self", ",", "method", ",", "url", ",", "params", "=", "None", ",", "data", "=", "None", ",", "headers", "=", "None", ",", "auth", "=", "None", ",", "timeout", "=", "None", ",", "allow_redirects", "=", "False", ")", ":", "kwa...
38.037037
23.074074
def extract_objects(self, fname, type_filter=None): '''Extract objects from a source file Args: fname(str): Name of file to read from type_filter (class, optional): Object class to filter results Returns: List of objects extracted from the file. ''' objects = [] if fname in self.object_cache: objects = self.object_cache[fname] else: with io.open(fname, 'rt', encoding='utf-8') as fh: text = fh.read() objects = parse_verilog(text) self.object_cache[fname] = objects if type_filter: objects = [o for o in objects if isinstance(o, type_filter)] return objects
[ "def", "extract_objects", "(", "self", ",", "fname", ",", "type_filter", "=", "None", ")", ":", "objects", "=", "[", "]", "if", "fname", "in", "self", ".", "object_cache", ":", "objects", "=", "self", ".", "object_cache", "[", "fname", "]", "else", ":"...
28.954545
19.409091
def renegotiate(self): """ Renegotiate the session. :return: True if the renegotiation can be started, False otherwise :rtype: bool """ if not self.renegotiate_pending(): _openssl_assert(_lib.SSL_renegotiate(self._ssl) == 1) return True return False
[ "def", "renegotiate", "(", "self", ")", ":", "if", "not", "self", ".", "renegotiate_pending", "(", ")", ":", "_openssl_assert", "(", "_lib", ".", "SSL_renegotiate", "(", "self", ".", "_ssl", ")", "==", "1", ")", "return", "True", "return", "False" ]
29.090909
16.727273
def changelist_view(self, request, extra_context=None): """ Redirect to the changelist view for subclasses. """ if self.model is not self.concrete_model: return HttpResponseRedirect( admin_url(self.concrete_model, "changelist")) extra_context = extra_context or {} extra_context["content_models"] = self.get_content_models() return super(ContentTypedAdmin, self).changelist_view( request, extra_context)
[ "def", "changelist_view", "(", "self", ",", "request", ",", "extra_context", "=", "None", ")", ":", "if", "self", ".", "model", "is", "not", "self", ".", "concrete_model", ":", "return", "HttpResponseRedirect", "(", "admin_url", "(", "self", ".", "concrete_m...
43.181818
16.545455
def get_chunk_hash(file, seed, filesz=None, chunksz=DEFAULT_CHUNK_SIZE, bufsz=DEFAULT_BUFFER_SIZE): """returns a hash of a chunk of the file provided. the position of the chunk is determined by the seed. additionally, the hmac of the chunk is calculated from the seed. :param file: a file like object to get the chunk hash from. should support `read()`, `seek()` and `tell()`. :param seed: the seed to use for calculating the chunk position and chunk hash :param chunksz: the size of the chunk to check :param bufsz: an optional buffer size to use for reading the file. """ if (filesz is None): file.seek(0, 2) filesz = file.tell() if (filesz < chunksz): chunksz = filesz prf = KeyedPRF(seed, filesz - chunksz + 1) i = prf.eval(0) file.seek(i) h = hmac.new(seed, None, hashlib.sha256) while (True): if (chunksz < bufsz): bufsz = chunksz buffer = file.read(bufsz) h.update(buffer) chunksz -= len(buffer) assert(chunksz >= 0) if (chunksz == 0): break return h.digest()
[ "def", "get_chunk_hash", "(", "file", ",", "seed", ",", "filesz", "=", "None", ",", "chunksz", "=", "DEFAULT_CHUNK_SIZE", ",", "bufsz", "=", "DEFAULT_BUFFER_SIZE", ")", ":", "if", "(", "filesz", "is", "None", ")", ":", "file", ".", "seek", "(", "0", ",...
37.485714
14.171429
def _get_column_type(self,column): """ Return 'numeric' if the column is of type integer or real, otherwise return 'string'. """ ctype = column.GetType() if ctype in [ogr.OFTInteger, ogr.OFTReal]: return 'numeric' else: return 'string'
[ "def", "_get_column_type", "(", "self", ",", "column", ")", ":", "ctype", "=", "column", ".", "GetType", "(", ")", "if", "ctype", "in", "[", "ogr", ".", "OFTInteger", ",", "ogr", ".", "OFTReal", "]", ":", "return", "'numeric'", "else", ":", "return", ...
36.5
9.5
def accumulate(iterable): " Return series of accumulated sums. " iterator = iter(iterable) sum_data = next(iterator) yield sum_data for el in iterator: sum_data += el yield sum_data
[ "def", "accumulate", "(", "iterable", ")", ":", "iterator", "=", "iter", "(", "iterable", ")", "sum_data", "=", "next", "(", "iterator", ")", "yield", "sum_data", "for", "el", "in", "iterator", ":", "sum_data", "+=", "el", "yield", "sum_data" ]
24
16
def from_string(cls, string, *, default_func=None): '''Construct a Service from a string. If default_func is provided and any ServicePart is missing, it is called with default_func(protocol, part) to obtain the missing part. ''' if not isinstance(string, str): raise TypeError(f'service must be a string: {string}') parts = string.split('://', 1) if len(parts) == 2: protocol, address = parts else: item, = parts protocol = None if default_func: if default_func(item, ServicePart.HOST) and default_func(item, ServicePart.PORT): protocol, address = item, '' else: protocol, address = default_func(None, ServicePart.PROTOCOL), item if not protocol: raise ValueError(f'invalid service string: {string}') if default_func: default_func = partial(default_func, protocol.lower()) address = NetAddress.from_string(address, default_func=default_func) return cls(protocol, address)
[ "def", "from_string", "(", "cls", ",", "string", ",", "*", ",", "default_func", "=", "None", ")", ":", "if", "not", "isinstance", "(", "string", ",", "str", ")", ":", "raise", "TypeError", "(", "f'service must be a string: {string}'", ")", "parts", "=", "s...
41.037037
22.148148
def qos_queue_scheduler_strict_priority_dwrr_traffic_class6(self, **kwargs):
    """Build the netconf payload for dwrr-traffic-class6 and submit it.

    Auto-generated helper: constructs the
    qos/queue/scheduler/strict-priority/dwrr-traffic-class6 element
    tree and hands it to the callback (``self._callback`` unless one is
    supplied in *kwargs*).
    """
    config = ET.Element("config")
    qos_elem = ET.SubElement(
        config, "qos", xmlns="urn:brocade.com:mgmt:brocade-qos")
    node = qos_elem
    for tag in ("queue", "scheduler", "strict-priority"):
        node = ET.SubElement(node, tag)
    leaf = ET.SubElement(node, "dwrr-traffic-class6")
    leaf.text = kwargs.pop('dwrr_traffic_class6')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "qos_queue_scheduler_strict_priority_dwrr_traffic_class6", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "qos", "=", "ET", ".", "SubElement", "(", "config", ",", "\"qos\"", ",", "xmlns", "...
49.384615
20.384615
def post(self, url, data=None, **kwargs):
    """Send a POST request and return the :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary or bytes to send in the body of
        the :class:`Request`.
    :param **kwargs: Optional arguments that ``request`` takes.
    """
    # Thin convenience wrapper: delegate with the method pinned to POST.
    return self.request('post', url, data=data, **kwargs)
[ "def", "post", "(", "self", ",", "url", ",", "data", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "request", "(", "'post'", ",", "url", ",", "data", "=", "data", ",", "*", "*", "kwargs", ")" ]
44.666667
22.777778
def normalize_value(value, snake_case=True):
    """Lower-case *value*, optionally converting CamelCase to snake_case.

    :param value: string to normalise; TypeError for non-strings.
    :param snake_case: when True, insert underscores at case boundaries
        (via the module-level ``first_cap_re``/``all_cap_re`` patterns)
        before lower-casing.
    :return: the normalised string.
    """
    if not isinstance(value, six.string_types):
        raise TypeError("the value passed to value must be a string")
    if not snake_case:
        return value.lower()
    with_boundaries = first_cap_re.sub(r'\1_\2', value)
    return all_cap_re.sub(r'\1_\2', with_boundaries).lower()
[ "def", "normalize_value", "(", "value", ",", "snake_case", "=", "True", ")", ":", "if", "not", "isinstance", "(", "value", ",", "six", ".", "string_types", ")", ":", "raise", "TypeError", "(", "\"the value passed to value must be a string\"", ")", "if", "snake_c...
27.533333
19.266667
def render_widget(self, request, widget_id):
    """Render the widget identified by *widget_id* as a JSON response.

    The payload carries the rendered markup under 'result' and echoes
    the widget id under 'id'.
    """
    widget = get_widget_from_id(widget_id)
    rendered = widget.render(request=request)
    return JsonResponse({'result': rendered, 'id': widget_id})
[ "def", "render_widget", "(", "self", ",", "request", ",", "widget_id", ")", ":", "widget", "=", "get_widget_from_id", "(", "widget_id", ")", "response", "=", "widget", ".", "render", "(", "*", "*", "{", "'request'", ":", "request", "}", ")", "return", "J...
33.25
23.25
def write_config(configuration):
    """Serialise *configuration* as pretty-printed JSON to CONFIG_PATH.

    Keys are sorted and indented by two spaces so the file diffs
    cleanly between writes.
    """
    with open(CONFIG_PATH, 'w') as config_file:
        json.dump(configuration, config_file, sort_keys=True, indent=2)
[ "def", "write_config", "(", "configuration", ")", ":", "with", "open", "(", "CONFIG_PATH", ",", "'w'", ")", "as", "f", ":", "json", ".", "dump", "(", "configuration", ",", "f", ",", "indent", "=", "2", ",", "sort_keys", "=", "True", ")" ]
47
8
def connection_lost(self, reason):
    """Protocol's connection-lost handler.

    Tears down per-connection state for this peer: logs the event,
    marks the BGP FSM idle, stops and drops the protocol instance,
    resets the RT NLRI bookkeeping and outgoing message sink, bumps the
    route-source version, and notifies the peer manager.  If the
    neighbor is still enabled in configuration, the connect-retry event
    is armed so the reconnect loop runs again.

    :param reason: human-readable reason, only used for logging.
    """
    LOG.info(
        'Connection to peer %s lost, reason: %s Resetting '
        'retry connect loop: %s' %
        (self._neigh_conf.ip_address, reason,
         self._connect_retry_event.is_set()),
        extra={
            'resource_name': self._neigh_conf.name,
            'resource_id': self._neigh_conf.id
        }
    )
    self.state.bgp_state = const.BGP_FSM_IDLE
    if self._protocol:
        self._protocol.stop()
        self._protocol = None

    # Create new collection for initial RT NLRIs
    self._init_rtc_nlri_path = []
    self._sent_init_non_rtc_update = False

    # Clear sink.
    self.clear_outgoing_msg_list()

    # Un-schedule timers
    self._unschedule_sending_init_updates()

    # Increment the version number of this source so stale paths from
    # the old session can be told apart from fresh ones.
    self.version_num += 1
    self._peer_manager.on_peer_down(self)

    # Check configuration: if the neighbor is still enabled, we try
    # reconnecting by arming the retry event.
    if self._neigh_conf.enabled:
        if not self._connect_retry_event.is_set():
            self._connect_retry_event.set()
[ "def", "connection_lost", "(", "self", ",", "reason", ")", ":", "LOG", ".", "info", "(", "'Connection to peer %s lost, reason: %s Resetting '", "'retry connect loop: %s'", "%", "(", "self", ".", "_neigh_conf", ".", "ip_address", ",", "reason", ",", "self", ".", "_...
37.529412
13.588235
def partial_derivative(self, X, y=0):
    """Compute the partial derivative C(u|v) of the cumulative density.

    Args:
        X: `np.ndarray` split into (U, V) via ``self.split_matrix``.
        y: `float` offset subtracted from the result (default 0).

    Returns:
        The conditional values minus *y*.  For theta == 1 the copula is
        independence, so V itself is returned.
    """
    self.check_fit()
    U, V = self.split_matrix(X)
    if self.theta == 1:
        return V
    neg_log_u_pow = np.power(-np.log(U), self.theta)
    neg_log_v_pow = np.power(-np.log(V), self.theta)
    cdf = self.cumulative_distribution(X)
    inner = np.power(neg_log_u_pow + neg_log_v_pow, -1 + 1.0 / self.theta)
    tail = np.power(-np.log(V), self.theta - 1)
    return np.divide(np.multiply(np.multiply(cdf, inner), tail), V) - y
[ "def", "partial_derivative", "(", "self", ",", "X", ",", "y", "=", "0", ")", ":", "self", ".", "check_fit", "(", ")", "U", ",", "V", "=", "self", ".", "split_matrix", "(", "X", ")", "if", "self", ".", "theta", "==", "1", ":", "return", "V", "el...
27.041667
20.625
def free(**kwargs):
    """Stop synchronization of a directory.

    Delegates to the CLI/Syncthing adapter for the given 'path' and
    echoes its message, routing to stderr when the adapter reports an
    error.
    """
    output, err = cli_syncthing_adapter.free(kwargs['path'])
    click.echo("%s" % output, err=err)
[ "def", "free", "(", "*", "*", "kwargs", ")", ":", "output", ",", "err", "=", "cli_syncthing_adapter", ".", "free", "(", "kwargs", "[", "'path'", "]", ")", "click", ".", "echo", "(", "\"%s\"", "%", "output", ",", "err", "=", "err", ")" ]
31.4
17.4
def authenticate(username, password):
    """Log into the API using the resource-owner password flow.

    Returns:
        a dict with:
            pk: the pk of the user
            token: dict containing all the token data from the api
                (access_token, refresh_token, expires_at etc.)
            user_data: dict containing user data such as first_name,
                last_name etc.
    Raises Unauthorized if the authentication fails.
    """
    oauth_client = LegacyApplicationClient(client_id=settings.API_CLIENT_ID)
    session = MoJOAuth2Session(client=oauth_client)
    basic_auth = HTTPBasicAuth(settings.API_CLIENT_ID,
                               settings.API_CLIENT_SECRET)
    token = session.fetch_token(
        token_url=get_request_token_url(),
        username=username,
        password=password,
        auth=basic_auth,
        timeout=15,
        encoding='utf-8'
    )
    user_data = session.get(
        '/users/{username}/'.format(username=username)).json()
    return {
        'pk': user_data.get('pk'),
        'token': token,
        'user_data': user_data
    }
[ "def", "authenticate", "(", "username", ",", "password", ")", ":", "session", "=", "MoJOAuth2Session", "(", "client", "=", "LegacyApplicationClient", "(", "client_id", "=", "settings", ".", "API_CLIENT_ID", ")", ")", "token", "=", "session", ".", "fetch_token", ...
29.323529
18.264706
def marshal_with(self, schema, envelope=None):
    """Decorator that serialises a view's return value with *schema*.

    :param schema: schema class (or pre-built instance) used to
        serialise the values.
    :param envelope: optional key used to envelope the data.
    :return: the decorating function.
    """
    # Instantiate once up front so the common case does not rebuild the
    # schema on every request.
    schema_is_class = isclass(schema)
    schema_cache = schema() if schema_is_class else schema

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            data = func(*args, **kwargs)
            # Responses built directly by the view pass through untouched.
            if isinstance(data, self.__app.response_class):
                return data
            schema_instance = schema_cache
            if schema_is_class:
                # A 'fields' query parameter forces a per-request schema
                # restricted to the requested fields, so the cached
                # instance cannot be reused.
                only = get_fields_from_request(schema=schema)
                if only:
                    schema_instance = schema(only=only)
            return marshal(data, schema_instance, envelope)
        return wrapper
    return decorator
[ "def", "marshal_with", "(", "self", ",", "schema", ",", "envelope", "=", "None", ")", ":", "# schema is pre instantiated to avoid instantiate it", "# on every request", "schema_is_class", "=", "isclass", "(", "schema", ")", "schema_cache", "=", "schema", "(", ")", "...
37.314286
18.685714
def _execute(self, source, hidden):
    """Execute *source* on the kernel's shell channel.

    If *hidden* is True no output is shown and the ``executing`` signal
    is not emitted.  See the parent class :meth:`execute` docstring for
    the full contract.
    """
    msg_id = self.kernel_manager.shell_channel.execute(source, hidden)
    # Track the request so replies can be routed back to the user.
    self._request_info['execute'][msg_id] = self._ExecutionRequest(
        msg_id, 'user')
    self._hidden = hidden
    if not hidden:
        self.executing.emit(source)
[ "def", "_execute", "(", "self", ",", "source", ",", "hidden", ")", ":", "msg_id", "=", "self", ".", "kernel_manager", ".", "shell_channel", ".", "execute", "(", "source", ",", "hidden", ")", "self", ".", "_request_info", "[", "'execute'", "]", "[", "msg_...
43
18.3
def unionIntoArray(self, inputVector, outputVector, forceOutput=False):
    """Union *inputVector* into the pooler and copy the SDR into *outputVector*.

    Parameters:
    ----------------------------
    @param inputVector: either a full numpy array of 0's and 1's, or a
           list of the non-zero entry indices
    @param outputVector: a numpy array matching the length of the union
           pooler; overwritten with the union SDR
    @param forceOutput: if True, a union will be created without regard
           to minHistory
    """
    if isinstance(inputVector, numpy.ndarray):
        if inputVector.size != self._numInputs:
            raise ValueError(
                "Input vector dimensions don't match. Expecting %s but got %s" % (
                    self._numInputs, inputVector.size))
        active_indices = numpy.where(inputVector)[0]
    elif isinstance(inputVector, list):
        if len(inputVector) > 0:
            peak = max(inputVector)
            if peak >= self._numInputs:
                raise ValueError(
                    "Non-zero entry indices exceed input dimension of union pooler. "
                    "Expecting %s but got %s" % (self._numInputs, peak))
            active_indices = inputVector
    else:
        raise TypeError("Unsuported input types")

    if len(outputVector) != self._numInputs:
        raise ValueError(
            "Output vector dimension does match dimension of union pooler "
            "Expecting %s but got %s" % (self._numInputs, len(outputVector)))

    union_sdr = self.updateHistory(active_indices, forceOutput)
    numpy.copyto(outputVector, union_sdr, casting="unsafe")
[ "def", "unionIntoArray", "(", "self", ",", "inputVector", ",", "outputVector", ",", "forceOutput", "=", "False", ")", ":", "if", "isinstance", "(", "inputVector", ",", "numpy", ".", "ndarray", ")", ":", "if", "inputVector", ".", "size", "==", "self", ".", ...
40.275
20.725
def exons(self, contig=None, strand=None):
    """Return Exon objects for every exon in the database.

    Optionally restrict to a particular chromosome/strand via the
    *contig* and *strand* arguments.
    """
    # exon_ids yields the identifiers; hydrate each into a full object.
    return [self.exon_by_id(exon_id)
            for exon_id in self.exon_ids(contig=contig, strand=strand)]
[ "def", "exons", "(", "self", ",", "contig", "=", "None", ",", "strand", "=", "None", ")", ":", "# DataFrame with single column called \"exon_id\"", "exon_ids", "=", "self", ".", "exon_ids", "(", "contig", "=", "contig", ",", "strand", "=", "strand", ")", "re...
37.909091
14.818182
def disable_logger(logger_name: str, propagate: bool = False):
    """Disable output for the logger of the specified name.

    Removes every attached handler and sets ``propagate`` (default
    False) so records neither reach old handlers nor bubble up to
    ancestor loggers.

    :param logger_name: name of the logger to silence.
    :param propagate: value assigned to ``log.propagate``.
    """
    log = logging.getLogger(logger_name)
    log.propagate = propagate
    # Iterate over a copy: removing from log.handlers while iterating
    # it directly would skip every other handler.
    for handler in list(log.handlers):
        log.removeHandler(handler)
[ "def", "disable_logger", "(", "logger_name", ":", "str", ",", "propagate", ":", "bool", "=", "False", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "logger_name", ")", "log", ".", "propagate", "=", "propagate", "for", "handler", "in", "log", "....
43.166667
7.833333
def get_all_values_of_all_params(self):
    """Return every value taken by every available parameter.

    The result is an OrderedDict keyed by parameter name in
    alphabetical order; each value is the sorted list of distinct
    values seen across all results, or None for a parameter with no
    recorded values.
    """
    param_names = sorted(self.get_params())
    collected = collections.OrderedDict((p, []) for p in param_names)
    for result in self.get_results():
        for param in self.get_params():
            collected[param].append(result['params'][param])
    deduped = collections.OrderedDict(
        (name, sorted(set(vals))) for name, vals in collected.items())
    for name in deduped:
        if not deduped[name]:
            deduped[name] = None
    return deduped
[ "def", "get_all_values_of_all_params", "(", "self", ")", ":", "values", "=", "collections", ".", "OrderedDict", "(", "[", "[", "p", ",", "[", "]", "]", "for", "p", "in", "sorted", "(", "self", ".", "get_params", "(", ")", ")", "]", ")", "for", "resul...
35
19.833333
def birthdays_subcommand(vcard_list, parsable): """Print birthday contact table. :param vcard_list: the vcards to search for matching entries which should be printed :type vcard_list: list of carddav_object.CarddavObject :param parsable: machine readable output: columns devided by tabulator (\t) :type parsable: bool :returns: None :rtype: None """ # filter out contacts without a birthday date vcard_list = [ vcard for vcard in vcard_list if vcard.get_birthday() is not None] # sort by date (month and day) # The sort function should work for strings and datetime objects. All # strings will besorted before any datetime objects. vcard_list.sort( key=lambda x: (x.get_birthday().month, x.get_birthday().day) if isinstance(x.get_birthday(), datetime.datetime) else (0, 0, x.get_birthday())) # add to string list birthday_list = [] for vcard in vcard_list: date = vcard.get_birthday() if parsable: if config.display_by_name() == "first_name": birthday_list.append("%04d.%02d.%02d\t%s" % (date.year, date.month, date.day, vcard.get_first_name_last_name())) else: birthday_list.append("%04d.%02d.%02d\t%s" % (date.year, date.month, date.day, vcard.get_last_name_first_name())) else: if config.display_by_name() == "first_name": birthday_list.append("%s\t%s" % (vcard.get_first_name_last_name(), vcard.get_formatted_birthday())) else: birthday_list.append("%s\t%s" % (vcard.get_last_name_first_name(), vcard.get_formatted_birthday())) if birthday_list: if parsable: print('\n'.join(birthday_list)) else: list_birthdays(birthday_list) else: if not parsable: print("Found no birthdays") sys.exit(1)
[ "def", "birthdays_subcommand", "(", "vcard_list", ",", "parsable", ")", ":", "# filter out contacts without a birthday date", "vcard_list", "=", "[", "vcard", "for", "vcard", "in", "vcard_list", "if", "vcard", ".", "get_birthday", "(", ")", "is", "not", "None", "]...
40.981132
20.283019
def start_blocking(self):
    """Start the advertiser in the background and block until ready.

    Clears the 'started' event, kicks off the background start, then
    waits for the event to be set by the worker.
    """
    self._cav_started.clear()
    self.start()
    self._cav_started.wait()
[ "def", "start_blocking", "(", "self", ")", ":", "self", ".", "_cav_started", ".", "clear", "(", ")", "self", ".", "start", "(", ")", "self", ".", "_cav_started", ".", "wait", "(", ")" ]
32
15
def parse_unit(expression):
    """Evaluate a python expression string containing constants.

    Argument:
     | ``expression`` -- A string containing a numerical expression,
       including unit conversions.  In addition to the variables in
       this module, the shorthands from ``shorthands`` are supported.

    :return: the value of the expression as a float.
    :raises ValueError: when the expression cannot be interpreted.
    """
    # NOTE(review): eval() executes arbitrary code; this must only be
    # called with trusted input.
    try:
        namespace = globals()
        namespace.update(shorthands)
        return float(eval(str(expression), namespace))
    except Exception:
        # Narrowed from a bare 'except' so KeyboardInterrupt and
        # SystemExit still propagate.
        raise ValueError(
            "Could not interpret '%s' as a unit or a measure." % expression)
[ "def", "parse_unit", "(", "expression", ")", ":", "try", ":", "g", "=", "globals", "(", ")", "g", ".", "update", "(", "shorthands", ")", "return", "float", "(", "eval", "(", "str", "(", "expression", ")", ",", "g", ")", ")", "except", ":", "raise",...
32.235294
23.058824
def acknowledge_svc_problem(self, service, sticky, notify, author, comment):
    """Acknowledge a service problem

    Format of the line that triggers function call::

        ACKNOWLEDGE_SVC_PROBLEM;<host_name>;<service_description>;<sticky>;<notify>;
        <persistent:obsolete>;<author>;<comment>

    :param service: service to acknowledge the problem
    :type service: alignak.objects.service.Service
    :param sticky: if sticky == 2, the acknowledge will remain until the
        service returns to an OK state, else the acknowledge will be
        removed as soon as the service state changes
    :param notify: if set to 1, send a notification
    :type notify: integer
    :param author: name of the author of the acknowledge
    :type author: str
    :param comment: comment (description) of the acknowledge
    :type comment: str
    :return: None
    """
    period = None
    configured_period = getattr(service, 'notification_period', None)
    if configured_period is not None:
        # Resolve the timeperiod name to the daemon's timeperiod object.
        period = self.daemon.timeperiods[configured_period]
    service.acknowledge_problem(
        period, self.hosts, self.services, sticky, notify, author, comment)
[ "def", "acknowledge_svc_problem", "(", "self", ",", "service", ",", "sticky", ",", "notify", ",", "author", ",", "comment", ")", ":", "notification_period", "=", "None", "if", "getattr", "(", "service", ",", "'notification_period'", ",", "None", ")", "is", "...
51.291667
24.583333
def final_spin_from_f0_tau(f0, tau, l=2, m=2):
    """Return the final spin implied by a QNM frequency and damping time.

    .. note::
        Currently, only l = m = 2 is supported.  Any other indices will
        raise a ``KeyError``.

    Parameters
    ----------
    f0 : float or array
        Frequency of the QNM (in Hz).
    tau : float or array
        Damping time of the QNM (in seconds).
    l : int, optional
        l-index of the harmonic. Default is 2.
    m : int, optional
        m-index of the harmonic. Default is 2.

    Returns
    -------
    float or array
        The spin of the final black hole.  If the combination of
        frequency and damping time is unphysical, ``numpy.nan`` is
        returned for that entry.
    """
    f0, tau, input_is_array = ensurearray(f0, tau)
    # Fit coefficients from Berti et al. 2006.
    a, b, c = _berti_spin_constants[l, m]
    original_shape = f0.shape
    # Work on flat views so the loop below is one-dimensional.
    f0 = f0.ravel()
    tau = tau.ravel()
    spins = numpy.zeros(f0.size)
    for idx in range(spins.size):
        quality = f0[idx] * tau[idx] * numpy.pi
        try:
            spin = 1. - ((quality - a) / b) ** (1. / c)
        except ValueError:
            # Inverting the fit failed => unphysical input.
            spin = numpy.nan
        spins[idx] = spin
    return formatreturn(spins.reshape(original_shape), input_is_array)
[ "def", "final_spin_from_f0_tau", "(", "f0", ",", "tau", ",", "l", "=", "2", ",", "m", "=", "2", ")", ":", "f0", ",", "tau", ",", "input_is_array", "=", "ensurearray", "(", "f0", ",", "tau", ")", "# from Berti et al. 2006", "a", ",", "b", ",", "c", ...
30.119048
16.714286
def addSubparser(subparsers, subcommand, description):
    """Register *subcommand* on *subparsers* and return its parser.

    The description doubles as the help text shown in the command list.
    """
    return subparsers.add_parser(
        subcommand, description=description, help=description)
[ "def", "addSubparser", "(", "subparsers", ",", "subcommand", ",", "description", ")", ":", "parser", "=", "subparsers", ".", "add_parser", "(", "subcommand", ",", "description", "=", "description", ",", "help", "=", "description", ")", "return", "parser" ]
34.571429
12
def create_namespaced_cron_job(self, namespace, body, **kwargs):
    """create a CronJob

    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request that returns the request thread::

        >>> thread = api.create_namespaced_cron_job(namespace, body, async_req=True)
        >>> result = thread.get()

    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams
        and projects (required)
    :param V2alpha1CronJob body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications
        should not be persisted.
    :param str field_manager: fieldManager is a name associated with
        the actor or entity that is making these changes.
    :return: V2alpha1CronJob; if called asynchronously, returns the
        request thread.
    """
    # This wrapper always wants only the deserialised body back.
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths delegate identically: the async
    # variant simply returns the request thread instead of the data.
    return self.create_namespaced_cron_job_with_http_info(
        namespace, body, **kwargs)
[ "def", "create_namespaced_cron_job", "(", "self", ",", "namespace", ",", "body", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", "."...
66.25
39.583333
def make_qemu_dirs(max_qemu_id, output_dir, topology_name):
    """Create Qemu VM working directories if required

    :param int max_qemu_id: Number of directories to create
        (no-op when None)
    :param str output_dir: Output directory
    :param str topology_name: Topology name
    """
    if max_qemu_id is None:
        return
    base = os.path.join(output_dir, topology_name + '-files', 'qemu')
    for vm_id in range(1, max_qemu_id + 1):
        os.makedirs(os.path.join(base, 'vm-%s' % vm_id))
[ "def", "make_qemu_dirs", "(", "max_qemu_id", ",", "output_dir", ",", "topology_name", ")", ":", "if", "max_qemu_id", "is", "not", "None", ":", "for", "i", "in", "range", "(", "1", ",", "max_qemu_id", "+", "1", ")", ":", "qemu_dir", "=", "os", ".", "pat...
38.769231
12.461538
def section(self, section, skip=('type', 'order')):
    """Return the items of *section*, omitting the keys in *skip*.

    :param section: name of the config section to read.
    :param skip: iterable of keys to leave out (type/order by default).
        A tuple default replaces the original mutable-list default,
        avoiding the shared-mutable-default pitfall; membership tests
        behave identically.
    :return: list of (key, value) pairs.
    """
    return [(key, value)
            for key, value in self.parser.items(section)
            if key not in skip]
[ "def", "section", "(", "self", ",", "section", ",", "skip", "=", "[", "'type'", ",", "'order'", "]", ")", ":", "return", "[", "(", "key", ",", "val", ")", "for", "key", ",", "val", "in", "self", ".", "parser", ".", "items", "(", "section", ")", ...
57.5
11.25
def parse_info(wininfo_name, egginfo_name):
    """Extract metadata from filenames.

    Extracts the 4 metadata items needed (name, version, pyversion,
    arch) from the installer filename and from the name of the egg-info
    directory embedded in the zipfile (if any).

    The egginfo filename has the format::

        name-ver(-pyver)(-arch).egg-info

    The installer filename has the format::

        name-ver.arch(-pyver).exe

    Notes:

    1. The installer filename is not definitive — an installer can be
       renamed and still work — so more reliable data is preferred
       whenever available.
    2. The egg-info data is preferred for name and version, because it
       comes straight from the distutils metadata and is mandatory.
    3. The pyver in the egg-info data is ignored: it reflects the
       Python used to *build* the installer.  The installer filename is
       authoritative here (absence implies any version).
    4. The architecture must come from the installer filename; it is
       not present in the egg-info data.
    5. Architecture-neutral installers still carry an architecture
       because the installer format itself is architecture-specific,
       so the architecture is ignored for pure-python content.
    """
    egginfo = None
    if egginfo_name:
        egginfo = egg_info_re.search(egginfo_name)
        if not egginfo:
            raise ValueError("Egg info filename %s is not valid" % (egginfo_name,))

    # Installer filename, part 1: distribution name up to the first '-'.
    w_name, dash, remainder = wininfo_name.partition('-')
    if not dash:
        raise ValueError("Installer filename %s is not valid" % (wininfo_name,))

    # Drop the trailing '.exe'.
    remainder = remainder[:-4]
    # Part 2: python version from the last '-'; it must start with 'py'.
    head, dash, w_pyver = remainder.rpartition('-')
    if dash and w_pyver.startswith('py'):
        remainder = head
        w_pyver = w_pyver.replace('.', '')
    else:
        # Not version specific — assume compatibility with both Python 2
        # and 3 (the wininst format cannot express otherwise; a user can
        # always rename the wheel to be more restrictive if needed).
        w_pyver = 'py2.py3'
    # Part 3: version and architecture, split on the last '.'.
    w_ver, dot, w_arch = remainder.rpartition('.')
    if not dot:
        raise ValueError("Installer filename %s is not valid" % (wininfo_name,))

    if egginfo:
        # Egg-info metadata wins for name and version (see notes above).
        w_name = egginfo.group('name')
        w_ver = egginfo.group('ver')

    return dict(name=w_name, ver=w_ver, arch=w_arch, pyver=w_pyver)
[ "def", "parse_info", "(", "wininfo_name", ",", "egginfo_name", ")", ":", "egginfo", "=", "None", "if", "egginfo_name", ":", "egginfo", "=", "egg_info_re", ".", "search", "(", "egginfo_name", ")", "if", "not", "egginfo", ":", "raise", "ValueError", "(", "\"Eg...
39.957746
22.098592
def connect_cloudformation(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
    """Open a connection to Amazon's CloudFormation service.

    :type aws_access_key_id: string
    :param aws_access_key_id: Your AWS Access Key ID
    :type aws_secret_access_key: string
    :param aws_secret_access_key: Your AWS Secret Access Key
    :rtype: :class:`boto.cloudformation.CloudFormationConnection`
    :return: A connection to Amazon's CloudFormation Service
    """
    # Imported lazily so merely importing this module does not pull in
    # the cloudformation package.
    from boto.cloudformation import CloudFormationConnection
    return CloudFormationConnection(
        aws_access_key_id, aws_secret_access_key, **kwargs)
[ "def", "connect_cloudformation", "(", "aws_access_key_id", "=", "None", ",", "aws_secret_access_key", "=", "None", ",", "*", "*", "kwargs", ")", ":", "from", "boto", ".", "cloudformation", "import", "CloudFormationConnection", "return", "CloudFormationConnection", "("...
43.153846
21.461538
def _auth_headers(self):
    """Headers required to authenticate a request.

    Assumes the ``Context`` already holds an authentication token,
    either provided explicitly or obtained by logging into the Splunk
    instance.

    :returns: A list of 2-tuples containing key and value
    """
    if self.token is _NoAuthenticationToken:
        return []
    # Prefix with the 'Splunk ' scheme unless it is already present.
    if self.token.startswith('Splunk '):
        header_value = self.token
    else:
        header_value = 'Splunk %s' % self.token
    return [("Authorization", header_value)]
[ "def", "_auth_headers", "(", "self", ")", ":", "if", "self", ".", "token", "is", "_NoAuthenticationToken", ":", "return", "[", "]", "else", ":", "# Ensure the token is properly formatted", "if", "self", ".", "token", ".", "startswith", "(", "'Splunk '", ")", "...
35.222222
16.833333