text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def add_tmp(self, value):
    """Store *value* under a freshly generated temporary name in this scope.

    Parameters
    ----------
    value : object
        An arbitrary object to be assigned to a temporary variable.

    Returns
    -------
    name : basestring
        The name of the temporary variable created.
    """
    # The name combines the value's type, the running temp count and a hex
    # id derived from this scope, making accidental collisions implausible.
    tmp_name = '{name}_{num}_{hex_id}'.format(
        name=type(value).__name__,
        num=self.ntemps,
        hex_id=_raw_hex_id(self),
    )

    # add to inner most scope
    assert tmp_name not in self.temps
    self.temps[tmp_name] = value
    assert tmp_name in self.temps

    # only increment if the variable gets put in the scope
    return tmp_name
[ "def", "add_tmp", "(", "self", ",", "value", ")", ":", "name", "=", "'{name}_{num}_{hex_id}'", ".", "format", "(", "name", "=", "type", "(", "value", ")", ".", "__name__", ",", "num", "=", "self", ".", "ntemps", ",", "hex_id", "=", "_raw_hex_id", "(", ...
31.041667
20.875
def add_layer3_vlan_interface(self, interface_id, vlan_id, address=None,
                              network_value=None, virtual_mapping=None,
                              virtual_resource_name=None, zone_ref=None,
                              comment=None, **kw):
    """
    Add a Layer 3 VLAN interface. Optionally specify an address and network
    if assigning an IP to the VLAN. This method will also assign an IP
    address to an existing VLAN, or add an additional address to an existing
    VLAN. This method may commonly be used on a Master Engine to create
    VLANs for virtual firewall engines.

    Example of creating a VLAN and passing kwargs to define a DHCP server
    service on the VLAN interface::

        engine = Engine('engine1')
        engine.physical_interface.add_layer3_vlan_interface(
            interface_id=20, vlan_id=20,
            address='20.20.20.20', network_value='20.20.20.0/24',
            comment='foocomment',
            dhcp_server_on_interface={
                'default_gateway': '20.20.20.1',
                'default_lease_time': 7200,
                'dhcp_address_range': '20.20.20.101-20.20.20.120',
                'dhcp_range_per_node': [],
                'primary_dns_server': '8.8.8.8'})

    :param str,int interface_id: interface identifier
    :param int vlan_id: vlan identifier
    :param str address: optional IP address to assign to VLAN
    :param str network_value: network cidr if address is specified. In
        format: 10.10.10.0/24.
    :param str zone_ref: zone to use, by name, href, or Zone
    :param str comment: optional comment for VLAN level of interface
    :param int virtual_mapping: virtual engine mapping id
        See :class:`smc.core.engine.VirtualResource.vfw_id`
    :param str virtual_resource_name: name of virtual resource
        See :class:`smc.core.engine.VirtualResource.name`
    :param dict kw: keyword arguments are passed to top level of VLAN
        interface, not the base level physical interface. This is useful if
        you want to pass in a configuration that enables the DHCP server on
        a VLAN for example.
    :raises EngineCommandFailed: failure creating interface
    :return: None
    """
    # Only include a node (address) entry when both halves are provided;
    # otherwise the VLAN is created without an assigned IP.
    interfaces = {'nodes': [{'address': address, 'network_value': network_value}]
                  if address and network_value else [],
                  'zone_ref': zone_ref, 'virtual_mapping': virtual_mapping,
                  'virtual_resource_name': virtual_resource_name,
                  'comment': comment}
    # Extra kwargs (e.g. dhcp_server_on_interface) land on the VLAN level.
    interfaces.update(**kw)
    _interface = {'interface_id': interface_id, 'interfaces': [interfaces]}
    if 'single_fw' in self._engine.type:  # L2FW / IPS
        _interface.update(interface='single_node_interface')
    try:
        interface = self._engine.interface.get(interface_id)
        vlan = interface.vlan_interface.get(vlan_id)
        # Interface exists, so we need to update but check if VLAN already exists
        if vlan is None:
            # New VLAN on an existing physical interface.
            interfaces.update(vlan_id=vlan_id)
            interface._add_interface(**_interface)
        else:
            # VLAN exists: address it as "<interface>.<vlan>" and update it.
            _interface.update(interface_id='{}.{}'.format(interface_id, vlan_id))
            vlan._add_interface(**_interface)
        return interface.update()
    except InterfaceNotFound:
        # Physical interface does not exist yet: create it with the VLAN.
        interfaces.update(vlan_id=vlan_id)
        interface = Layer3PhysicalInterface(**_interface)
        return self._engine.add_interface(interface)
[ "def", "add_layer3_vlan_interface", "(", "self", ",", "interface_id", ",", "vlan_id", ",", "address", "=", "None", ",", "network_value", "=", "None", ",", "virtual_mapping", "=", "None", ",", "virtual_resource_name", "=", "None", ",", "zone_ref", "=", "None", ...
53.707692
24.507692
def processMeme(imgParams):
    '''
    Wrapper function for genMeme() and findMeme()

    imgParams may be a string of the following forms:
        * 'text0 | text1'
        * 'text0'
        * ' | text1'

    Fails gracefully when it can't find or generate a meme by returning an
    appropriate image url with the failure message on it.
    '''
    template_id = findMeme(imgParams)
    if template_id is None:
        print("Couldn't find a suitable match for meme :(")
        return meme_not_supported

    # if template_id exists
    parts = imgParams.split('|')
    if len(parts) > 2:
        print("Too many lines of captions! Cannot create meme.")
        return too_many_lines

    # FIX: str.split() always returns at least one element, so the original
    # "too few lines" branch (len < 1) was unreachable and has been removed.
    text0 = parts[0]
    text1 = parts[1] if len(parts) == 2 else ''  # no bottom text when absent

    imgURL = genMeme(template_id, text0, text1)
    if imgURL is None:
        # Couldn't generate meme
        print("Couldn't generate meme :(")
        return couldnt_create_meme
    return imgURL
[ "def", "processMeme", "(", "imgParams", ")", ":", "template_id", "=", "findMeme", "(", "imgParams", ")", "if", "template_id", "is", "None", ":", "print", "(", "\"Couldn't find a suitable match for meme :(\"", ")", "return", "meme_not_supported", "# if template_id exists...
29.76087
20.369565
def get_market_history(self, market):
    """Retrieve the latest trades that have occurred for a specific market.

    Endpoint:
    1.1 /market/getmarkethistory
    2.0 NO Equivalent

    :param market: String literal for the market (ex: BTC-LTC)
    :type market: str
    :return: Market history in JSON
    :rtype : dict
    """
    # Both the v1.1 'market' and v2.0-style 'marketname' keys are supplied.
    paths = {API_V1_1: '/public/getmarkethistory', }
    opts = {'market': market, 'marketname': market}
    return self._api_query(path_dict=paths, options=opts,
                           protection=PROTECTION_PUB)
[ "def", "get_market_history", "(", "self", ",", "market", ")", ":", "return", "self", ".", "_api_query", "(", "path_dict", "=", "{", "API_V1_1", ":", "'/public/getmarkethistory'", ",", "}", ",", "options", "=", "{", "'market'", ":", "market", ",", "'marketnam...
33.064516
15.516129
def bucket_type_key(bucket_type):
    """
    Register a function that calculates the test item key for the
    specified bucket type.

    The registered function's result is additionally piped through every
    ``random_order_bucket_type_key_handlers`` callback on the session.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapped(item, session):
            result = func(item)
            if session is not None:
                for handler in session.random_order_bucket_type_key_handlers:
                    result = handler(item, result)
            return result
        bucket_type_keys[bucket_type] = wrapped
        return wrapped
    return decorator
[ "def", "bucket_type_key", "(", "bucket_type", ")", ":", "def", "decorator", "(", "f", ")", ":", "@", "functools", ".", "wraps", "(", "f", ")", "def", "wrapped", "(", "item", ",", "session", ")", ":", "key", "=", "f", "(", "item", ")", "if", "sessio...
24.142857
21.571429
def _generate_sdss_object_name(self):
    """
    *generate sdss object names for the results*

    Populates ``row["sdss_name"]`` for every row in ``self.results`` and
    converts the numeric ``row["type"]`` code into its word form.

    **Return:**
        - None
    """
    self.log.info('starting the ``_generate_sdss_object_name`` method')

    converter = unit_conversion(
        log=self.log
    )

    # FIX: the type-code lookup table is constant, so it is built once here
    # instead of being rebuilt on every loop iteration; the old
    # ``numberType`` list was never read and has been removed.
    wordType = ["unknown", "cosmic_ray", "defect", "galaxy", "ghost",
                "knownobj", "star", "trail", "sky", "notatype", ]

    # Names should be of the format `SDSS JHHMMSS.ss+DDMMSS.s`
    # where the coordinates are truncated, not rounded.
    for row in self.results:
        raSex = converter.ra_decimal_to_sexegesimal(
            ra=row["ra"],
            delimiter=":"
        )
        decSex = converter.dec_decimal_to_sexegesimal(
            dec=row["dec"],
            delimiter=":"
        )
        # Truncate (not round) to the SDSS naming precision.
        raSex = raSex.replace(":", "")[:9]
        decSex = decSex.replace(":", "")[:9]
        sdssName = "SDSS J%(raSex)s%(decSex)s" % locals()
        row["sdss_name"] = sdssName
        row["type"] = wordType[row["type"]]

    self.log.info('completed the ``_generate_sdss_object_name`` method')
    return None
[ "def", "_generate_sdss_object_name", "(", "self", ")", ":", "self", ".", "log", ".", "info", "(", "'starting the ``_generate_sdss_object_name`` method'", ")", "converter", "=", "unit_conversion", "(", "log", "=", "self", ".", "log", ")", "# Names should be of the form...
29.954545
21.863636
def pop_state(self, idx=None):
    """
    Discard the most recently pushed state.

    :param idx: If provided, specifies the index at which the next
        string begins; ``str_begin`` is updated to it.
    """
    self.state.pop()
    if idx is None:
        return
    self.str_begin = idx
[ "def", "pop_state", "(", "self", ",", "idx", "=", "None", ")", ":", "self", ".", "state", ".", "pop", "(", ")", "if", "idx", "is", "not", "None", ":", "self", ".", "str_begin", "=", "idx" ]
23.166667
17
def shortcut(*names):
    """Add a shortcut (alias) to a decorated function, but not to class
    methods!  Use aliased/alias decorators for class members!

    Calling the shortcut (alias) will call the decorated function. The
    shortcut name will be appended to the module's __all__ variable and the
    shortcut function will inherit the function's docstring.

    Examples
    --------
    In some module you have defined a function

    >>> @shortcut('is_tmatrix') # doctest: +SKIP
    >>> def is_transition_matrix(args): # doctest: +SKIP
    ...     pass # doctest: +SKIP

    Now you are able to call the function under its short name

    >>> is_tmatrix(args) # doctest: +SKIP
    """
    def wrap(func):
        module_ns = func.__globals__
        for alias in names:
            # Bind the alias directly in the defining module's namespace.
            module_ns[alias] = func
            if '__all__' in module_ns and alias not in module_ns['__all__']:
                module_ns['__all__'].append(alias)
        return func
    return wrap
[ "def", "shortcut", "(", "*", "names", ")", ":", "def", "wrap", "(", "f", ")", ":", "globals_", "=", "f", ".", "__globals__", "for", "name", "in", "names", ":", "globals_", "[", "name", "]", "=", "f", "if", "'__all__'", "in", "globals_", "and", "nam...
35.653846
22.038462
def _create_chord_chart(self, data, works, output_dir):
    """Generates and writes to a file in `output_dir` the data used to
    display a chord chart.

    :param data: data to derive the chord data from
    :type data: `pandas.DataFrame`
    :param works: works to display
    :type works: `list`
    :param output_dir: directory to output data file to
    :type output_dir: `str`
    """
    matrix = []
    # Pivot so each base work becomes a column of SHARED values; missing
    # pairings become 0 rather than NaN.
    chord_data = data.unstack(BASE_WORK)[SHARED]
    for index, row_data in chord_data.fillna(value=0).iterrows():
        # The chord layout expects fractions; the data holds percentages.
        matrix.append([value / 100 for value in row_data])
    colours = generate_colours(len(works))
    # NOTE(review): colours are zipped against chord_data's column order,
    # not against `works` directly — assumes both iterate in the same
    # order; verify against the caller.
    colour_works = [{'work': work, 'colour': colour} for work, colour
                    in zip(chord_data, colours)]
    json_data = json.dumps({'works': colour_works, 'matrix': matrix})
    with open(os.path.join(output_dir, 'chord_data.js'), 'w') as fh:
        fh.write('var chordData = {}'.format(json_data))
[ "def", "_create_chord_chart", "(", "self", ",", "data", ",", "works", ",", "output_dir", ")", ":", "matrix", "=", "[", "]", "chord_data", "=", "data", ".", "unstack", "(", "BASE_WORK", ")", "[", "SHARED", "]", "for", "index", ",", "row_data", "in", "ch...
45.272727
17.5
def write(self, file_or_filename, prog=None, format='xdot'):
    """ Writes the case data in Graphviz DOT language.

    The format 'raw' is used to dump the Dot representation of the Case
    object, without further processing. The output can be processed by any
    of graphviz tools, defined in 'prog'.

    @param file_or_filename: open file-like object, or a path to open.
    @param prog: graphviz program used to process the raw DOT text;
        if None the raw text is written unprocessed.
    @param format: output format handed to the graphviz program.
    @return: the file object that was written to (None if opening the
        named file failed).
    """
    if prog is None:
        file = super(DotWriter, self).write(file_or_filename)
    else:
        # Render to an in-memory buffer first, then post-process with the
        # requested graphviz program.
        buf = StringIO.StringIO()
        super(DotWriter, self).write(buf)
        buf.seek(0)
        data = self.create(buf.getvalue(), prog, format)

        if isinstance(file_or_filename, basestring):
            file = None
            try:
                file = open(file_or_filename, "wb")
            # FIX: was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit; only I/O errors are expected.
            except EnvironmentError:
                logger.error("Error opening %s." % file_or_filename)
            finally:
                if file is not None:
                    file.write(data)
                    file.close()
        else:
            file = file_or_filename
            file.write(data)
    return file
[ "def", "write", "(", "self", ",", "file_or_filename", ",", "prog", "=", "None", ",", "format", "=", "'xdot'", ")", ":", "if", "prog", "is", "None", ":", "file", "=", "super", "(", "DotWriter", ",", "self", ")", ".", "write", "(", "file_or_filename", ...
36.466667
17.233333
def init_get(self, request):
    """
    Initialize the :class:`LogoutView` attributes on GET request

    :param django.http.HttpRequest request: The current request object
    """
    self.request = request
    params = request.GET
    self.service = params.get('service')
    self.url = params.get('url')
    # AJAX handling must be enabled in settings AND signalled by the client.
    ajax_enabled = settings.CAS_ENABLE_AJAX_AUTH
    self.ajax = ajax_enabled and 'HTTP_X_AJAX' in request.META
[ "def", "init_get", "(", "self", ",", "request", ")", ":", "self", ".", "request", "=", "request", "self", ".", "service", "=", "request", ".", "GET", ".", "get", "(", "'service'", ")", "self", ".", "url", "=", "request", ".", "GET", ".", "get", "("...
40.3
18.5
def validate(args):
    """
    cldf validate <DATASET>

    Validate a dataset against the CLDF specification, i.e. check
    - whether required tables and columns are present
    - whether values for required columns are present
    - the referential integrity of the dataset
    """
    dataset = _get_dataset(args)
    dataset.validate(log=args.log)
[ "def", "validate", "(", "args", ")", ":", "ds", "=", "_get_dataset", "(", "args", ")", "ds", ".", "validate", "(", "log", "=", "args", ".", "log", ")" ]
30.272727
14.090909
def _writeFeatures(self, i, image):
    """
    Write a text file containing the features as a table.

    @param i: The number of the image in self._images.
    @param image: A member of self._images.
    @return: The C{str} features file name - just the base name, not
        including the path to the file.
    """
    basename = 'features-%d.txt' % i
    filename = '%s/%s' % (self._outputDir, basename)
    # One feature per paragraph (blank-line separated).
    with open(filename, 'w') as fp:
        for feature in image['graphInfo']['features']:
            fp.write('%s\n\n' % feature.feature)
    return basename
[ "def", "_writeFeatures", "(", "self", ",", "i", ",", "image", ")", ":", "basename", "=", "'features-%d.txt'", "%", "i", "filename", "=", "'%s/%s'", "%", "(", "self", ".", "_outputDir", ",", "basename", ")", "featureList", "=", "image", "[", "'graphInfo'", ...
39.9375
11.5625
def get(self, request, pk):
    """
    If the user requests his profile return it, else return a 403
    (Forbidden)
    """
    requested_profile = Profile.objects.get(user=pk)
    if requested_profile.user != self.request.user:
        return HttpResponse(status=403)
    return render(request, self.template_name,
                  {'profile': requested_profile})
[ "def", "get", "(", "self", ",", "request", ",", "pk", ")", ":", "requested_profile", "=", "Profile", ".", "objects", ".", "get", "(", "user", "=", "pk", ")", "if", "requested_profile", ".", "user", "==", "self", ".", "request", ".", "user", ":", "ret...
33
21.923077
def _grid_widgets(self):
    """Put the widgets in the correct position based on self.__compound.

    The scale occupies the centre cell of a 5x5 grid; the entry is placed
    above/below/left/right of it according to ``self.__compound``, and the
    stretchable rows/columns follow the scale's orientation.
    """
    orient = str(self._scale.cget('orient'))
    compound = self.__compound
    # FIX: the original tested the compound position with ``is``.  The tk
    # side constants (tk.TOP/BOTTOM/LEFT/RIGHT) are plain strings, so
    # identity only held via CPython string interning; ``==`` is correct.
    self._scale.grid(
        row=2, column=2,
        sticky='ew' if orient == tk.HORIZONTAL else 'ns',
        padx=(0, self.__entryscalepad) if compound == tk.RIGHT else
             (self.__entryscalepad, 0) if compound == tk.LEFT else 0,
        pady=(0, self.__entryscalepad) if compound == tk.BOTTOM else
             (self.__entryscalepad, 0) if compound == tk.TOP else 0)
    self._entry.grid(
        row=1 if compound == tk.TOP else 3 if compound == tk.BOTTOM else 2,
        column=1 if compound == tk.LEFT else 3 if compound == tk.RIGHT else 2)
    if orient == tk.HORIZONTAL:
        # Horizontal: the scale's column absorbs the stretch; centre rows.
        self.columnconfigure(0, weight=0)
        self.columnconfigure(2, weight=1)
        self.columnconfigure(4, weight=0)
        self.rowconfigure(0, weight=1)
        self.rowconfigure(2, weight=0)
        self.rowconfigure(4, weight=1)
    else:
        # Vertical: mirror of the above.
        self.rowconfigure(0, weight=0)
        self.rowconfigure(2, weight=1)
        self.rowconfigure(4, weight=0)
        self.columnconfigure(0, weight=1)
        self.columnconfigure(2, weight=0)
        self.columnconfigure(4, weight=1)
[ "def", "_grid_widgets", "(", "self", ")", ":", "orient", "=", "str", "(", "self", ".", "_scale", ".", "cget", "(", "'orient'", ")", ")", "self", ".", "_scale", ".", "grid", "(", "row", "=", "2", ",", "column", "=", "2", ",", "sticky", "=", "'ew'"...
53.653846
22
def attached_to_model(self):
    """Tells if the current index is the one attached to the model field,
    not instance field"""
    # A missing or falsy ``model`` means not attached at all.
    try:
        has_model = bool(self.model)
    except AttributeError:
        return False
    if not has_model:
        return False
    # Attached to the model field only when no instance is set.
    try:
        return not bool(self.instance)
    except AttributeError:
        return True
[ "def", "attached_to_model", "(", "self", ")", ":", "try", ":", "if", "not", "bool", "(", "self", ".", "model", ")", ":", "return", "False", "except", "AttributeError", ":", "return", "False", "else", ":", "try", ":", "return", "not", "bool", "(", "self...
32.75
13.166667
def _get_non_empty_list(cls, iter):
    """Return a list of the input, excluding all ``None`` values."""
    result = []
    for item in iter:
        # Mappings are recursively cleaned first; an emptied mapping
        # collapses to None and is dropped like any other None.
        if hasattr(item, 'items'):
            item = cls._get_non_empty_dict(item) or None
        if item is not None:
            result.append(item)
    return result
[ "def", "_get_non_empty_list", "(", "cls", ",", "iter", ")", ":", "res", "=", "[", "]", "for", "value", "in", "iter", ":", "if", "hasattr", "(", "value", ",", "'items'", ")", ":", "value", "=", "cls", ".", "_get_non_empty_dict", "(", "value", ")", "or...
37.111111
11.333333
def attachments_ping(request): """Heartbeat view for the attachments backend. :returns: ``True`` if succeeds to write and delete, ``False`` otherwise. """ # Do nothing if server is readonly. if asbool(request.registry.settings.get('readonly', False)): return True # We will fake a file upload, so pick a file extension that is allowed. extensions = request.attachment.extensions or {'json'} allowed_extension = "." + list(extensions)[-1] status = False try: content = cgi.FieldStorage() content.filename = HEARTBEAT_FILENAME + allowed_extension content.file = BytesIO(HEARTBEAT_CONTENT.encode('utf-8')) content.type = 'application/octet-stream' stored = utils.save_file(request, content, keep_link=False, replace=True) relative_location = stored['location'].replace(request.attachment.base_url, '') request.attachment.delete(relative_location) status = True except Exception as e: logger.exception(e) return status
[ "def", "attachments_ping", "(", "request", ")", ":", "# Do nothing if server is readonly.", "if", "asbool", "(", "request", ".", "registry", ".", "settings", ".", "get", "(", "'readonly'", ",", "False", ")", ")", ":", "return", "True", "# We will fake a file uploa...
36.428571
22.892857
def default_get_arg_names_from_class_name(class_name):
    """Converts normal class names into normal arg names.

    Normal class names are assumed to be CamelCase with an optional leading
    underscore.  Normal arg names are assumed to be lower_with_underscores.

    Args:
      class_name: a class name, e.g., "FooBar" or "_FooBar"
    Returns:
      all likely corresponding arg names, e.g., ["foo_bar"]
    """
    rest = class_name
    if rest.startswith('_'):
        rest = rest[1:]
    parts = []
    # Peel CamelCase words off the front.  The match is anchored: parsing
    # stops at the first run that is not Upper-then-lower, so e.g. an
    # embedded acronym terminates the word list.
    match = re.match(r'([A-Z][a-z]+)(.*)', rest)
    while match is not None:
        parts.append(match.group(1))
        rest = match.group(2)
        match = re.match(r'([A-Z][a-z]+)(.*)', rest)
    if not parts:
        return []
    return ['_'.join(word.lower() for word in parts)]
[ "def", "default_get_arg_names_from_class_name", "(", "class_name", ")", ":", "parts", "=", "[", "]", "rest", "=", "class_name", "if", "rest", ".", "startswith", "(", "'_'", ")", ":", "rest", "=", "rest", "[", "1", ":", "]", "while", "True", ":", "m", "...
30.75
20.5
def put(self, path=None, method='PUT', **options):
    """Equals :meth:`route` with a ``PUT`` method parameter."""
    # Thin convenience wrapper: forward everything to route().
    return self.route(path, method, **options)
[ "def", "put", "(", "self", ",", "path", "=", "None", ",", "method", "=", "'PUT'", ",", "*", "*", "options", ")", ":", "return", "self", ".", "route", "(", "path", ",", "method", ",", "*", "*", "options", ")" ]
56.333333
6.666667
def _format_type(cls):
    """Format a type name for printing."""
    # Builtins print bare; everything else is module-qualified.
    if cls.__module__ != _BUILTIN_MODULE:
        return '%s.%s' % (cls.__module__, cls.__name__)
    return cls.__name__
[ "def", "_format_type", "(", "cls", ")", ":", "if", "cls", ".", "__module__", "==", "_BUILTIN_MODULE", ":", "return", "cls", ".", "__name__", "else", ":", "return", "'%s.%s'", "%", "(", "cls", ".", "__module__", ",", "cls", ".", "__name__", ")" ]
32.666667
13
def taskotron_task_outcome(config, message, outcome=None):
    """ Particular taskotron task outcome

    With this rule, you can limit messages to only those of particular
    `taskotron <https://taskotron.fedoraproject.org/>`_ task outcome.

    You can specify several outcomes by separating them with a comma ',',
    i.e.: ``PASSED,FAILED``.

    The full list of supported outcomes can be found in the libtaskotron
    `documentation <https://docs.qadevel.cloud.fedoraproject.org/
    libtaskotron/latest/resultyaml.html#minimal-version>`_.
    """
    # We only operate on taskotron messages, first off.
    if not taskotron_result_new(config, message):
        return False

    if not outcome:
        return False

    outcomes = [item.strip().lower() for item in outcome.split(',')]
    # FIX: ``.get('outcome')`` may return None when the result carries no
    # outcome; the original then crashed on ``.lower()``.  Treat a missing
    # outcome as a non-match instead.
    actual = message['msg']['result'].get('outcome')
    if actual is None:
        return False
    return actual.lower() in outcomes
[ "def", "taskotron_task_outcome", "(", "config", ",", "message", ",", "outcome", "=", "None", ")", ":", "# We only operate on taskotron messages, first off.", "if", "not", "taskotron_result_new", "(", "config", ",", "message", ")", ":", "return", "False", "if", "not"...
36.652174
25.26087
def rating(self, **kwargs):
    """This method lets users rate a movie.

    A valid session id or guest session id is required.

    Args:
        session_id: see Authentication.
        guest_session_id: see Authentication.
        value: Rating value.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    # 'value' travels in the POST payload; the remaining kwargs (session
    # ids) are passed through as query parameters.
    payload = {'value': kwargs.pop('value', None), }
    response = self._POST(self._get_id_path('rating'), kwargs, payload)
    self._set_attrs_to_values(response)
    return response
[ "def", "rating", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "self", ".", "_get_id_path", "(", "'rating'", ")", "payload", "=", "{", "'value'", ":", "kwargs", ".", "pop", "(", "'value'", ",", "None", ")", ",", "}", "response", "=",...
27.636364
18.454545
async def _report_version(self):
    """
    This is a private message handler method.

    Reads the 2 bytes that follow the report version command
    (0xF9 - non sysex): first the major, then the minor version number.

    :returns: None
    """
    # get next two bytes
    major = await self.read()
    minor = await self.read()
    version_string = '{}.{}'.format(major, minor)
    self.query_reply_data[PrivateConstants.REPORT_VERSION] = version_string
[ "async", "def", "_report_version", "(", "self", ")", ":", "# get next two bytes", "major", "=", "await", "self", ".", "read", "(", ")", "version_string", "=", "str", "(", "major", ")", "minor", "=", "await", "self", ".", "read", "(", ")", "version_string",...
35
14.411765
def thellier_interpreter_BS_pars_calc(self, Grade_As):
    '''
    calcualte sample or site bootstrap paleointensities and statistics
    Grade_As={}

    NOTE(review): reconstructed from a whitespace-mangled dump; the exact
    nesting of the trailing assignments is an educated guess — verify
    against the original source before relying on this layout.
    '''
    thellier_interpreter_pars = {}
    thellier_interpreter_pars['fail_criteria'] = []
    thellier_interpreter_pars['pass_or_fail'] = 'pass'
    BOOTSTRAP_N = int(self.preferences['BOOTSTRAP_N'])
    Grade_A_samples_BS = {}
    if len(list(Grade_As.keys())) >= self.acceptance_criteria['sample_int_n']['value']:
        # Collect and sort the grade-A intensities per specimen, dropping
        # specimens whose max/min intensity ratio exceeds the criterion.
        for specimen in list(Grade_As.keys()):
            if specimen not in list(Grade_A_samples_BS.keys()) and len(Grade_As[specimen]) > 0:
                Grade_A_samples_BS[specimen] = []
            for B in Grade_As[specimen]:
                Grade_A_samples_BS[specimen].append(B)
            Grade_A_samples_BS[specimen].sort()
            specimen_int_max_slope_diff = max(
                Grade_A_samples_BS[specimen]) / min(Grade_A_samples_BS[specimen])
            if specimen_int_max_slope_diff > self.acceptance_criteria['specimen_int_max_slope_diff']:
                self.thellier_interpreter_log.write(
                    "-I- specimen %s Failed specimen_int_max_slope_diff\n" % specimen,
                    Grade_A_samples_BS[specimen])
                del Grade_A_samples_BS[specimen]
        if len(list(Grade_A_samples_BS.keys())) >= self.acceptance_criteria['sample_int_n']['value']:
            # Bootstrap: draw one intensity per surviving specimen,
            # BOOTSTRAP_N times, and collect the means.
            BS_means_collection = []
            for i in range(BOOTSTRAP_N):
                B_BS = []
                for j in range(len(list(Grade_A_samples_BS.keys()))):
                    LIST = list(Grade_A_samples_BS.keys())
                    specimen = random.choice(LIST)
                    if self.acceptance_criteria['interpreter_method']['value'] == 'bs':
                        B = random.choice(Grade_A_samples_BS[specimen])
                    if self.acceptance_criteria['interpreter_method']['value'] == 'bs_par':
                        B = random.uniform(min(Grade_A_samples_BS[specimen]), max(
                            Grade_A_samples_BS[specimen]))
                    B_BS.append(B)
                BS_means_collection.append(mean(B_BS))
            BS_means = array(BS_means_collection)
            BS_means.sort()
            sample_median = median(BS_means)
            sample_std = std(BS_means, ddof=1)
            # NOTE(review): the float products below are invalid as indices
            # on Python 3; this looks like legacy Python 2 percentile
            # lookup code — TODO confirm intended int truncation.
            sample_68 = [BS_means[(0.16) * len(BS_means)],
                         BS_means[(0.84) * len(BS_means)]]
            sample_95 = [BS_means[(0.025) * len(BS_means)],
                         BS_means[(0.975) * len(BS_means)]]
        else:
            # NOTE(review): ``sample`` is not defined in this scope, so
            # formatting this message would raise NameError — TODO confirm.
            String = "-I- sample %s FAIL: not enough \nspecimen int_n= %i < %i " % (sample, len(
                list(Grade_A_samples_BS.keys())), int(self.acceptance_criteria['sample_int_n']['value']))
            # print String
            self.thellier_interpreter_log.write(String)
        # NOTE(review): on the else-path above these names are unbound;
        # reaching here after a failed bootstrap would raise NameError.
        thellier_interpreter_pars['bs_bedian'] = sample_median
        thellier_interpreter_pars['bs_std'] = sample_std
        thellier_interpreter_pars['bs_68'] = sample_68
        thellier_interpreter_pars['bs_95'] = sample_95
        thellier_interpreter_pars['bs_n'] = len(
            list(Grade_A_samples_BS.keys()))
[ "def", "thellier_interpreter_BS_pars_calc", "(", "self", ",", "Grade_As", ")", ":", "thellier_interpreter_pars", "=", "{", "}", "thellier_interpreter_pars", "[", "'fail_criteria'", "]", "=", "[", "]", "thellier_interpreter_pars", "[", "'pass_or_fail'", "]", "=", "'pas...
51.564516
24.048387
def command(cmd):
    """Execute command and raise an exception upon an error.

    >>> 'README' in command('ls')
    True
    >>> command('nonexistingcommand') #doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    SdistCreationError
    """
    status, out = commands.getstatusoutput(cmd)
    # FIX: was ``status is not 0`` — an identity comparison with an int
    # literal that only worked through CPython's small-int caching (and is
    # a SyntaxWarning on modern Pythons).  Use a value comparison.
    if status != 0:
        logger.error("Something went wrong:")
        logger.error(out)
        raise SdistCreationError()
    return out
[ "def", "command", "(", "cmd", ")", ":", "status", ",", "out", "=", "commands", ".", "getstatusoutput", "(", "cmd", ")", "if", "status", "is", "not", "0", ":", "logger", ".", "error", "(", "\"Something went wrong:\"", ")", "logger", ".", "error", "(", "...
26.470588
16.529412
def deploy(self, config_file, region=None, profile_name=None):
    """
    Deploy the configuration and Lambda functions of a Greengrass group to
    the Greengrass core contained in the group.

    :param config_file: config file of the group to deploy
    :param region: the region from which to deploy the group.
    :param profile_name: the name of the `awscli` profile to use.
        [default: None]
    """
    config = GroupConfigFile(config_file=config_file)
    if config.is_fresh():
        raise ValueError("Config not yet tracking a group. Cannot deploy.")

    # Fall back to this object's configured region when none is given.
    target_region = self._region if region is None else region
    gg_client = _get_gg_session(region=target_region,
                                profile_name=profile_name)
    dep_req = gg_client.create_deployment(
        GroupId=config['group']['id'],
        GroupVersionId=config['group']['version'],
        DeploymentType="NewDeployment"
    )
    print("Group deploy requested for deployment_id:{0}".format(
        dep_req['DeploymentId'],
    ))
[ "def", "deploy", "(", "self", ",", "config_file", ",", "region", "=", "None", ",", "profile_name", "=", "None", ")", ":", "config", "=", "GroupConfigFile", "(", "config_file", "=", "config_file", ")", "if", "config", ".", "is_fresh", "(", ")", ":", "rais...
38.666667
20.814815
def get_api_version(base_url, api_version=None, timeout=10, verify=True):
    """
    Get the API version specified or resolve the latest version

    :return api version
    :rtype: float
    """
    versions = available_api_versions(base_url, timeout, verify)
    newest_version = max(float(v) for v in versions)
    # NOTE(review): the membership test compares api_version against the
    # raw entries of ``versions`` (which are cast to float above, so they
    # may be strings) — assumes the caller passes a matching type; verify.
    if api_version is None or api_version not in versions:
        # Use latest
        return newest_version
    return api_version
[ "def", "get_api_version", "(", "base_url", ",", "api_version", "=", "None", ",", "timeout", "=", "10", ",", "verify", "=", "True", ")", ":", "versions", "=", "available_api_versions", "(", "base_url", ",", "timeout", ",", "verify", ")", "newest_version", "="...
29.588235
17.705882
def init(opts):
    '''
    This function gets called when the proxy starts up. For
    login the protocol and port are cached.
    '''
    log.debug('Initting esxcluster proxy module in process %s', os.getpid())
    log.debug('Validating esxcluster proxy input')
    schema = EsxclusterProxySchema.serialize()
    log.trace('schema = %s', schema)
    # Pillar proxy config takes precedence over opts proxy config.
    proxy_conf = merge(opts.get('proxy', {}), __pillar__.get('proxy', {}))
    log.trace('proxy_conf = %s', proxy_conf)
    try:
        jsonschema.validate(proxy_conf, schema)
    except jsonschema.exceptions.ValidationError as exc:
        raise salt.exceptions.InvalidConfigError(exc)

    # Save mandatory fields in cache
    for key in ('vcenter', 'datacenter', 'cluster', 'mechanism'):
        DETAILS[key] = proxy_conf[key]

    # Additional validation
    if DETAILS['mechanism'] == 'userpass':
        if 'username' not in proxy_conf:
            raise salt.exceptions.InvalidConfigError(
                'Mechanism is set to \'userpass\', but no '
                '\'username\' key found in proxy config.')
        if 'passwords' not in proxy_conf:
            raise salt.exceptions.InvalidConfigError(
                'Mechanism is set to \'userpass\', but no '
                '\'passwords\' key found in proxy config.')
        for key in ('username', 'passwords'):
            DETAILS[key] = proxy_conf[key]
    else:
        # Anything other than 'userpass' is treated as 'sspi'.
        if 'domain' not in proxy_conf:
            raise salt.exceptions.InvalidConfigError(
                'Mechanism is set to \'sspi\', but no '
                '\'domain\' key found in proxy config.')
        if 'principal' not in proxy_conf:
            raise salt.exceptions.InvalidConfigError(
                'Mechanism is set to \'sspi\', but no '
                '\'principal\' key found in proxy config.')
        for key in ('domain', 'principal'):
            DETAILS[key] = proxy_conf[key]

    # Save optional
    DETAILS['protocol'] = proxy_conf.get('protocol')
    DETAILS['port'] = proxy_conf.get('port')

    # Test connection
    if DETAILS['mechanism'] == 'userpass':
        # Get the correct login details
        # ('mehchanism' typo below is preserved: it is a runtime log string.)
        log.debug('Retrieving credentials and testing vCenter connection for '
                  'mehchanism \'userpass\'')
        try:
            username, password = find_credentials()
            DETAILS['password'] = password
        except salt.exceptions.SaltSystemExit as err:
            log.critical('Error: %s', err)
            return False
    return True
[ "def", "init", "(", "opts", ")", ":", "log", ".", "debug", "(", "'Initting esxcluster proxy module in process %s'", ",", "os", ".", "getpid", "(", ")", ")", "log", ".", "debug", "(", "'Validating esxcluster proxy input'", ")", "schema", "=", "EsxclusterProxySchema...
39.721311
15.557377
def format_request_email_templ(increq, template, **ctx):
    """Format the email message element for inclusion request notification.

    Formats the message according to the provided template file, using some
    default fields from 'increq' object as default context. Arbitrary
    context can be provided as keywords ('ctx'), and those will not be
    overwritten by the fields from 'increq' object.

    :param increq: Inclusion request object for which the request is made.
    :type increq: `invenio_communities.models.InclusionRequest`
    :param template: relative path to jinja template.
    :type template: str
    :param ctx: Optional extra context parameters passed to formatter.
    :type ctx: dict.
    :returns: Formatted message.
    :rtype: str
    """
    # Add minimal information to the contex (without overwriting).
    curate_link = '{site_url}/communities/{id}/curate/'.format(
        site_url=current_app.config['THEME_SITEURL'],
        id=increq.community.id)
    defaults = {
        'record': Record.get_record(increq.record.id),
        'requester': increq.user,
        'community': increq.community,
        'curate_link': curate_link,
    }
    # setdefault keeps any caller-supplied value, mirroring the original
    # "only add if absent" loop.
    for key, value in defaults.items():
        ctx.setdefault(key, value)
    return render_template_to_string(template, **ctx)
[ "def", "format_request_email_templ", "(", "increq", ",", "template", ",", "*", "*", "ctx", ")", ":", "# Add minimal information to the contex (without overwriting).", "curate_link", "=", "'{site_url}/communities/{id}/curate/'", ".", "format", "(", "site_url", "=", "current_...
38.176471
20.352941
def view_dot_graph(graph, filename=None, view=False): """ View the given DOT source. If view is True, the image is rendered and viewed by the default application in the system. The file path of the output is returned. If view is False, a graphviz.Source object is returned. If view is False and the environment is in a IPython session, an IPython image object is returned and can be displayed inline in the notebook. This function requires the graphviz package. Args ---- - graph [str]: a DOT source code - filename [str]: optional. if given and view is True, this specifies the file path for the rendered output to write to. - view [bool]: if True, opens the rendered output file. """ # Optionally depends on graphviz package import graphviz as gv src = gv.Source(graph) if view: # Returns the output file path return src.render(filename, view=view) else: # Attempts to show the graph in IPython notebook try: __IPYTHON__ except NameError: return src else: import IPython.display as display format = 'svg' return display.SVG(data=src.pipe(format))
[ "def", "view_dot_graph", "(", "graph", ",", "filename", "=", "None", ",", "view", "=", "False", ")", ":", "# Optionally depends on graphviz package", "import", "graphviz", "as", "gv", "src", "=", "gv", ".", "Source", "(", "graph", ")", "if", "view", ":", "...
34.111111
21.722222
def data_ma(self): """ A 2D `~numpy.ma.MaskedArray` cutout image of the segment using the minimal bounding box. The mask is `True` for pixels outside of the source segment (i.e. neighboring segments within the rectangular cutout image are masked). """ mask = (self._segment_img[self.slices] != self.label) return np.ma.masked_array(self._segment_img[self.slices], mask=mask)
[ "def", "data_ma", "(", "self", ")", ":", "mask", "=", "(", "self", ".", "_segment_img", "[", "self", ".", "slices", "]", "!=", "self", ".", "label", ")", "return", "np", ".", "ma", ".", "masked_array", "(", "self", ".", "_segment_img", "[", "self", ...
36.416667
22.75
def mtime(self, fpath=None): """ Returns: str: strftime-formatted mtime (modification time) of fpath """ return dtformat( datetime.datetime.utcfromtimestamp( os.path.getmtime(fpath or self.fpath)))
[ "def", "mtime", "(", "self", ",", "fpath", "=", "None", ")", ":", "return", "dtformat", "(", "datetime", ".", "datetime", ".", "utcfromtimestamp", "(", "os", ".", "path", ".", "getmtime", "(", "fpath", "or", "self", ".", "fpath", ")", ")", ")" ]
32.75
13
def read_line(csv_contents, options, prop_indices, mol, ensemble_list=None): """ read csv line """ if not ensemble_list: score_field = options.score_field status_field = options.status_field active_label = options.active_label decoy_label = options.decoy_label # do the active/decoy labels have appropriate values? active_value_matcher = re.compile(active_label) decoy_value_matcher = re.compile(decoy_label) status_label_index = prop_indices[status_field] if not active_value_matcher.match(csv_contents[status_label_index]) and not decoy_value_matcher.match( csv_contents[status_label_index]): print("\n molecule lacks appropriate status label") return 1 # are the score field values defined? score_field_indices = [] if ensemble_list: queryList = ensemble_list else: queryList = [x for x in prop_indices.keys() if score_field in x] for query in queryList: score_field_indices.append(prop_indices[query]) for value in [csv_contents[x] for x in score_field_indices]: if value in ('', 'n/a', 'N/A', None): print("\n molecule lacks appropriate score field value") return 1 # loop over property values for label in prop_indices.keys(): # get property value value_index = prop_indices[label] value = csv_contents[value_index] # set corresponding molecule attribute if label in queryList: mol.SetProp(label, value, 'score') else: mol.SetProp(label, value) # return mol return mol
[ "def", "read_line", "(", "csv_contents", ",", "options", ",", "prop_indices", ",", "mol", ",", "ensemble_list", "=", "None", ")", ":", "if", "not", "ensemble_list", ":", "score_field", "=", "options", ".", "score_field", "status_field", "=", "options", ".", ...
32.916667
17.9375
def wvalue(wave, indep_var): r""" Return the dependent variable value at a given independent variable point. If the independent variable point is not in the independent variable vector the dependent variable value is obtained by linear interpolation :param wave: Waveform :type wave: :py:class:`peng.eng.Waveform` :param indep_var: Independent variable point for which the dependent variable is to be obtained :type indep_var: integer or float :rtype: :py:class:`peng.eng.Waveform` .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]] .. Auto-generated exceptions documentation for .. peng.wave_functions.wvalue :raises: * RuntimeError (Argument \`indep_var\` is not valid) * RuntimeError (Argument \`wave\` is not valid) * ValueError (Argument \`indep_var\` is not in the independent variable vector range) .. [[[end]]] """ close_min = np.isclose(indep_var, wave._indep_vector[0], FP_RTOL, FP_ATOL) close_max = np.isclose(indep_var, wave._indep_vector[-1], FP_RTOL, FP_ATOL) pexdoc.exh.addex( ValueError, "Argument `indep_var` is not in the independent variable vector range", bool( ((indep_var < wave._indep_vector[0]) and (not close_min)) or ((indep_var > wave._indep_vector[-1]) and (not close_max)) ), ) if close_min: return wave._dep_vector[0] if close_max: return wave._dep_vector[-1] idx = np.searchsorted(wave._indep_vector, indep_var) xdelta = wave._indep_vector[idx] - wave._indep_vector[idx - 1] ydelta = wave._dep_vector[idx] - wave._dep_vector[idx - 1] slope = ydelta / float(xdelta) return wave._dep_vector[idx - 1] + slope * (indep_var - wave._indep_vector[idx - 1])
[ "def", "wvalue", "(", "wave", ",", "indep_var", ")", ":", "close_min", "=", "np", ".", "isclose", "(", "indep_var", ",", "wave", ".", "_indep_vector", "[", "0", "]", ",", "FP_RTOL", ",", "FP_ATOL", ")", "close_max", "=", "np", ".", "isclose", "(", "i...
36.040816
24.061224
def _setLearningMode(self, l4Learning = False, l2Learning=False): """ Sets the learning mode for L4 and L2. """ for column in self.L4Columns: column.setParameter("learn", 0, l4Learning) for column in self.L2Columns: column.setParameter("learningMode", 0, l2Learning)
[ "def", "_setLearningMode", "(", "self", ",", "l4Learning", "=", "False", ",", "l2Learning", "=", "False", ")", ":", "for", "column", "in", "self", ".", "L4Columns", ":", "column", ".", "setParameter", "(", "\"learn\"", ",", "0", ",", "l4Learning", ")", "...
36.375
8.125
def truncate_graph_bbox(G, north, south, east, west, truncate_by_edge=False, retain_all=False): """ Remove every node in graph that falls outside a bounding box. Needed because overpass returns entire ways that also include nodes outside the bbox if the way (that is, a way with a single OSM ID) has a node inside the bbox at some point. Parameters ---------- G : networkx multidigraph north : float northern latitude of bounding box south : float southern latitude of bounding box east : float eastern longitude of bounding box west : float western longitude of bounding box truncate_by_edge : bool if True retain node if it's outside bbox but at least one of node's neighbors are within bbox retain_all : bool if True, return the entire graph even if it is not connected Returns ------- networkx multidigraph """ start_time = time.time() G = G.copy() nodes_outside_bbox = [] for node, data in G.nodes(data=True): if data['y'] > north or data['y'] < south or data['x'] > east or data['x'] < west: # this node is outside the bounding box if not truncate_by_edge: # if we're not truncating by edge, add node to list of nodes # outside the bounding box nodes_outside_bbox.append(node) else: # if we're truncating by edge, see if any of node's neighbors # are within bounding box any_neighbors_in_bbox = False neighbors = list(G.successors(node)) + list(G.predecessors(node)) for neighbor in neighbors: x = G.nodes[neighbor]['x'] y = G.nodes[neighbor]['y'] if y < north and y > south and x < east and x > west: any_neighbors_in_bbox = True break # if none of its neighbors are within the bounding box, add node # to list of nodes outside the bounding box if not any_neighbors_in_bbox: nodes_outside_bbox.append(node) G.remove_nodes_from(nodes_outside_bbox) log('Truncated graph by bounding box in {:,.2f} seconds'.format(time.time()-start_time)) # remove any isolated nodes and retain only the largest component (if # retain_all is True) if not retain_all: G = remove_isolated_nodes(G) G = 
get_largest_component(G) return G
[ "def", "truncate_graph_bbox", "(", "G", ",", "north", ",", "south", ",", "east", ",", "west", ",", "truncate_by_edge", "=", "False", ",", "retain_all", "=", "False", ")", ":", "start_time", "=", "time", ".", "time", "(", ")", "G", "=", "G", ".", "cop...
36.676471
21.117647
def add_buttons(self, *args): """add_buttons(*args) The add_buttons() method adds several buttons to the Gtk.Dialog using the button data passed as arguments to the method. This method is the same as calling the Gtk.Dialog.add_button() repeatedly. The button data pairs - button text (or stock ID) and a response ID integer are passed individually. For example: .. code-block:: python dialog.add_buttons(Gtk.STOCK_OPEN, 42, "Close", Gtk.ResponseType.CLOSE) will add "Open" and "Close" buttons to dialog. """ def _button(b): while b: t, r = b[0:2] b = b[2:] yield t, r try: for text, response in _button(args): self.add_button(text, response) except (IndexError): raise TypeError('Must pass an even number of arguments')
[ "def", "add_buttons", "(", "self", ",", "*", "args", ")", ":", "def", "_button", "(", "b", ")", ":", "while", "b", ":", "t", ",", "r", "=", "b", "[", "0", ":", "2", "]", "b", "=", "b", "[", "2", ":", "]", "yield", "t", ",", "r", "try", ...
34.884615
22.884615
def pitch_hz_to_contour(annotation): '''Convert a pitch_hz annotation to a contour''' annotation.namespace = 'pitch_contour' data = annotation.pop_data() for obs in data: annotation.append(time=obs.time, duration=obs.duration, confidence=obs.confidence, value=dict(index=0, frequency=np.abs(obs.value), voiced=obs.value > 0)) return annotation
[ "def", "pitch_hz_to_contour", "(", "annotation", ")", ":", "annotation", ".", "namespace", "=", "'pitch_contour'", "data", "=", "annotation", ".", "pop_data", "(", ")", "for", "obs", "in", "data", ":", "annotation", ".", "append", "(", "time", "=", "obs", ...
40.583333
15.75
def target_socket(self, config): """ This method overrides :meth:`.WNetworkNativeTransport.target_socket` method. Do the same thing as basic method do, but also checks that the result address is IPv4 address. :param config: beacon configuration :return: WIPV4SocketInfo """ target = WNetworkNativeTransport.target_socket(self, config) if isinstance(target.address(), WIPV4Address) is False: raise ValueError('Invalid address for broadcast transport') return target
[ "def", "target_socket", "(", "self", ",", "config", ")", ":", "target", "=", "WNetworkNativeTransport", ".", "target_socket", "(", "self", ",", "config", ")", "if", "isinstance", "(", "target", ".", "address", "(", ")", ",", "WIPV4Address", ")", "is", "Fal...
43.090909
16.909091
def get_task_log(self, project, release_id, environment_id, release_deploy_phase_id, task_id, start_line=None, end_line=None, **kwargs): """GetTaskLog. [Preview API] Gets the task log of a release as a plain text file. :param str project: Project ID or project name :param int release_id: Id of the release. :param int environment_id: Id of release environment. :param int release_deploy_phase_id: Release deploy phase Id. :param int task_id: ReleaseTask Id for the log. :param long start_line: Starting line number for logs :param long end_line: Ending line number for logs :rtype: object """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if release_id is not None: route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int') if environment_id is not None: route_values['environmentId'] = self._serialize.url('environment_id', environment_id, 'int') if release_deploy_phase_id is not None: route_values['releaseDeployPhaseId'] = self._serialize.url('release_deploy_phase_id', release_deploy_phase_id, 'int') if task_id is not None: route_values['taskId'] = self._serialize.url('task_id', task_id, 'int') query_parameters = {} if start_line is not None: query_parameters['startLine'] = self._serialize.query('start_line', start_line, 'long') if end_line is not None: query_parameters['endLine'] = self._serialize.query('end_line', end_line, 'long') response = self._send(http_method='GET', location_id='17c91af7-09fd-4256-bff1-c24ee4f73bc0', version='5.1-preview.2', route_values=route_values, query_parameters=query_parameters, accept_media_type='text/plain') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback)
[ "def", "get_task_log", "(", "self", ",", "project", ",", "release_id", ",", "environment_id", ",", "release_deploy_phase_id", ",", "task_id", ",", "start_line", "=", "None", ",", "end_line", "=", "None", ",", "*", "*", "kwargs", ")", ":", "route_values", "="...
56.512821
24.487179
def add_api_stage_to_api_key(self, api_key, api_id, stage_name): """ Add api stage to Api key """ self.apigateway_client.update_api_key( apiKey=api_key, patchOperations=[ { 'op': 'add', 'path': '/stages', 'value': '{}/{}'.format(api_id, stage_name) } ] )
[ "def", "add_api_stage_to_api_key", "(", "self", ",", "api_key", ",", "api_id", ",", "stage_name", ")", ":", "self", ".", "apigateway_client", ".", "update_api_key", "(", "apiKey", "=", "api_key", ",", "patchOperations", "=", "[", "{", "'op'", ":", "'add'", "...
29.214286
14.214286
def check_block_spacing( self, first_block_type: LineType, second_block_type: LineType, error_message: str, ) -> typing.Generator[AAAError, None, None]: """ Checks there is a clear single line between ``first_block_type`` and ``second_block_type``. Note: Is tested via ``check_arrange_act_spacing()`` and ``check_act_assert_spacing()``. """ numbered_lines = list(enumerate(self)) first_block_lines = filter(lambda l: l[1] is first_block_type, numbered_lines) try: first_block_lineno = list(first_block_lines)[-1][0] except IndexError: # First block has no lines return second_block_lines = filter(lambda l: l[1] is second_block_type, numbered_lines) try: second_block_lineno = next(second_block_lines)[0] except StopIteration: # Second block has no lines return blank_lines = [ bl for bl in numbered_lines[first_block_lineno + 1:second_block_lineno] if bl[1] is LineType.blank_line ] if not blank_lines: # Point at line above second block yield AAAError( line_number=self.fn_offset + second_block_lineno - 1, offset=0, text=error_message.format('none'), ) return if len(blank_lines) > 1: # Too many blank lines - point at the first extra one, the 2nd yield AAAError( line_number=self.fn_offset + blank_lines[1][0], offset=0, text=error_message.format(len(blank_lines)), )
[ "def", "check_block_spacing", "(", "self", ",", "first_block_type", ":", "LineType", ",", "second_block_type", ":", "LineType", ",", "error_message", ":", "str", ",", ")", "->", "typing", ".", "Generator", "[", "AAAError", ",", "None", ",", "None", "]", ":",...
34.285714
21.22449
def RegisterMessage(self, message): """Registers the given message type in the local database. Args: message: a message.Message, to be registered. Returns: The provided message. """ desc = message.DESCRIPTOR self._symbols[desc.full_name] = message if desc.file.name not in self._symbols_by_file: self._symbols_by_file[desc.file.name] = {} self._symbols_by_file[desc.file.name][desc.full_name] = message self.pool.AddDescriptor(desc) return message
[ "def", "RegisterMessage", "(", "self", ",", "message", ")", ":", "desc", "=", "message", ".", "DESCRIPTOR", "self", ".", "_symbols", "[", "desc", ".", "full_name", "]", "=", "message", "if", "desc", ".", "file", ".", "name", "not", "in", "self", ".", ...
28.941176
17.470588
def count_comments_handler(sender, **kwargs): """ Update Entry.comment_count when a public comment was posted. """ comment = kwargs['comment'] if comment.is_public: entry = comment.content_object if isinstance(entry, Entry): entry.comment_count = F('comment_count') + 1 entry.save(update_fields=['comment_count'])
[ "def", "count_comments_handler", "(", "sender", ",", "*", "*", "kwargs", ")", ":", "comment", "=", "kwargs", "[", "'comment'", "]", "if", "comment", ".", "is_public", ":", "entry", "=", "comment", ".", "content_object", "if", "isinstance", "(", "entry", ",...
36.4
9
def save_metadata(self, data_dir, feature_name=None): """See base class for details.""" filepath = _get_metadata_filepath(data_dir, feature_name) with tf.io.gfile.GFile(filepath, 'w') as f: json.dump({ 'shape': [-1 if d is None else d for d in self._shape], 'encoding_format': self._encoding_format, }, f, sort_keys=True)
[ "def", "save_metadata", "(", "self", ",", "data_dir", ",", "feature_name", "=", "None", ")", ":", "filepath", "=", "_get_metadata_filepath", "(", "data_dir", ",", "feature_name", ")", "with", "tf", ".", "io", ".", "gfile", ".", "GFile", "(", "filepath", ",...
44.75
14.125
def _where_in_sub(self, column, query, boolean, negate=False): """ Add a where in with a sub select to the query :param column: The column :type column: str :param query: A QueryBuilder instance :type query: QueryBuilder :param boolean: The boolean operator :type boolean: str :param negate: Whether it is a not where in :param negate: bool :return: The current QueryBuilder instance :rtype: QueryBuilder """ if negate: type = 'not_in_sub' else: type = 'in_sub' self.wheres.append({ 'type': type, 'column': column, 'query': query, 'boolean': boolean }) self.merge_bindings(query) return self
[ "def", "_where_in_sub", "(", "self", ",", "column", ",", "query", ",", "boolean", ",", "negate", "=", "False", ")", ":", "if", "negate", ":", "type", "=", "'not_in_sub'", "else", ":", "type", "=", "'in_sub'", "self", ".", "wheres", ".", "append", "(", ...
23.235294
18.882353
def get_git_postversion(addon_dir): """ return the addon version number, with a developmental version increment if there were git commits in the addon_dir after the last version change. If the last change to the addon correspond to the version number in the manifest it is used as is for the python package version. Otherwise a counter is incremented for each commit and resulting version number has the following form: [8|9].0.x.y.z.1devN, N being the number of git commits since the version change. Note: we use .99.devN because: * pip ignores .postN by design (https://github.com/pypa/pip/issues/2872) * x.y.z.devN is anterior to x.y.z Note: we don't put the sha1 of the commit in the version number because this is not PEP 440 compliant and is therefore misinterpreted by pip. """ addon_dir = os.path.realpath(addon_dir) last_version = read_manifest(addon_dir).get('version', '0.0.0') last_version_parsed = parse_version(last_version) if not is_git_controlled(addon_dir): return last_version if get_git_uncommitted(addon_dir): uncommitted = True count = 1 else: uncommitted = False count = 0 last_sha = None git_root = get_git_root(addon_dir) for sha in git_log_iterator(addon_dir): try: manifest = read_manifest_from_sha(sha, addon_dir, git_root) except NoManifestFound: break version = manifest.get('version', '0.0.0') version_parsed = parse_version(version) if version_parsed != last_version_parsed: break if last_sha is None: last_sha = sha else: count += 1 if not count: return last_version if last_sha: return last_version + ".99.dev%s" % count if uncommitted: return last_version + ".dev1" # if everything is committed, the last commit # must have the same version as current, # so last_sha must be set and we'll never reach this branch return last_version
[ "def", "get_git_postversion", "(", "addon_dir", ")", ":", "addon_dir", "=", "os", ".", "path", ".", "realpath", "(", "addon_dir", ")", "last_version", "=", "read_manifest", "(", "addon_dir", ")", ".", "get", "(", "'version'", ",", "'0.0.0'", ")", "last_versi...
38.018868
18.056604
def rotate(self): """ Rotate the kill ring, then yank back the new top. Returns ------- A text string or None. """ self._index -= 1 if self._index >= 0: return self._ring[self._index] return None
[ "def", "rotate", "(", "self", ")", ":", "self", ".", "_index", "-=", "1", "if", "self", ".", "_index", ">=", "0", ":", "return", "self", ".", "_ring", "[", "self", ".", "_index", "]", "return", "None" ]
23.818182
15.818182
def multisorted(items, *keys): """Sort by multiple attributes. Args: items: An iterable series to be sorted. *keys: Key objects which extract key values from the items. The first key will be the most significant, and the last key the least significant. If no key functions are provided, the items will be sorted in ascending natural order. Returns: A list of items sorted according to keys. """ if len(keys) == 0: keys = [asc()] for key in reversed(keys): items = sorted(items, key=key.func, reverse=key.reverse) return items
[ "def", "multisorted", "(", "items", ",", "*", "keys", ")", ":", "if", "len", "(", "keys", ")", "==", "0", ":", "keys", "=", "[", "asc", "(", ")", "]", "for", "key", "in", "reversed", "(", "keys", ")", ":", "items", "=", "sorted", "(", "items", ...
34.722222
18.277778
def convertDict2Attrs(self, *args, **kwargs): """The trick for iterable Mambu Objects comes here: You iterate over each element of the responded List from Mambu, and create a Mambu Group object for each one, initializing them one at a time, and changing the attrs attribute (which just holds a list of plain dictionaries) with a MambuGroup just created. .. todo:: pass a valid (perhaps default) urlfunc, and its corresponding id to entid to each MambuGroup, telling MambuStruct not to connect() by default. It's desirable to connect at any other further moment to refresh some element in the list. """ for n,c in enumerate(self.attrs): # ok ok, I'm modifying elements of a list while iterating it. BAD PRACTICE! try: params = self.params except AttributeError as aerr: params = {} kwargs.update(params) try: group = self.mambugroupclass(urlfunc=None, entid=None, *args, **kwargs) except AttributeError as ae: self.mambugroupclass = MambuGroup group = self.mambugroupclass(urlfunc=None, entid=None, *args, **kwargs) group.init(c, *args, **kwargs) self.attrs[n] = group
[ "def", "convertDict2Attrs", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "n", ",", "c", "in", "enumerate", "(", "self", ".", "attrs", ")", ":", "# ok ok, I'm modifying elements of a list while iterating it. BAD PRACTICE!", "try", ":",...
45.551724
19.965517
def tag(self, tokens): """Return a list of (token, tag) tuples for a given list of tokens.""" # Lazy load model first time we tag if not self.classes: self.load(self.model) prev, prev2 = self.START tags = [] for i, token in enumerate(tokens): tag = self.tagdict.get(token) if not tag: features = self._get_features(i, tokens, prev, prev2) tag = self.perceptron.predict(features) tags.append((token, tag)) prev2 = prev prev = tag return tags
[ "def", "tag", "(", "self", ",", "tokens", ")", ":", "# Lazy load model first time we tag", "if", "not", "self", ".", "classes", ":", "self", ".", "load", "(", "self", ".", "model", ")", "prev", ",", "prev2", "=", "self", ".", "START", "tags", "=", "[",...
36.5625
12.0625
def _debugGraph(self): """internal util to print out contents of graph""" print("Len of graph: ", len(self.rdflib_graph)) for x, y, z in self.rdflib_graph: print(x, y, z)
[ "def", "_debugGraph", "(", "self", ")", ":", "print", "(", "\"Len of graph: \"", ",", "len", "(", "self", ".", "rdflib_graph", ")", ")", "for", "x", ",", "y", ",", "z", "in", "self", ".", "rdflib_graph", ":", "print", "(", "x", ",", "y", ",", "z", ...
40.4
9.6
def compare(ver1='', oper='==', ver2='', cmp_func=None, ignore_epoch=False): ''' Compares two version numbers. Accepts a custom function to perform the cmp-style version comparison, otherwise uses version_cmp(). ''' cmp_map = {'<': (-1,), '<=': (-1, 0), '==': (0,), '>=': (0, 1), '>': (1,)} if oper not in ('!=',) and oper not in cmp_map: log.error('Invalid operator \'%s\' for version comparison', oper) return False if cmp_func is None: cmp_func = version_cmp cmp_result = cmp_func(ver1, ver2, ignore_epoch=ignore_epoch) if cmp_result is None: return False # Check if integer/long if not isinstance(cmp_result, numbers.Integral): log.error('The version comparison function did not return an ' 'integer/long.') return False if oper == '!=': return cmp_result not in cmp_map['=='] else: # Gracefully handle cmp_result not in (-1, 0, 1). if cmp_result < -1: cmp_result = -1 elif cmp_result > 1: cmp_result = 1 return cmp_result in cmp_map[oper]
[ "def", "compare", "(", "ver1", "=", "''", ",", "oper", "=", "'=='", ",", "ver2", "=", "''", ",", "cmp_func", "=", "None", ",", "ignore_epoch", "=", "False", ")", ":", "cmp_map", "=", "{", "'<'", ":", "(", "-", "1", ",", ")", ",", "'<='", ":", ...
32.705882
21.470588
def from_data_frame(df, categorical=False, nb_classes=None): """Convert DataFrame back to pair of numpy arrays """ lp_rdd = df.rdd.map(lambda row: LabeledPoint(row.label, row.features)) features, labels = from_labeled_point(lp_rdd, categorical, nb_classes) return features, labels
[ "def", "from_data_frame", "(", "df", ",", "categorical", "=", "False", ",", "nb_classes", "=", "None", ")", ":", "lp_rdd", "=", "df", ".", "rdd", ".", "map", "(", "lambda", "row", ":", "LabeledPoint", "(", "row", ".", "label", ",", "row", ".", "featu...
49.166667
16.833333
def get_s3_multipart_chunk_size(filesize): """Returns the chunk size of the S3 multipart object, given a file's size.""" if filesize <= AWS_MAX_MULTIPART_COUNT * AWS_MIN_CHUNK_SIZE: return AWS_MIN_CHUNK_SIZE else: div = filesize // AWS_MAX_MULTIPART_COUNT if div * AWS_MAX_MULTIPART_COUNT < filesize: div += 1 return ((div + MiB - 1) // MiB) * MiB
[ "def", "get_s3_multipart_chunk_size", "(", "filesize", ")", ":", "if", "filesize", "<=", "AWS_MAX_MULTIPART_COUNT", "*", "AWS_MIN_CHUNK_SIZE", ":", "return", "AWS_MIN_CHUNK_SIZE", "else", ":", "div", "=", "filesize", "//", "AWS_MAX_MULTIPART_COUNT", "if", "div", "*", ...
43.888889
12.222222
def post(self, request, *args, **kwargs): """ Returns POST response. :param request: the request instance. :rtype: django.http.HttpResponse. """ form = None link_type = int(request.POST.get('link_type', 0)) if link_type == Link.LINK_TYPE_EMAIL: form = EmailLinkForm(**self.get_form_kwargs()) elif link_type == Link.LINK_TYPE_EXTERNAL: form = ExternalLinkForm(**self.get_form_kwargs()) if form: if form.is_valid(): return self.form_valid(form) else: return self.form_invalid(form) else: raise Http404()
[ "def", "post", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "form", "=", "None", "link_type", "=", "int", "(", "request", ".", "POST", ".", "get", "(", "'link_type'", ",", "0", ")", ")", "if", "link_type", "==...
30.545455
15
def delete(self, ids): """ Method to delete network-ipv4's by their ids :param ids: Identifiers of network-ipv4's :return: None """ url = build_uri_with_ids('api/v3/networkv4/%s/', ids) return super(ApiNetworkIPv4, self).delete(url)
[ "def", "delete", "(", "self", ",", "ids", ")", ":", "url", "=", "build_uri_with_ids", "(", "'api/v3/networkv4/%s/'", ",", "ids", ")", "return", "super", "(", "ApiNetworkIPv4", ",", "self", ")", ".", "delete", "(", "url", ")" ]
28.1
17.3
def use(self, middleware=None, path='/', method_mask=HTTPMethod.ALL): """ Use the middleware (a callable with parameters res, req, next) upon requests match the provided path. A None path matches every request. Returns the middleware so this method may be used as a decorator. Args: middleware (callable): A function with signature '(req, res)' to be called with every request which matches path. path (str or regex): Object used to test the requests path. If it matches, either by equality or a successful regex match, the middleware is called with the req/res pair. method_mask (Optional[HTTPMethod]): Filters requests by HTTP method. The HTTPMethod enum behaves as a bitmask, so multiple methods may be joined by `+` or `\|`, removed with `-`, or toggled with `^` (e.g. `HTTPMethod.GET + HTTPMethod.POST`, `HTTPMethod.ALL - HTTPMethod.DELETE`). Returns: Returns the provided middleware; a requirement for this method to be used as a decorator. """ # catch decorator pattern if middleware is None: return lambda mw: self.use(mw, path, method_mask) if hasattr(middleware, '__growler_router'): router = getattr(middleware, '__growler_router') if isinstance(router, (types.MethodType,)): router = router() self.add_router(path, router) elif isinstance(type(middleware), RouterMeta): router = middleware._RouterMeta__growler_router() self.add_router(path, router) elif hasattr(middleware, '__iter__'): for mw in middleware: self.use(mw, path, method_mask) else: log.info("{} Using {} on path {}", id(self), middleware, path) self.middleware.add(path=path, func=middleware, method_mask=method_mask) return middleware
[ "def", "use", "(", "self", ",", "middleware", "=", "None", ",", "path", "=", "'/'", ",", "method_mask", "=", "HTTPMethod", ".", "ALL", ")", ":", "# catch decorator pattern", "if", "middleware", "is", "None", ":", "return", "lambda", "mw", ":", "self", "....
42.959184
19.857143
def safeMakeDirs(path, mode=0o777): """ Creates the given directory path, as well as intermediate directories. Catches any I/O exception, returning False in that case; otherwise, True is returned. """ try: os.makedirs(path, mode) return True except OSError: return False
[ "def", "safeMakeDirs", "(", "path", ",", "mode", "=", "0o777", ")", ":", "try", ":", "os", ".", "makedirs", "(", "path", ",", "mode", ")", "return", "True", "except", "OSError", ":", "return", "False" ]
29.333333
18
def datetimes_to_durations(
    start_times, end_times, fill_date=datetime.today(), freq="D", dayfirst=False, na_values=None
):
    """
    Transform arrays of start and end datetimes into the lifelines
    (duration, event-observed) representation.

    Parameters
    ----------
    start_times: array, Series or DataFrame
        iterable of start times (strings or datetime objects).
    end_times: array, Series or DataFrame
        iterable of end times; None or an empty string (or any value in
        ``na_values``) marks a censored observation.
    fill_date: datetime, optional (default=datetime.today())
        substituted for missing end times; this is the last observation
        date, and anything ending after it is censored too.
    freq: string, optional (default='D')
        Pandas frequency string giving the time units. Default days.
    dayfirst: boolean, optional (default=False)
        parse dates European-style, i.e. day/month/year.
    na_values : list, optional
        extra values to recognize as NA/NaN. Ex: ['', 'NaT']

    Returns
    -------
    T: numpy array
        float durations in units of ``freq``.
    C: numpy array
        boolean event indicators: True if death observed, False else.

    Examples
    --------
    >>> from lifelines.utils import datetimes_to_durations
    >>> start_dates = ['2015-01-01', '2015-04-01', '2014-04-05']
    >>> end_dates = ['2016-02-02', None, '2014-05-06']
    >>> T, E = datetimes_to_durations(start_dates, end_dates, freq="D")
    """
    fill_date = pd.to_datetime(fill_date)
    timedelta_dtype = "timedelta64[%s]" % freq

    starts = pd.Series(start_times).copy()
    ends = pd.Series(end_times).copy()

    # Missing or NA-valued end times mean the subject was censored; give
    # them the cutoff date so a duration can still be computed.
    observed = ~(pd.isnull(ends).values | ends.isin(na_values or [""]))
    ends[~observed] = fill_date

    start_dt = pd.to_datetime(starts, dayfirst=dayfirst)
    end_dt = pd.to_datetime(ends, dayfirst=dayfirst, errors="coerce")

    # Events recorded after the observation cutoff are censored as well.
    observed[end_dt > fill_date] = False

    T = (end_dt - start_dt).values.astype(timedelta_dtype).astype(float)
    if (T < 0).sum():
        warnings.warn("Warning: some values of start_times are after end_times")
    return T, observed.values
[ "def", "datetimes_to_durations", "(", "start_times", ",", "end_times", ",", "fill_date", "=", "datetime", ".", "today", "(", ")", ",", "freq", "=", "\"D\"", ",", "dayfirst", "=", "False", ",", "na_values", "=", "None", ")", ":", "fill_date", "=", "pd", "...
42.067797
25.932203
def validate_schema(instance, schema, test_required=True, data_location=None, skip_missing_data=False):
    """Check if DictField values are consistent with our data types.

    Perform basic JSON schema validation and our custom validations:

      * check that required fields are given (if `test_required` is set
        to ``True``)
      * check if ``basic:file:`` and ``list:basic:file`` fields match
        regex given in schema (only if ``validate_regex`` is defined in
        schema for corresponding fields) and exists (only if
        ``data_location`` is given)
      * check if directories referenced in ``basic:dir:`` and
        ``list:basic:dir`` fields exist (only if ``data_location`` is
        given)
      * check that referenced ``Data`` objects (in ``data:<data_type>``
        and  ``list:data:<data_type>`` fields) exists and are of type
        ``<data_type>``
      * check that referenced ``Storage`` objects (in ``basic:json``
        fields) exists

    :param list instance: Instance to be validated
    :param list schema: Schema for validation
    :param bool test_required: Flag for testing if all required fields
        are present. It is useful if validation is run before ``Data``
        object is finished and there are some fields still missing
        (default: ``False``)
    :param :class:`~resolwe.flow.models.data.DataLocation` data_location:
        data location used for checking if files and directories exist
        (default: ``None``)
    :param bool skip_missing_data: Don't raise an error if referenced
        ``Data`` object does not exist
    :rtype: None
    :raises ValidationError: if ``instance`` doesn't match schema
        defined in ``schema``
    """
    from .storage import Storage  # Prevent circular import.

    path_prefix = None
    if data_location:
        path_prefix = data_location.get_path()

    def validate_refs(field):
        """Validate reference paths."""
        # Each entry in `refs` must exist on disk relative to the data location.
        for ref_filename in field.get('refs', []):
            ref_path = os.path.join(path_prefix, ref_filename)
            if not os.path.exists(ref_path):
                raise ValidationError("Path referenced in `refs` ({}) does not exist.".format(ref_path))
            if not (os.path.isfile(ref_path) or os.path.isdir(ref_path)):
                raise ValidationError(
                    "Path referenced in `refs` ({}) is neither a file or directory.".format(ref_path))

    def validate_file(field, regex):
        """Validate file name (and check that it exists)."""
        filename = field['file']

        if regex and not re.search(regex, filename):
            raise ValidationError(
                "File name {} does not match regex {}".format(filename, regex))

        # Existence checks only run when a data location was supplied.
        if path_prefix:
            path = os.path.join(path_prefix, filename)
            if not os.path.exists(path):
                raise ValidationError("Referenced path ({}) does not exist.".format(path))
            if not os.path.isfile(path):
                raise ValidationError("Referenced path ({}) is not a file.".format(path))

            validate_refs(field)

    def validate_dir(field):
        """Check that dirs and referenced files exists."""
        dirname = field['dir']

        # Existence checks only run when a data location was supplied.
        if path_prefix:
            path = os.path.join(path_prefix, dirname)
            if not os.path.exists(path):
                raise ValidationError("Referenced path ({}) does not exist.".format(path))
            if not os.path.isdir(path):
                raise ValidationError("Referenced path ({}) is not a directory.".format(path))

            validate_refs(field)

    def validate_data(data_pk, type_):
        """Check that `Data` objects exist and is of right type."""
        from .data import Data  # prevent circular import

        data_qs = Data.objects.filter(pk=data_pk).values('process__type')
        if not data_qs.exists():
            if skip_missing_data:
                return

            raise ValidationError(
                "Referenced `Data` object does not exist (id:{})".format(data_pk))
        data = data_qs.first()
        # Process type matching is prefix-based (type hierarchy).
        if not data['process__type'].startswith(type_):
            raise ValidationError(
                "Data object of type `{}` is required, but type `{}` is given. "
                "(id:{})".format(type_, data['process__type'], data_pk))

    def validate_range(value, interval, name):
        """Check that given value is inside the specified range."""
        if not interval:
            return

        if value < interval[0] or value > interval[1]:
            raise ValidationError(
                "Value of field '{}' is out of range. It should be between {} and {}.".format(
                    name, interval[0], interval[1]
                )
            )

    is_dirty = False
    dirty_fields = []
    for _schema, _fields, _ in iterate_schema(instance, schema):
        name = _schema['name']
        is_required = _schema.get('required', True)

        if test_required and is_required and name not in _fields:
            is_dirty = True
            dirty_fields.append(name)

        if name in _fields:
            field = _fields[name]
            type_ = _schema.get('type', "")

            # Treat None as if the field is missing.
            if not is_required and field is None:
                continue

            try:
                jsonschema.validate([{"type": type_, "value": field}], TYPE_SCHEMA)
            except jsonschema.exceptions.ValidationError as ex:
                raise ValidationError(ex.message)

            choices = [choice['value'] for choice in _schema.get('choices', [])]
            allow_custom_choice = _schema.get('allow_custom_choice', False)
            if choices and not allow_custom_choice and field not in choices:
                raise ValidationError(
                    "Value of field '{}' must match one of predefined choices. "
                    "Current value: {}".format(name, field)
                )

            # Dispatch on the (string) field type to the matching validator.
            if type_ == 'basic:file:':
                validate_file(field, _schema.get('validate_regex'))

            elif type_ == 'list:basic:file:':
                for obj in field:
                    validate_file(obj, _schema.get('validate_regex'))

            elif type_ == 'basic:dir:':
                validate_dir(field)

            elif type_ == 'list:basic:dir:':
                for obj in field:
                    validate_dir(obj)

            elif type_ == 'basic:json:' and not Storage.objects.filter(pk=field).exists():
                raise ValidationError(
                    "Referenced `Storage` object does not exist (id:{})".format(field))

            elif type_.startswith('data:'):
                validate_data(field, type_)

            elif type_.startswith('list:data:'):
                for data_id in field:
                    validate_data(data_id, type_[5:])  # remove `list:` from type

            elif type_ == 'basic:integer:' or type_ == 'basic:decimal:':
                validate_range(field, _schema.get('range'), name)

            elif type_ == 'list:basic:integer:' or type_ == 'list:basic:decimal:':
                for obj in field:
                    validate_range(obj, _schema.get('range'), name)

    try:
        # Check that schema definitions exist for all fields
        for _, _ in iterate_fields(instance, schema):
            pass
    except KeyError as ex:
        raise ValidationError(str(ex))

    if is_dirty:
        dirty_fields = ['"{}"'.format(field) for field in dirty_fields]
        raise DirtyError("Required fields {} not given.".format(', '.join(dirty_fields)))
[ "def", "validate_schema", "(", "instance", ",", "schema", ",", "test_required", "=", "True", ",", "data_location", "=", "None", ",", "skip_missing_data", "=", "False", ")", ":", "from", ".", "storage", "import", "Storage", "# Prevent circular import.", "path_prefi...
40.254054
22.740541
def set_shaders(self, vert, frag):
    """Compile the given GLSL sources and link them into this program.

    This function takes care of setting the shading code and
    compiling+linking it into a working program object that is ready
    to use.

    Parameters
    ----------
    vert : str
        Vertex shader source code.
    frag : str
        Fragment shader source code.

    Raises
    ------
    RuntimeError
        If compilation of either shader, or linking of the program, fails.
    """
    self._linked = False
    # Create temporary shader objects
    vert_handle = gl.glCreateShader(gl.GL_VERTEX_SHADER)
    frag_handle = gl.glCreateShader(gl.GL_FRAGMENT_SHADER)
    try:
        # For both vertex and fragment shader: set source, compile, check
        for code, handle, type_ in [(vert, vert_handle, 'vertex'),
                                    (frag, frag_handle, 'fragment')]:
            gl.glShaderSource(handle, code)
            gl.glCompileShader(handle)
            status = gl.glGetShaderParameter(handle, gl.GL_COMPILE_STATUS)
            if not status:
                errors = gl.glGetShaderInfoLog(handle)
                errormsg = self._get_error(code, errors, 4)
                raise RuntimeError("Shader compilation error in %s:\n%s" %
                                   (type_ + ' shader', errormsg))
        # Attach shaders
        gl.glAttachShader(self._handle, vert_handle)
        gl.glAttachShader(self._handle, frag_handle)
        # Link the program and check
        gl.glLinkProgram(self._handle)
        if not gl.glGetProgramParameter(self._handle, gl.GL_LINK_STATUS):
            raise RuntimeError('Program linking error:\n%s'
                               % gl.glGetProgramInfoLog(self._handle))
        # Detach on success so the deletes below actually free the objects.
        gl.glDetachShader(self._handle, vert_handle)
        gl.glDetachShader(self._handle, frag_handle)
    finally:
        # BUGFIX: previously the shader objects leaked GPU memory when
        # compilation or linking raised. Deleting in `finally` always flags
        # them for deletion (a shader still attached on the error path is
        # freed once its program is destroyed).
        # http://gamedev.stackexchange.com/questions/47910
        gl.glDeleteShader(vert_handle)
        gl.glDeleteShader(frag_handle)

    # Now we know what variables will be used by the program
    self._unset_variables = self._get_active_attributes_and_uniforms()
    self._handles = {}
    self._known_invalid = set()
    self._linked = True
[ "def", "set_shaders", "(", "self", ",", "vert", ",", "frag", ")", ":", "self", ".", "_linked", "=", "False", "# Create temporary shader objects", "vert_handle", "=", "gl", ".", "glCreateShader", "(", "gl", ".", "GL_VERTEX_SHADER", ")", "frag_handle", "=", "gl"...
50.225
16.15
def deduplicate(s, ch):
    """Collapse consecutive runs of *ch* in *s* down to a single *ch*.

    Leading/trailing whitespace is stripped first; empty fragments
    produced by the split are dropped before rejoining.

    From http://stackoverflow.com/q/42216559/610569

        s = 'this  is   an   irritating string with  random spacing  .'
        deduplicate(s)
        'this is an irritating string with random spacing .'
    """
    fragments = (piece for piece in s.strip().split(ch) if piece)
    return ch.join(fragments)
[ "def", "deduplicate", "(", "s", ",", "ch", ")", ":", "return", "ch", ".", "join", "(", "[", "substring", "for", "substring", "in", "s", ".", "strip", "(", ")", ".", "split", "(", "ch", ")", "if", "substring", "]", ")" ]
35.666667
19.666667
def get_loss(modality_type, value=None):
  """Gets default loss transformation; if none available, return value."""
  # Table of (modality group, loss fn); first group containing the type wins.
  loss_by_group = (
      ((ModalityType.AUDIO,
        ModalityType.AUDIO_SPECTRAL,
        ModalityType.CLASS_LABEL,
        ModalityType.IDENTITY,
        ModalityType.IDENTITY_SYMBOL,
        ModalityType.IMAGE,
        ModalityType.IMAGE_CHANNEL_BOTTOM_IDENTITY,
        ModalityType.IMAGE_CHANNEL_COMPRESS,
        ModalityType.IMAGE_CHANNEL_EMBEDDINGS_BOTTOM,
        ModalityType.REAL,
        ModalityType.SPEECH_RECOGNITION,
        ModalityType.SYMBOL,
        ModalityType.SYMBOL_WEIGHTS_ALL), generic_loss),
      ((ModalityType.CTC_SYMBOL,), ctc_symbol_loss),
      ((ModalityType.GENERIC_L2_LOSS,), generic_l2_loss),
      ((ModalityType.MULTI_LABEL,), multi_label_loss),
      ((ModalityType.ONE_HOT_CLASS_LABEL,
        ModalityType.SOFTMAX_AVERAGE_POOLING_CLASS_LABEL,
        ModalityType.SOFTMAX_LAST_TIMESTEP_CLASS_LABEL,
        ModalityType.SOFTMAX_MAX_POOLING_CLASS_LABEL),
       one_hot_class_label_loss),
      ((ModalityType.REAL_L2_LOSS,), real_l2_loss),
      ((ModalityType.REAL_LOG_POISSON_LOSS,), real_log_poisson_loss),
      ((ModalityType.SIGMOID_CLASS_LABEL,), sigmoid_class_label_loss),
      ((ModalityType.SIGMOID_MAX_POOLING_CLASS_LABEL,),
       sigmoid_max_pooling_class_label_loss),
      ((ModalityType.SYMBOL_ONE_HOT,), symbol_one_hot_loss),
      ((ModalityType.VIDEO,
        ModalityType.VIDEO_BITWISE,
        ModalityType.VIDEO_PIXEL_NOISE), video_loss),
      ((ModalityType.VIDEO_IDENTITY,), video_identity_loss),
      ((ModalityType.VIDEO_L1,), video_l1_loss),
      ((ModalityType.VIDEO_L1_RAW,), video_l1_raw_loss),
      ((ModalityType.VIDEO_L2,), video_l2_loss),
      ((ModalityType.VIDEO_L2_RAW,), video_l2_raw_loss),
  )
  for group, loss in loss_by_group:
    if modality_type in group:
      return loss
  return value
[ "def", "get_loss", "(", "modality_type", ",", "value", "=", "None", ")", ":", "if", "modality_type", "in", "(", "ModalityType", ".", "AUDIO", ",", "ModalityType", ".", "AUDIO_SPECTRAL", ",", "ModalityType", ".", "CLASS_LABEL", ",", "ModalityType", ".", "IDENTI...
45.461538
13.038462
def _set_ndb_cache_policy():
    """Tell NDB to never cache anything in memcache or in-process.

    This ensures that entities fetched from Datastore input_readers via NDB
    will not bloat up the request memory size and Datastore Puts will avoid
    doing calls to memcache. Without this you get soft memory limit exits,
    which hurts overall throughput.
    """
    never_cache = lambda key: False
    context = ndb.get_context()
    context.set_cache_policy(never_cache)
    context.set_memcache_policy(never_cache)
[ "def", "_set_ndb_cache_policy", "(", ")", ":", "ndb_ctx", "=", "ndb", ".", "get_context", "(", ")", "ndb_ctx", ".", "set_cache_policy", "(", "lambda", "key", ":", "False", ")", "ndb_ctx", ".", "set_memcache_policy", "(", "lambda", "key", ":", "False", ")" ]
42.727273
16.454545
def rotation_matrix(phi, theta, psi):
    """Return the rotation matrix for the given Euler angles,
    composed as Rz(phi) @ Rx(theta) @ Rz(psi)."""
    inner = np.dot(Rx_matrix(theta), Rz_matrix(psi))
    return np.dot(Rz_matrix(phi), inner)
[ "def", "rotation_matrix", "(", "phi", ",", "theta", ",", "psi", ")", ":", "return", "np", ".", "dot", "(", "Rz_matrix", "(", "phi", ")", ",", "np", ".", "dot", "(", "Rx_matrix", "(", "theta", ")", ",", "Rz_matrix", "(", "psi", ")", ")", ")" ]
68.333333
12.666667
def parse_declarations(self, declarations):
    """Parse a CSS declaration list into a property -> value dict."""
    pairs = self.declaration_re.findall(declarations)
    return dict(pairs)
[ "def", "parse_declarations", "(", "self", ",", "declarations", ")", ":", "declarations", "=", "self", ".", "declaration_re", ".", "findall", "(", "declarations", ")", "return", "dict", "(", "declarations", ")" ]
33
6.333333
def like_user(self, user_id, amount=None, filtration=True):
    """ Likes last user_id's medias.

    :param user_id: user whose feed to like (any id accepted by
        ``convert_to_user_id``)
    :param amount: like at most this many of the newest medias
        (``None`` means all returned medias)
    :param filtration: when True, skip the user entirely if
        ``check_user`` rejects them, and filter their medias
    :return: result of ``like_medias``, or ``False`` if the user was
        rejected or no medias were available
    """
    if filtration:
        if not self.check_user(user_id):
            return False
    self.logger.info("Liking user_%s's feed:" % user_id)
    user_id = self.convert_to_user_id(user_id)
    medias = self.get_user_medias(user_id, filtration=filtration)
    if not medias:
        # Either the account is private/closed or filtering removed everything.
        self.logger.info(
            "None medias received: account is closed or medias have been filtered.")
        return False
    return self.like_medias(medias[:amount])
[ "def", "like_user", "(", "self", ",", "user_id", ",", "amount", "=", "None", ",", "filtration", "=", "True", ")", ":", "if", "filtration", ":", "if", "not", "self", ".", "check_user", "(", "user_id", ")", ":", "return", "False", "self", ".", "logger", ...
41.384615
16.076923
def draw(self, mode, selection):
    """ Draw program in given mode, with given selection (IndexBuffer or
    first, count).

    A 3-tuple selection ``(id_, gtype, count)`` draws indexed geometry via
    glDrawElements using the index buffer registered under ``id_``; a
    2-tuple ``(first, count)`` draws unindexed geometry via glDrawArrays.
    Raises RuntimeError if shaders have not been linked yet.
    """
    if not self._linked:
        raise RuntimeError('Cannot draw program if code has not been set')
    # Init
    gl.check_error('Check before draw')
    mode = as_enum(mode)
    # Draw
    if len(selection) == 3:
        # Selection based on indices
        id_, gtype, count = selection
        if count:
            self._pre_draw()
            ibuf = self._parser.get_object(id_)
            ibuf.activate()
            # Offset None: indices come from the bound element buffer.
            gl.glDrawElements(mode, count, as_enum(gtype), None)
            ibuf.deactivate()
    else:
        # Selection based on start and count
        first, count = selection
        if count:
            self._pre_draw()
            gl.glDrawArrays(mode, first, count)
    # Wrap up
    gl.check_error('Check after draw')
    self._post_draw()
[ "def", "draw", "(", "self", ",", "mode", ",", "selection", ")", ":", "if", "not", "self", ".", "_linked", ":", "raise", "RuntimeError", "(", "'Cannot draw program if code has not been set'", ")", "# Init", "gl", ".", "check_error", "(", "'Check before draw'", ")...
34.964286
12.571429
def returns_true_or_raises(f):
    """A safety net.

    Decorator for functions that are only allowed to return True or raise
    an exception.

    Args:
        f: A function whose only expected return value is True.

    Returns:
        A wrapped functions whose guaranteed only return value is True.
    """
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        result = f(*args, **kwargs)
        if result is not True:
            raise RuntimeError("Unexpected return value %r" % result)
        return True
    return wrapped
[ "def", "returns_true_or_raises", "(", "f", ")", ":", "@", "functools", ".", "wraps", "(", "f", ")", "def", "wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "r...
27.368421
21.736842
def _sm_stop_from_pain(self, *args, **kwargs):
    """State-machine action: stop chaos while a blockade event is in
    progress and reset everything."""
    blockade = self._blockade_name
    _logger.info("Stopping chaos for blockade %s" % blockade)
    self._do_reset_all()
[ "def", "_sm_stop_from_pain", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "_logger", ".", "info", "(", "\"Stopping chaos for blockade %s\"", "%", "self", ".", "_blockade_name", ")", "self", ".", "_do_reset_all", "(", ")" ]
39
12.666667
def update_local_reference(self, index, ref):
    """Overwrite the reference stored at *index* and persist via cache()."""
    refs = self._references
    refs[index] = ref
    self.cache()
[ "def", "update_local_reference", "(", "self", ",", "index", ",", "ref", ")", ":", "self", ".", "_references", "[", "index", "]", "=", "ref", "self", ".", "cache", "(", ")" ]
43.25
7
def associa_equipamento(self, id_equip, id_grupo_equipamento):
    """Associate an equipment with an equipment group.

    :param id_equip: Equipment identifier.
    :param id_grupo_equipamento: Equipment group identifier.

    :return: Dictionary with the following structure:
        {'equipamento_grupo':{'id': < group_equipment_id >}}

    :raise GrupoEquipamentoNaoExisteError: Equipment group not registered.
    :raise InvalidParameterError: The equipment and/or group identifier
        is null or invalid.
    :raise EquipamentoNaoExisteError: Equipment not registered.
    :raise EquipamentoError: Equipment is already associated with the group.
    :raise DataBaseError: networkapi failed while accessing the database.
    :raise XMLError: networkapi failed to read the request XML or to
        generate the response XML.
    """
    # Request payload keys are part of the networkapi wire format --
    # do not rename them.
    equip_map = dict()
    equip_map['id_grupo'] = id_grupo_equipamento
    equip_map['id_equipamento'] = id_equip

    code, xml = self.submit(
        {'equipamento_grupo': equip_map},
        'POST',
        'equipamentogrupo/associa/')

    return self.response(code, xml)
[ "def", "associa_equipamento", "(", "self", ",", "id_equip", ",", "id_grupo_equipamento", ")", ":", "equip_map", "=", "dict", "(", ")", "equip_map", "[", "'id_grupo'", "]", "=", "id_grupo_equipamento", "equip_map", "[", "'id_equipamento'", "]", "=", "id_equip", "...
45.88
27.88
def parse_line(self, line):
    """Parse a single ``##``-prefixed VCF header line.

    Trailing ``'\\r\\n'`` or ``'\\n'`` is ignored. The key before the first
    ``=`` selects a specialized sub-parser (falling back to the default one).

    :param str line: ``str`` with line to parse
    :returns: appropriate :py:class:`HeaderLine` parsed from ``line``
    :raises: :py:class:`vcfpy.exceptions.InvalidHeaderException` if the
        line does not start with ``##`` or contains no ``=``
    """
    if not line or not line.startswith("##"):
        raise exceptions.InvalidHeaderException(
            'Invalid VCF header line (must start with "##") {}'.format(line)
        )
    if "=" not in line:
        raise exceptions.InvalidHeaderException(
            'Invalid VCF header line (must contain "=") {}'.format(line)
        )
    # Drop the '##' prefix and trailing whitespace, then split at '='.
    stripped = line[len("##"):].rstrip()
    key, value = split_mapping(stripped)
    parser = self.sub_parsers.get(key, self.sub_parsers["__default__"])
    return parser.parse_key_value(key, value)
[ "def", "parse_line", "(", "self", ",", "line", ")", ":", "if", "not", "line", "or", "not", "line", ".", "startswith", "(", "\"##\"", ")", ":", "raise", "exceptions", ".", "InvalidHeaderException", "(", "'Invalid VCF header line (must start with \"##\") {}'", ".", ...
48.347826
19.043478
def msg(self, *args, **kwargs):
    """Forward to the callback only when the interval has been reached
    (or no timestamp has been recorded yet), then reset the timer."""
    interval_due = self.timestamp is None or self._interval_reached()
    if interval_due:
        self.callback(*args, **kwargs)
        self.reset()
[ "def", "msg", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "timestamp", "is", "None", "or", "self", ".", "_interval_reached", "(", ")", ":", "self", ".", "callback", "(", "*", "args", ",", "*", "*", "kwargs...
43.2
13.2
def _create_default_config_file(self):
    """
    Create the config file with default sections/values and exit.

    Called when no config file exists yet; after writing the file this
    terminates the process via ``sys.exit`` with a message telling the
    user where the file was created.
    """
    logger.info('Initialize Maya launcher, creating config file...\n')

    # Empty sections first, then the default keys within them.
    self.add_section(self.DEFAULTS)
    self.add_section(self.PATTERNS)
    self.add_section(self.ENVIRONMENTS)
    self.add_section(self.EXECUTABLES)

    self.set(self.DEFAULTS, 'executable', None)
    self.set(self.DEFAULTS, 'environment', None)
    self.set(self.PATTERNS, 'exclude', ', '.join(self.EXLUDE_PATTERNS))
    self.set(self.PATTERNS, 'icon_ext', ', '.join(self.ICON_EXTENSIONS))

    # Make sure the parent directory exists before touching the file.
    self.config_file.parent.mkdir(exist_ok=True)
    self.config_file.touch()
    with self.config_file.open('wb') as f:
        self.write(f)

    # If this function is run inform the user that a new file has been
    # created.
    sys.exit('Maya launcher has successfully created config file at:\n'
             ' "{}"'.format(str(self.config_file)))
[ "def", "_create_default_config_file", "(", "self", ")", ":", "logger", ".", "info", "(", "'Initialize Maya launcher, creating config file...\\n'", ")", "self", ".", "add_section", "(", "self", ".", "DEFAULTS", ")", "self", ".", "add_section", "(", "self", ".", "PA...
44.304348
17.26087
def parse_block_scalar_indent(token_class):
    """Process indentation spaces in a block scalar.

    Returns a lexer callback (generator) that records the scalar's indent
    on first sight, pops two states off the context stack when the scalar
    ends (indentation too small), and otherwise yields the indent text as
    *token_class*.
    """
    def callback(lexer, match, context):
        text = match.group()
        known_indent = context.block_scalar_indent
        if known_indent is None:
            # First content line: establish the scalar's indent level,
            # unless we are already back at (or before) the outer indent.
            if len(text) <= max(context.indent, 0):
                context.stack.pop()
                context.stack.pop()
                return
            context.block_scalar_indent = len(text)
        elif len(text) < known_indent:
            # Dedent below the scalar's indent ends the block scalar.
            context.stack.pop()
            context.stack.pop()
            return
        if text:
            yield match.start(), token_class, text
        context.pos = match.end()
    return callback
[ "def", "parse_block_scalar_indent", "(", "token_class", ")", ":", "def", "callback", "(", "lexer", ",", "match", ",", "context", ")", ":", "text", "=", "match", ".", "group", "(", ")", "if", "context", ".", "block_scalar_indent", "is", "None", ":", "if", ...
40.052632
9.578947
def save(self, path, group=None, ifo='P1'):
    """
    Save frequency series to a Numpy .npy, hdf, or text file. The first column
    contains the sample frequencies, the second contains the values.
    In the case of a complex frequency series saved as
    text, the imaginary part is written as a third column. When using hdf format,
    the data is stored as a single vector, along with relevant attributes.

    Parameters
    ----------
    path: string
        Destination file path. Must end with either .hdf, .npy or .txt.

    group: string
        Additional name for internal storage use. Ex. hdf storage uses
        this as the key value.

    ifo: string
        Detector name used as the PSD dictionary key for XML output
        (default 'P1').

    Raises
    ------
    ValueError
        If path does not end in .npy or .txt.
    """
    ext = _os.path.splitext(path)[1]
    if ext == '.npy':
        output = _numpy.vstack((self.sample_frequencies.numpy(), self.numpy())).T
        _numpy.save(path, output)
    elif ext == '.txt':
        if self.kind == 'real':
            output = _numpy.vstack((self.sample_frequencies.numpy(),
                                    self.numpy())).T
        elif self.kind == 'complex':
            # Real and imaginary parts become the 2nd and 3rd columns.
            output = _numpy.vstack((self.sample_frequencies.numpy(),
                                    self.numpy().real,
                                    self.numpy().imag)).T
        _numpy.savetxt(path, output)
    elif ext == '.xml' or path.endswith('.xml.gz'):
        # Imported lazily: only XML output needs these heavy dependencies.
        from pycbc.io.live import make_psd_xmldoc
        from glue.ligolw import utils
        if self.kind != 'real':
            raise ValueError('XML only supports real frequency series')
        output = self.lal()
        # When writing in this format we must *not* have the 0 values at
        # frequencies less than flow. To resolve this we set the first
        # non-zero value < flow.
        data_lal = output.data.data
        first_idx = _numpy.argmax(data_lal>0)
        if not first_idx == 0:
            data_lal[:first_idx] = data_lal[first_idx]
        psddict = {ifo: output}
        utils.write_filename(make_psd_xmldoc(psddict), path, gz=path.endswith(".gz"))
    elif ext =='.hdf':
        key = 'data' if group is None else group
        # NOTE(review): the h5py.File handle is never explicitly closed
        # here; presumably relying on garbage collection -- confirm.
        f = h5py.File(path)
        ds = f.create_dataset(key, data=self.numpy(),
                              compression='gzip',
                              compression_opts=9, shuffle=True)
        ds.attrs['epoch'] = float(self.epoch)
        ds.attrs['delta_f'] = float(self.delta_f)
    else:
        raise ValueError('Path must end with .npy, .txt, .xml, .xml.gz '
                         'or .hdf')
[ "def", "save", "(", "self", ",", "path", ",", "group", "=", "None", ",", "ifo", "=", "'P1'", ")", ":", "ext", "=", "_os", ".", "path", ".", "splitext", "(", "path", ")", "[", "1", "]", "if", "ext", "==", "'.npy'", ":", "output", "=", "_numpy", ...
43.546875
19.046875
def input(name, default=None, foreach=None):
    """Decorator to declare input for a node.

    Plain inputs, that is plain python objects, are directly passed to
    the node. Whereas streams generated by other nodes are requested
    and once the handles of all input streams are available the node is
    instantiated.

    Args:
        name (str): Name of the node function argument the input will
            be passed to.
        default: An optional default value for the input. This can be
            any python object or another node.
        foreach (bool): This parameter is currently not supported and
            only for internal usage.

    Returns:
        The original function decorated with this input specification.

        A function is turned into a node by the :func:`node` decorator.
    """
    # `default` and `foreach` are mutually exclusive ways of providing a value.
    assert default is None or foreach is None
    value = foreach if foreach is not None else default
    # Values produced by other nodes are requested as streams.
    value = StreamSpec(value) if isinstance(value, Node) else value
    foreach = foreach is not None
    spec = InputSpec(name, value, foreach)

    def deco(func):
        """Add {!r} to function.""".format(spec)
        # NOTE(review): the line above is an expression statement, not a
        # docstring -- `deco.__doc__` is never actually set; confirm
        # whether this no-op is intentional.
        specs = func.__dict__.setdefault('__marv_input_specs__', OrderedDict())
        if spec.name in specs:
            raise InputNameCollision(spec.name)
        specs[spec.name] = spec
        return func
    return deco
[ "def", "input", "(", "name", ",", "default", "=", "None", ",", "foreach", "=", "None", ")", ":", "assert", "default", "is", "None", "or", "foreach", "is", "None", "value", "=", "foreach", "if", "foreach", "is", "not", "None", "else", "default", "value"...
38.142857
19.285714
def sndwrite_like(samples: np.ndarray, likefile: str, outfile: str) -> None:
    """Write *samples* to *outfile*, copying the samplerate and encoding
    of *likefile*."""
    ref = sndinfo(likefile)
    sndwrite(samples, ref.samplerate, outfile, encoding=ref.encoding)
[ "def", "sndwrite_like", "(", "samples", ":", "np", ".", "ndarray", ",", "likefile", ":", "str", ",", "outfile", ":", "str", ")", "->", "None", ":", "info", "=", "sndinfo", "(", "likefile", ")", "sndwrite", "(", "samples", ",", "info", ".", "samplerate"...
38
15.714286
def _unicode(self): '''This returns a printable representation of the screen as a unicode string (which, under Python 3.x, is the same as 'str'). The end of each screen line is terminated by a newline.''' return u'\n'.join ([ u''.join(c) for c in self.w ])
[ "def", "_unicode", "(", "self", ")", ":", "return", "u'\\n'", ".", "join", "(", "[", "u''", ".", "join", "(", "c", ")", "for", "c", "in", "self", ".", "w", "]", ")" ]
47.333333
27.666667
def convert(self, line=None, is_end=True):
    """Read the line content and return the converted value

    :param line: the line to feed to converter
    :param is_end: if set to True, will raise an error if the line has
        something remaining.
    :raises TomlDecodeError: on empty input, no pattern match, or
        (with ``is_end``) trailing non-blank content.
    """
    if line is not None:
        self.line = line
    if not self.line:
        raise TomlDecodeError(self.parser.lineno, 'EOF is hit!')
    token = None
    self.line = self.line.lstrip()
    # Try each (key, regex) pair in order; the first match consumes its
    # text and dispatches to the matching convert_<key> handler.
    for key, pattern in self.patterns:
        m = pattern.match(self.line)
        if m:
            self.line = self.line[m.end():]
            handler = getattr(self, 'convert_%s' % key)
            token = handler(m)
            break
    else:
        raise TomlDecodeError(self.parser.lineno, 'Parsing error: %r' % self.line)
    if is_end and not BLANK_RE.match(self.line):
        raise TomlDecodeError(self.parser.lineno, 'Something is remained: %r' % self.line)
    return token
[ "def", "convert", "(", "self", ",", "line", "=", "None", ",", "is_end", "=", "True", ")", ":", "if", "line", "is", "not", "None", ":", "self", ".", "line", "=", "line", "if", "not", "self", ".", "line", ":", "raise", "TomlDecodeError", "(", "self",...
39.25
13.464286
def maybeDeferred(f, *args, **kw):
    """
    Copied from twisted.internet.defer and add a check to detect fibers.

    Calls ``f(*args, **kw)`` and normalizes the outcome to a Deferred:
    exceptions and Failure results become failed Deferreds, Deferreds
    pass through, and any other value is wrapped with ``succeed``.
    Raises RuntimeError if ``f`` returned a fiber instead of a deferred.
    """
    try:
        result = f(*args, **kw)
    except Exception:
        return fail(failure.Failure())

    if IFiber.providedBy(result):
        # Include the caller's frame in the message to make the misuse
        # easy to locate.
        import traceback
        frames = traceback.extract_stack()
        msg = "%s returned a fiber instead of a deferred" % (f, )
        if len(frames) > 1:
            msg += "; called from %s" % (frames[-2], )
        raise RuntimeError(msg)

    if isinstance(result, Deferred):
        return result
    elif isinstance(result, failure.Failure):
        return fail(result)
    else:
        return succeed(result)
[ "def", "maybeDeferred", "(", "f", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "try", ":", "result", "=", "f", "(", "*", "args", ",", "*", "*", "kw", ")", "except", "Exception", ":", "return", "fail", "(", "failure", ".", "Failure", "(", ")...
28.782609
15.130435
def fcoe_fcoe_fabric_map_fcoe_fcf_map_fcf_map_fif_rbid_fcf_map_fif_rbid_add(self, **kwargs):
    """Auto Generated Code

    Builds the NETCONF XML config for adding an FIF rbid entry to an
    FCoE FCF map and submits it through the callback.

    Keyword Args:
        fcoe_fabric_map_name: key of the enclosing fcoe-fabric-map.
        fcf_map_name: key of the fcoe-fcf-map.
        fcf_map_fif_rbid_add: rbid value to add.
        callback: optional override for self._callback.
    """
    config = ET.Element("config")
    fcoe = ET.SubElement(config, "fcoe", xmlns="urn:brocade.com:mgmt:brocade-fcoe")
    fcoe_fabric_map = ET.SubElement(fcoe, "fcoe-fabric-map")
    fcoe_fabric_map_name_key = ET.SubElement(fcoe_fabric_map, "fcoe-fabric-map-name")
    fcoe_fabric_map_name_key.text = kwargs.pop('fcoe_fabric_map_name')
    fcoe_fcf_map = ET.SubElement(fcoe_fabric_map, "fcoe-fcf-map")
    fcf_map_name_key = ET.SubElement(fcoe_fcf_map, "fcf-map-name")
    fcf_map_name_key.text = kwargs.pop('fcf_map_name')
    fcf_map_fif_rbid = ET.SubElement(fcoe_fcf_map, "fcf-map-fif-rbid")
    fcf_map_fif_rbid_add = ET.SubElement(fcf_map_fif_rbid, "fcf-map-fif-rbid-add")
    fcf_map_fif_rbid_add.text = kwargs.pop('fcf_map_fif_rbid_add')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "fcoe_fcoe_fabric_map_fcoe_fcf_map_fcf_map_fif_rbid_fcf_map_fif_rbid_add", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "fcoe", "=", "ET", ".", "SubElement", "(", "config", ",", "\"fcoe\"", "...
58.764706
27.176471
def sign_message(message, wif, hashfn=hashlib.sha256):
    """Sign a message digest with a WIF private key.

    :param message: message to sign; a ``str`` is encoded as UTF-8 first
    :param str wif: private key in WIF format
    :param hashfn: hash constructor used to digest the message
        (default ``hashlib.sha256``)
    :returns: compact recoverable signature as ``bytes``
        (1-byte recovery id followed by the 64-byte r||s signature)

    Fixes over the previous revision:
      * ``lenR is 32`` / ``lenS is 32`` compared ints by identity, which is
        implementation-dependent; now uses ``==``.
      * In the ``cryptography`` branch the recovery parameter ``i`` was never
        assigned (the ``recoverPubkeyParameter`` call was commented out), so
        finding a canonical signature raised ``NameError``; the call is
        restored, mirroring the ``ecdsa`` branch.
    """
    if not isinstance(message, bytes):
        message = bytes(message, "utf-8")
    digest = hashfn(message).digest()
    priv_key = PrivateKey(wif)
    p = bytes(priv_key)
    if SECP256K1_MODULE == "secp256k1":
        # libsecp256k1 path: vary ndata until the signature is canonical.
        ndata = secp256k1.ffi.new("const int *ndata")
        ndata[0] = 0
        while True:
            ndata[0] += 1
            privkey = secp256k1.PrivateKey(p, raw=True)
            sig = secp256k1.ffi.new("secp256k1_ecdsa_recoverable_signature *")
            signed = secp256k1.lib.secp256k1_ecdsa_sign_recoverable(
                privkey.ctx, sig, digest, privkey.private_key, secp256k1.ffi.NULL, ndata
            )
            if not signed == 1:  # pragma: no cover
                raise AssertionError()
            signature, i = privkey.ecdsa_recoverable_serialize(sig)
            if _is_canonical(signature):
                i += 4  # compressed
                i += 27  # compact
                break
    elif SECP256K1_MODULE == "cryptography":
        # pyca/cryptography path: signing uses a random nonce, so loop until
        # the DER encoding shows 32-byte r and s (canonical low form).
        cnt = 0
        private_key = ec.derive_private_key(
            int(repr(priv_key), 16), ec.SECP256K1(), default_backend()
        )
        public_key = private_key.public_key()
        while True:
            cnt += 1
            if not cnt % 20:  # pragma: no cover
                log.info(
                    "Still searching for a canonical signature. Tried %d times already!"
                    % cnt
                )
            order = ecdsa.SECP256k1.order
            sigder = private_key.sign(message, ec.ECDSA(hashes.SHA256()))
            r, s = decode_dss_signature(sigder)
            signature = ecdsa.util.sigencode_string(r, s, order)
            # Inspect the DER lengths of r and s to check canonicity.
            sigder = bytearray(sigder)
            lenR = sigder[3]
            lenS = sigder[5 + lenR]
            if lenR == 32 and lenS == 32:
                # Derive the recovery parameter (was missing; see docstring).
                i = recoverPubkeyParameter(message, digest, signature, public_key)
                i += 4  # compressed
                i += 27  # compact
                break
    else:  # pragma: no branch # pragma: no cover
        # Pure-python ecdsa fallback with deterministic-ish k (RFC 6979 core,
        # randomized via the local time) looped until canonical.
        cnt = 0
        sk = ecdsa.SigningKey.from_string(p, curve=ecdsa.SECP256k1)
        while 1:
            cnt += 1
            if not cnt % 20:  # pragma: no branch
                log.info(
                    "Still searching for a canonical signature. Tried %d times already!"
                    % cnt
                )
            # Deterministic k
            #
            k = ecdsa.rfc6979.generate_k(
                sk.curve.generator.order(),
                sk.privkey.secret_multiplier,
                hashlib.sha256,
                hashlib.sha256(
                    digest
                    + struct.pack(
                        "d", time.time()
                    )  # use the local time to randomize the signature
                ).digest(),
            )
            # Sign message
            #
            sigder = sk.sign_digest(digest, sigencode=ecdsa.util.sigencode_der, k=k)
            # Reformating of signature
            #
            r, s = ecdsa.util.sigdecode_der(sigder, sk.curve.generator.order())
            signature = ecdsa.util.sigencode_string(r, s, sk.curve.generator.order())
            # Make sure signature is canonical!
            #
            sigder = bytearray(sigder)
            lenR = sigder[3]
            lenS = sigder[5 + lenR]
            if lenR == 32 and lenS == 32:
                # Derive the recovery parameter
                #
                i = recoverPubkeyParameter(
                    message, digest, signature, sk.get_verifying_key()
                )
                i += 4  # compressed
                i += 27  # compact
                break
    # pack signature: 1-byte recovery id followed by r||s
    #
    sigstr = struct.pack("<B", i)
    sigstr += signature

    return sigstr
[ "def", "sign_message", "(", "message", ",", "wif", ",", "hashfn", "=", "hashlib", ".", "sha256", ")", ":", "if", "not", "isinstance", "(", "message", ",", "bytes", ")", ":", "message", "=", "bytes", "(", "message", ",", "\"utf-8\"", ")", "digest", "=",...
34.726496
17.811966
def main():
    """Run the playbook, forcing check (dry-run) mode."""
    # Ensure '--check' is on the command line exactly once.
    if '--check' not in sys.argv:
        sys.argv.append('--check')
    cli = PlaybookCLI(sys.argv)
    cli.parse()
    cli.run()
[ "def", "main", "(", ")", ":", "for", "flag", "in", "(", "'--check'", ",", ")", ":", "if", "flag", "not", "in", "sys", ".", "argv", ":", "sys", ".", "argv", ".", "append", "(", "flag", ")", "obj", "=", "PlaybookCLI", "(", "sys", ".", "argv", ")"...
23.25
14.5
def set(self, channel, state):
    """Set the DAC output value for a channel and enable the output.

    The state is validated via ``_check_dac_val`` before being scaled to
    the 0-255 byte range and written over I2C with the enable register
    (0x40) as the command byte.
    """
    value = self._check_dac_val(channel, state)
    self._dac_enabled = 0x40
    self.i2c.write8(self._dac_enabled, value * 255)
[ "def", "set", "(", "self", ",", "channel", ",", "state", ")", ":", "checked_val", "=", "self", ".", "_check_dac_val", "(", "channel", ",", "state", ")", "self", ".", "_dac_enabled", "=", "0x40", "self", ".", "i2c", ".", "write8", "(", "self", ".", "_...
45
11.2
def OnClose(self, event): """Program exit event handler""" # If changes have taken place save of old grid if undo.stack().haschanged(): save_choice = self.interfaces.get_save_request_from_user() if save_choice is None: # Cancelled close operation return elif save_choice: # User wants to save content post_command_event(self.main_window, self.main_window.SaveMsg) # Save the AUI state config["window_layout"] = repr(self.main_window._mgr.SavePerspective()) # Uninit the AUI stuff self.main_window._mgr.UnInit() # Save config config.save() # Close main_window self.main_window.Destroy() # Set file mode to 600 to protect GPG passwd a bit sp = wx.StandardPaths.Get() pyspreadrc_path = sp.GetUserConfigDir() + "/." + config.config_filename try: os.chmod(pyspreadrc_path, 0600) except OSError: dummyfile = open(pyspreadrc_path, "w") dummyfile.close() os.chmod(pyspreadrc_path, 0600)
[ "def", "OnClose", "(", "self", ",", "event", ")", ":", "# If changes have taken place save of old grid", "if", "undo", ".", "stack", "(", ")", ".", "haschanged", "(", ")", ":", "save_choice", "=", "self", ".", "interfaces", ".", "get_save_request_from_user", "("...
28.175
21.875
def get_brokers(self):
    """
    Parse ``self.kafka_url`` (a comma-separated list of URLs) and return
    ``hostname:port`` strings in the format kafka-python expects.
    """
    parsed = (urlparse(url) for url in self.kafka_url.split(','))
    return ['{}:{}'.format(p.hostname, p.port) for p in parsed]
[ "def", "get_brokers", "(", "self", ")", ":", "return", "[", "'{}:{}'", ".", "format", "(", "parsedUrl", ".", "hostname", ",", "parsedUrl", ".", "port", ")", "for", "parsedUrl", "in", "[", "urlparse", "(", "url", ")", "for", "url", "in", "self", ".", ...
44.857143
20
def getFontName(self, names, default="helvetica"):
    """Resolve a font name (or comma-separated list of candidates) against
    ``self.fontList``.

    :param names: a font name, a comma-separated string of candidate
        names, a list of names, or any object convertible with ``str()``
    :param default: font to fall back on when no candidate matches
    :returns: the matching font from ``self.fontList``, or the default
        font, or ``None`` if neither is registered

    Fix: exact-type checks (``type(x) is not ListType``,
    ``type(x) not in six.string_types``) rejected str/list subclasses;
    ``isinstance`` accepts them while keeping the same behavior for
    plain strings and lists.
    """
    # print names, self.fontList
    if not isinstance(names, list):
        # Normalize a single (possibly comma-separated) name into a list.
        if not isinstance(names, six.string_types):
            names = str(names)
        names = names.strip().split(",")
    for name in names:
        if not isinstance(name, six.string_types):
            name = str(name)
        # Lookup is case-insensitive and whitespace-tolerant.
        font = self.fontList.get(name.strip().lower(), None)
        if font is not None:
            return font
    return self.fontList.get(default, None)
[ "def", "getFontName", "(", "self", ",", "names", ",", "default", "=", "\"helvetica\"", ")", ":", "# print names, self.fontList", "if", "type", "(", "names", ")", "is", "not", "ListType", ":", "if", "type", "(", "names", ")", "not", "in", "six", ".", "str...
36
8.625
def construct_result_generator_middleware(result_generators):
    """
    Build a middleware that intercepts requests for any method present in
    ``result_generators`` (a mapping of method name -> generator function)
    and returns ``{'result': ...}`` from the generator; all other methods
    fall through to ``make_request``.

    Generator functions must have the signature ``fn(method, params)``.
    """
    def result_generator_middleware(make_request, web3):
        def middleware(method, params):
            if method not in result_generators:
                return make_request(method, params)
            return {'result': result_generators[method](method, params)}
        return middleware
    return result_generator_middleware
[ "def", "construct_result_generator_middleware", "(", "result_generators", ")", ":", "def", "result_generator_middleware", "(", "make_request", ",", "web3", ")", ":", "def", "middleware", "(", "method", ",", "params", ")", ":", "if", "method", "in", "result_generator...
45.3125
14.5625
def uniq(seq):
    """
    Return a copy of seq without duplicates, preserving order.

    Items are deduplicated by their ``str()`` representation, so items
    that stringify identically (e.g. 1 and '1') count as duplicates.
    """
    seen = set()
    result = []
    for item in seq:
        key = str(item)
        if key not in seen:
            seen.add(key)
            result.append(item)
    return result
[ "def", "uniq", "(", "seq", ")", ":", "seen", "=", "set", "(", ")", "return", "[", "x", "for", "x", "in", "seq", "if", "str", "(", "x", ")", "not", "in", "seen", "and", "not", "seen", ".", "add", "(", "str", "(", "x", ")", ")", "]" ]
39.75
20.5
def replication_state(self, repl_id):
    """
    Retrieve the state of the given replication. Possible values are
    ``triggered``, ``completed``, ``error``, and ``None`` (not yet
    triggered).

    :param str repl_id: Replication id identifying the replication to
        inspect.

    :returns: Replication state as a ``str``
    :raises CloudantReplicatorException: if the replication document
        cannot be found.
    """
    # Servers exposing the scheduler feature report state there.
    if "scheduler" in self.client.features():
        try:
            repl_doc = Scheduler(self.client).get_doc(repl_id)
        except HTTPError as err:
            raise CloudantReplicatorException(err.response.status_code, repl_id)
        return repl_doc['state']
    # Legacy path: read the state off the replication document itself.
    try:
        repl_doc = self.database[repl_id]
    except KeyError:
        raise CloudantReplicatorException(404, repl_id)
    repl_doc.fetch()
    return repl_doc.get('_replication_state')
[ "def", "replication_state", "(", "self", ",", "repl_id", ")", ":", "if", "\"scheduler\"", "in", "self", ".", "client", ".", "features", "(", ")", ":", "try", ":", "repl_doc", "=", "Scheduler", "(", "self", ".", "client", ")", ".", "get_doc", "(", "repl...
37.4
19.72
def deserialize(cls, cls_target, obj_raw):
    """
    Deserialize a raw JSON-compatible value into an instance of the
    target type, delegating to a registered deserializer when one exists.

    :type cls_target: T|type
    :type obj_raw: int|str|bool|float|list|dict|None
    :rtype: T
    """
    cls._initialize()
    deserializer = cls._get_deserializer(cls_target)
    # When no dedicated deserializer is registered, lookup falls back to
    # this class itself and the default path is used.
    if deserializer != cls:
        return deserializer.deserialize(cls_target, obj_raw)
    return cls._deserialize_default(cls_target, obj_raw)
[ "def", "deserialize", "(", "cls", ",", "cls_target", ",", "obj_raw", ")", ":", "cls", ".", "_initialize", "(", ")", "deserializer", "=", "cls", ".", "_get_deserializer", "(", "cls_target", ")", "if", "deserializer", "==", "cls", ":", "return", "cls", ".", ...
28.133333
18.933333
def _getPrototypes(indices, overlaps, topNumber=1): """ Given a compressed overlap array and a set of indices specifying a subset of those in that array, return the set of topNumber indices of vectors that have maximum average overlap with other vectors in `indices`. @param indices (arraylike) Array of indices for which to get prototypes. @param overlaps (numpy.ndarray) Condensed array of overlaps of the form returned by _computeOverlaps(). @param topNumber (int) The number of prototypes to return. Optional, defaults to 1. @returns (numpy.ndarray) Array of indices of prototypes """ # find the number of data points based on the length of the overlap array # solves for n: len(overlaps) = n(n-1)/2 n = numpy.roots([1, -1, -2 * len(overlaps)]).max() k = len(indices) indices = numpy.array(indices, dtype=int) rowIdxs = numpy.ndarray((k, k-1), dtype=int) colIdxs = numpy.ndarray((k, k-1), dtype=int) for i in xrange(k): rowIdxs[i, :] = indices[i] colIdxs[i, :i] = indices[:i] colIdxs[i, i:] = indices[i+1:] idx = HierarchicalClustering._condensedIndex(rowIdxs, colIdxs, n) subsampledOverlaps = overlaps[idx] meanSubsampledOverlaps = subsampledOverlaps.mean(1) biggestOverlapSubsetIdxs = numpy.argsort( -meanSubsampledOverlaps)[:topNumber] return indices[biggestOverlapSubsetIdxs]
[ "def", "_getPrototypes", "(", "indices", ",", "overlaps", ",", "topNumber", "=", "1", ")", ":", "# find the number of data points based on the length of the overlap array", "# solves for n: len(overlaps) = n(n-1)/2", "n", "=", "numpy", ".", "roots", "(", "[", "1", ",", ...
36.394737
21.078947
def _construct_category(glyph_name, data): """Derive (sub)category of a glyph name.""" # Glyphs creates glyphs that start with an underscore as "non-exportable" glyphs or # construction helpers without a category. if glyph_name.startswith("_"): return None, None # Glyph variants (e.g. "fi.alt") don't have their own entry, so we strip e.g. the # ".alt" and try a second lookup with just the base name. A variant is hopefully in # the same category as its base glyph. base_name = glyph_name.split(".", 1)[0] base_attribute = data.names.get(base_name) or {} if base_attribute: category = base_attribute.get("category") sub_category = base_attribute.get("subCategory") return category, sub_category # Detect ligatures. if "_" in base_name: base_names = base_name.split("_") base_names_attributes = [_lookup_attributes(name, data) for name in base_names] first_attribute = base_names_attributes[0] # If the first part is a Mark, Glyphs 2.6 declares the entire glyph a Mark if first_attribute.get("category") == "Mark": category = first_attribute.get("category") sub_category = first_attribute.get("subCategory") return category, sub_category # If the first part is a Letter... if first_attribute.get("category") == "Letter": # ... and the rest are only marks or separators or don't exist, the # sub_category is that of the first part ... if all( a.get("category") in (None, "Mark", "Separator") for a in base_names_attributes[1:] ): category = first_attribute.get("category") sub_category = first_attribute.get("subCategory") return category, sub_category # ... otherwise, a ligature. category = first_attribute.get("category") sub_category = "Ligature" return category, sub_category # TODO: Cover more cases. E.g. "one_one" -> ("Number", "Ligature") but # "one_onee" -> ("Number", "Composition"). # Still nothing? Maybe we're looking at something like "uni1234.alt", try # using fontTools' AGL module to convert the base name to something meaningful. 
# Corner case: when looking at ligatures, names that don't exist in the AGLFN # are skipped, so len("acutecomb_o") == 2 but len("dotaccentcomb_o") == 1. character = fontTools.agl.toUnicode(base_name) if character: category, sub_category = _translate_category( glyph_name, unicodedata.category(character[0]) ) return category, sub_category return None, None
[ "def", "_construct_category", "(", "glyph_name", ",", "data", ")", ":", "# Glyphs creates glyphs that start with an underscore as \"non-exportable\" glyphs or", "# construction helpers without a category.", "if", "glyph_name", ".", "startswith", "(", "\"_\"", ")", ":", "return", ...
44.683333
20.6
def save(self):
    """Persist this model to the database.

    Updates the existing row when the object has already been saved
    (``self._new`` is False); otherwise inserts it. A private ``_new``
    flag tracks object lifecycle so user-assigned primary keys still
    insert correctly.

    :returns: True on success
    :raises Exception: if the database reply reports errors

    Fix: ``self._new`` was cleared (and the generated key stored) before
    the reply's error count was checked, so a failed insert left the
    object marked as persisted and a retry would wrongly attempt an
    update. The error check now happens first.
    """
    if not self._new:
        # Update path: strip the primary key out of the payload.
        data = self._data.copy()
        ID = data.pop(self.primaryKey)
        reply = r.table(self.table).get(ID) \
            .update(data,
                    durability=self.durability,
                    non_atomic=self.non_atomic) \
            .run(self._conn)
    else:
        reply = r.table(self.table) \
            .insert(self._data,
                    durability=self.durability,
                    upsert=self.upsert) \
            .run(self._conn)

    # Fail before mutating object state so a retry behaves correctly.
    if "errors" in reply and reply["errors"] > 0:
        raise Exception("Could not insert entry: %s" % reply["first_error"])

    self._new = False
    # Record a server-generated primary key, if one was produced.
    if "generated_keys" in reply and reply["generated_keys"]:
        self._data[self.primaryKey] = reply["generated_keys"][0]

    return True
[ "def", "save", "(", "self", ")", ":", "if", "not", "self", ".", "_new", ":", "data", "=", "self", ".", "_data", ".", "copy", "(", ")", "ID", "=", "data", ".", "pop", "(", "self", ".", "primaryKey", ")", "reply", "=", "r", ".", "table", "(", "...
37.848485
18.393939
def getTextualNode(self, subreference=None):
    """ Retrieve a passage and store it in the object

    :param subreference: CtsReference of the passage (Note : if given a list, this should be a list of string that \
    compose the reference)
    :type subreference: Union[CtsReference, URN, str, list]
    :rtype: CtsPassage
    :returns: Object representing the passage
    :raises: *TypeError* when reference is not a list or a CtsReference
    """
    # Build the full passage URN according to the subreference's type.
    if isinstance(subreference, URN):
        urn = str(subreference)
    elif isinstance(subreference, CtsReference):
        urn = "{0}:{1}".format(self.urn, str(subreference))
    elif isinstance(subreference, str):
        # A bare string may already be a full URN (contains ':') or just
        # the passage part to append to this text's work-level URN.
        urn = subreference if ":" in subreference \
            else "{0}:{1}".format(self.urn.upTo(URN.NO_PASSAGE), subreference)
    elif isinstance(subreference, list):
        urn = "{0}:{1}".format(self.urn, ".".join(subreference))
    else:
        urn = str(self.urn)

    response = xmlparser(self.retriever.getPassage(urn=urn))
    request_node = response.xpath("//ti:request", namespaces=XPATH_NAMESPACES)[0]
    self._parse_request(request_node)
    return CtsPassage(urn=urn, resource=response, retriever=self.retriever)
[ "def", "getTextualNode", "(", "self", ",", "subreference", "=", "None", ")", ":", "if", "isinstance", "(", "subreference", ",", "URN", ")", ":", "urn", "=", "str", "(", "subreference", ")", "elif", "isinstance", "(", "subreference", ",", "CtsReference", ")...
45.285714
21.357143
def is_permanent_redirect(self):
    """True if this Response is one of the permanent redirect variants
    (301 Moved Permanently or 308 Permanent Redirect) and carries a
    Location header."""
    if 'location' not in self.headers:
        return False
    return self.status_code in (codes.moved_permanently, codes.permanent_redirect)
[ "def", "is_permanent_redirect", "(", "self", ")", ":", "return", "(", "'location'", "in", "self", ".", "headers", "and", "self", ".", "status_code", "in", "(", "codes", ".", "moved_permanently", ",", "codes", ".", "permanent_redirect", ")", ")" ]
76.333333
29