text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def parse_filespec(fspec, sep=':', gpat='*'):
    """
    Parse given filespec `fspec` and return [(filepath, filetype)].

    Because anyconfig.load should find correct file's type to load by the
    file extension, this function will not try guessing file's type if not
    file type is specified explicitly.

    :param fspec: filespec
    :param sep: a char separating filetype and filepath in filespec
    :param gpat: a char for glob pattern

    >>> parse_filespec("base.json")
    [('base.json', None)]
    >>> parse_filespec("json:base.json")
    [('base.json', 'json')]
    >>> parse_filespec("yaml:foo.yaml")
    [('foo.yaml', 'yaml')]
    >>> parse_filespec("yaml:foo.dat")
    [('foo.dat', 'yaml')]

    TODO: Allow '*' (glob pattern) in filepath when escaped with '\\\\', etc.

    # >>> parse_filespec("yaml:bar/*.conf")
    # [('bar/a.conf', 'yaml'), ('bar/b.conf', 'yaml')]
    """
    if sep in fspec:
        # Split on the first separator only, so a filepath that itself
        # contains `sep` (e.g. "json:C:/conf/a.json") stays intact.
        # A plain split() would raise ValueError on such inputs.
        (ftype, fpath) = fspec.split(sep, 1)
    else:
        (ftype, fpath) = (None, fspec)

    if gpat in fspec:
        # Expand the glob pattern; sort for a deterministic order.
        return [(fs, ftype) for fs in sorted(glob.glob(fpath))]
    return [(fpath, ftype)]
[ "def", "parse_filespec", "(", "fspec", ",", "sep", "=", "':'", ",", "gpat", "=", "'*'", ")", ":", "if", "sep", "in", "fspec", ":", "tpl", "=", "(", "ftype", ",", "fpath", ")", "=", "tuple", "(", "fspec", ".", "split", "(", "sep", ")", ")", "else", ":", "tpl", "=", "(", "ftype", ",", "fpath", ")", "=", "(", "None", ",", "fspec", ")", "return", "[", "(", "fs", ",", "ftype", ")", "for", "fs", "in", "sorted", "(", "glob", ".", "glob", "(", "fpath", ")", ")", "]", "if", "gpat", "in", "fspec", "else", "[", "flip", "(", "tpl", ")", "]" ]
34.375
17.8125
def _round(self):
    """
    This is the environment implementation of
    :meth:`BaseAnchor.round`.

    Subclasses may override this method.
    """
    # Round both coordinates through the shared rounding normalizer.
    for coord in ("x", "y"):
        setattr(self, coord, normalizers.normalizeRounding(getattr(self, coord)))
[ "def", "_round", "(", "self", ")", ":", "self", ".", "x", "=", "normalizers", ".", "normalizeRounding", "(", "self", ".", "x", ")", "self", ".", "y", "=", "normalizers", ".", "normalizeRounding", "(", "self", ".", "y", ")" ]
30.333333
12.333333
def stop_capture(self, adapter_number):
    """
    Stops a packet capture.

    :param adapter_number: adapter number
    """
    try:
        selected_adapter = self._ethernet_adapters[adapter_number]
    except KeyError:
        raise DockerError(
            "Adapter {adapter_number} doesn't exist on Docker VM '{name}'".format(
                name=self.name,
                adapter_number=adapter_number))

    nio = selected_adapter.get_nio(0)
    if not nio:
        raise DockerError("Adapter {} is not connected".format(adapter_number))

    nio.stopPacketCapture()

    # Tear down the uBridge side of the capture only while the VM is running.
    if self.status == "started" and self.ubridge:
        yield from self._stop_ubridge_capture(adapter_number)

    log.info(
        "Docker VM '{name}' [{id}]: stopping packet capture on adapter {adapter_number}".format(
            name=self.name,
            id=self.id,
            adapter_number=adapter_number))
[ "def", "stop_capture", "(", "self", ",", "adapter_number", ")", ":", "try", ":", "adapter", "=", "self", ".", "_ethernet_adapters", "[", "adapter_number", "]", "except", "KeyError", ":", "raise", "DockerError", "(", "\"Adapter {adapter_number} doesn't exist on Docker VM '{name}'\"", ".", "format", "(", "name", "=", "self", ".", "name", ",", "adapter_number", "=", "adapter_number", ")", ")", "nio", "=", "adapter", ".", "get_nio", "(", "0", ")", "if", "not", "nio", ":", "raise", "DockerError", "(", "\"Adapter {} is not connected\"", ".", "format", "(", "adapter_number", ")", ")", "nio", ".", "stopPacketCapture", "(", ")", "if", "self", ".", "status", "==", "\"started\"", "and", "self", ".", "ubridge", ":", "yield", "from", "self", ".", "_stop_ubridge_capture", "(", "adapter_number", ")", "log", ".", "info", "(", "\"Docker VM '{name}' [{id}]: stopping packet capture on adapter {adapter_number}\"", ".", "format", "(", "name", "=", "self", ".", "name", ",", "id", "=", "self", ".", "id", ",", "adapter_number", "=", "adapter_number", ")", ")" ]
43.653846
34.5
def choose_best_mask(self):
    """This method returns the index of the "best" mask as defined by
    having the lowest total penalty score. The penalty rules are defined by
    the standard. The mask with the lowest total score should be the
    easiest to read by optical scanners.
    """
    # One [rule1, rule2, rule3, rule4] score row per candidate mask.
    self.scores = []
    for n in range(len(self.masks)):
        self.scores.append([0,0,0,0])

    #Score penalty rule number 1
    #Look for five consecutive squares with the same color.
    #Each one found gets a penalty of 3 + 1 for every
    #same color square after the first five in the row.
    for (n, mask) in enumerate(self.masks):
        current = mask[0][0]
        counter = 0
        total = 0

        #Examine the mask row wise
        for row in range(0,len(mask)):
            counter = 0
            for col in range(0,len(mask)):
                bit = mask[row][col]

                if bit == current:
                    counter += 1
                else:
                    if counter >= 5:
                        total += (counter - 5) + 3
                    counter = 1
                    current = bit
            # Close out a run that reaches the end of the row.
            if counter >= 5:
                total += (counter - 5) + 3

        #Examine the mask column wise
        # NOTE(review): `current` is not reset here, so the first column
        # comparison carries over the last bit of the row scan — confirm
        # against the QR standard's intended run detection.
        for col in range(0,len(mask)):
            counter = 0
            for row in range(0,len(mask)):
                bit = mask[row][col]

                if bit == current:
                    counter += 1
                else:
                    if counter >= 5:
                        total += (counter - 5) + 3
                    counter = 1
                    current = bit
            if counter >= 5:
                total += (counter - 5) + 3

        self.scores[n][0] = total

    #Score penalty rule 2
    #This rule will add 3 to the score for each 2x2 block of the same
    #colored pixels there are.
    for (n, mask) in enumerate(self.masks):
        count = 0
        #Don't examine the 0th and Nth row/column
        for i in range(0, len(mask)-1):
            for j in range(0, len(mask)-1):
                # A 2x2 block is uniform when the three neighbors match
                # the top-left cell.
                if mask[i][j] == mask[i+1][j] and \
                   mask[i][j] == mask[i][j+1] and \
                   mask[i][j] == mask[i+1][j+1]:
                    count += 1
        self.scores[n][1] = count * 3

    #Score penalty rule 3
    #This rule looks for 1011101 within the mask prefixed
    #and/or suffixed by four zeros.
    patterns = [[0,0,0,0,1,0,1,1,1,0,1],
                [1,0,1,1,1,0,1,0,0,0,0],]
                #[0,0,0,0,1,0,1,1,1,0,1,0,0,0,0]]
    for (n, mask) in enumerate(self.masks):
        nmatches = 0
        for i in range(len(mask)):
            for j in range(len(mask)):
                for pattern in patterns:
                    match = True
                    k = j
                    #Look for row matches
                    for p in pattern:
                        if k >= len(mask) or mask[i][k] != p:
                            match = False
                            break
                        k += 1
                    if match:
                        nmatches += 1

                    match = True
                    k = j
                    #Look for column matches
                    for p in pattern:
                        if k >= len(mask) or mask[k][i] != p:
                            match = False
                            break
                        k += 1
                    if match:
                        nmatches += 1
        # Each occurrence carries a penalty of 40.
        self.scores[n][2] = nmatches * 40

    #Score the last rule, penalty rule 4. This rule measures how close
    #the pattern is to being 50% black. The further it deviates from
    #this this ideal the higher the penalty.
    for (n, mask) in enumerate(self.masks):
        nblack = 0
        for row in mask:
            nblack += sum(row)
        total_pixels = len(mask)**2
        # NOTE(review): true division — under Python 2 without
        # `from __future__ import division` this would truncate to 0;
        # presumably this module targets Python 3.
        ratio = nblack / total_pixels
        percent = (ratio * 100) - 50
        self.scores[n][3] = int((abs(int(percent)) / 5) * 10)

    #Calculate the total for each score
    totals = [0] * len(self.scores)
    for i in range(len(self.scores)):
        for j in range(len(self.scores[i])):
            totals[i] += self.scores[i][j]

    #DEBUG CODE!!!
    #Prints out a table of scores
    #print('Rule Scores\n       1     2     3     4     Total')
    #for i in range(len(self.scores)):
    #    print(i, end='')
    #    for s in self.scores[i]:
    #        print('{0: >6}'.format(s), end='')
    #    print('{0: >7}'.format(totals[i]))
    #print('Mask Chosen: {0}'.format(totals.index(min(totals))))

    #The lowest total wins
    return totals.index(min(totals))
[ "def", "choose_best_mask", "(", "self", ")", ":", "self", ".", "scores", "=", "[", "]", "for", "n", "in", "range", "(", "len", "(", "self", ".", "masks", ")", ")", ":", "self", ".", "scores", ".", "append", "(", "[", "0", ",", "0", ",", "0", ",", "0", "]", ")", "#Score penalty rule number 1", "#Look for five consecutive squares with the same color.", "#Each one found gets a penalty of 3 + 1 for every", "#same color square after the first five in the row.", "for", "(", "n", ",", "mask", ")", "in", "enumerate", "(", "self", ".", "masks", ")", ":", "current", "=", "mask", "[", "0", "]", "[", "0", "]", "counter", "=", "0", "total", "=", "0", "#Examine the mask row wise", "for", "row", "in", "range", "(", "0", ",", "len", "(", "mask", ")", ")", ":", "counter", "=", "0", "for", "col", "in", "range", "(", "0", ",", "len", "(", "mask", ")", ")", ":", "bit", "=", "mask", "[", "row", "]", "[", "col", "]", "if", "bit", "==", "current", ":", "counter", "+=", "1", "else", ":", "if", "counter", ">=", "5", ":", "total", "+=", "(", "counter", "-", "5", ")", "+", "3", "counter", "=", "1", "current", "=", "bit", "if", "counter", ">=", "5", ":", "total", "+=", "(", "counter", "-", "5", ")", "+", "3", "#Examine the mask column wise", "for", "col", "in", "range", "(", "0", ",", "len", "(", "mask", ")", ")", ":", "counter", "=", "0", "for", "row", "in", "range", "(", "0", ",", "len", "(", "mask", ")", ")", ":", "bit", "=", "mask", "[", "row", "]", "[", "col", "]", "if", "bit", "==", "current", ":", "counter", "+=", "1", "else", ":", "if", "counter", ">=", "5", ":", "total", "+=", "(", "counter", "-", "5", ")", "+", "3", "counter", "=", "1", "current", "=", "bit", "if", "counter", ">=", "5", ":", "total", "+=", "(", "counter", "-", "5", ")", "+", "3", "self", ".", "scores", "[", "n", "]", "[", "0", "]", "=", "total", "#Score penalty rule 2", "#This rule will add 3 to the score for each 2x2 block of the same", "#colored pixels there are.", "for", "(", "n", ",", "mask", ")", "in", 
"enumerate", "(", "self", ".", "masks", ")", ":", "count", "=", "0", "#Don't examine the 0th and Nth row/column", "for", "i", "in", "range", "(", "0", ",", "len", "(", "mask", ")", "-", "1", ")", ":", "for", "j", "in", "range", "(", "0", ",", "len", "(", "mask", ")", "-", "1", ")", ":", "if", "mask", "[", "i", "]", "[", "j", "]", "==", "mask", "[", "i", "+", "1", "]", "[", "j", "]", "and", "mask", "[", "i", "]", "[", "j", "]", "==", "mask", "[", "i", "]", "[", "j", "+", "1", "]", "and", "mask", "[", "i", "]", "[", "j", "]", "==", "mask", "[", "i", "+", "1", "]", "[", "j", "+", "1", "]", ":", "count", "+=", "1", "self", ".", "scores", "[", "n", "]", "[", "1", "]", "=", "count", "*", "3", "#Score penalty rule 3", "#This rule looks for 1011101 within the mask prefixed", "#and/or suffixed by four zeros.", "patterns", "=", "[", "[", "0", ",", "0", ",", "0", ",", "0", ",", "1", ",", "0", ",", "1", ",", "1", ",", "1", ",", "0", ",", "1", "]", ",", "[", "1", ",", "0", ",", "1", ",", "1", ",", "1", ",", "0", ",", "1", ",", "0", ",", "0", ",", "0", ",", "0", "]", ",", "]", "#[0,0,0,0,1,0,1,1,1,0,1,0,0,0,0]]", "for", "(", "n", ",", "mask", ")", "in", "enumerate", "(", "self", ".", "masks", ")", ":", "nmatches", "=", "0", "for", "i", "in", "range", "(", "len", "(", "mask", ")", ")", ":", "for", "j", "in", "range", "(", "len", "(", "mask", ")", ")", ":", "for", "pattern", "in", "patterns", ":", "match", "=", "True", "k", "=", "j", "#Look for row matches", "for", "p", "in", "pattern", ":", "if", "k", ">=", "len", "(", "mask", ")", "or", "mask", "[", "i", "]", "[", "k", "]", "!=", "p", ":", "match", "=", "False", "break", "k", "+=", "1", "if", "match", ":", "nmatches", "+=", "1", "match", "=", "True", "k", "=", "j", "#Look for column matches", "for", "p", "in", "pattern", ":", "if", "k", ">=", "len", "(", "mask", ")", "or", "mask", "[", "k", "]", "[", "i", "]", "!=", "p", ":", "match", "=", "False", "break", "k", "+=", "1", "if", "match", ":", "nmatches", "+=", "1", "self", ".", 
"scores", "[", "n", "]", "[", "2", "]", "=", "nmatches", "*", "40", "#Score the last rule, penalty rule 4. This rule measures how close", "#the pattern is to being 50% black. The further it deviates from", "#this this ideal the higher the penalty.", "for", "(", "n", ",", "mask", ")", "in", "enumerate", "(", "self", ".", "masks", ")", ":", "nblack", "=", "0", "for", "row", "in", "mask", ":", "nblack", "+=", "sum", "(", "row", ")", "total_pixels", "=", "len", "(", "mask", ")", "**", "2", "ratio", "=", "nblack", "/", "total_pixels", "percent", "=", "(", "ratio", "*", "100", ")", "-", "50", "self", ".", "scores", "[", "n", "]", "[", "3", "]", "=", "int", "(", "(", "abs", "(", "int", "(", "percent", ")", ")", "/", "5", ")", "*", "10", ")", "#Calculate the total for each score", "totals", "=", "[", "0", "]", "*", "len", "(", "self", ".", "scores", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "scores", ")", ")", ":", "for", "j", "in", "range", "(", "len", "(", "self", ".", "scores", "[", "i", "]", ")", ")", ":", "totals", "[", "i", "]", "+=", "self", ".", "scores", "[", "i", "]", "[", "j", "]", "#DEBUG CODE!!!", "#Prints out a table of scores", "#print('Rule Scores\\n 1 2 3 4 Total')", "#for i in range(len(self.scores)):", "# print(i, end='')", "# for s in self.scores[i]:", "# print('{0: >6}'.format(s), end='')", "# print('{0: >7}'.format(totals[i]))", "#print('Mask Chosen: {0}'.format(totals.index(min(totals))))", "#The lowest total wins", "return", "totals", ".", "index", "(", "min", "(", "totals", ")", ")" ]
36.028986
13.376812
def add_checkpoint_file(self,filename):
    """
    Add filename as a checkpoint file for this DAG node
    @param filename: checkpoint filename to add
    """
    # Already registered: nothing to do.
    if filename in self.__checkpoint_files:
        return

    self.__checkpoint_files.append(filename)

    # Non-DAGMan jobs running in the 'grid' universe additionally record
    # the checkpoint file as a macro.
    if not isinstance(self.job(), CondorDAGManJob):
        if self.job().get_universe() == 'grid':
            self.add_checkpoint_macro(filename)
[ "def", "add_checkpoint_file", "(", "self", ",", "filename", ")", ":", "if", "filename", "not", "in", "self", ".", "__checkpoint_files", ":", "self", ".", "__checkpoint_files", ".", "append", "(", "filename", ")", "if", "not", "isinstance", "(", "self", ".", "job", "(", ")", ",", "CondorDAGManJob", ")", ":", "if", "self", ".", "job", "(", ")", ".", "get_universe", "(", ")", "==", "'grid'", ":", "self", ".", "add_checkpoint_macro", "(", "filename", ")" ]
40.7
7.5
def disconnect(self, *args, **kwargs):
    """
    WebSocket was disconnected - leave the IRC channel.
    """
    # Quit message shown to the channel: version string plus project URL.
    parting = "%s %s" % (settings.GNOTTY_VERSION_STRING,
                         settings.GNOTTY_PROJECT_URL)
    self.client.connection.quit(parting)
    super(IRCNamespace, self).disconnect(*args, **kwargs)
[ "def", "disconnect", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "quit_message", "=", "\"%s %s\"", "%", "(", "settings", ".", "GNOTTY_VERSION_STRING", ",", "settings", ".", "GNOTTY_PROJECT_URL", ")", "self", ".", "client", ".", "connection", ".", "quit", "(", "quit_message", ")", "super", "(", "IRCNamespace", ",", "self", ")", ".", "disconnect", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
44.5
12.25
def _schedule_next_run(self):
    """
    Compute the instant when this job should run next.

    Sets ``self.period`` and ``self.next_run`` as side effects.

    :raises ScheduleValueError: for an unknown unit, a non-'weeks' unit
        combined with ``start_day``, an invalid start day, or an at-time
        without a compatible unit/start day.
    :raises ScheduleError: when ``latest`` is smaller than ``interval``.
    """
    if self.unit not in ('seconds', 'minutes', 'hours', 'days', 'weeks'):
        raise ScheduleValueError('Invalid unit')

    if self.latest is not None:
        # NOTE(review): this branch raises when latest < interval, yet the
        # message reads "`latest` is greater than `interval`" — the text
        # appears inverted relative to the condition; confirm upstream
        # before changing the user-visible string.
        if not (self.latest >= self.interval):
            raise ScheduleError('`latest` is greater than `interval`')
        # Randomize within [interval, latest] for jittered scheduling.
        interval = random.randint(self.interval, self.latest)
    else:
        interval = self.interval

    self.period = datetime.timedelta(**{self.unit: interval})
    self.next_run = datetime.datetime.now() + self.period

    if self.start_day is not None:
        # A named weekday only makes sense with a weekly period.
        if self.unit != 'weeks':
            raise ScheduleValueError('`unit` should be \'weeks\'')
        weekdays = (
            'monday',
            'tuesday',
            'wednesday',
            'thursday',
            'friday',
            'saturday',
            'sunday'
        )
        if self.start_day not in weekdays:
            raise ScheduleValueError('Invalid start day')
        weekday = weekdays.index(self.start_day)
        days_ahead = weekday - self.next_run.weekday()
        if days_ahead <= 0:  # Target day already happened this week
            days_ahead += 7
        # Move to the requested weekday; the period was already added
        # above, so subtract it back out here.
        self.next_run += datetime.timedelta(days_ahead) - self.period

    if self.at_time is not None:
        if (self.unit not in ('days', 'hours', 'minutes')
                and self.start_day is None):
            raise ScheduleValueError(('Invalid unit without'
                                      ' specifying start day'))
        # Pin the sub-unit fields of next_run to the requested at-time;
        # which fields get pinned depends on the unit's granularity.
        kwargs = {
            'second': self.at_time.second,
            'microsecond': 0
        }
        if self.unit == 'days' or self.start_day is not None:
            kwargs['hour'] = self.at_time.hour
        if self.unit in ['days', 'hours'] or self.start_day is not None:
            kwargs['minute'] = self.at_time.minute
        self.next_run = self.next_run.replace(**kwargs)

        # If we are running for the first time, make sure we run
        # at the specified time *today* (or *this hour*) as well
        if not self.last_run:
            now = datetime.datetime.now()
            if (self.unit == 'days' and self.at_time > now.time() and
                    self.interval == 1):
                self.next_run = self.next_run - datetime.timedelta(days=1)
            # NOTE(review): Python precedence groups this condition as
            # (unit == 'hours' AND minute > now.minute) OR (minute ==
            # now.minute AND second > now.second) — the OR arm can fire
            # for non-'hours' units; confirm this matches intent.
            elif self.unit == 'hours' \
                    and self.at_time.minute > now.minute \
                    or (self.at_time.minute == now.minute and
                        self.at_time.second > now.second):
                self.next_run = self.next_run - \
                                datetime.timedelta(hours=1)
            elif self.unit == 'minutes' \
                    and self.at_time.second > now.second:
                self.next_run = self.next_run - \
                                datetime.timedelta(minutes=1)

    if self.start_day is not None and self.at_time is not None:
        # Let's see if we will still make that time we specified today
        if (self.next_run - datetime.datetime.now()).days >= 7:
            self.next_run -= self.period
[ "def", "_schedule_next_run", "(", "self", ")", ":", "if", "self", ".", "unit", "not", "in", "(", "'seconds'", ",", "'minutes'", ",", "'hours'", ",", "'days'", ",", "'weeks'", ")", ":", "raise", "ScheduleValueError", "(", "'Invalid unit'", ")", "if", "self", ".", "latest", "is", "not", "None", ":", "if", "not", "(", "self", ".", "latest", ">=", "self", ".", "interval", ")", ":", "raise", "ScheduleError", "(", "'`latest` is greater than `interval`'", ")", "interval", "=", "random", ".", "randint", "(", "self", ".", "interval", ",", "self", ".", "latest", ")", "else", ":", "interval", "=", "self", ".", "interval", "self", ".", "period", "=", "datetime", ".", "timedelta", "(", "*", "*", "{", "self", ".", "unit", ":", "interval", "}", ")", "self", ".", "next_run", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "+", "self", ".", "period", "if", "self", ".", "start_day", "is", "not", "None", ":", "if", "self", ".", "unit", "!=", "'weeks'", ":", "raise", "ScheduleValueError", "(", "'`unit` should be \\'weeks\\''", ")", "weekdays", "=", "(", "'monday'", ",", "'tuesday'", ",", "'wednesday'", ",", "'thursday'", ",", "'friday'", ",", "'saturday'", ",", "'sunday'", ")", "if", "self", ".", "start_day", "not", "in", "weekdays", ":", "raise", "ScheduleValueError", "(", "'Invalid start day'", ")", "weekday", "=", "weekdays", ".", "index", "(", "self", ".", "start_day", ")", "days_ahead", "=", "weekday", "-", "self", ".", "next_run", ".", "weekday", "(", ")", "if", "days_ahead", "<=", "0", ":", "# Target day already happened this week", "days_ahead", "+=", "7", "self", ".", "next_run", "+=", "datetime", ".", "timedelta", "(", "days_ahead", ")", "-", "self", ".", "period", "if", "self", ".", "at_time", "is", "not", "None", ":", "if", "(", "self", ".", "unit", "not", "in", "(", "'days'", ",", "'hours'", ",", "'minutes'", ")", "and", "self", ".", "start_day", "is", "None", ")", ":", "raise", "ScheduleValueError", "(", "(", "'Invalid unit without'", "' specifying start day'", 
")", ")", "kwargs", "=", "{", "'second'", ":", "self", ".", "at_time", ".", "second", ",", "'microsecond'", ":", "0", "}", "if", "self", ".", "unit", "==", "'days'", "or", "self", ".", "start_day", "is", "not", "None", ":", "kwargs", "[", "'hour'", "]", "=", "self", ".", "at_time", ".", "hour", "if", "self", ".", "unit", "in", "[", "'days'", ",", "'hours'", "]", "or", "self", ".", "start_day", "is", "not", "None", ":", "kwargs", "[", "'minute'", "]", "=", "self", ".", "at_time", ".", "minute", "self", ".", "next_run", "=", "self", ".", "next_run", ".", "replace", "(", "*", "*", "kwargs", ")", "# If we are running for the first time, make sure we run", "# at the specified time *today* (or *this hour*) as well", "if", "not", "self", ".", "last_run", ":", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "if", "(", "self", ".", "unit", "==", "'days'", "and", "self", ".", "at_time", ">", "now", ".", "time", "(", ")", "and", "self", ".", "interval", "==", "1", ")", ":", "self", ".", "next_run", "=", "self", ".", "next_run", "-", "datetime", ".", "timedelta", "(", "days", "=", "1", ")", "elif", "self", ".", "unit", "==", "'hours'", "and", "self", ".", "at_time", ".", "minute", ">", "now", ".", "minute", "or", "(", "self", ".", "at_time", ".", "minute", "==", "now", ".", "minute", "and", "self", ".", "at_time", ".", "second", ">", "now", ".", "second", ")", ":", "self", ".", "next_run", "=", "self", ".", "next_run", "-", "datetime", ".", "timedelta", "(", "hours", "=", "1", ")", "elif", "self", ".", "unit", "==", "'minutes'", "and", "self", ".", "at_time", ".", "second", ">", "now", ".", "second", ":", "self", ".", "next_run", "=", "self", ".", "next_run", "-", "datetime", ".", "timedelta", "(", "minutes", "=", "1", ")", "if", "self", ".", "start_day", "is", "not", "None", "and", "self", ".", "at_time", "is", "not", "None", ":", "# Let's see if we will still make that time we specified today", "if", "(", "self", ".", "next_run", "-", "datetime", ".", 
"datetime", ".", "now", "(", ")", ")", ".", "days", ">=", "7", ":", "self", ".", "next_run", "-=", "self", ".", "period" ]
47.565217
18.463768
def gpg_profile_put_key( blockchain_id, key_id, key_name=None, immutable=True, txid=None, key_url=None, use_key_server=True, key_server=None, proxy=None, wallet_keys=None, gpghome=None ):
    """
    Put a local GPG key into a blockchain ID's global account.

    If the URL is not given, the key will be replicated to the default PGP
    key server and to either immutable (if @immutable) or mutable data.

    :param blockchain_id: blockchain ID owning the account
    :param key_id: GPG key fingerprint to publish
    :param key_name: optional account key name (validated by is_valid_keyname)
    :param immutable: store the key in immutable storage when no key_url given
    :param txid: optional transaction id passed through to put_immutable
    :param key_url: pre-existing URL for the key; skips replication when set
    :param use_key_server: also upload the key to the PGP key server
    :param key_server: key server to use (defaults to DEFAULT_KEY_SERVER)
    :param proxy, wallet_keys: passed through to the client storage calls
    :param gpghome: GPG home directory (defaults to get_default_gpg_home())

    Return {'status': True, 'key_url': key_url, 'key_id': key fingerprint, ...} on success
    Return {'error': ...} on error
    """
    if key_name is not None:
        # NOTE(review): `assert` is stripped under -O; consider raising
        # ValueError instead if callers depend on this validation.
        assert is_valid_keyname(key_name)

    if key_server is None:
        key_server = DEFAULT_KEY_SERVER

    if gpghome is None:
        gpghome = get_default_gpg_home()

    put_res = {}
    extra_fields = {}
    key_data = None
    if key_name is not None:
        extra_fields = {'keyName': key_name}

    if key_url is None:
        gpg = gnupg.GPG( homedir=gpghome )

        if use_key_server:
            # replicate key data to default server first
            res = gpg.send_keys( key_server, key_id )
            if len(res.data) > 0:
                # non-empty response data indicates an upload error
                log.error("GPG failed to upload key '%s'" % key_id)
                log.error("GPG error:\n%s" % res.stderr)
                # fixed typo: "repliate" -> "replicate"
                return {'error': 'Failed to replicate GPG key to default keyserver'}

        key_data = gpg.export_keys( [key_id] )

        if immutable:
            # replicate to immutable storage
            immutable_result = client.put_immutable( blockchain_id, key_id, {key_id: key_data}, proxy=proxy, txid=txid, wallet_keys=wallet_keys )
            if 'error' in immutable_result:
                return {'error': 'Failed to store hash of key %s to the blockchain. Error message: "%s"' % (key_id, immutable_result['error'])}
            else:
                put_res['transaction_hash'] = immutable_result['transaction_hash']
                put_res['zonefile_hash'] = immutable_result['zonefile_hash']

            key_url = client.make_immutable_data_url( blockchain_id, key_id, client.get_data_hash(key_data) )

        else:
            # replicate to mutable storage
            mutable_name = key_name
            if key_name is None:
                mutable_name = key_id

            mutable_result = client.put_mutable( blockchain_id, key_id, {mutable_name: key_data}, proxy=proxy, wallet_keys=wallet_keys )
            if 'error' in mutable_result:
                return {'error': 'Failed to store key %s. Error message: "%s"' % (key_id, mutable_result['error'])}

            key_url = client.make_mutable_data_url( blockchain_id, key_id, mutable_result['version'] )

    # record the key URL in the blockchain ID's "pgp" account
    put_account_res = client.put_account( blockchain_id, "pgp", key_id, key_url, proxy=proxy, wallet_keys=wallet_keys, **extra_fields )
    if 'error' in put_account_res:
        return put_account_res

    else:
        put_account_res.update( put_res )
        put_account_res['key_url'] = key_url
        put_account_res['key_id'] = key_id
        return put_account_res
[ "def", "gpg_profile_put_key", "(", "blockchain_id", ",", "key_id", ",", "key_name", "=", "None", ",", "immutable", "=", "True", ",", "txid", "=", "None", ",", "key_url", "=", "None", ",", "use_key_server", "=", "True", ",", "key_server", "=", "None", ",", "proxy", "=", "None", ",", "wallet_keys", "=", "None", ",", "gpghome", "=", "None", ")", ":", "if", "key_name", "is", "not", "None", ":", "assert", "is_valid_keyname", "(", "key_name", ")", "if", "key_server", "is", "None", ":", "key_server", "=", "DEFAULT_KEY_SERVER", "if", "gpghome", "is", "None", ":", "gpghome", "=", "get_default_gpg_home", "(", ")", "put_res", "=", "{", "}", "extra_fields", "=", "{", "}", "key_data", "=", "None", "if", "key_name", "is", "not", "None", ":", "extra_fields", "=", "{", "'keyName'", ":", "key_name", "}", "if", "key_url", "is", "None", ":", "gpg", "=", "gnupg", ".", "GPG", "(", "homedir", "=", "gpghome", ")", "if", "use_key_server", ":", "# replicate key data to default server first ", "res", "=", "gpg", ".", "send_keys", "(", "key_server", ",", "key_id", ")", "if", "len", "(", "res", ".", "data", ")", ">", "0", ":", "# error ", "log", ".", "error", "(", "\"GPG failed to upload key '%s'\"", "%", "key_id", ")", "log", ".", "error", "(", "\"GPG error:\\n%s\"", "%", "res", ".", "stderr", ")", "return", "{", "'error'", ":", "'Failed to repliate GPG key to default keyserver'", "}", "key_data", "=", "gpg", ".", "export_keys", "(", "[", "key_id", "]", ")", "if", "immutable", ":", "# replicate to immutable storage ", "immutable_result", "=", "client", ".", "put_immutable", "(", "blockchain_id", ",", "key_id", ",", "{", "key_id", ":", "key_data", "}", ",", "proxy", "=", "proxy", ",", "txid", "=", "txid", ",", "wallet_keys", "=", "wallet_keys", ")", "if", "'error'", "in", "immutable_result", ":", "return", "{", "'error'", ":", "'Failed to store hash of key %s to the blockchain. 
Error message: \"%s\"'", "%", "(", "key_id", ",", "immutable_result", "[", "'error'", "]", ")", "}", "else", ":", "put_res", "[", "'transaction_hash'", "]", "=", "immutable_result", "[", "'transaction_hash'", "]", "put_res", "[", "'zonefile_hash'", "]", "=", "immutable_result", "[", "'zonefile_hash'", "]", "key_url", "=", "client", ".", "make_immutable_data_url", "(", "blockchain_id", ",", "key_id", ",", "client", ".", "get_data_hash", "(", "key_data", ")", ")", "else", ":", "# replicate to mutable storage ", "mutable_name", "=", "key_name", "if", "key_name", "is", "None", ":", "mutable_name", "=", "key_id", "mutable_result", "=", "client", ".", "put_mutable", "(", "blockchain_id", ",", "key_id", ",", "{", "mutable_name", ":", "key_data", "}", ",", "proxy", "=", "proxy", ",", "wallet_keys", "=", "wallet_keys", ")", "if", "'error'", "in", "mutable_result", ":", "return", "{", "'error'", ":", "'Failed to store key %s. Error message: \"%s\"'", "%", "(", "key_id", ",", "mutable_result", "[", "'error'", "]", ")", "}", "key_url", "=", "client", ".", "make_mutable_data_url", "(", "blockchain_id", ",", "key_id", ",", "mutable_result", "[", "'version'", "]", ")", "put_account_res", "=", "client", ".", "put_account", "(", "blockchain_id", ",", "\"pgp\"", ",", "key_id", ",", "key_url", ",", "proxy", "=", "proxy", ",", "wallet_keys", "=", "wallet_keys", ",", "*", "*", "extra_fields", ")", "if", "'error'", "in", "put_account_res", ":", "return", "put_account_res", "else", ":", "put_account_res", ".", "update", "(", "put_res", ")", "put_account_res", "[", "'key_url'", "]", "=", "key_url", "put_account_res", "[", "'key_id'", "]", "=", "key_id", "return", "put_account_res" ]
41.222222
29.944444
def save(self):
    """
    Easy save(insert or update) for db models
    """
    try:
        # Only add new rows; existing rows just get committed.
        if self.exists() is False:
            self.db.session.add(self)
        # self.db.session.merge(self)
        self.db.session.commit()
    except (Exception, BaseException) as exc:
        # Swallow failures in production; surface them when debugging.
        debugging = current_app.config['DEBUG']
        if debugging:
            raise exc
    return None
[ "def", "save", "(", "self", ")", ":", "try", ":", "if", "self", ".", "exists", "(", ")", "is", "False", ":", "self", ".", "db", ".", "session", ".", "add", "(", "self", ")", "# self.db.session.merge(self)\r", "self", ".", "db", ".", "session", ".", "commit", "(", ")", "except", "(", "Exception", ",", "BaseException", ")", "as", "error", ":", "if", "current_app", ".", "config", "[", "'DEBUG'", "]", ":", "raise", "error", "return", "None" ]
35.818182
9.454545
def doNew(self, WHAT={}, **params):
    """This function will perform the command -new.

    Builds the DB parameter list from `WHAT` (a record-like object with
    `_modified`, or a plain dict) plus any keyword arguments, then issues
    the FileMaker '-new' action.

    NOTE(review): Python 2-only syntax (`raise E, msg`, `dict.has_key`);
    this module cannot run under Python 3 as-is.
    NOTE(review): mutable default argument `WHAT={}` — safe only because
    it is never mutated here; confirm before refactoring.
    """
    # Record-like objects expose `_modified` and a new->old field-name map.
    if hasattr(WHAT, '_modified'):
        for key in WHAT:
            # Skip FileMaker bookkeeping fields.
            if key not in ['RECORDID','MODID']:
                if WHAT.__new2old__.has_key(key):
                    # Translate to the legacy field name (UTF-8 encoded).
                    self._addDBParam(WHAT.__new2old__[key].encode('utf-8'), WHAT[key])
                else:
                    self._addDBParam(key, WHAT[key])
    elif type(WHAT)==dict:
        for key in WHAT:
            self._addDBParam(key, WHAT[key])
    else:
        raise FMError, 'Python Runtime: Object type (%s) given to function doNew as argument WHAT cannot be used.' % type(WHAT)

    # A layout must have been selected before any record can be created.
    if self._layout == '':
        raise FMError, 'No layout was selected'

    # Keyword arguments are additional field values.
    for key in params:
        self._addDBParam(key, params[key])

    # Refuse to create an empty record.
    if len(self._dbParams) == 0:
        raise FMError, 'No data to be added'

    return self._doAction('-new')
[ "def", "doNew", "(", "self", ",", "WHAT", "=", "{", "}", ",", "*", "*", "params", ")", ":", "if", "hasattr", "(", "WHAT", ",", "'_modified'", ")", ":", "for", "key", "in", "WHAT", ":", "if", "key", "not", "in", "[", "'RECORDID'", ",", "'MODID'", "]", ":", "if", "WHAT", ".", "__new2old__", ".", "has_key", "(", "key", ")", ":", "self", ".", "_addDBParam", "(", "WHAT", ".", "__new2old__", "[", "key", "]", ".", "encode", "(", "'utf-8'", ")", ",", "WHAT", "[", "key", "]", ")", "else", ":", "self", ".", "_addDBParam", "(", "key", ",", "WHAT", "[", "key", "]", ")", "elif", "type", "(", "WHAT", ")", "==", "dict", ":", "for", "key", "in", "WHAT", ":", "self", ".", "_addDBParam", "(", "key", ",", "WHAT", "[", "key", "]", ")", "else", ":", "raise", "FMError", ",", "'Python Runtime: Object type (%s) given to function doNew as argument WHAT cannot be used.'", "%", "type", "(", "WHAT", ")", "if", "self", ".", "_layout", "==", "''", ":", "raise", "FMError", ",", "'No layout was selected'", "for", "key", "in", "params", ":", "self", ".", "_addDBParam", "(", "key", ",", "params", "[", "key", "]", ")", "if", "len", "(", "self", ".", "_dbParams", ")", "==", "0", ":", "raise", "FMError", ",", "'No data to be added'", "return", "self", ".", "_doAction", "(", "'-new'", ")" ]
29.461538
19.923077
def enable_counter(self, base=None, database='counter', collection='counters'):
    """Register the builtin counter model, return the registered Counter
    class and the corresponding ``CounterMixin`` class.

    :param base: optional extra base class mixed into the registered Counter
    :param database: database name used by the counter collection
    :param collection: collection name that stores the counter documents

    The ``CounterMixin`` automatically increases and decreases the counter
    after model creation(save without ``_id``) and deletion. It contains a
    classmethod ``count()`` which returns the current count of the model
    collection."""
    Counter._database_ = database
    Counter._collection_ = collection
    # Build a fresh subclass so each registration gets its own type object.
    bases = (base, Counter) if base else (Counter,)
    counter = self.register_model(type('Counter', bases, {}))

    class CounterMixin(object):
        """Mixin class for model"""

        @classmethod
        def inc_counter(cls):
            """Wrapper for ``Counter.increase()``."""
            return counter.increase(cls._collection_)

        @classmethod
        def dec_counter(cls):
            """Wrapper for ``Counter.decrease()``."""
            return counter.decrease(cls._collection_)

        @classmethod
        def chg_counter(cls, *args, **kwargs):
            """Wrapper for ``Counter.change_by()``."""
            return counter.change_by(cls._collection_, *args, **kwargs)

        @classmethod
        def set_counter(cls, *args, **kwargs):
            """Wrapper for ``Counter.set_to()``."""
            return counter.set_to(cls._collection_, *args, **kwargs)

        def on_save(self, old_dict):
            super(CounterMixin, self).on_save(old_dict)
            # No ``_id`` in the pre-save dict means this save created a new
            # document, so the collection grew by one.
            if not old_dict.get('_id'):
                counter.increase(self._collection_)

        def on_delete(self, *args, **kwargs):
            super(CounterMixin, self).on_delete(*args, **kwargs)
            counter.decrease(self._collection_)

        @classmethod
        def count(cls):
            """Return the current count of this collection."""
            return counter.count(cls._collection_)

    logging.info('Counter enabled on: %s' % counter.collection)
    return counter, CounterMixin
[ "def", "enable_counter", "(", "self", ",", "base", "=", "None", ",", "database", "=", "'counter'", ",", "collection", "=", "'counters'", ")", ":", "Counter", ".", "_database_", "=", "database", "Counter", ".", "_collection_", "=", "collection", "bases", "=", "(", "base", ",", "Counter", ")", "if", "base", "else", "(", "Counter", ",", ")", "counter", "=", "self", ".", "register_model", "(", "type", "(", "'Counter'", ",", "bases", ",", "{", "}", ")", ")", "class", "CounterMixin", "(", "object", ")", ":", "\"\"\"Mixin class for model\"\"\"", "@", "classmethod", "def", "inc_counter", "(", "cls", ")", ":", "\"\"\"Wrapper for ``Counter.increase()``.\"\"\"", "return", "counter", ".", "increase", "(", "cls", ".", "_collection_", ")", "@", "classmethod", "def", "dec_counter", "(", "cls", ")", ":", "\"\"\"Wrapper for ``Counter.decrease()``.\"\"\"", "return", "counter", ".", "decrease", "(", "cls", ".", "_collection_", ")", "@", "classmethod", "def", "chg_counter", "(", "cls", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Wrapper for ``Counter.change_by()``.\"\"\"", "return", "counter", ".", "change_by", "(", "cls", ".", "_collection_", ",", "*", "args", ",", "*", "*", "kwargs", ")", "@", "classmethod", "def", "set_counter", "(", "cls", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Wrapper for ``Counter.set_to()``.\"\"\"", "return", "counter", ".", "set_to", "(", "cls", ".", "_collection_", ",", "*", "args", ",", "*", "*", "kwargs", ")", "def", "on_save", "(", "self", ",", "old_dict", ")", ":", "super", "(", "CounterMixin", ",", "self", ")", ".", "on_save", "(", "old_dict", ")", "if", "not", "old_dict", ".", "get", "(", "'_id'", ")", ":", "counter", ".", "increase", "(", "self", ".", "_collection_", ")", "def", "on_delete", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "super", "(", "CounterMixin", ",", "self", ")", ".", "on_delete", "(", "*", "args", ",", "*", "*", "kwargs", ")", "counter", ".", "decrease", "(", "self", ".", 
"_collection_", ")", "@", "classmethod", "def", "count", "(", "cls", ")", ":", "\"\"\"Return the current count of this collection.\"\"\"", "return", "counter", ".", "count", "(", "cls", ".", "_collection_", ")", "logging", ".", "info", "(", "'Counter enabled on: %s'", "%", "counter", ".", "collection", ")", "return", "counter", ",", "CounterMixin" ]
40.584906
17.849057
def check_shape(meth):
    """Decorator for larray magic methods.

    Verifies that an array-like operand has the same shape as the array
    before delegating to the wrapped magic method.
    """
    @wraps(meth)
    def checked(self, operand):
        is_array_like = isinstance(operand, (larray, numpy.ndarray))
        if is_array_like and operand.shape != self._shape:
            raise ValueError("shape mismatch: objects cannot be broadcast to a single shape")
        return meth(self, operand)
    return checked
[ "def", "check_shape", "(", "meth", ")", ":", "@", "wraps", "(", "meth", ")", "def", "wrapped_meth", "(", "self", ",", "val", ")", ":", "if", "isinstance", "(", "val", ",", "(", "larray", ",", "numpy", ".", "ndarray", ")", ")", ":", "if", "val", ".", "shape", "!=", "self", ".", "_shape", ":", "raise", "ValueError", "(", "\"shape mismatch: objects cannot be broadcast to a single shape\"", ")", "return", "meth", "(", "self", ",", "val", ")", "return", "wrapped_meth" ]
35.666667
15.333333
def check_docstring_first(src, filename='<unknown>'):
    # type: (bytes, str) -> int
    """Return nonzero when the source has what looks like a docstring that
    is not at the beginning of the source.

    A string is considered a docstring when it is a STRING token whose
    column offset is 0.
    """
    docstring_line = None
    code_line = None

    for tok_type, _, (lineno, col), _, _ in tokenize_tokenize(io.BytesIO(src).readline):
        if tok_type == tokenize.STRING and col == 0:
            # A column-0 string: either the first docstring, a duplicate,
            # or one appearing after real code -- only the first is legal.
            if docstring_line is not None:
                print(
                    '{}:{} Multiple module docstrings '
                    '(first docstring on line {}).'.format(
                        filename, lineno, docstring_line,
                    ),
                )
                return 1
            if code_line is not None:
                print(
                    '{}:{} Module docstring appears after code '
                    '(code seen on line {}).'.format(
                        filename, lineno, code_line,
                    ),
                )
                return 1
            docstring_line = lineno
        elif tok_type not in NON_CODE_TOKENS and code_line is None:
            code_line = lineno
    return 0
[ "def", "check_docstring_first", "(", "src", ",", "filename", "=", "'<unknown>'", ")", ":", "# type: (bytes, str) -> int", "found_docstring_line", "=", "None", "found_code_line", "=", "None", "tok_gen", "=", "tokenize_tokenize", "(", "io", ".", "BytesIO", "(", "src", ")", ".", "readline", ")", "for", "tok_type", ",", "_", ",", "(", "sline", ",", "scol", ")", ",", "_", ",", "_", "in", "tok_gen", ":", "# Looks like a docstring!", "if", "tok_type", "==", "tokenize", ".", "STRING", "and", "scol", "==", "0", ":", "if", "found_docstring_line", "is", "not", "None", ":", "print", "(", "'{}:{} Multiple module docstrings '", "'(first docstring on line {}).'", ".", "format", "(", "filename", ",", "sline", ",", "found_docstring_line", ",", ")", ",", ")", "return", "1", "elif", "found_code_line", "is", "not", "None", ":", "print", "(", "'{}:{} Module docstring appears after code '", "'(code seen on line {}).'", ".", "format", "(", "filename", ",", "sline", ",", "found_code_line", ",", ")", ",", ")", "return", "1", "else", ":", "found_docstring_line", "=", "sline", "elif", "tok_type", "not", "in", "NON_CODE_TOKENS", "and", "found_code_line", "is", "None", ":", "found_code_line", "=", "sline", "return", "0" ]
36.378378
17.162162
def initiate(self, transport, to = None):
    """Initiate an XMPP connection over the `transport`.

    :Parameters:
        - `transport`: an XMPP transport instance
        - `to`: peer name (defaults to own jid domain part)
    """
    destination = to if to is not None else JID(self.me.domain)
    return StreamBase.initiate(self, transport, destination)
[ "def", "initiate", "(", "self", ",", "transport", ",", "to", "=", "None", ")", ":", "if", "to", "is", "None", ":", "to", "=", "JID", "(", "self", ".", "me", ".", "domain", ")", "return", "StreamBase", ".", "initiate", "(", "self", ",", "transport", ",", "to", ")" ]
36.1
13.4
def tokenize(self, data):
    '''
    Tokenize sentence into a list of n-gram tokens.

    Args:
        data:   string data to tokenize.

    The result is stored in `self.token` as
    [n-gram, n-gram, n-gram, ...].
    '''
    # Let the superclass build the base token list into `self.token`.
    super().tokenize(data)
    # Slide an n-sized window over the base tokens and join each window
    # into a single n-gram string.
    # Fixed: removed the unused `token_list = []` local.
    token_tuple_zip = self.n_gram.generate_tuple_zip(self.token, self.n)
    self.token = ["".join(list(token_tuple)) for token_tuple in token_tuple_zip]
[ "def", "tokenize", "(", "self", ",", "data", ")", ":", "super", "(", ")", ".", "tokenize", "(", "data", ")", "token_tuple_zip", "=", "self", ".", "n_gram", ".", "generate_tuple_zip", "(", "self", ".", "token", ",", "self", ".", "n", ")", "token_list", "=", "[", "]", "self", ".", "token", "=", "[", "\"\"", ".", "join", "(", "list", "(", "token_tuple", ")", ")", "for", "token_tuple", "in", "token_tuple_zip", "]" ]
28.333333
25.166667
def network_get_primary_address(binding):
    '''
    Deprecated since Juju 2.3; use network_get()

    Retrieve the primary network address for a named binding

    :param binding: string. The name of a relation of extra-binding
    :return: string. The primary IP address for the named binding
    :raise: NotImplementedError if run on Juju < 2.0
    '''
    try:
        raw = subprocess.check_output(
            ['network-get', '--primary-address', binding],
            stderr=subprocess.STDOUT)
    except CalledProcessError as e:
        # Translate the "no binding" CLI failure into a dedicated exception;
        # any other failure propagates unchanged.
        if 'no network config found for binding' in e.output.decode('UTF-8'):
            raise NoNetworkBinding("No network binding for {}"
                                   .format(binding))
        raise
    return raw.decode('UTF-8').strip()
[ "def", "network_get_primary_address", "(", "binding", ")", ":", "cmd", "=", "[", "'network-get'", ",", "'--primary-address'", ",", "binding", "]", "try", ":", "response", "=", "subprocess", ".", "check_output", "(", "cmd", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", ".", "decode", "(", "'UTF-8'", ")", ".", "strip", "(", ")", "except", "CalledProcessError", "as", "e", ":", "if", "'no network config found for binding'", "in", "e", ".", "output", ".", "decode", "(", "'UTF-8'", ")", ":", "raise", "NoNetworkBinding", "(", "\"No network binding for {}\"", ".", "format", "(", "binding", ")", ")", "else", ":", "raise", "return", "response" ]
36.590909
21.863636
def equal(self, what, epsilon=None):
    """compares given object ``X`` with an expected ``Y`` object.

    It primarily assures that the compared objects are absolute equal ``==``.

    :param what: the expected value
    :param epsilon: a delta to leverage upper-bound floating point permissiveness
    """
    error = False
    try:
        outcome = DeepComparison(self.obj, what, epsilon).compare()
    except AssertionError as e:
        outcome = None
        error = e

    # A DeepExplanation result means the comparison found a difference.
    if isinstance(outcome, DeepExplanation):
        error = outcome.get_assertion(self.obj, what)

    if self.negative:
        # Negated assertion: a difference is the success case.
        if error:
            return True
        msg = '%s should differ from %s, but is the same thing'
        raise AssertionError(msg % (safe_repr(self.obj), safe_repr(what)))

    if not error:
        return True
    raise error
[ "def", "equal", "(", "self", ",", "what", ",", "epsilon", "=", "None", ")", ":", "try", ":", "comparison", "=", "DeepComparison", "(", "self", ".", "obj", ",", "what", ",", "epsilon", ")", ".", "compare", "(", ")", "error", "=", "False", "except", "AssertionError", "as", "e", ":", "error", "=", "e", "comparison", "=", "None", "if", "isinstance", "(", "comparison", ",", "DeepExplanation", ")", ":", "error", "=", "comparison", ".", "get_assertion", "(", "self", ".", "obj", ",", "what", ")", "if", "self", ".", "negative", ":", "if", "error", ":", "return", "True", "msg", "=", "'%s should differ from %s, but is the same thing'", "raise", "AssertionError", "(", "msg", "%", "(", "safe_repr", "(", "self", ".", "obj", ")", ",", "safe_repr", "(", "what", ")", ")", ")", "else", ":", "if", "not", "error", ":", "return", "True", "raise", "error" ]
31.166667
23.266667
def export_handle(self, directory):
    """Get a filehandle for exporting"""
    target = "%s/%s" % (directory, getattr(self, 'filename'))
    # Create the immediate parent directory (one level) if it is missing.
    parent = os.path.dirname(target)
    if not os.path.isdir(parent):
        os.mkdir(parent, 0o700)
    return open(target, 'w')
[ "def", "export_handle", "(", "self", ",", "directory", ")", ":", "filename", "=", "getattr", "(", "self", ",", "'filename'", ")", "dest_file", "=", "\"%s/%s\"", "%", "(", "directory", ",", "filename", ")", "dest_dir", "=", "os", ".", "path", ".", "dirname", "(", "dest_file", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "dest_dir", ")", ":", "os", ".", "mkdir", "(", "dest_dir", ",", "0o700", ")", "return", "open", "(", "dest_file", ",", "'w'", ")" ]
36.666667
8.222222
def encodefuns():
    """Returns a dictionary mapping ICC type signature sig to encoding
    function.  Each function returns a string comprising the content of
    the encoded value.  To form the full value, the type sig and the 4
    zero bytes should be prefixed (8 bytes).

    NOTE(review): this is Python 2 code -- the ``struct.pack`` 's' fields
    are fed ``str`` values, which would need ``bytes`` under Python 3.
    """

    def desc(ascii):
        """Return textDescription type [ICC 2001] 6.5.17. The ASCII part is
        filled in with the string `ascii`, the Unicode and ScriptCode parts
        are empty."""
        # The ASCII field is NUL-terminated and length-prefixed; the
        # trailing zeros cover the (empty) Unicode and ScriptCode parts.
        ascii += '\x00'
        n = len(ascii)
        return struct.pack('>L%ds2LHB67s' % n, n, ascii, 0, 0, 0, 0, '')

    def text(ascii):
        """Return textType [ICC 2001] 6.5.18."""
        return ascii + '\x00'

    def curv(f=None, n=256):
        """Return a curveType, [ICC 2001] 6.5.3. If no arguments are
        supplied then a TRC for a linear response is generated (no entries).
        If an argument is supplied and it is a number (for *f* to be a
        number it means that ``float(f)==f``) then a TRC for that
        gamma value is generated.
        Otherwise `f` is assumed to be a function that maps [0.0, 1.0] to
        [0.0, 1.0]; an `n` element table is generated for it.
        """

        if f is None:
            # Zero entries encodes the identity (linear) curve.
            return struct.pack('>L', 0)
        try:
            if float(f) == f:
                # A plain number encodes a gamma value in u8.8 fixed point.
                return struct.pack('>LH', 1, int(round(f * 2 ** 8)))
        except (TypeError, ValueError):
            pass
        assert n >= 2
        # Sample `f` at n evenly spaced points and quantise to 16 bits.
        table = []
        M = float(n - 1)
        for i in range(n):
            x = i / M
            table.append(int(round(f(x) * 65535)))
        return struct.pack('>L%dH' % n, n, *table)

    def XYZ(*l):
        # Three s15.16 fixed-point numbers (XYZNumber).
        return struct.pack('>3l', *map(fs15f16, l))

    # The local helper names (desc, text, curv, XYZ) double as the ICC
    # type signatures, so locals() *is* the sig -> encoder mapping.
    return locals()
[ "def", "encodefuns", "(", ")", ":", "def", "desc", "(", "ascii", ")", ":", "\"\"\"Return textDescription type [ICC 2001] 6.5.17. The ASCII part is\n filled in with the string `ascii`, the Unicode and ScriptCode parts\n are empty.\"\"\"", "ascii", "+=", "'\\x00'", "n", "=", "len", "(", "ascii", ")", "return", "struct", ".", "pack", "(", "'>L%ds2LHB67s'", "%", "n", ",", "n", ",", "ascii", ",", "0", ",", "0", ",", "0", ",", "0", ",", "''", ")", "def", "text", "(", "ascii", ")", ":", "\"\"\"Return textType [ICC 2001] 6.5.18.\"\"\"", "return", "ascii", "+", "'\\x00'", "def", "curv", "(", "f", "=", "None", ",", "n", "=", "256", ")", ":", "\"\"\"Return a curveType, [ICC 2001] 6.5.3. If no arguments are\n supplied then a TRC for a linear response is generated (no entries).\n If an argument is supplied and it is a number (for *f* to be a\n number it means that ``float(f)==f``) then a TRC for that\n gamma value is generated.\n Otherwise `f` is assumed to be a function that maps [0.0, 1.0] to\n [0.0, 1.0]; an `n` element table is generated for it.\n \"\"\"", "if", "f", "is", "None", ":", "return", "struct", ".", "pack", "(", "'>L'", ",", "0", ")", "try", ":", "if", "float", "(", "f", ")", "==", "f", ":", "return", "struct", ".", "pack", "(", "'>LH'", ",", "1", ",", "int", "(", "round", "(", "f", "*", "2", "**", "8", ")", ")", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "pass", "assert", "n", ">=", "2", "table", "=", "[", "]", "M", "=", "float", "(", "n", "-", "1", ")", "for", "i", "in", "range", "(", "n", ")", ":", "x", "=", "i", "/", "M", "table", ".", "append", "(", "int", "(", "round", "(", "f", "(", "x", ")", "*", "65535", ")", ")", ")", "return", "struct", ".", "pack", "(", "'>L%dH'", "%", "n", ",", "n", ",", "*", "table", ")", "def", "XYZ", "(", "*", "l", ")", ":", "return", "struct", ".", "pack", "(", "'>3l'", ",", "*", "map", "(", "fs15f16", ",", "l", ")", ")", "return", "locals", "(", ")" ]
33.038462
19.807692
def _scene(self):
    """
    A cached version of the pyembree scene.
    """
    mesh = self.mesh
    return _EmbreeWrap(vertices=mesh.vertices,
                       faces=mesh.faces,
                       scale=self._scale)
[ "def", "_scene", "(", "self", ")", ":", "return", "_EmbreeWrap", "(", "vertices", "=", "self", ".", "mesh", ".", "vertices", ",", "faces", "=", "self", ".", "mesh", ".", "faces", ",", "scale", "=", "self", ".", "_scale", ")" ]
33.571429
8.428571
def find_peaks(sig):
    r"""
    Find hard peaks and soft peaks in a signal, defined as follows:

    - Hard peak: a peak that is either /\ or \/
    - Soft peak: a peak that is either /-*\ or \-*/
      In this case we define the middle as the peak

    Parameters
    ----------
    sig : np array
        The 1d signal array

    Returns
    -------
    hard_peaks : numpy array
        Array containing the indices of the hard peaks
    soft_peaks : numpy array
        Array containing the indices of the soft peaks
    """
    if len(sig) == 0:
        return np.empty([0]), np.empty([0])

    # Sign of the forward difference (last sample is compared with itself,
    # yielding 0 there).
    shifted = np.append(sig[1:], [sig[-1]])
    slope = sig - shifted
    slope[np.where(slope > 0)] = 1
    slope[np.where(slope == 0)] = 0
    slope[np.where(slope < 0)] = -1

    # Second difference of the sign sequence marks direction changes:
    # |change| == 2 is an immediate reversal (hard peak).
    change = slope - np.append(slope[1:], [0])

    hard_peaks = np.where(np.logical_or(change == -2, change == +2))[0] + 1

    # A soft peak is a slope change into a plateau that later resumes in
    # the same direction; its index is the middle of the plateau.
    soft_peaks = []
    for start in np.where(np.logical_or(change == -1, change == +1))[0]:
        direction = change[start]
        pos = start + 1
        while True:
            if pos == len(change) or change[pos] == -direction or change[pos] == -2 or change[pos] == 2:
                break
            if change[pos] == direction:
                soft_peaks.append(int(start + (pos - start) / 2))
                break
            pos += 1
    soft_peaks = np.array(soft_peaks, dtype='int') + 1

    return hard_peaks, soft_peaks
[ "def", "find_peaks", "(", "sig", ")", ":", "if", "len", "(", "sig", ")", "==", "0", ":", "return", "np", ".", "empty", "(", "[", "0", "]", ")", ",", "np", ".", "empty", "(", "[", "0", "]", ")", "tmp", "=", "sig", "[", "1", ":", "]", "tmp", "=", "np", ".", "append", "(", "tmp", ",", "[", "sig", "[", "-", "1", "]", "]", ")", "tmp", "=", "sig", "-", "tmp", "tmp", "[", "np", ".", "where", "(", "tmp", ">", "0", ")", "]", "=", "1", "tmp", "[", "np", ".", "where", "(", "tmp", "==", "0", ")", "]", "=", "0", "tmp", "[", "np", ".", "where", "(", "tmp", "<", "0", ")", "]", "=", "-", "1", "tmp2", "=", "tmp", "[", "1", ":", "]", "tmp2", "=", "np", ".", "append", "(", "tmp2", ",", "[", "0", "]", ")", "tmp", "=", "tmp", "-", "tmp2", "hard_peaks", "=", "np", ".", "where", "(", "np", ".", "logical_or", "(", "tmp", "==", "-", "2", ",", "tmp", "==", "+", "2", ")", ")", "[", "0", "]", "+", "1", "soft_peaks", "=", "[", "]", "for", "iv", "in", "np", ".", "where", "(", "np", ".", "logical_or", "(", "tmp", "==", "-", "1", ",", "tmp", "==", "+", "1", ")", ")", "[", "0", "]", ":", "t", "=", "tmp", "[", "iv", "]", "i", "=", "iv", "+", "1", "while", "True", ":", "if", "i", "==", "len", "(", "tmp", ")", "or", "tmp", "[", "i", "]", "==", "-", "t", "or", "tmp", "[", "i", "]", "==", "-", "2", "or", "tmp", "[", "i", "]", "==", "2", ":", "break", "if", "tmp", "[", "i", "]", "==", "t", ":", "soft_peaks", ".", "append", "(", "int", "(", "iv", "+", "(", "i", "-", "iv", ")", "/", "2", ")", ")", "break", "i", "+=", "1", "soft_peaks", "=", "np", ".", "array", "(", "soft_peaks", ",", "dtype", "=", "'int'", ")", "+", "1", "return", "hard_peaks", ",", "soft_peaks" ]
25.94
20.5
def filter_for_filesize(result, size=None):
    """
    Filter Snakebite ls results down to files of at least a minimum size.

    :param result: a list of dicts returned by Snakebite ls
    :param size: the file size in MB a file should be at least to trigger True
    :return: the (possibly filtered) list of file dicts; unfiltered when
        `size` is falsy
    """
    if size:
        log = LoggingMixin().log
        # Fixed: the paths were passed as a lazy map() object, which the
        # logger renders as "<map object ...>" under Python 3.
        log.debug(
            'Filtering for file size >= %s in files: %s',
            size, [x['path'] for x in result]
        )
        # Snakebite reports lengths in bytes; convert the MB threshold once.
        size *= settings.MEGABYTE
        result = [x for x in result if x['length'] >= size]
        log.debug('HdfsSensor.poke: after size filter result is %s', result)
    return result
[ "def", "filter_for_filesize", "(", "result", ",", "size", "=", "None", ")", ":", "if", "size", ":", "log", "=", "LoggingMixin", "(", ")", ".", "log", "log", ".", "debug", "(", "'Filtering for file size >= %s in files: %s'", ",", "size", ",", "map", "(", "lambda", "x", ":", "x", "[", "'path'", "]", ",", "result", ")", ")", "size", "*=", "settings", ".", "MEGABYTE", "result", "=", "[", "x", "for", "x", "in", "result", "if", "x", "[", "'length'", "]", ">=", "size", "]", "log", ".", "debug", "(", "'HdfsSensor.poke: after size filter result is %s'", ",", "result", ")", "return", "result" ]
41.944444
20.166667
def create_thing(self, lid):
    """Create a new Thing with a local id (lid), blocking until done.

    Returns a [Thing](Thing.m.html#IoticAgent.IOT.Thing.Thing) object on
    success or when the Thing already exists.

    Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
    containing the error if the infrastructure detects a problem

    Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
    if there is a communications problem between you and the infrastructure

    `lid` (required) (string) local identifier of your Thing.  The local id
    is your name or nickname for the thing.  It's "local" in that it's only
    available to you on this container, not searchable and not visible to
    others.
    """
    completion = self.create_thing_async(lid)
    self._wait_and_except_if_failed(completion)
    try:
        # The async handler stashes the new Thing in the cache; claim it.
        with self.__new_things:
            return self.__new_things.pop(lid)
    except KeyError as ex:
        raise raise_from(IOTClientError('Thing %s not in cache (post-create)' % lid), ex)
[ "def", "create_thing", "(", "self", ",", "lid", ")", ":", "evt", "=", "self", ".", "create_thing_async", "(", "lid", ")", "self", ".", "_wait_and_except_if_failed", "(", "evt", ")", "try", ":", "with", "self", ".", "__new_things", ":", "return", "self", ".", "__new_things", ".", "pop", "(", "lid", ")", "except", "KeyError", "as", "ex", ":", "raise", "raise_from", "(", "IOTClientError", "(", "'Thing %s not in cache (post-create)'", "%", "lid", ")", ",", "ex", ")" ]
49.409091
29.909091
def wait_for_tasks_to_complete(batch_service_client, job_ids, timeout):
    """Returns when all tasks in the specified jobs reach the Completed state.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param job_ids: iterable of (job_id, _) pairs whose tasks should be
        monitored.
    :param timedelta timeout: The duration to wait for task completion. If all
    tasks in the specified jobs do not reach Completed state within this time
    period, an exception will be raised.
    :return: True once every task of every job is completed (vacuously True
        for an empty `job_ids`).
    :raises RuntimeError: when the tasks do not all complete within `timeout`.
    """
    timeout_expiration = datetime.datetime.now() + timeout

    print("Monitoring all tasks for 'Completed' state, timeout in {}..."
          .format(timeout), end='')

    while datetime.datetime.now() < timeout_expiration:
        print('.', end='')
        sys.stdout.flush()

        # Fixed: `incomplete_tasks` was previously only bound inside the
        # for-loop, so an empty `job_ids` raised NameError; it is now
        # initialised so the vacuous case succeeds immediately.
        incomplete_tasks = []
        for (job_id, _) in job_ids:
            tasks = batch_service_client.task.list(job_id)
            incomplete_tasks = [task for task in tasks if
                                task.state != batchmodels.TaskState.completed]
            if incomplete_tasks:
                # One job with pending tasks is enough to keep waiting.
                break

        if not incomplete_tasks:
            print()
            return True
        else:
            time.sleep(1)

    raise RuntimeError("ERROR: Tasks did not reach 'Completed' state within "
                       "timeout period of " + str(timeout))
[ "def", "wait_for_tasks_to_complete", "(", "batch_service_client", ",", "job_ids", ",", "timeout", ")", ":", "timeout_expiration", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "+", "timeout", "print", "(", "\"Monitoring all tasks for 'Completed' state, timeout in {}...\"", ".", "format", "(", "timeout", ")", ",", "end", "=", "''", ")", "while", "datetime", ".", "datetime", ".", "now", "(", ")", "<", "timeout_expiration", ":", "print", "(", "'.'", ",", "end", "=", "''", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "# tasks = batch_service_client.task.list(job_id)", "# incomplete_tasks = [task for task in tasks if", "# task.state != batchmodels.TaskState.completed]", "for", "(", "job_id", ",", "_", ")", "in", "job_ids", ":", "tasks", "=", "batch_service_client", ".", "task", ".", "list", "(", "job_id", ")", "incomplete_tasks", "=", "[", "task", "for", "task", "in", "tasks", "if", "task", ".", "state", "!=", "batchmodels", ".", "TaskState", ".", "completed", "]", "if", "incomplete_tasks", ":", "break", "if", "not", "incomplete_tasks", ":", "print", "(", ")", "return", "True", "else", ":", "time", ".", "sleep", "(", "1", ")", "raise", "RuntimeError", "(", "\"ERROR: Tasks did not reach 'Completed' state within \"", "\"timeout period of \"", "+", "str", "(", "timeout", ")", ")" ]
44.794118
22.794118
def toArray(self):
    """
    Return a dense 1-dimensional NumPy copy of this SparseVector.
    """
    dense = np.zeros((self.size,), dtype=np.float64)
    # Scatter the stored values to their positions; the rest stays 0.
    dense[self.indices] = self.values
    return dense
[ "def", "toArray", "(", "self", ")", ":", "arr", "=", "np", ".", "zeros", "(", "(", "self", ".", "size", ",", ")", ",", "dtype", "=", "np", ".", "float64", ")", "arr", "[", "self", ".", "indices", "]", "=", "self", ".", "values", "return", "arr" ]
32.285714
13.428571
def deferred_emails():
    """Checks for deferred email, that otherwise fill up the queue."""
    count = Message.objects.deferred().count()
    # Danger takes precedence over warning; anything below both is OK.
    if count >= DEFERRED_DANGER_THRESHOLD:
        status = SERVER_STATUS['DANGER']
    elif count >= DEFERRED_WARNING_THRESHOLD:
        status = SERVER_STATUS['WARNING']
    else:
        status = SERVER_STATUS['OK']
    return {
        'label': 'Deferred Email',
        'status': status,
        'info': 'There are currently {0} deferred messages.'.format(count)
    }
[ "def", "deferred_emails", "(", ")", ":", "status", "=", "SERVER_STATUS", "[", "'OK'", "]", "count", "=", "Message", ".", "objects", ".", "deferred", "(", ")", ".", "count", "(", ")", "if", "DEFERRED_WARNING_THRESHOLD", "<=", "count", "<", "DEFERRED_DANGER_THRESHOLD", ":", "status", "=", "SERVER_STATUS", "[", "'WARNING'", "]", "if", "count", ">=", "DEFERRED_DANGER_THRESHOLD", ":", "status", "=", "SERVER_STATUS", "[", "'DANGER'", "]", "return", "{", "'label'", ":", "'Deferred Email'", ",", "'status'", ":", "status", ",", "'info'", ":", "'There are currently {0} deferred messages.'", ".", "format", "(", "count", ")", "}" ]
34.266667
17.6
def addcomponent(self, data):
    """
    Create a component in Bugzilla via the Component.create RPC.

    Takes a dict, with the following elements:

    product: The product to create the component in
    component: The name of the component to create
    description: A one sentence summary of the component
    default_assignee: The bugzilla login (email address) of the initial
                      owner of the component
    default_qa_contact (optional): The bugzilla login of the initial QA
                                   contact
    default_cc: (optional) The initial list of users to be CC'ed on
                new bugs for the component.
    is_active: (optional) If False, the component is hidden from
               the component list when filing new bugs.
    """
    # Work on a copy so the caller's dict is not mutated by the
    # in-place field conversion.
    payload = data.copy()
    self._component_data_convert(payload)
    return self._proxy.Component.create(payload)
[ "def", "addcomponent", "(", "self", ",", "data", ")", ":", "data", "=", "data", ".", "copy", "(", ")", "self", ".", "_component_data_convert", "(", "data", ")", "return", "self", ".", "_proxy", ".", "Component", ".", "create", "(", "data", ")" ]
47.45
18.15
def model_instance_diff(old, new):
    """
    Calculates the differences between two model instances. One of the instances may be ``None`` (i.e., a newly
    created model or deleted model). This will cause all fields with a value to have changed (from ``None``).

    :param old: The old state of the model instance.
    :type old: Model
    :param new: The new state of the model instance.
    :type new: Model
    :return: A dictionary with the names of the changed fields as keys and a two tuple of the old and new field values
             as value, or ``None`` when nothing changed.
    :rtype: dict
    """
    # Imported here rather than at module level -- presumably to avoid a
    # circular import with the registry (NOTE(review): confirm).
    from auditlog.registry import auditlog

    if not(old is None or isinstance(old, Model)):
        raise TypeError("The supplied old instance is not a valid model instance.")
    if not(new is None or isinstance(new, Model)):
        raise TypeError("The supplied new instance is not a valid model instance.")

    diff = {}

    # Work out which fields to compare and which include/exclude rules
    # apply, depending on which side(s) of the diff exist.
    if old is not None and new is not None:
        fields = set(old._meta.fields + new._meta.fields)
        model_fields = auditlog.get_model_fields(new._meta.model)
    elif old is not None:
        fields = set(get_fields_in_model(old))
        model_fields = auditlog.get_model_fields(old._meta.model)
    elif new is not None:
        fields = set(get_fields_in_model(new))
        model_fields = auditlog.get_model_fields(new._meta.model)
    else:
        fields = set()
        model_fields = None

    # Check if fields must be filtered
    if model_fields and (model_fields['include_fields'] or model_fields['exclude_fields']) and fields:
        filtered_fields = []
        # include_fields (when non-empty) whitelists; exclude_fields is
        # then applied as a blacklist on top of it.
        if model_fields['include_fields']:
            filtered_fields = [field for field in fields if field.name in model_fields['include_fields']]
        else:
            filtered_fields = fields
        if model_fields['exclude_fields']:
            filtered_fields = [field for field in filtered_fields if field.name not in model_fields['exclude_fields']]
        fields = filtered_fields

    # Compare raw values, but record both sides as text for the log entry.
    for field in fields:
        old_value = get_field_value(old, field)
        new_value = get_field_value(new, field)

        if old_value != new_value:
            diff[field.name] = (smart_text(old_value), smart_text(new_value))

    # Callers expect ``None`` (not ``{}``) when there is no difference.
    if len(diff) == 0:
        diff = None

    return diff
[ "def", "model_instance_diff", "(", "old", ",", "new", ")", ":", "from", "auditlog", ".", "registry", "import", "auditlog", "if", "not", "(", "old", "is", "None", "or", "isinstance", "(", "old", ",", "Model", ")", ")", ":", "raise", "TypeError", "(", "\"The supplied old instance is not a valid model instance.\"", ")", "if", "not", "(", "new", "is", "None", "or", "isinstance", "(", "new", ",", "Model", ")", ")", ":", "raise", "TypeError", "(", "\"The supplied new instance is not a valid model instance.\"", ")", "diff", "=", "{", "}", "if", "old", "is", "not", "None", "and", "new", "is", "not", "None", ":", "fields", "=", "set", "(", "old", ".", "_meta", ".", "fields", "+", "new", ".", "_meta", ".", "fields", ")", "model_fields", "=", "auditlog", ".", "get_model_fields", "(", "new", ".", "_meta", ".", "model", ")", "elif", "old", "is", "not", "None", ":", "fields", "=", "set", "(", "get_fields_in_model", "(", "old", ")", ")", "model_fields", "=", "auditlog", ".", "get_model_fields", "(", "old", ".", "_meta", ".", "model", ")", "elif", "new", "is", "not", "None", ":", "fields", "=", "set", "(", "get_fields_in_model", "(", "new", ")", ")", "model_fields", "=", "auditlog", ".", "get_model_fields", "(", "new", ".", "_meta", ".", "model", ")", "else", ":", "fields", "=", "set", "(", ")", "model_fields", "=", "None", "# Check if fields must be filtered", "if", "model_fields", "and", "(", "model_fields", "[", "'include_fields'", "]", "or", "model_fields", "[", "'exclude_fields'", "]", ")", "and", "fields", ":", "filtered_fields", "=", "[", "]", "if", "model_fields", "[", "'include_fields'", "]", ":", "filtered_fields", "=", "[", "field", "for", "field", "in", "fields", "if", "field", ".", "name", "in", "model_fields", "[", "'include_fields'", "]", "]", "else", ":", "filtered_fields", "=", "fields", "if", "model_fields", "[", "'exclude_fields'", "]", ":", "filtered_fields", "=", "[", "field", "for", "field", "in", "filtered_fields", "if", "field", ".", "name", "not", "in", 
"model_fields", "[", "'exclude_fields'", "]", "]", "fields", "=", "filtered_fields", "for", "field", "in", "fields", ":", "old_value", "=", "get_field_value", "(", "old", ",", "field", ")", "new_value", "=", "get_field_value", "(", "new", ",", "field", ")", "if", "old_value", "!=", "new_value", ":", "diff", "[", "field", ".", "name", "]", "=", "(", "smart_text", "(", "old_value", ")", ",", "smart_text", "(", "new_value", ")", ")", "if", "len", "(", "diff", ")", "==", "0", ":", "diff", "=", "None", "return", "diff" ]
38.728814
23.779661
def unique(seq, key=None, return_as=None): """Unique the seq and keep the order. Instead of the slow way: `lambda seq: (x for index, x in enumerate(seq) if seq.index(x)==index)` :param seq: raw sequence. :param return_as: generator for default, or list / set / str... >>> from torequests.utils import unique >>> a = [1,2,3,4,2,3,4] >>> unique(a) <generator object unique.<locals>.<genexpr> at 0x05720EA0> >>> unique(a, str) '1234' >>> unique(a, list) [1, 2, 3, 4] """ seen = set() add = seen.add if key: generator = (x for x in seq if key(x) not in seen and not add(key(x))) else: generator = (x for x in seq if x not in seen and not add(x)) if return_as: if return_as == str: return "".join(map(str, generator)) else: return return_as(generator) else: # python2 not support yield from return generator
[ "def", "unique", "(", "seq", ",", "key", "=", "None", ",", "return_as", "=", "None", ")", ":", "seen", "=", "set", "(", ")", "add", "=", "seen", ".", "add", "if", "key", ":", "generator", "=", "(", "x", "for", "x", "in", "seq", "if", "key", "(", "x", ")", "not", "in", "seen", "and", "not", "add", "(", "key", "(", "x", ")", ")", ")", "else", ":", "generator", "=", "(", "x", "for", "x", "in", "seq", "if", "x", "not", "in", "seen", "and", "not", "add", "(", "x", ")", ")", "if", "return_as", ":", "if", "return_as", "==", "str", ":", "return", "\"\"", ".", "join", "(", "map", "(", "str", ",", "generator", ")", ")", "else", ":", "return", "return_as", "(", "generator", ")", "else", ":", "# python2 not support yield from", "return", "generator" ]
29.0625
20.3125
def add_scope(self, scope_type, scope_name, scope_start, is_method=False): """we identified a scope and add it to positions.""" if self._curr is not None: self._curr['end'] = scope_start - 1 # close last scope self._curr = { 'type': scope_type, 'name': scope_name, 'start': scope_start, 'end': scope_start } if is_method and self._positions: last = self._positions[-1] if not 'methods' in last: last['methods'] = [] last['methods'].append(self._curr) else: self._positions.append(self._curr)
[ "def", "add_scope", "(", "self", ",", "scope_type", ",", "scope_name", ",", "scope_start", ",", "is_method", "=", "False", ")", ":", "if", "self", ".", "_curr", "is", "not", "None", ":", "self", ".", "_curr", "[", "'end'", "]", "=", "scope_start", "-", "1", "# close last scope", "self", ".", "_curr", "=", "{", "'type'", ":", "scope_type", ",", "'name'", ":", "scope_name", ",", "'start'", ":", "scope_start", ",", "'end'", ":", "scope_start", "}", "if", "is_method", "and", "self", ".", "_positions", ":", "last", "=", "self", ".", "_positions", "[", "-", "1", "]", "if", "not", "'methods'", "in", "last", ":", "last", "[", "'methods'", "]", "=", "[", "]", "last", "[", "'methods'", "]", ".", "append", "(", "self", ".", "_curr", ")", "else", ":", "self", ".", "_positions", ".", "append", "(", "self", ".", "_curr", ")" ]
39.125
14.25
def extract_header_comment_key_value_tuples_from_file(file_descriptor): """ Extracts tuples representing comments and localization entries from strings file. Args: file_descriptor (file): The file to read the tuples from Returns: list : List of tuples representing the headers and localization entries. """ file_data = file_descriptor.read() findall_result = re.findall(HEADER_COMMENT_KEY_VALUE_TUPLES_REGEX, file_data, re.MULTILINE | re.DOTALL) returned_list = [] for header_comment, _ignored, raw_comments, key, value in findall_result: comments = re.findall("/\* (.*?) \*/", raw_comments) if len(comments) == 0: comments = [u""] returned_list.append((header_comment, comments, key, value)) return returned_list
[ "def", "extract_header_comment_key_value_tuples_from_file", "(", "file_descriptor", ")", ":", "file_data", "=", "file_descriptor", ".", "read", "(", ")", "findall_result", "=", "re", ".", "findall", "(", "HEADER_COMMENT_KEY_VALUE_TUPLES_REGEX", ",", "file_data", ",", "re", ".", "MULTILINE", "|", "re", ".", "DOTALL", ")", "returned_list", "=", "[", "]", "for", "header_comment", ",", "_ignored", ",", "raw_comments", ",", "key", ",", "value", "in", "findall_result", ":", "comments", "=", "re", ".", "findall", "(", "\"/\\* (.*?) \\*/\"", ",", "raw_comments", ")", "if", "len", "(", "comments", ")", "==", "0", ":", "comments", "=", "[", "u\"\"", "]", "returned_list", ".", "append", "(", "(", "header_comment", ",", "comments", ",", "key", ",", "value", ")", ")", "return", "returned_list" ]
37.428571
26.857143
def dostilt(s, bed_az, bed_dip): """ Rotates "s" tensor to stratigraphic coordinates Parameters __________ s : [x11,x22,x33,x12,x23,x13] - the six tensor elements bed_az : bedding dip direction bed_dip : bedding dip Return s_rot : [x11,x22,x33,x12,x23,x13] - after rotation """ tau, Vdirs = doseigs(s) Vrot = [] for evec in Vdirs: d, i = dotilt(evec[0], evec[1], bed_az, bed_dip) Vrot.append([d, i]) s_rot = doeigs_s(tau, Vrot) return s_rot
[ "def", "dostilt", "(", "s", ",", "bed_az", ",", "bed_dip", ")", ":", "tau", ",", "Vdirs", "=", "doseigs", "(", "s", ")", "Vrot", "=", "[", "]", "for", "evec", "in", "Vdirs", ":", "d", ",", "i", "=", "dotilt", "(", "evec", "[", "0", "]", ",", "evec", "[", "1", "]", ",", "bed_az", ",", "bed_dip", ")", "Vrot", ".", "append", "(", "[", "d", ",", "i", "]", ")", "s_rot", "=", "doeigs_s", "(", "tau", ",", "Vrot", ")", "return", "s_rot" ]
23.809524
18.761905
def get_queries_batch(self, query_get_request, project): """GetQueriesBatch. [Preview API] Gets a list of queries by ids (Maximum 1000) :param :class:`<QueryBatchGetRequest> <azure.devops.v5_1.work_item_tracking.models.QueryBatchGetRequest>` query_get_request: :param str project: Project ID or project name :rtype: [QueryHierarchyItem] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') content = self._serialize.body(query_get_request, 'QueryBatchGetRequest') response = self._send(http_method='POST', location_id='549816f9-09b0-4e75-9e81-01fbfcd07426', version='5.1-preview.1', route_values=route_values, content=content) return self._deserialize('[QueryHierarchyItem]', self._unwrap_collection(response))
[ "def", "get_queries_batch", "(", "self", ",", "query_get_request", ",", "project", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'project'", ",", "project", ",", "'str'", ")", "content", "=", "self", ".", "_serialize", ".", "body", "(", "query_get_request", ",", "'QueryBatchGetRequest'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'POST'", ",", "location_id", "=", "'549816f9-09b0-4e75-9e81-01fbfcd07426'", ",", "version", "=", "'5.1-preview.1'", ",", "route_values", "=", "route_values", ",", "content", "=", "content", ")", "return", "self", ".", "_deserialize", "(", "'[QueryHierarchyItem]'", ",", "self", ".", "_unwrap_collection", "(", "response", ")", ")" ]
57.647059
23.411765
def main(args=sys.argv): """ Main command-line invocation. """ try: opts, args = getopt.gnu_getopt(args[1:], 'p:o:jdt', [ 'jspath=', 'output=', 'private', 'json', 'dependencies', 'test', 'help']) opts = dict(opts) except getopt.GetoptError: usage() sys.exit(2) run_and_exit_if(opts, run_doctests, '--test') run_and_exit_if(opts, usage, '--help') js_paths = get_path_list(opts) docs = CodeBaseDoc(js_paths, '--private' in opts) if args: selected_files = set(docs.keys()) & set(args) else: selected_files = list(docs.keys()) def print_json(): print(docs.to_json(selected_files)) run_and_exit_if(opts, print_json, '--json', '-j') def print_dependencies(): for dependency in find_dependencies(selected_files, docs): print(dependency) run_and_exit_if(opts, print_dependencies, '--dependencies', '-d') output = opts.get('--output') or opts.get('-o') if output is None and len(args) != 1: output = 'apidocs' docs.save_docs(selected_files, output)
[ "def", "main", "(", "args", "=", "sys", ".", "argv", ")", ":", "try", ":", "opts", ",", "args", "=", "getopt", ".", "gnu_getopt", "(", "args", "[", "1", ":", "]", ",", "'p:o:jdt'", ",", "[", "'jspath='", ",", "'output='", ",", "'private'", ",", "'json'", ",", "'dependencies'", ",", "'test'", ",", "'help'", "]", ")", "opts", "=", "dict", "(", "opts", ")", "except", "getopt", ".", "GetoptError", ":", "usage", "(", ")", "sys", ".", "exit", "(", "2", ")", "run_and_exit_if", "(", "opts", ",", "run_doctests", ",", "'--test'", ")", "run_and_exit_if", "(", "opts", ",", "usage", ",", "'--help'", ")", "js_paths", "=", "get_path_list", "(", "opts", ")", "docs", "=", "CodeBaseDoc", "(", "js_paths", ",", "'--private'", "in", "opts", ")", "if", "args", ":", "selected_files", "=", "set", "(", "docs", ".", "keys", "(", ")", ")", "&", "set", "(", "args", ")", "else", ":", "selected_files", "=", "list", "(", "docs", ".", "keys", "(", ")", ")", "def", "print_json", "(", ")", ":", "print", "(", "docs", ".", "to_json", "(", "selected_files", ")", ")", "run_and_exit_if", "(", "opts", ",", "print_json", ",", "'--json'", ",", "'-j'", ")", "def", "print_dependencies", "(", ")", ":", "for", "dependency", "in", "find_dependencies", "(", "selected_files", ",", "docs", ")", ":", "print", "(", "dependency", ")", "run_and_exit_if", "(", "opts", ",", "print_dependencies", ",", "'--dependencies'", ",", "'-d'", ")", "output", "=", "opts", ".", "get", "(", "'--output'", ")", "or", "opts", ".", "get", "(", "'-o'", ")", "if", "output", "is", "None", "and", "len", "(", "args", ")", "!=", "1", ":", "output", "=", "'apidocs'", "docs", ".", "save_docs", "(", "selected_files", ",", "output", ")" ]
30.277778
17.555556
def main(): """ NAME replace_AC_specimens.py DESCRIPTION finds anisotropy corrected data and replaces that specimen with it. puts in pmag_specimen format file SYNTAX replace_AC_specimens.py [command line options] OPTIONS -h prints help message and quits -i allows interactive setting of file names -fu TFILE uncorrected pmag_specimen format file with thellier interpretations created by thellier_magic_redo.py -fc AFILE anisotropy corrected pmag_specimen format file created by thellier_magic_redo.py -F FILE pmag_specimens format output file DEFAULTS TFILE: thellier_specimens.txt AFILE: AC_specimens.txt FILE: TorAC_specimens.txt """ dir_path='.' tspec="thellier_specimens.txt" aspec="AC_specimens.txt" ofile="TorAC_specimens.txt" critfile="pmag_criteria.txt" ACSamplist,Samplist,sigmin=[],[],10000 GoodSamps,SpecOuts=[],[] # get arguments from command line if '-h' in sys.argv: print(main.__doc__) sys.exit() if '-fu' in sys.argv: ind=sys.argv.index('-fu') tspec=sys.argv[ind+1] if '-fc' in sys.argv: ind=sys.argv.index('-fc') aspec=sys.argv[ind+1] if '-F' in sys.argv: ind=sys.argv.index('-F') ofile=sys.argv[ind+1] if '-WD' in sys.argv: ind=sys.argv.index('-WD') dir_path=sys.argv[ind+1] # read in pmag_specimens file tspec=dir_path+'/'+tspec aspec=dir_path+'/'+aspec ofile=dir_path+'/'+ofile Specs,file_type=pmag.magic_read(tspec) Specs,file_type=pmag.magic_read(tspec) Speclist=pmag.get_specs(Specs) ACSpecs,file_type=pmag.magic_read(aspec) ACspeclist=pmag.get_specs(ACSpecs) for spec in Specs: if spec["er_sample_name"] not in Samplist:Samplist.append(spec["er_sample_name"]) for spec in ACSpecs: if spec["er_sample_name"] not in ACSamplist:ACSamplist.append(spec["er_sample_name"]) # for samp in Samplist: useAC,Ints,ACInts,GoodSpecs,AC,UC=0,[],[],[],[],[] for spec in Specs: if spec["er_sample_name"].lower()==samp.lower(): UC.append(spec) if samp in ACSamplist: for spec in ACSpecs: if spec["er_sample_name"].lower()==samp.lower(): AC.append(spec) if len(AC)>0: AClist=[] for spec in AC: 
SpecOuts.append(spec) AClist.append(spec['er_specimen_name']) print('using AC: ',spec['er_specimen_name'],'%7.1f'%(1e6*float(spec['specimen_int']))) for spec in UC: if spec['er_specimen_name'] not in AClist: SpecOuts.append(spec) # print 'using UC: ',spec['er_specimen_name'],'%7.1f'%(1e6*float(spec['specimen_int'])) else: for spec in UC: SpecOuts.append(spec) # print 'using UC: ',spec['er_specimen_name'],'%7.1f'%(1e6*float(spec['specimen_int'])) SpecOuts,keys=pmag.fillkeys(SpecOuts) pmag.magic_write(ofile,SpecOuts,'pmag_specimens') print('thellier data assessed for AC correction put in ', ofile)
[ "def", "main", "(", ")", ":", "dir_path", "=", "'.'", "tspec", "=", "\"thellier_specimens.txt\"", "aspec", "=", "\"AC_specimens.txt\"", "ofile", "=", "\"TorAC_specimens.txt\"", "critfile", "=", "\"pmag_criteria.txt\"", "ACSamplist", ",", "Samplist", ",", "sigmin", "=", "[", "]", ",", "[", "]", ",", "10000", "GoodSamps", ",", "SpecOuts", "=", "[", "]", ",", "[", "]", "# get arguments from command line", "if", "'-h'", "in", "sys", ".", "argv", ":", "print", "(", "main", ".", "__doc__", ")", "sys", ".", "exit", "(", ")", "if", "'-fu'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-fu'", ")", "tspec", "=", "sys", ".", "argv", "[", "ind", "+", "1", "]", "if", "'-fc'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-fc'", ")", "aspec", "=", "sys", ".", "argv", "[", "ind", "+", "1", "]", "if", "'-F'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-F'", ")", "ofile", "=", "sys", ".", "argv", "[", "ind", "+", "1", "]", "if", "'-WD'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-WD'", ")", "dir_path", "=", "sys", ".", "argv", "[", "ind", "+", "1", "]", "# read in pmag_specimens file", "tspec", "=", "dir_path", "+", "'/'", "+", "tspec", "aspec", "=", "dir_path", "+", "'/'", "+", "aspec", "ofile", "=", "dir_path", "+", "'/'", "+", "ofile", "Specs", ",", "file_type", "=", "pmag", ".", "magic_read", "(", "tspec", ")", "Specs", ",", "file_type", "=", "pmag", ".", "magic_read", "(", "tspec", ")", "Speclist", "=", "pmag", ".", "get_specs", "(", "Specs", ")", "ACSpecs", ",", "file_type", "=", "pmag", ".", "magic_read", "(", "aspec", ")", "ACspeclist", "=", "pmag", ".", "get_specs", "(", "ACSpecs", ")", "for", "spec", "in", "Specs", ":", "if", "spec", "[", "\"er_sample_name\"", "]", "not", "in", "Samplist", ":", "Samplist", ".", "append", "(", "spec", "[", "\"er_sample_name\"", "]", ")", "for", "spec", "in", "ACSpecs", ":", "if", 
"spec", "[", "\"er_sample_name\"", "]", "not", "in", "ACSamplist", ":", "ACSamplist", ".", "append", "(", "spec", "[", "\"er_sample_name\"", "]", ")", "#", "for", "samp", "in", "Samplist", ":", "useAC", ",", "Ints", ",", "ACInts", ",", "GoodSpecs", ",", "AC", ",", "UC", "=", "0", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", "for", "spec", "in", "Specs", ":", "if", "spec", "[", "\"er_sample_name\"", "]", ".", "lower", "(", ")", "==", "samp", ".", "lower", "(", ")", ":", "UC", ".", "append", "(", "spec", ")", "if", "samp", "in", "ACSamplist", ":", "for", "spec", "in", "ACSpecs", ":", "if", "spec", "[", "\"er_sample_name\"", "]", ".", "lower", "(", ")", "==", "samp", ".", "lower", "(", ")", ":", "AC", ".", "append", "(", "spec", ")", "if", "len", "(", "AC", ")", ">", "0", ":", "AClist", "=", "[", "]", "for", "spec", "in", "AC", ":", "SpecOuts", ".", "append", "(", "spec", ")", "AClist", ".", "append", "(", "spec", "[", "'er_specimen_name'", "]", ")", "print", "(", "'using AC: '", ",", "spec", "[", "'er_specimen_name'", "]", ",", "'%7.1f'", "%", "(", "1e6", "*", "float", "(", "spec", "[", "'specimen_int'", "]", ")", ")", ")", "for", "spec", "in", "UC", ":", "if", "spec", "[", "'er_specimen_name'", "]", "not", "in", "AClist", ":", "SpecOuts", ".", "append", "(", "spec", ")", "# print 'using UC: ',spec['er_specimen_name'],'%7.1f'%(1e6*float(spec['specimen_int']))", "else", ":", "for", "spec", "in", "UC", ":", "SpecOuts", ".", "append", "(", "spec", ")", "# print 'using UC: ',spec['er_specimen_name'],'%7.1f'%(1e6*float(spec['specimen_int']))", "SpecOuts", ",", "keys", "=", "pmag", ".", "fillkeys", "(", "SpecOuts", ")", "pmag", ".", "magic_write", "(", "ofile", ",", "SpecOuts", ",", "'pmag_specimens'", ")", "print", "(", "'thellier data assessed for AC correction put in '", ",", "ofile", ")" ]
35.32967
16.384615
def process_line( line, # type: Text filename, # type: str line_number, # type: int finder=None, # type: Optional[PackageFinder] comes_from=None, # type: Optional[str] options=None, # type: Optional[optparse.Values] session=None, # type: Optional[PipSession] wheel_cache=None, # type: Optional[WheelCache] use_pep517=None, # type: Optional[bool] constraint=False # type: bool ): # type: (...) -> Iterator[InstallRequirement] """Process a single requirements line; This can result in creating/yielding requirements, or updating the finder. For lines that contain requirements, the only options that have an effect are from SUPPORTED_OPTIONS_REQ, and they are scoped to the requirement. Other options from SUPPORTED_OPTIONS may be present, but are ignored. For lines that do not contain requirements, the only options that have an effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may be present, but are ignored. These lines may contain multiple options (although our docs imply only one is supported), and all our parsed and affect the finder. :param constraint: If True, parsing a constraints file. 
:param options: OptionParser options that we may update """ parser = build_parser(line) defaults = parser.get_default_values() defaults.index_url = None if finder: defaults.format_control = finder.format_control args_str, options_str = break_args_options(line) # Prior to 2.7.3, shlex cannot deal with unicode entries if sys.version_info < (2, 7, 3): # https://github.com/python/mypy/issues/1174 options_str = options_str.encode('utf8') # type: ignore # https://github.com/python/mypy/issues/1174 opts, _ = parser.parse_args( shlex.split(options_str), defaults) # type: ignore # preserve for the nested code path line_comes_from = '%s %s (line %s)' % ( '-c' if constraint else '-r', filename, line_number, ) # yield a line requirement if args_str: isolated = options.isolated_mode if options else False if options: cmdoptions.check_install_build_global(options, opts) # get the options that apply to requirements req_options = {} for dest in SUPPORTED_OPTIONS_REQ_DEST: if dest in opts.__dict__ and opts.__dict__[dest]: req_options[dest] = opts.__dict__[dest] yield install_req_from_line( args_str, line_comes_from, constraint=constraint, use_pep517=use_pep517, isolated=isolated, options=req_options, wheel_cache=wheel_cache ) # yield an editable requirement elif opts.editables: isolated = options.isolated_mode if options else False yield install_req_from_editable( opts.editables[0], comes_from=line_comes_from, use_pep517=use_pep517, constraint=constraint, isolated=isolated, wheel_cache=wheel_cache ) # parse a nested requirements file elif opts.requirements or opts.constraints: if opts.requirements: req_path = opts.requirements[0] nested_constraint = False else: req_path = opts.constraints[0] nested_constraint = True # original file is over http if SCHEME_RE.search(filename): # do a url join so relative paths work req_path = urllib_parse.urljoin(filename, req_path) # original file and nested file are paths elif not SCHEME_RE.search(req_path): # do a join so relative paths work 
req_path = os.path.join(os.path.dirname(filename), req_path) # TODO: Why not use `comes_from='-r {} (line {})'` here as well? parsed_reqs = parse_requirements( req_path, finder, comes_from, options, session, constraint=nested_constraint, wheel_cache=wheel_cache ) for req in parsed_reqs: yield req # percolate hash-checking option upward elif opts.require_hashes: options.require_hashes = opts.require_hashes # set finder options elif finder: if opts.index_url: finder.index_urls = [opts.index_url] if opts.no_index is True: finder.index_urls = [] if opts.extra_index_urls: finder.index_urls.extend(opts.extra_index_urls) if opts.find_links: # FIXME: it would be nice to keep track of the source # of the find_links: support a find-links local path # relative to a requirements file. value = opts.find_links[0] req_dir = os.path.dirname(os.path.abspath(filename)) relative_to_reqs_file = os.path.join(req_dir, value) if os.path.exists(relative_to_reqs_file): value = relative_to_reqs_file finder.find_links.append(value) if opts.pre: finder.allow_all_prereleases = True if opts.trusted_hosts: finder.secure_origins.extend( ("*", host, "*") for host in opts.trusted_hosts)
[ "def", "process_line", "(", "line", ",", "# type: Text", "filename", ",", "# type: str", "line_number", ",", "# type: int", "finder", "=", "None", ",", "# type: Optional[PackageFinder]", "comes_from", "=", "None", ",", "# type: Optional[str]", "options", "=", "None", ",", "# type: Optional[optparse.Values]", "session", "=", "None", ",", "# type: Optional[PipSession]", "wheel_cache", "=", "None", ",", "# type: Optional[WheelCache]", "use_pep517", "=", "None", ",", "# type: Optional[bool]", "constraint", "=", "False", "# type: bool", ")", ":", "# type: (...) -> Iterator[InstallRequirement]", "parser", "=", "build_parser", "(", "line", ")", "defaults", "=", "parser", ".", "get_default_values", "(", ")", "defaults", ".", "index_url", "=", "None", "if", "finder", ":", "defaults", ".", "format_control", "=", "finder", ".", "format_control", "args_str", ",", "options_str", "=", "break_args_options", "(", "line", ")", "# Prior to 2.7.3, shlex cannot deal with unicode entries", "if", "sys", ".", "version_info", "<", "(", "2", ",", "7", ",", "3", ")", ":", "# https://github.com/python/mypy/issues/1174", "options_str", "=", "options_str", ".", "encode", "(", "'utf8'", ")", "# type: ignore", "# https://github.com/python/mypy/issues/1174", "opts", ",", "_", "=", "parser", ".", "parse_args", "(", "shlex", ".", "split", "(", "options_str", ")", ",", "defaults", ")", "# type: ignore", "# preserve for the nested code path", "line_comes_from", "=", "'%s %s (line %s)'", "%", "(", "'-c'", "if", "constraint", "else", "'-r'", ",", "filename", ",", "line_number", ",", ")", "# yield a line requirement", "if", "args_str", ":", "isolated", "=", "options", ".", "isolated_mode", "if", "options", "else", "False", "if", "options", ":", "cmdoptions", ".", "check_install_build_global", "(", "options", ",", "opts", ")", "# get the options that apply to requirements", "req_options", "=", "{", "}", "for", "dest", "in", "SUPPORTED_OPTIONS_REQ_DEST", ":", "if", "dest", "in", "opts", ".", "__dict__", 
"and", "opts", ".", "__dict__", "[", "dest", "]", ":", "req_options", "[", "dest", "]", "=", "opts", ".", "__dict__", "[", "dest", "]", "yield", "install_req_from_line", "(", "args_str", ",", "line_comes_from", ",", "constraint", "=", "constraint", ",", "use_pep517", "=", "use_pep517", ",", "isolated", "=", "isolated", ",", "options", "=", "req_options", ",", "wheel_cache", "=", "wheel_cache", ")", "# yield an editable requirement", "elif", "opts", ".", "editables", ":", "isolated", "=", "options", ".", "isolated_mode", "if", "options", "else", "False", "yield", "install_req_from_editable", "(", "opts", ".", "editables", "[", "0", "]", ",", "comes_from", "=", "line_comes_from", ",", "use_pep517", "=", "use_pep517", ",", "constraint", "=", "constraint", ",", "isolated", "=", "isolated", ",", "wheel_cache", "=", "wheel_cache", ")", "# parse a nested requirements file", "elif", "opts", ".", "requirements", "or", "opts", ".", "constraints", ":", "if", "opts", ".", "requirements", ":", "req_path", "=", "opts", ".", "requirements", "[", "0", "]", "nested_constraint", "=", "False", "else", ":", "req_path", "=", "opts", ".", "constraints", "[", "0", "]", "nested_constraint", "=", "True", "# original file is over http", "if", "SCHEME_RE", ".", "search", "(", "filename", ")", ":", "# do a url join so relative paths work", "req_path", "=", "urllib_parse", ".", "urljoin", "(", "filename", ",", "req_path", ")", "# original file and nested file are paths", "elif", "not", "SCHEME_RE", ".", "search", "(", "req_path", ")", ":", "# do a join so relative paths work", "req_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "filename", ")", ",", "req_path", ")", "# TODO: Why not use `comes_from='-r {} (line {})'` here as well?", "parsed_reqs", "=", "parse_requirements", "(", "req_path", ",", "finder", ",", "comes_from", ",", "options", ",", "session", ",", "constraint", "=", "nested_constraint", ",", "wheel_cache", "=", "wheel_cache", ")", "for", 
"req", "in", "parsed_reqs", ":", "yield", "req", "# percolate hash-checking option upward", "elif", "opts", ".", "require_hashes", ":", "options", ".", "require_hashes", "=", "opts", ".", "require_hashes", "# set finder options", "elif", "finder", ":", "if", "opts", ".", "index_url", ":", "finder", ".", "index_urls", "=", "[", "opts", ".", "index_url", "]", "if", "opts", ".", "no_index", "is", "True", ":", "finder", ".", "index_urls", "=", "[", "]", "if", "opts", ".", "extra_index_urls", ":", "finder", ".", "index_urls", ".", "extend", "(", "opts", ".", "extra_index_urls", ")", "if", "opts", ".", "find_links", ":", "# FIXME: it would be nice to keep track of the source", "# of the find_links: support a find-links local path", "# relative to a requirements file.", "value", "=", "opts", ".", "find_links", "[", "0", "]", "req_dir", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "filename", ")", ")", "relative_to_reqs_file", "=", "os", ".", "path", ".", "join", "(", "req_dir", ",", "value", ")", "if", "os", ".", "path", ".", "exists", "(", "relative_to_reqs_file", ")", ":", "value", "=", "relative_to_reqs_file", "finder", ".", "find_links", ".", "append", "(", "value", ")", "if", "opts", ".", "pre", ":", "finder", ".", "allow_all_prereleases", "=", "True", "if", "opts", ".", "trusted_hosts", ":", "finder", ".", "secure_origins", ".", "extend", "(", "(", "\"*\"", ",", "host", ",", "\"*\"", ")", "for", "host", "in", "opts", ".", "trusted_hosts", ")" ]
40.456
16.744
def dehtml(text): '''Remove HTML tag in input text and format the texts accordingly. ''' # added by BoPeng to handle html output from kernel # # Do not understand why, but I cannot define the class outside of the # function. try: # python 2 from HTMLParser import HTMLParser except ImportError: # python 3 from html.parser import HTMLParser # added by BoPeng to handle html output from kernel class _DeHTMLParser(HTMLParser): '''This parser analyzes input text, removes HTML tags such as <p>, <br>, <ul>, <li> etc and returns properly formatted texts. ''' def __init__(self): HTMLParser.__init__(self) self.__text = [] def handle_data(self, data): text = data.strip() if len(text) > 0: text = re.sub('[ \t\r\n]+', ' ', text) self.__text.append(text + ' ') def handle_starttag(self, tag, attrs): if tag == 'p': self.__text.append('\n\n\n\n') elif tag == 'br': self.__text.append('\n\n') elif tag == 'ul': self.__text.append('') elif tag == 'li': self.__text.append('\n\n * ') def handle_endtag(self, tag): if tag == 'ul': self.__text.append('\n\n') if tag == 'li': self.__text.append('\n\n') def handle_startendtag(self, tag, attrs): if tag == 'br': self.__text.append('\n\n') def text(self): return ''.join(self.__text).strip() try: parser = _DeHTMLParser() parser.feed(text) parser.close() return parser.text() except Exception as e: return text
[ "def", "dehtml", "(", "text", ")", ":", "# added by BoPeng to handle html output from kernel", "#", "# Do not understand why, but I cannot define the class outside of the", "# function.", "try", ":", "# python 2", "from", "HTMLParser", "import", "HTMLParser", "except", "ImportError", ":", "# python 3", "from", "html", ".", "parser", "import", "HTMLParser", "# added by BoPeng to handle html output from kernel", "class", "_DeHTMLParser", "(", "HTMLParser", ")", ":", "'''This parser analyzes input text, removes HTML tags such as\n <p>, <br>, <ul>, <li> etc and returns properly formatted texts.\n '''", "def", "__init__", "(", "self", ")", ":", "HTMLParser", ".", "__init__", "(", "self", ")", "self", ".", "__text", "=", "[", "]", "def", "handle_data", "(", "self", ",", "data", ")", ":", "text", "=", "data", ".", "strip", "(", ")", "if", "len", "(", "text", ")", ">", "0", ":", "text", "=", "re", ".", "sub", "(", "'[ \\t\\r\\n]+'", ",", "' '", ",", "text", ")", "self", ".", "__text", ".", "append", "(", "text", "+", "' '", ")", "def", "handle_starttag", "(", "self", ",", "tag", ",", "attrs", ")", ":", "if", "tag", "==", "'p'", ":", "self", ".", "__text", ".", "append", "(", "'\\n\\n\\n\\n'", ")", "elif", "tag", "==", "'br'", ":", "self", ".", "__text", ".", "append", "(", "'\\n\\n'", ")", "elif", "tag", "==", "'ul'", ":", "self", ".", "__text", ".", "append", "(", "''", ")", "elif", "tag", "==", "'li'", ":", "self", ".", "__text", ".", "append", "(", "'\\n\\n * '", ")", "def", "handle_endtag", "(", "self", ",", "tag", ")", ":", "if", "tag", "==", "'ul'", ":", "self", ".", "__text", ".", "append", "(", "'\\n\\n'", ")", "if", "tag", "==", "'li'", ":", "self", ".", "__text", ".", "append", "(", "'\\n\\n'", ")", "def", "handle_startendtag", "(", "self", ",", "tag", ",", "attrs", ")", ":", "if", "tag", "==", "'br'", ":", "self", ".", "__text", ".", "append", "(", "'\\n\\n'", ")", "def", "text", "(", "self", ")", ":", "return", "''", ".", "join", "(", "self", ".", "__text", 
")", ".", "strip", "(", ")", "try", ":", "parser", "=", "_DeHTMLParser", "(", ")", "parser", ".", "feed", "(", "text", ")", "parser", ".", "close", "(", ")", "return", "parser", ".", "text", "(", ")", "except", "Exception", "as", "e", ":", "return", "text" ]
30.62069
16.448276
def _load(self, config): """ Load this config from an existing config Parameters: ----------- config : filename, config object, or dict to load Returns: -------- params : configuration parameters """ if isstring(config): self.filename = config params = yaml.load(open(config)) elif isinstance(config, Config): # This is the copy constructor... self.filename = config.filename params = copy.deepcopy(config) elif isinstance(config, dict): params = copy.deepcopy(config) elif config is None: params = {} else: raise Exception('Unrecognized input') return params
[ "def", "_load", "(", "self", ",", "config", ")", ":", "if", "isstring", "(", "config", ")", ":", "self", ".", "filename", "=", "config", "params", "=", "yaml", ".", "load", "(", "open", "(", "config", ")", ")", "elif", "isinstance", "(", "config", ",", "Config", ")", ":", "# This is the copy constructor...", "self", ".", "filename", "=", "config", ".", "filename", "params", "=", "copy", ".", "deepcopy", "(", "config", ")", "elif", "isinstance", "(", "config", ",", "dict", ")", ":", "params", "=", "copy", ".", "deepcopy", "(", "config", ")", "elif", "config", "is", "None", ":", "params", "=", "{", "}", "else", ":", "raise", "Exception", "(", "'Unrecognized input'", ")", "return", "params" ]
28.692308
14
def header_length(header): """Calculates the ciphertext message header length, given a complete header. :param header: Complete message header object :type header: aws_encryption_sdk.structures.MessageHeader :rtype: int """ # Because encrypted data key lengths may not be knowable until the ciphertext # is received from the providers, just serialize the header directly. header_length = len(serialize_header(header)) header_length += header.algorithm.iv_len # Header Authentication IV header_length += header.algorithm.auth_len # Header Authentication Tag return header_length
[ "def", "header_length", "(", "header", ")", ":", "# Because encrypted data key lengths may not be knowable until the ciphertext", "# is received from the providers, just serialize the header directly.", "header_length", "=", "len", "(", "serialize_header", "(", "header", ")", ")", "header_length", "+=", "header", ".", "algorithm", ".", "iv_len", "# Header Authentication IV", "header_length", "+=", "header", ".", "algorithm", ".", "auth_len", "# Header Authentication Tag", "return", "header_length" ]
47.153846
21.230769
def validate_metadata_token(self, claims, endpoint): """ If the token endpoint is used in the grant type, the value of this parameter MUST be the same as the value of the "grant_type" parameter passed to the token endpoint defined in the grant type definition. """ self._grant_types.extend(endpoint._grant_types.keys()) claims.setdefault("token_endpoint_auth_methods_supported", ["client_secret_post", "client_secret_basic"]) self.validate_metadata(claims, "token_endpoint_auth_methods_supported", is_list=True) self.validate_metadata(claims, "token_endpoint_auth_signing_alg_values_supported", is_list=True) self.validate_metadata(claims, "token_endpoint", is_required=True, is_url=True)
[ "def", "validate_metadata_token", "(", "self", ",", "claims", ",", "endpoint", ")", ":", "self", ".", "_grant_types", ".", "extend", "(", "endpoint", ".", "_grant_types", ".", "keys", "(", ")", ")", "claims", ".", "setdefault", "(", "\"token_endpoint_auth_methods_supported\"", ",", "[", "\"client_secret_post\"", ",", "\"client_secret_basic\"", "]", ")", "self", ".", "validate_metadata", "(", "claims", ",", "\"token_endpoint_auth_methods_supported\"", ",", "is_list", "=", "True", ")", "self", ".", "validate_metadata", "(", "claims", ",", "\"token_endpoint_auth_signing_alg_values_supported\"", ",", "is_list", "=", "True", ")", "self", ".", "validate_metadata", "(", "claims", ",", "\"token_endpoint\"", ",", "is_required", "=", "True", ",", "is_url", "=", "True", ")" ]
58.846154
32.692308
def smart_str(s, encoding='utf-8', errors='strict'): """ Returns a bytestring version of 's', encoded as specified in 'encoding'. If strings_only is True, don't convert (some) non-string-like objects. from django """ if not isinstance(s, basestring): try: return str(s) except UnicodeEncodeError: if isinstance(s, Exception): # An Exception subclass containing non-ASCII data that doesn't # know how to print itself properly. We shouldn't raise a # further exception. return ' '.join([smart_str(arg, encoding, errors) for arg in s]) return unicode(s).encode(encoding, errors) elif isinstance(s, unicode): return s.encode(encoding, errors) elif s and encoding != 'utf-8': return s.decode('utf-8', errors).encode(encoding, errors) else: return s
[ "def", "smart_str", "(", "s", ",", "encoding", "=", "'utf-8'", ",", "errors", "=", "'strict'", ")", ":", "if", "not", "isinstance", "(", "s", ",", "basestring", ")", ":", "try", ":", "return", "str", "(", "s", ")", "except", "UnicodeEncodeError", ":", "if", "isinstance", "(", "s", ",", "Exception", ")", ":", "# An Exception subclass containing non-ASCII data that doesn't", "# know how to print itself properly. We shouldn't raise a", "# further exception.", "return", "' '", ".", "join", "(", "[", "smart_str", "(", "arg", ",", "encoding", ",", "errors", ")", "for", "arg", "in", "s", "]", ")", "return", "unicode", "(", "s", ")", ".", "encode", "(", "encoding", ",", "errors", ")", "elif", "isinstance", "(", "s", ",", "unicode", ")", ":", "return", "s", ".", "encode", "(", "encoding", ",", "errors", ")", "elif", "s", "and", "encoding", "!=", "'utf-8'", ":", "return", "s", ".", "decode", "(", "'utf-8'", ",", "errors", ")", ".", "encode", "(", "encoding", ",", "errors", ")", "else", ":", "return", "s" ]
37.416667
19.25
def http_proxy(self): """Set ivy to use an http proxy. Expects a string of the form http://<host>:<port> """ if os.getenv('HTTP_PROXY'): return os.getenv('HTTP_PROXY') if os.getenv('http_proxy'): return os.getenv('http_proxy') return self.get_options().http_proxy
[ "def", "http_proxy", "(", "self", ")", ":", "if", "os", ".", "getenv", "(", "'HTTP_PROXY'", ")", ":", "return", "os", ".", "getenv", "(", "'HTTP_PROXY'", ")", "if", "os", ".", "getenv", "(", "'http_proxy'", ")", ":", "return", "os", ".", "getenv", "(", "'http_proxy'", ")", "return", "self", ".", "get_options", "(", ")", ".", "http_proxy" ]
29.1
9.8
def xml(self, operator='set', indent = ""): """Serialize the metadata field to XML""" xml = indent + "<meta id=\"" + self.key + "\"" if operator != 'set': xml += " operator=\"" + operator + "\"" if not self.value: xml += " />" else: xml += ">" + self.value + "</meta>" return xml
[ "def", "xml", "(", "self", ",", "operator", "=", "'set'", ",", "indent", "=", "\"\"", ")", ":", "xml", "=", "indent", "+", "\"<meta id=\\\"\"", "+", "self", ".", "key", "+", "\"\\\"\"", "if", "operator", "!=", "'set'", ":", "xml", "+=", "\" operator=\\\"\"", "+", "operator", "+", "\"\\\"\"", "if", "not", "self", ".", "value", ":", "xml", "+=", "\" />\"", "else", ":", "xml", "+=", "\">\"", "+", "self", ".", "value", "+", "\"</meta>\"", "return", "xml" ]
35.4
12.5
def _to_add_with_category(self, catid): ''' Used for info2. :param catid: the uid of category ''' catinfo = MCategory.get_by_uid(catid) kwd = { 'uid': self._gen_uid(), 'userid': self.userinfo.user_name if self.userinfo else '', 'gcat0': catid, 'parentname': MCategory.get_by_uid(catinfo.pid).name, 'catname': MCategory.get_by_uid(catid).name, } self.render('autogen/add/add_{0}.html'.format(catid), userinfo=self.userinfo, kwd=kwd)
[ "def", "_to_add_with_category", "(", "self", ",", "catid", ")", ":", "catinfo", "=", "MCategory", ".", "get_by_uid", "(", "catid", ")", "kwd", "=", "{", "'uid'", ":", "self", ".", "_gen_uid", "(", ")", ",", "'userid'", ":", "self", ".", "userinfo", ".", "user_name", "if", "self", ".", "userinfo", "else", "''", ",", "'gcat0'", ":", "catid", ",", "'parentname'", ":", "MCategory", ".", "get_by_uid", "(", "catinfo", ".", "pid", ")", ".", "name", ",", "'catname'", ":", "MCategory", ".", "get_by_uid", "(", "catid", ")", ".", "name", ",", "}", "self", ".", "render", "(", "'autogen/add/add_{0}.html'", ".", "format", "(", "catid", ")", ",", "userinfo", "=", "self", ".", "userinfo", ",", "kwd", "=", "kwd", ")" ]
32.222222
19.111111
def until(coro, coro_test, assert_coro=None, *args, **kw): """ Repeatedly call `coro` coroutine function until `coro_test` returns `True`. This function is the inverse of `paco.whilst()`. This function is a coroutine. Arguments: coro (coroutinefunction): coroutine function to execute. coro_test (coroutinefunction): coroutine function to test. assert_coro (coroutinefunction): optional assertion coroutine used to determine if the test passed or not. *args (mixed): optional variadic arguments to pass to `coro` function. Raises: TypeError: if input arguments are invalid. Returns: list: result values returned by `coro`. Usage:: calls = 0 async def task(): nonlocal calls calls += 1 return calls async def calls_gt_4(): return calls > 4 await paco.until(task, calls_gt_4) # => [1, 2, 3, 4, 5] """ @asyncio.coroutine def assert_coro(value): return not value return (yield from whilst(coro, coro_test, assert_coro=assert_coro, *args, **kw))
[ "def", "until", "(", "coro", ",", "coro_test", ",", "assert_coro", "=", "None", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "@", "asyncio", ".", "coroutine", "def", "assert_coro", "(", "value", ")", ":", "return", "not", "value", "return", "(", "yield", "from", "whilst", "(", "coro", ",", "coro_test", ",", "assert_coro", "=", "assert_coro", ",", "*", "args", ",", "*", "*", "kw", ")", ")" ]
26.604651
23.72093
def init_app(self, app): """ Register this extension with the flask app :param app: A flask application """ # Save this so we can use it later in the extension if not hasattr(app, 'extensions'): # pragma: no cover app.extensions = {} app.extensions['flask-jwt-simple'] = self # Set all the default configurations for this extension self._set_default_configuration_options(app) self._set_error_handler_callbacks(app) # Set propagate exceptions, so all of our error handlers properly # work in production app.config['PROPAGATE_EXCEPTIONS'] = True
[ "def", "init_app", "(", "self", ",", "app", ")", ":", "# Save this so we can use it later in the extension", "if", "not", "hasattr", "(", "app", ",", "'extensions'", ")", ":", "# pragma: no cover", "app", ".", "extensions", "=", "{", "}", "app", ".", "extensions", "[", "'flask-jwt-simple'", "]", "=", "self", "# Set all the default configurations for this extension", "self", ".", "_set_default_configuration_options", "(", "app", ")", "self", ".", "_set_error_handler_callbacks", "(", "app", ")", "# Set propagate exceptions, so all of our error handlers properly", "# work in production", "app", ".", "config", "[", "'PROPAGATE_EXCEPTIONS'", "]", "=", "True" ]
36
16.777778
def _new_connection_file(self): """ Generate a new connection file Taken from jupyter_client/console_app.py Licensed under the BSD license """ # Check if jupyter_runtime_dir exists (Spyder addition) if not osp.isdir(jupyter_runtime_dir()): try: os.makedirs(jupyter_runtime_dir()) except (IOError, OSError): return None cf = '' while not cf: ident = str(uuid.uuid4()).split('-')[-1] cf = os.path.join(jupyter_runtime_dir(), 'kernel-%s.json' % ident) cf = cf if not os.path.exists(cf) else '' return cf
[ "def", "_new_connection_file", "(", "self", ")", ":", "# Check if jupyter_runtime_dir exists (Spyder addition)\r", "if", "not", "osp", ".", "isdir", "(", "jupyter_runtime_dir", "(", ")", ")", ":", "try", ":", "os", ".", "makedirs", "(", "jupyter_runtime_dir", "(", ")", ")", "except", "(", "IOError", ",", "OSError", ")", ":", "return", "None", "cf", "=", "''", "while", "not", "cf", ":", "ident", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", ".", "split", "(", "'-'", ")", "[", "-", "1", "]", "cf", "=", "os", ".", "path", ".", "join", "(", "jupyter_runtime_dir", "(", ")", ",", "'kernel-%s.json'", "%", "ident", ")", "cf", "=", "cf", "if", "not", "os", ".", "path", ".", "exists", "(", "cf", ")", "else", "''", "return", "cf" ]
35.421053
14.157895
def get_all_migrations(path, databases=None): """ Returns a dictionary of database => [migrations] representing all migrations contained in ``path``. """ # database: [(number, full_path)] possible_migrations = defaultdict(list) try: in_directory = sorted(get_file_list(path)) except OSError: import traceback print "An error occurred while reading migrations from %r:" % path traceback.print_exc() return {} # Iterate through our results and discover which migrations are # actually runnable for full_path in in_directory: child_path, script = os.path.split(full_path) name, ext = os.path.splitext(script) # the database component is default if this is in the root directory # is <directory> if in a subdirectory if path == child_path: db = DEFAULT_DB_ALIAS else: db = os.path.split(child_path)[-1] # filter by database if set if databases and db not in databases: continue match = MIGRATION_NAME_RE.match(name) if match is None: raise MigrationError("Invalid migration file prefix %r " "(must begin with a number)" % name) number = int(match.group(1)) if ext in [".sql", ".py"]: possible_migrations[db].append((number, full_path)) return possible_migrations
[ "def", "get_all_migrations", "(", "path", ",", "databases", "=", "None", ")", ":", "# database: [(number, full_path)]", "possible_migrations", "=", "defaultdict", "(", "list", ")", "try", ":", "in_directory", "=", "sorted", "(", "get_file_list", "(", "path", ")", ")", "except", "OSError", ":", "import", "traceback", "print", "\"An error occurred while reading migrations from %r:\"", "%", "path", "traceback", ".", "print_exc", "(", ")", "return", "{", "}", "# Iterate through our results and discover which migrations are", "# actually runnable", "for", "full_path", "in", "in_directory", ":", "child_path", ",", "script", "=", "os", ".", "path", ".", "split", "(", "full_path", ")", "name", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "script", ")", "# the database component is default if this is in the root directory", "# is <directory> if in a subdirectory", "if", "path", "==", "child_path", ":", "db", "=", "DEFAULT_DB_ALIAS", "else", ":", "db", "=", "os", ".", "path", ".", "split", "(", "child_path", ")", "[", "-", "1", "]", "# filter by database if set", "if", "databases", "and", "db", "not", "in", "databases", ":", "continue", "match", "=", "MIGRATION_NAME_RE", ".", "match", "(", "name", ")", "if", "match", "is", "None", ":", "raise", "MigrationError", "(", "\"Invalid migration file prefix %r \"", "\"(must begin with a number)\"", "%", "name", ")", "number", "=", "int", "(", "match", ".", "group", "(", "1", ")", ")", "if", "ext", "in", "[", "\".sql\"", ",", "\".py\"", "]", ":", "possible_migrations", "[", "db", "]", ".", "append", "(", "(", "number", ",", "full_path", ")", ")", "return", "possible_migrations" ]
33.581395
17.069767
def vowelinstem(self): """vowelinstem() is TRUE <=> k0,...j contains a vowel""" for i in range(self.k0, self.j + 1): if not self.cons(i): return 1 return 0
[ "def", "vowelinstem", "(", "self", ")", ":", "for", "i", "in", "range", "(", "self", ".", "k0", ",", "self", ".", "j", "+", "1", ")", ":", "if", "not", "self", ".", "cons", "(", "i", ")", ":", "return", "1", "return", "0" ]
34.5
11.333333
def _get_snpeff_transcript(self, transcript_info): """Create a transcript based on the snpeff annotation Args: transcript_info (dict): A dict with snpeff info Returns: transcript (puzzle.models.Transcript): A Transcripts """ transcript = Transcript( hgnc_symbol = transcript_info.get('Gene_Name'), transcript_id = transcript_info.get('Feature'), ensembl_id = transcript_info.get('Gene_ID'), biotype = transcript_info.get('Transcript_BioType'), consequence = transcript_info.get('Annotation'), exon = transcript_info.get('Rank'), HGVSc = transcript_info.get('HGVS.c'), HGVSp = transcript_info.get('HGVS.p') ) return transcript
[ "def", "_get_snpeff_transcript", "(", "self", ",", "transcript_info", ")", ":", "transcript", "=", "Transcript", "(", "hgnc_symbol", "=", "transcript_info", ".", "get", "(", "'Gene_Name'", ")", ",", "transcript_id", "=", "transcript_info", ".", "get", "(", "'Feature'", ")", ",", "ensembl_id", "=", "transcript_info", ".", "get", "(", "'Gene_ID'", ")", ",", "biotype", "=", "transcript_info", ".", "get", "(", "'Transcript_BioType'", ")", ",", "consequence", "=", "transcript_info", ".", "get", "(", "'Annotation'", ")", ",", "exon", "=", "transcript_info", ".", "get", "(", "'Rank'", ")", ",", "HGVSc", "=", "transcript_info", ".", "get", "(", "'HGVS.c'", ")", ",", "HGVSp", "=", "transcript_info", ".", "get", "(", "'HGVS.p'", ")", ")", "return", "transcript" ]
41.8
19.5
def create_new_project(): '''Creates a new XBMC Addon directory based on user input''' readline.parse_and_bind('tab: complete') print \ ''' xbmcswift2 - A micro-framework for creating XBMC plugins. xbmc@jonathanbeluch.com -- ''' print 'I\'m going to ask you a few questions to get this project' \ ' started.' opts = {} # Plugin Name opts['plugin_name'] = get_valid_value( 'What is your plugin name?', validate_nonblank ) # Plugin ID opts['plugin_id'] = get_valid_value( 'Enter your plugin id.', validate_pluginid, 'plugin.video.%s' % (opts['plugin_name'].lower().replace(' ', '')) ) # Parent Directory opts['parent_dir'] = get_valid_value( 'Enter parent folder (where to create project)', validate_isfolder, getcwd() ) opts['plugin_dir'] = os.path.join(opts['parent_dir'], opts['plugin_id']) assert not os.path.isdir(opts['plugin_dir']), \ 'A folder named %s already exists in %s.' % (opts['plugin_id'], opts['parent_dir']) # Provider opts['provider_name'] = get_valid_value( 'Enter provider name', validate_nonblank, ) # Create the project folder by copying over skel copytree(SKEL, opts['plugin_dir'], ignore=ignore_patterns('*.pyc')) # Walk through all the new files and fill in with out options for root, dirs, files in os.walk(opts['plugin_dir']): for filename in files: update_file(os.path.join(root, filename), opts) print 'Projects successfully created in %s.' % opts['plugin_dir'] print 'Done.'
[ "def", "create_new_project", "(", ")", ":", "readline", ".", "parse_and_bind", "(", "'tab: complete'", ")", "print", "'''\n xbmcswift2 - A micro-framework for creating XBMC plugins.\n xbmc@jonathanbeluch.com\n --\n'''", "print", "'I\\'m going to ask you a few questions to get this project'", "' started.'", "opts", "=", "{", "}", "# Plugin Name", "opts", "[", "'plugin_name'", "]", "=", "get_valid_value", "(", "'What is your plugin name?'", ",", "validate_nonblank", ")", "# Plugin ID", "opts", "[", "'plugin_id'", "]", "=", "get_valid_value", "(", "'Enter your plugin id.'", ",", "validate_pluginid", ",", "'plugin.video.%s'", "%", "(", "opts", "[", "'plugin_name'", "]", ".", "lower", "(", ")", ".", "replace", "(", "' '", ",", "''", ")", ")", ")", "# Parent Directory", "opts", "[", "'parent_dir'", "]", "=", "get_valid_value", "(", "'Enter parent folder (where to create project)'", ",", "validate_isfolder", ",", "getcwd", "(", ")", ")", "opts", "[", "'plugin_dir'", "]", "=", "os", ".", "path", ".", "join", "(", "opts", "[", "'parent_dir'", "]", ",", "opts", "[", "'plugin_id'", "]", ")", "assert", "not", "os", ".", "path", ".", "isdir", "(", "opts", "[", "'plugin_dir'", "]", ")", ",", "'A folder named %s already exists in %s.'", "%", "(", "opts", "[", "'plugin_id'", "]", ",", "opts", "[", "'parent_dir'", "]", ")", "# Provider", "opts", "[", "'provider_name'", "]", "=", "get_valid_value", "(", "'Enter provider name'", ",", "validate_nonblank", ",", ")", "# Create the project folder by copying over skel", "copytree", "(", "SKEL", ",", "opts", "[", "'plugin_dir'", "]", ",", "ignore", "=", "ignore_patterns", "(", "'*.pyc'", ")", ")", "# Walk through all the new files and fill in with out options", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "opts", "[", "'plugin_dir'", "]", ")", ":", "for", "filename", "in", "files", ":", "update_file", "(", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", ",", "opts", ")", "print", "'Projects 
successfully created in %s.'", "%", "opts", "[", "'plugin_dir'", "]", "print", "'Done.'" ]
29.8
24.018182
def consume_arguments(self, argument_list): """ Takes arguments from a list while there are parameters that can accept them """ while True: argument_count = len(argument_list) for parameter in self.values(): argument_list = parameter.consume_arguments(argument_list) if len(argument_list) == argument_count: return argument_list
[ "def", "consume_arguments", "(", "self", ",", "argument_list", ")", ":", "while", "True", ":", "argument_count", "=", "len", "(", "argument_list", ")", "for", "parameter", "in", "self", ".", "values", "(", ")", ":", "argument_list", "=", "parameter", ".", "consume_arguments", "(", "argument_list", ")", "if", "len", "(", "argument_list", ")", "==", "argument_count", ":", "return", "argument_list" ]
41.9
12.7
def print_help(self): """Prints usage of all registered commands, collapsing aliases into one record """ seen_aliases = set() print('-'*80) for cmd in sorted(self.cmds): if cmd not in self.builtin_cmds: if cmd not in seen_aliases: if cmd in self.aliases: seen_aliases.update(self.aliases[cmd]) disp = '/'.join(self.aliases[cmd]) else: disp = cmd _, parser = self.cmds[cmd] usage = parser.format_usage() print('%s: %s' % (disp, ' '.join(usage.split()[2:]))) print('External CLIs: %s' % ', '.join(sorted(self.clis)))
[ "def", "print_help", "(", "self", ")", ":", "seen_aliases", "=", "set", "(", ")", "print", "(", "'-'", "*", "80", ")", "for", "cmd", "in", "sorted", "(", "self", ".", "cmds", ")", ":", "if", "cmd", "not", "in", "self", ".", "builtin_cmds", ":", "if", "cmd", "not", "in", "seen_aliases", ":", "if", "cmd", "in", "self", ".", "aliases", ":", "seen_aliases", ".", "update", "(", "self", ".", "aliases", "[", "cmd", "]", ")", "disp", "=", "'/'", ".", "join", "(", "self", ".", "aliases", "[", "cmd", "]", ")", "else", ":", "disp", "=", "cmd", "_", ",", "parser", "=", "self", ".", "cmds", "[", "cmd", "]", "usage", "=", "parser", ".", "format_usage", "(", ")", "print", "(", "'%s: %s'", "%", "(", "disp", ",", "' '", ".", "join", "(", "usage", ".", "split", "(", ")", "[", "2", ":", "]", ")", ")", ")", "print", "(", "'External CLIs: %s'", "%", "', '", ".", "join", "(", "sorted", "(", "self", ".", "clis", ")", ")", ")" ]
41.833333
11.888889
def get_external_store(self, project_name, store_name): """ get the logstore meta info Unsuccessful opertaion will cause an LogException. :type project_name: string :param project_name: the Project name :type store_name: string :param store_name: the logstore name :return: GetLogStoreResponse :raise: LogException """ headers = {} params = {} resource = "/externalstores/" + store_name (resp, header) = self._send("GET", project_name, None, resource, params, headers) # add storeName if not existing if 'externalStoreName' not in resp: resp['externalStoreName'] = store_name return GetExternalStoreResponse(resp, header)
[ "def", "get_external_store", "(", "self", ",", "project_name", ",", "store_name", ")", ":", "headers", "=", "{", "}", "params", "=", "{", "}", "resource", "=", "\"/externalstores/\"", "+", "store_name", "(", "resp", ",", "header", ")", "=", "self", ".", "_send", "(", "\"GET\"", ",", "project_name", ",", "None", ",", "resource", ",", "params", ",", "headers", ")", "# add storeName if not existing\r", "if", "'externalStoreName'", "not", "in", "resp", ":", "resp", "[", "'externalStoreName'", "]", "=", "store_name", "return", "GetExternalStoreResponse", "(", "resp", ",", "header", ")" ]
31.08
18.64
def truncate_table(self, table): """ Responsys.truncateTable call Accepts: InteractObject table Returns True on success """ table = table.get_soap_object(self.client) return self.call('truncateTable', table)
[ "def", "truncate_table", "(", "self", ",", "table", ")", ":", "table", "=", "table", ".", "get_soap_object", "(", "self", ".", "client", ")", "return", "self", ".", "call", "(", "'truncateTable'", ",", "table", ")" ]
26
14.7
def get_location(query, format, api_key): """Get geographic data of a lab in a coherent way for all labs.""" # Play nice with the API... sleep(1) geolocator = OpenCage(api_key=api_key, timeout=10) # Variables for storing the data data = {"city": None, "address_1": None, "postal_code": None, "country": None, "county": None, "state": None, "country_code": None, "latitude": None, "longitude": None, "continent": None} road = "" number = "" # Default None values location_data = {"city": None, "road": None, "house_number": None, "postcode": None, "country": None, "county": None, "state": None, "ISO_3166-1_alpha-2": None, "country_code": None, "lat": None, "lng": None} # Reverse geocoding ... from coordinates to address if format == "reverse": # If the query (coordinates) is not empty if query is None or len(query) < 3: pass else: location = geolocator.reverse(query) if location is not None: location_data = location[0].raw[u'components'] location_data["lat"] = location[0].raw[u'geometry']["lat"] location_data["lng"] = location[0].raw[u'geometry']["lng"] # Direct geocoding ... 
from address to coordinates and full address if format == "direct": # If the query (address) is not empty if query is None or len(query) < 3: pass else: location = geolocator.geocode(query) if location is not None: location_data = location.raw[u'components'] location_data["lat"] = location.raw[u'geometry']["lat"] location_data["lng"] = location.raw[u'geometry']["lng"] # Extract the meaningful data for component in location_data: if component == "town" or component == "city": data["city"] = location_data[component] if component == "road": road = location_data[component] if component == "house_number": number = location_data[component] if component == "postcode": data["postal_code"] = location_data[component] if component == "country": data["country"] = location_data[component] if component == "county": data["county"] = location_data[component] if component == "state": data["state"] = location_data[component] if component == "ISO_3166-1_alpha-2": data["country_code"] = location_data[component] # The address need to be reconstructed data["address_1"] = unicode(road) + " " + unicode(number) data["latitude"] = location_data["lat"] data["longitude"] = location_data["lng"] # Format the country code to three letters try: country_data = transformations.cca2_to_ccn(data["country_code"]) data["country_code"] = transformations.ccn_to_cca3(country_data) except: data["country_code"] = None # Get the continent try: country_data = transformations.cc_to_cn(data["country_code"]) data["continent"] = transformations.cn_to_ctn(country_data) except: data["continent"] = None # Return the final data return data
[ "def", "get_location", "(", "query", ",", "format", ",", "api_key", ")", ":", "# Play nice with the API...", "sleep", "(", "1", ")", "geolocator", "=", "OpenCage", "(", "api_key", "=", "api_key", ",", "timeout", "=", "10", ")", "# Variables for storing the data", "data", "=", "{", "\"city\"", ":", "None", ",", "\"address_1\"", ":", "None", ",", "\"postal_code\"", ":", "None", ",", "\"country\"", ":", "None", ",", "\"county\"", ":", "None", ",", "\"state\"", ":", "None", ",", "\"country_code\"", ":", "None", ",", "\"latitude\"", ":", "None", ",", "\"longitude\"", ":", "None", ",", "\"continent\"", ":", "None", "}", "road", "=", "\"\"", "number", "=", "\"\"", "# Default None values", "location_data", "=", "{", "\"city\"", ":", "None", ",", "\"road\"", ":", "None", ",", "\"house_number\"", ":", "None", ",", "\"postcode\"", ":", "None", ",", "\"country\"", ":", "None", ",", "\"county\"", ":", "None", ",", "\"state\"", ":", "None", ",", "\"ISO_3166-1_alpha-2\"", ":", "None", ",", "\"country_code\"", ":", "None", ",", "\"lat\"", ":", "None", ",", "\"lng\"", ":", "None", "}", "# Reverse geocoding ... from coordinates to address", "if", "format", "==", "\"reverse\"", ":", "# If the query (coordinates) is not empty", "if", "query", "is", "None", "or", "len", "(", "query", ")", "<", "3", ":", "pass", "else", ":", "location", "=", "geolocator", ".", "reverse", "(", "query", ")", "if", "location", "is", "not", "None", ":", "location_data", "=", "location", "[", "0", "]", ".", "raw", "[", "u'components'", "]", "location_data", "[", "\"lat\"", "]", "=", "location", "[", "0", "]", ".", "raw", "[", "u'geometry'", "]", "[", "\"lat\"", "]", "location_data", "[", "\"lng\"", "]", "=", "location", "[", "0", "]", ".", "raw", "[", "u'geometry'", "]", "[", "\"lng\"", "]", "# Direct geocoding ... 
from address to coordinates and full address", "if", "format", "==", "\"direct\"", ":", "# If the query (address) is not empty", "if", "query", "is", "None", "or", "len", "(", "query", ")", "<", "3", ":", "pass", "else", ":", "location", "=", "geolocator", ".", "geocode", "(", "query", ")", "if", "location", "is", "not", "None", ":", "location_data", "=", "location", ".", "raw", "[", "u'components'", "]", "location_data", "[", "\"lat\"", "]", "=", "location", ".", "raw", "[", "u'geometry'", "]", "[", "\"lat\"", "]", "location_data", "[", "\"lng\"", "]", "=", "location", ".", "raw", "[", "u'geometry'", "]", "[", "\"lng\"", "]", "# Extract the meaningful data", "for", "component", "in", "location_data", ":", "if", "component", "==", "\"town\"", "or", "component", "==", "\"city\"", ":", "data", "[", "\"city\"", "]", "=", "location_data", "[", "component", "]", "if", "component", "==", "\"road\"", ":", "road", "=", "location_data", "[", "component", "]", "if", "component", "==", "\"house_number\"", ":", "number", "=", "location_data", "[", "component", "]", "if", "component", "==", "\"postcode\"", ":", "data", "[", "\"postal_code\"", "]", "=", "location_data", "[", "component", "]", "if", "component", "==", "\"country\"", ":", "data", "[", "\"country\"", "]", "=", "location_data", "[", "component", "]", "if", "component", "==", "\"county\"", ":", "data", "[", "\"county\"", "]", "=", "location_data", "[", "component", "]", "if", "component", "==", "\"state\"", ":", "data", "[", "\"state\"", "]", "=", "location_data", "[", "component", "]", "if", "component", "==", "\"ISO_3166-1_alpha-2\"", ":", "data", "[", "\"country_code\"", "]", "=", "location_data", "[", "component", "]", "# The address need to be reconstructed", "data", "[", "\"address_1\"", "]", "=", "unicode", "(", "road", ")", "+", "\" \"", "+", "unicode", "(", "number", ")", "data", "[", "\"latitude\"", "]", "=", "location_data", "[", "\"lat\"", "]", "data", "[", "\"longitude\"", "]", "=", "location_data", "[", 
"\"lng\"", "]", "# Format the country code to three letters", "try", ":", "country_data", "=", "transformations", ".", "cca2_to_ccn", "(", "data", "[", "\"country_code\"", "]", ")", "data", "[", "\"country_code\"", "]", "=", "transformations", ".", "ccn_to_cca3", "(", "country_data", ")", "except", ":", "data", "[", "\"country_code\"", "]", "=", "None", "# Get the continent", "try", ":", "country_data", "=", "transformations", ".", "cc_to_cn", "(", "data", "[", "\"country_code\"", "]", ")", "data", "[", "\"continent\"", "]", "=", "transformations", ".", "cn_to_ctn", "(", "country_data", ")", "except", ":", "data", "[", "\"continent\"", "]", "=", "None", "# Return the final data", "return", "data" ]
37.27957
14.870968
def top10(rest): """ Return the top n (default 10) highest entities by Karmic value. Use negative numbers for the bottom N. """ if rest: topn = int(rest) else: topn = 10 selection = Karma.store.list(topn) res = ' '.join('(%s: %s)' % (', '.join(n), k) for n, k in selection) return res
[ "def", "top10", "(", "rest", ")", ":", "if", "rest", ":", "topn", "=", "int", "(", "rest", ")", "else", ":", "topn", "=", "10", "selection", "=", "Karma", ".", "store", ".", "list", "(", "topn", ")", "res", "=", "' '", ".", "join", "(", "'(%s: %s)'", "%", "(", "', '", ".", "join", "(", "n", ")", ",", "k", ")", "for", "n", ",", "k", "in", "selection", ")", "return", "res" ]
23.833333
19
def highlight_code(text, lexer_name='python', **kwargs): """ Highlights a block of text using ANSI tags based on language syntax. Args: text (str): plain text to highlight lexer_name (str): name of language **kwargs: passed to pygments.lexers.get_lexer_by_name Returns: str: text : highlighted text If pygments is not installed, the plain text is returned. CommandLine: python -c "import pygments.formatters; print(list(pygments.formatters.get_all_formatters()))" Example: >>> import ubelt as ub >>> text = 'import ubelt as ub; print(ub)' >>> new_text = ub.highlight_code(text) >>> print(new_text) """ # Resolve extensions to languages lexer_name = { 'py': 'python', 'h': 'cpp', 'cpp': 'cpp', 'cxx': 'cpp', 'c': 'cpp', }.get(lexer_name.replace('.', ''), lexer_name) try: import pygments import pygments.lexers import pygments.formatters import pygments.formatters.terminal if sys.platform.startswith('win32'): # nocover # Hack on win32 to support colored output import colorama colorama.init() formater = pygments.formatters.terminal.TerminalFormatter(bg='dark') lexer = pygments.lexers.get_lexer_by_name(lexer_name, **kwargs) new_text = pygments.highlight(text, lexer, formater) except ImportError: # nocover import warnings warnings.warn('pygments is not installed, code will not be highlighted') new_text = text return new_text
[ "def", "highlight_code", "(", "text", ",", "lexer_name", "=", "'python'", ",", "*", "*", "kwargs", ")", ":", "# Resolve extensions to languages", "lexer_name", "=", "{", "'py'", ":", "'python'", ",", "'h'", ":", "'cpp'", ",", "'cpp'", ":", "'cpp'", ",", "'cxx'", ":", "'cpp'", ",", "'c'", ":", "'cpp'", ",", "}", ".", "get", "(", "lexer_name", ".", "replace", "(", "'.'", ",", "''", ")", ",", "lexer_name", ")", "try", ":", "import", "pygments", "import", "pygments", ".", "lexers", "import", "pygments", ".", "formatters", "import", "pygments", ".", "formatters", ".", "terminal", "if", "sys", ".", "platform", ".", "startswith", "(", "'win32'", ")", ":", "# nocover", "# Hack on win32 to support colored output", "import", "colorama", "colorama", ".", "init", "(", ")", "formater", "=", "pygments", ".", "formatters", ".", "terminal", ".", "TerminalFormatter", "(", "bg", "=", "'dark'", ")", "lexer", "=", "pygments", ".", "lexers", ".", "get_lexer_by_name", "(", "lexer_name", ",", "*", "*", "kwargs", ")", "new_text", "=", "pygments", ".", "highlight", "(", "text", ",", "lexer", ",", "formater", ")", "except", "ImportError", ":", "# nocover", "import", "warnings", "warnings", ".", "warn", "(", "'pygments is not installed, code will not be highlighted'", ")", "new_text", "=", "text", "return", "new_text" ]
31.8
20.8
def set_state(self, state): """Set the runtime state of the Controller. Use the internal constants to ensure proper state values: - :attr:`Controller.STATE_INITIALIZING` - :attr:`Controller.STATE_ACTIVE` - :attr:`Controller.STATE_IDLE` - :attr:`Controller.STATE_SLEEPING` - :attr:`Controller.STATE_STOP_REQUESTED` - :attr:`Controller.STATE_STOPPING` - :attr:`Controller.STATE_STOPPED` :param int state: The runtime state :raises: ValueError """ if state == self._state: return elif state not in self._STATES.keys(): raise ValueError('Invalid state {}'.format(state)) # Check for invalid transitions if self.is_waiting_to_stop and state not in [self.STATE_STOPPING, self.STATE_STOPPED]: LOGGER.warning('Attempt to set invalid state while waiting to ' 'shutdown: %s ', self._STATES[state]) return elif self.is_stopping and state != self.STATE_STOPPED: LOGGER.warning('Attempt to set invalid post shutdown state: %s', self._STATES[state]) return elif self.is_running and state not in [self.STATE_ACTIVE, self.STATE_IDLE, self.STATE_SLEEPING, self.STATE_STOP_REQUESTED, self.STATE_STOPPING]: LOGGER.warning('Attempt to set invalid post running state: %s', self._STATES[state]) return elif self.is_sleeping and state not in [self.STATE_ACTIVE, self.STATE_IDLE, self.STATE_STOP_REQUESTED, self.STATE_STOPPING]: LOGGER.warning('Attempt to set invalid post sleeping state: %s', self._STATES[state]) return LOGGER.debug('State changed from %s to %s', self._STATES[self._state], self._STATES[state]) self._state = state
[ "def", "set_state", "(", "self", ",", "state", ")", ":", "if", "state", "==", "self", ".", "_state", ":", "return", "elif", "state", "not", "in", "self", ".", "_STATES", ".", "keys", "(", ")", ":", "raise", "ValueError", "(", "'Invalid state {}'", ".", "format", "(", "state", ")", ")", "# Check for invalid transitions", "if", "self", ".", "is_waiting_to_stop", "and", "state", "not", "in", "[", "self", ".", "STATE_STOPPING", ",", "self", ".", "STATE_STOPPED", "]", ":", "LOGGER", ".", "warning", "(", "'Attempt to set invalid state while waiting to '", "'shutdown: %s '", ",", "self", ".", "_STATES", "[", "state", "]", ")", "return", "elif", "self", ".", "is_stopping", "and", "state", "!=", "self", ".", "STATE_STOPPED", ":", "LOGGER", ".", "warning", "(", "'Attempt to set invalid post shutdown state: %s'", ",", "self", ".", "_STATES", "[", "state", "]", ")", "return", "elif", "self", ".", "is_running", "and", "state", "not", "in", "[", "self", ".", "STATE_ACTIVE", ",", "self", ".", "STATE_IDLE", ",", "self", ".", "STATE_SLEEPING", ",", "self", ".", "STATE_STOP_REQUESTED", ",", "self", ".", "STATE_STOPPING", "]", ":", "LOGGER", ".", "warning", "(", "'Attempt to set invalid post running state: %s'", ",", "self", ".", "_STATES", "[", "state", "]", ")", "return", "elif", "self", ".", "is_sleeping", "and", "state", "not", "in", "[", "self", ".", "STATE_ACTIVE", ",", "self", ".", "STATE_IDLE", ",", "self", ".", "STATE_STOP_REQUESTED", ",", "self", ".", "STATE_STOPPING", "]", ":", "LOGGER", ".", "warning", "(", "'Attempt to set invalid post sleeping state: %s'", ",", "self", ".", "_STATES", "[", "state", "]", ")", "return", "LOGGER", ".", "debug", "(", "'State changed from %s to %s'", ",", "self", ".", "_STATES", "[", "self", ".", "_state", "]", ",", "self", ".", "_STATES", "[", "state", "]", ")", "self", ".", "_state", "=", "state" ]
41.981481
21.111111
def check_args(state, name, missing_msg=None): """Check whether a function argument is specified. This function can follow ``check_function()`` in an SCT chain and verifies whether an argument is specified. If you want to go on and check whether the argument was correctly specified, you can can continue chaining with ``has_equal_value()`` (value-based check) or ``has_equal_ast()`` (AST-based check) This function can also follow ``check_function_def()`` or ``check_lambda_function()`` to see if arguments have been specified. Args: name (str): the name of the argument for which you want to check it is specified. This can also be a number, in which case it refers to the positional arguments. Named argumetns take precedence. missing_msg (str): If specified, this overrides an automatically generated feedback message in case the student did specify the argument. state (State): State object that is passed from the SCT Chain (don't specify this). :Examples: Student and solution code:: import numpy as np arr = np.array([1, 2, 3, 4, 5]) np.mean(arr) SCT:: # Verify whether arr was correctly set in np.mean # has_equal_value() checks the value of arr, used to set argument a Ex().check_function('numpy.mean').check_args('a').has_equal_value() # Verify whether arr was correctly set in np.mean # has_equal_ast() checks the expression used to set argument a Ex().check_function('numpy.mean').check_args('a').has_equal_ast() Student and solution code:: def my_power(x): print("calculating sqrt...") return(x * x) SCT:: Ex().check_function_def('my_power').multi( check_args('x') # will fail if student used y as arg check_args(0) # will still pass if student used y as arg ) """ if missing_msg is None: missing_msg = "Did you specify the {{part}}?" 
if name in ["*args", "**kwargs"]: # for check_function_def return check_part(state, name, name, missing_msg=missing_msg) else: if isinstance(name, list): # dealing with args or kwargs if name[0] == "args": arg_str = "{} argument passed as a variable length argument".format( get_ord(name[1] + 1) ) else: arg_str = "argument `{}`".format(name[1]) else: arg_str = ( "{} argument".format(get_ord(name + 1)) if isinstance(name, int) else "argument `{}`".format(name) ) return check_part_index(state, "args", name, arg_str, missing_msg=missing_msg)
[ "def", "check_args", "(", "state", ",", "name", ",", "missing_msg", "=", "None", ")", ":", "if", "missing_msg", "is", "None", ":", "missing_msg", "=", "\"Did you specify the {{part}}?\"", "if", "name", "in", "[", "\"*args\"", ",", "\"**kwargs\"", "]", ":", "# for check_function_def", "return", "check_part", "(", "state", ",", "name", ",", "name", ",", "missing_msg", "=", "missing_msg", ")", "else", ":", "if", "isinstance", "(", "name", ",", "list", ")", ":", "# dealing with args or kwargs", "if", "name", "[", "0", "]", "==", "\"args\"", ":", "arg_str", "=", "\"{} argument passed as a variable length argument\"", ".", "format", "(", "get_ord", "(", "name", "[", "1", "]", "+", "1", ")", ")", "else", ":", "arg_str", "=", "\"argument `{}`\"", ".", "format", "(", "name", "[", "1", "]", ")", "else", ":", "arg_str", "=", "(", "\"{} argument\"", ".", "format", "(", "get_ord", "(", "name", "+", "1", ")", ")", "if", "isinstance", "(", "name", ",", "int", ")", "else", "\"argument `{}`\"", ".", "format", "(", "name", ")", ")", "return", "check_part_index", "(", "state", ",", "\"args\"", ",", "name", ",", "arg_str", ",", "missing_msg", "=", "missing_msg", ")" ]
40.333333
29.333333
def describe_enum_value(enum_value): """Build descriptor for Enum instance. Args: enum_value: Enum value to provide descriptor for. Returns: Initialized EnumValueDescriptor instance describing the Enum instance. """ enum_value_descriptor = EnumValueDescriptor() enum_value_descriptor.name = six.text_type(enum_value.name) enum_value_descriptor.number = enum_value.number return enum_value_descriptor
[ "def", "describe_enum_value", "(", "enum_value", ")", ":", "enum_value_descriptor", "=", "EnumValueDescriptor", "(", ")", "enum_value_descriptor", ".", "name", "=", "six", ".", "text_type", "(", "enum_value", ".", "name", ")", "enum_value_descriptor", ".", "number", "=", "enum_value", ".", "number", "return", "enum_value_descriptor" ]
33.307692
18.923077
def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_2_0): """ Write the DefaultsInformation structure encoding to the data stream. Args: output_buffer (stream): A data stream in which to encode Attributes structure data, supporting a write method. kmip_version (enum): A KMIPVersion enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 2.0. Raises: InvalidField: Raised if the object defaults field is not defined. VersionNotSupported: Raised when a KMIP version is provided that does not support the DefaultsInformation structure. """ if kmip_version < enums.KMIPVersion.KMIP_2_0: raise exceptions.VersionNotSupported( "KMIP {} does not support the DefaultsInformation " "object.".format( kmip_version.value ) ) local_buffer = BytearrayStream() if self._object_defaults: for object_default in self._object_defaults: object_default.write(local_buffer, kmip_version=kmip_version) else: raise exceptions.InvalidField( "The DefaultsInformation structure is missing the object " "defaults field." ) self.length = local_buffer.length() super(DefaultsInformation, self).write( output_buffer, kmip_version=kmip_version ) output_buffer.write(local_buffer.buffer)
[ "def", "write", "(", "self", ",", "output_buffer", ",", "kmip_version", "=", "enums", ".", "KMIPVersion", ".", "KMIP_2_0", ")", ":", "if", "kmip_version", "<", "enums", ".", "KMIPVersion", ".", "KMIP_2_0", ":", "raise", "exceptions", ".", "VersionNotSupported", "(", "\"KMIP {} does not support the DefaultsInformation \"", "\"object.\"", ".", "format", "(", "kmip_version", ".", "value", ")", ")", "local_buffer", "=", "BytearrayStream", "(", ")", "if", "self", ".", "_object_defaults", ":", "for", "object_default", "in", "self", ".", "_object_defaults", ":", "object_default", ".", "write", "(", "local_buffer", ",", "kmip_version", "=", "kmip_version", ")", "else", ":", "raise", "exceptions", ".", "InvalidField", "(", "\"The DefaultsInformation structure is missing the object \"", "\"defaults field.\"", ")", "self", ".", "length", "=", "local_buffer", ".", "length", "(", ")", "super", "(", "DefaultsInformation", ",", "self", ")", ".", "write", "(", "output_buffer", ",", "kmip_version", "=", "kmip_version", ")", "output_buffer", ".", "write", "(", "local_buffer", ".", "buffer", ")" ]
39.04878
21.439024
def items(self): """Get an iter of VenvDirs and VenvFiles within the directory.""" contents = self.paths contents = ( BinFile(path.path) if path.is_file else BinDir(path.path) for path in contents ) return contents
[ "def", "items", "(", "self", ")", ":", "contents", "=", "self", ".", "paths", "contents", "=", "(", "BinFile", "(", "path", ".", "path", ")", "if", "path", ".", "is_file", "else", "BinDir", "(", "path", ".", "path", ")", "for", "path", "in", "contents", ")", "return", "contents" ]
33.875
17.5
def settings(self): '''Generator which returns all of the statements in all of the settings tables''' for table in self.tables: if isinstance(table, SettingTable): for statement in table.statements: yield statement
[ "def", "settings", "(", "self", ")", ":", "for", "table", "in", "self", ".", "tables", ":", "if", "isinstance", "(", "table", ",", "SettingTable", ")", ":", "for", "statement", "in", "table", ".", "statements", ":", "yield", "statement" ]
45.5
16.5
def generateIntrospectionXML(objectPath, exportedObjects): """ Generates the introspection XML for an object path or partial object path that matches exported objects. This allows for browsing the exported objects with tools such as d-feet. @rtype: C{string} """ l = [_dtd_decl] l.append('<node name="%s">' % (objectPath,)) obj = exportedObjects.get(objectPath, None) if obj is not None: for i in obj.getInterfaces(): l.append(i.introspectionXml) l.append(_intro) # make sure objectPath ends with '/' to only get partial matches based on # the full path, not a part of a subpath if not objectPath.endswith('/'): objectPath += '/' matches = [] for path in exportedObjects.keys(): if path.startswith(objectPath): path = path[len(objectPath):].partition('/')[0] if path not in matches: matches.append(path) if obj is None and not matches: return None for m in matches: l.append('<node name="%s"/>' % m) l.append('</node>') return '\n'.join(l)
[ "def", "generateIntrospectionXML", "(", "objectPath", ",", "exportedObjects", ")", ":", "l", "=", "[", "_dtd_decl", "]", "l", ".", "append", "(", "'<node name=\"%s\">'", "%", "(", "objectPath", ",", ")", ")", "obj", "=", "exportedObjects", ".", "get", "(", "objectPath", ",", "None", ")", "if", "obj", "is", "not", "None", ":", "for", "i", "in", "obj", ".", "getInterfaces", "(", ")", ":", "l", ".", "append", "(", "i", ".", "introspectionXml", ")", "l", ".", "append", "(", "_intro", ")", "# make sure objectPath ends with '/' to only get partial matches based on", "# the full path, not a part of a subpath", "if", "not", "objectPath", ".", "endswith", "(", "'/'", ")", ":", "objectPath", "+=", "'/'", "matches", "=", "[", "]", "for", "path", "in", "exportedObjects", ".", "keys", "(", ")", ":", "if", "path", ".", "startswith", "(", "objectPath", ")", ":", "path", "=", "path", "[", "len", "(", "objectPath", ")", ":", "]", ".", "partition", "(", "'/'", ")", "[", "0", "]", "if", "path", "not", "in", "matches", ":", "matches", ".", "append", "(", "path", ")", "if", "obj", "is", "None", "and", "not", "matches", ":", "return", "None", "for", "m", "in", "matches", ":", "l", ".", "append", "(", "'<node name=\"%s\"/>'", "%", "m", ")", "l", ".", "append", "(", "'</node>'", ")", "return", "'\\n'", ".", "join", "(", "l", ")" ]
29.351351
17.891892
def delete_scan(self, scan_id): """ Delete a scan if fully finished. """ if self.get_status(scan_id) == ScanStatus.RUNNING: return False self.scans_table.pop(scan_id) if len(self.scans_table) == 0: del self.data_manager self.data_manager = None return True
[ "def", "delete_scan", "(", "self", ",", "scan_id", ")", ":", "if", "self", ".", "get_status", "(", "scan_id", ")", "==", "ScanStatus", ".", "RUNNING", ":", "return", "False", "self", ".", "scans_table", ".", "pop", "(", "scan_id", ")", "if", "len", "(", "self", ".", "scans_table", ")", "==", "0", ":", "del", "self", ".", "data_manager", "self", ".", "data_manager", "=", "None", "return", "True" ]
32.4
12
async def peek(self, task_id): """ Get task without changing its state :param task_id: Task id :return: Task instance """ args = (task_id,) res = await self.conn.call(self.__funcs['peek'], args) return self._create_task(res.body)
[ "async", "def", "peek", "(", "self", ",", "task_id", ")", ":", "args", "=", "(", "task_id", ",", ")", "res", "=", "await", "self", ".", "conn", ".", "call", "(", "self", ".", "__funcs", "[", "'peek'", "]", ",", "args", ")", "return", "self", ".", "_create_task", "(", "res", ".", "body", ")" ]
27
13.363636
def gridfs_namespace(self, plain_src_ns): """Given a plain source namespace, return the corresponding plain target namespace if this namespace is a gridfs collection. """ namespace = self.lookup(plain_src_ns) if namespace and namespace.gridfs: return namespace.dest_name return None
[ "def", "gridfs_namespace", "(", "self", ",", "plain_src_ns", ")", ":", "namespace", "=", "self", ".", "lookup", "(", "plain_src_ns", ")", "if", "namespace", "and", "namespace", ".", "gridfs", ":", "return", "namespace", ".", "dest_name", "return", "None" ]
41.875
7.125
def boost_ranks(job, isoform_expression, merged_mhc_calls, transgene_out, univ_options, rank_boost_options): """ This is the final module in the pipeline. It will call the rank boosting R script. This module corresponds to node 21 in the tree """ job.fileStore.logToMaster('Running boost_ranks on %s' % univ_options['patient']) work_dir = os.path.join(job.fileStore.getLocalTempDir(), univ_options['patient']) os.mkdir(work_dir) input_files = { 'rsem_quant.tsv': isoform_expression, 'mhci_merged_files.tsv': merged_mhc_calls['mhci_merged_files.list'], 'mhcii_merged_files.tsv': merged_mhc_calls['mhcii_merged_files.list'], 'mhci_peptides.faa': transgene_out['transgened_tumor_10_mer_snpeffed.faa'], 'mhcii_peptides.faa': transgene_out['transgened_tumor_15_mer_snpeffed.faa']} input_files = get_files_from_filestore(job, input_files, work_dir, docker=True) output_files = {} for mhc in ('mhci', 'mhcii'): parameters = [mhc, input_files[''.join([mhc, '_merged_files.tsv'])], input_files['rsem_quant.tsv'], input_files[''.join([mhc, '_peptides.faa'])], rank_boost_options[''.join([mhc, '_combo'])] ] docker_call(tool='rankboost', tool_parameters=parameters, work_dir=work_dir, dockerhub=univ_options['dockerhub']) output_files[mhc] = { ''.join([mhc, '_concise_results.tsv']): job.fileStore.writeGlobalFile(''.join([work_dir, '/', mhc, '_merged_files_concise_results.tsv'])), ''.join([mhc, '_detailed_results.tsv']): job.fileStore.writeGlobalFile(''.join([work_dir, '/', mhc, '_merged_files_detailed_results.tsv']))} export_results(work_dir, univ_options) return output_files
[ "def", "boost_ranks", "(", "job", ",", "isoform_expression", ",", "merged_mhc_calls", ",", "transgene_out", ",", "univ_options", ",", "rank_boost_options", ")", ":", "job", ".", "fileStore", ".", "logToMaster", "(", "'Running boost_ranks on %s'", "%", "univ_options", "[", "'patient'", "]", ")", "work_dir", "=", "os", ".", "path", ".", "join", "(", "job", ".", "fileStore", ".", "getLocalTempDir", "(", ")", ",", "univ_options", "[", "'patient'", "]", ")", "os", ".", "mkdir", "(", "work_dir", ")", "input_files", "=", "{", "'rsem_quant.tsv'", ":", "isoform_expression", ",", "'mhci_merged_files.tsv'", ":", "merged_mhc_calls", "[", "'mhci_merged_files.list'", "]", ",", "'mhcii_merged_files.tsv'", ":", "merged_mhc_calls", "[", "'mhcii_merged_files.list'", "]", ",", "'mhci_peptides.faa'", ":", "transgene_out", "[", "'transgened_tumor_10_mer_snpeffed.faa'", "]", ",", "'mhcii_peptides.faa'", ":", "transgene_out", "[", "'transgened_tumor_15_mer_snpeffed.faa'", "]", "}", "input_files", "=", "get_files_from_filestore", "(", "job", ",", "input_files", ",", "work_dir", ",", "docker", "=", "True", ")", "output_files", "=", "{", "}", "for", "mhc", "in", "(", "'mhci'", ",", "'mhcii'", ")", ":", "parameters", "=", "[", "mhc", ",", "input_files", "[", "''", ".", "join", "(", "[", "mhc", ",", "'_merged_files.tsv'", "]", ")", "]", ",", "input_files", "[", "'rsem_quant.tsv'", "]", ",", "input_files", "[", "''", ".", "join", "(", "[", "mhc", ",", "'_peptides.faa'", "]", ")", "]", ",", "rank_boost_options", "[", "''", ".", "join", "(", "[", "mhc", ",", "'_combo'", "]", ")", "]", "]", "docker_call", "(", "tool", "=", "'rankboost'", ",", "tool_parameters", "=", "parameters", ",", "work_dir", "=", "work_dir", ",", "dockerhub", "=", "univ_options", "[", "'dockerhub'", "]", ")", "output_files", "[", "mhc", "]", "=", "{", "''", ".", "join", "(", "[", "mhc", ",", "'_concise_results.tsv'", "]", ")", ":", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "''", ".", "join", "(", "[", 
"work_dir", ",", "'/'", ",", "mhc", ",", "'_merged_files_concise_results.tsv'", "]", ")", ")", ",", "''", ".", "join", "(", "[", "mhc", ",", "'_detailed_results.tsv'", "]", ")", ":", "job", ".", "fileStore", ".", "writeGlobalFile", "(", "''", ".", "join", "(", "[", "work_dir", ",", "'/'", ",", "mhc", ",", "'_merged_files_detailed_results.tsv'", "]", ")", ")", "}", "export_results", "(", "work_dir", ",", "univ_options", ")", "return", "output_files" ]
53.216216
25.648649
def lookup_endpoint(cli): """Looks up the application endpoint from dotcloud""" url = '/applications/{0}/environment'.format(APPNAME) environ = cli.user.get(url).item port = environ['DOTCLOUD_SATELLITE_ZMQ_PORT'] host = socket.gethostbyname(environ['DOTCLOUD_SATELLITE_ZMQ_HOST']) return "tcp://{0}:{1}".format(host, port)
[ "def", "lookup_endpoint", "(", "cli", ")", ":", "url", "=", "'/applications/{0}/environment'", ".", "format", "(", "APPNAME", ")", "environ", "=", "cli", ".", "user", ".", "get", "(", "url", ")", ".", "item", "port", "=", "environ", "[", "'DOTCLOUD_SATELLITE_ZMQ_PORT'", "]", "host", "=", "socket", ".", "gethostbyname", "(", "environ", "[", "'DOTCLOUD_SATELLITE_ZMQ_HOST'", "]", ")", "return", "\"tcp://{0}:{1}\"", ".", "format", "(", "host", ",", "port", ")" ]
48.571429
11.571429
def _get_hydrated_path(field): """Return HydratedPath object for file-type field.""" # Get only file path if whole file object is given. if isinstance(field, str) and hasattr(field, 'file_name'): # field is already actually a HydratedPath object return field if isinstance(field, dict) and 'file' in field: hydrated_path = field['file'] if not hasattr(hydrated_path, 'file_name'): raise TypeError("Filter argument must be a valid file-type field.") return hydrated_path
[ "def", "_get_hydrated_path", "(", "field", ")", ":", "# Get only file path if whole file object is given.", "if", "isinstance", "(", "field", ",", "str", ")", "and", "hasattr", "(", "field", ",", "'file_name'", ")", ":", "# field is already actually a HydratedPath object", "return", "field", "if", "isinstance", "(", "field", ",", "dict", ")", "and", "'file'", "in", "field", ":", "hydrated_path", "=", "field", "[", "'file'", "]", "if", "not", "hasattr", "(", "hydrated_path", ",", "'file_name'", ")", ":", "raise", "TypeError", "(", "\"Filter argument must be a valid file-type field.\"", ")", "return", "hydrated_path" ]
36.785714
19.714286
def deconstruct(self): """Gets the values to pass to :see:__init__ when re-creating this object.""" name, path, args, kwargs = super( HStoreField, self).deconstruct() if self.uniqueness is not None: kwargs['uniqueness'] = self.uniqueness if self.required is not None: kwargs['required'] = self.required return name, path, args, kwargs
[ "def", "deconstruct", "(", "self", ")", ":", "name", ",", "path", ",", "args", ",", "kwargs", "=", "super", "(", "HStoreField", ",", "self", ")", ".", "deconstruct", "(", ")", "if", "self", ".", "uniqueness", "is", "not", "None", ":", "kwargs", "[", "'uniqueness'", "]", "=", "self", ".", "uniqueness", "if", "self", ".", "required", "is", "not", "None", ":", "kwargs", "[", "'required'", "]", "=", "self", ".", "required", "return", "name", ",", "path", ",", "args", ",", "kwargs" ]
29.214286
14.571429
def next_state(self): """This is a method that will be called when the time remaining ends. The current state can be: roasting, cooling, idle, sleeping, connecting, or unkown.""" self.active_recipe_item += 1 if self.active_recipe_item >= len(self.recipe): # we're done! return # show state step on screen print("--------------------------------------------") print("Setting next process step: %d" % self.active_recipe_item) print("time:%d, target: %ddegF, fan: %d, state: %s" % (self.recipe[self.active_recipe_item]['time_remaining'], self.recipe[self.active_recipe_item]['target_temp'], self.recipe[self.active_recipe_item]['fan_speed'], self.recipe[self.active_recipe_item]['state'] )) print("--------------------------------------------") # set values for next state self.roaster.time_remaining = ( self.recipe[self.active_recipe_item]['time_remaining']) self.roaster.target_temp = ( self.recipe[self.active_recipe_item]['target_temp']) self.roaster.fan_speed = ( self.recipe[self.active_recipe_item]['fan_speed']) # set state if(self.recipe[self.active_recipe_item]['state'] == 'roasting'): self.roaster.roast() elif(self.recipe[self.active_recipe_item]['state'] == 'cooling'): self.roaster.cool() elif(self.recipe[self.active_recipe_item]['state'] == 'idle'): self.roaster.idle() elif(self.recipe[self.active_recipe_item]['state'] == 'cooling'): self.roaster.sleep()
[ "def", "next_state", "(", "self", ")", ":", "self", ".", "active_recipe_item", "+=", "1", "if", "self", ".", "active_recipe_item", ">=", "len", "(", "self", ".", "recipe", ")", ":", "# we're done!", "return", "# show state step on screen", "print", "(", "\"--------------------------------------------\"", ")", "print", "(", "\"Setting next process step: %d\"", "%", "self", ".", "active_recipe_item", ")", "print", "(", "\"time:%d, target: %ddegF, fan: %d, state: %s\"", "%", "(", "self", ".", "recipe", "[", "self", ".", "active_recipe_item", "]", "[", "'time_remaining'", "]", ",", "self", ".", "recipe", "[", "self", ".", "active_recipe_item", "]", "[", "'target_temp'", "]", ",", "self", ".", "recipe", "[", "self", ".", "active_recipe_item", "]", "[", "'fan_speed'", "]", ",", "self", ".", "recipe", "[", "self", ".", "active_recipe_item", "]", "[", "'state'", "]", ")", ")", "print", "(", "\"--------------------------------------------\"", ")", "# set values for next state", "self", ".", "roaster", ".", "time_remaining", "=", "(", "self", ".", "recipe", "[", "self", ".", "active_recipe_item", "]", "[", "'time_remaining'", "]", ")", "self", ".", "roaster", ".", "target_temp", "=", "(", "self", ".", "recipe", "[", "self", ".", "active_recipe_item", "]", "[", "'target_temp'", "]", ")", "self", ".", "roaster", ".", "fan_speed", "=", "(", "self", ".", "recipe", "[", "self", ".", "active_recipe_item", "]", "[", "'fan_speed'", "]", ")", "# set state", "if", "(", "self", ".", "recipe", "[", "self", ".", "active_recipe_item", "]", "[", "'state'", "]", "==", "'roasting'", ")", ":", "self", ".", "roaster", ".", "roast", "(", ")", "elif", "(", "self", ".", "recipe", "[", "self", ".", "active_recipe_item", "]", "[", "'state'", "]", "==", "'cooling'", ")", ":", "self", ".", "roaster", ".", "cool", "(", ")", "elif", "(", "self", ".", "recipe", "[", "self", ".", "active_recipe_item", "]", "[", "'state'", "]", "==", "'idle'", ")", ":", "self", ".", "roaster", ".", "idle", "(", ")", 
"elif", "(", "self", ".", "recipe", "[", "self", ".", "active_recipe_item", "]", "[", "'state'", "]", "==", "'cooling'", ")", ":", "self", ".", "roaster", ".", "sleep", "(", ")" ]
49.176471
18
def deny(cls, action, **kwargs): """Deny the given action need. :param action: The action to deny. :returns: A :class:`invenio_access.models.ActionNeedMixin` instance. """ return cls.create(action, exclude=True, **kwargs)
[ "def", "deny", "(", "cls", ",", "action", ",", "*", "*", "kwargs", ")", ":", "return", "cls", ".", "create", "(", "action", ",", "exclude", "=", "True", ",", "*", "*", "kwargs", ")" ]
36.571429
14.714286
def generate_private_key(key_type): """ Generate a random private key using sensible parameters. :param str key_type: The type of key to generate. One of: ``rsa``. """ if key_type == u'rsa': return rsa.generate_private_key( public_exponent=65537, key_size=2048, backend=default_backend()) raise ValueError(key_type)
[ "def", "generate_private_key", "(", "key_type", ")", ":", "if", "key_type", "==", "u'rsa'", ":", "return", "rsa", ".", "generate_private_key", "(", "public_exponent", "=", "65537", ",", "key_size", "=", "2048", ",", "backend", "=", "default_backend", "(", ")", ")", "raise", "ValueError", "(", "key_type", ")" ]
35.1
15.5
def gramm_to_promille(gramm, age, weight, height, sex): """Return the blood alcohol content (per mill) for a person with the given body stats and amount of alcohol (in gramm) in blood """ bw = calculate_bw(age, weight, height, sex) return (gramm * W) / (PB * bw)
[ "def", "gramm_to_promille", "(", "gramm", ",", "age", ",", "weight", ",", "height", ",", "sex", ")", ":", "bw", "=", "calculate_bw", "(", "age", ",", "weight", ",", "height", ",", "sex", ")", "return", "(", "gramm", "*", "W", ")", "/", "(", "PB", "*", "bw", ")" ]
45
8.5
def _form_datetimes(days, msecs): """Calculate seconds since EPOCH from days and milliseconds for each of IASI scan.""" all_datetimes = [] for i in range(days.size): day = int(days[i]) msec = msecs[i] scanline_datetimes = [] for j in range(int(VALUES_PER_SCAN_LINE / 4)): usec = 1000 * (j * VIEW_TIME_ADJUSTMENT + msec) delta = (dt.timedelta(days=day, microseconds=usec)) for k in range(4): scanline_datetimes.append(delta.total_seconds()) all_datetimes.append(scanline_datetimes) return np.array(all_datetimes, dtype=np.float64)
[ "def", "_form_datetimes", "(", "days", ",", "msecs", ")", ":", "all_datetimes", "=", "[", "]", "for", "i", "in", "range", "(", "days", ".", "size", ")", ":", "day", "=", "int", "(", "days", "[", "i", "]", ")", "msec", "=", "msecs", "[", "i", "]", "scanline_datetimes", "=", "[", "]", "for", "j", "in", "range", "(", "int", "(", "VALUES_PER_SCAN_LINE", "/", "4", ")", ")", ":", "usec", "=", "1000", "*", "(", "j", "*", "VIEW_TIME_ADJUSTMENT", "+", "msec", ")", "delta", "=", "(", "dt", ".", "timedelta", "(", "days", "=", "day", ",", "microseconds", "=", "usec", ")", ")", "for", "k", "in", "range", "(", "4", ")", ":", "scanline_datetimes", ".", "append", "(", "delta", ".", "total_seconds", "(", ")", ")", "all_datetimes", ".", "append", "(", "scanline_datetimes", ")", "return", "np", ".", "array", "(", "all_datetimes", ",", "dtype", "=", "np", ".", "float64", ")" ]
39
16.5625
def get_default_handler(self, **kw): """Return the default logging handler based on the local environment. :type kw: dict :param kw: keyword args passed to handler constructor :rtype: :class:`logging.Handler` :returns: The default log handler based on the environment """ gke_cluster_name = retrieve_metadata_server(_GKE_CLUSTER_NAME) if ( _APPENGINE_FLEXIBLE_ENV_VM in os.environ or _APPENGINE_INSTANCE_ID in os.environ ): return AppEngineHandler(self, **kw) elif gke_cluster_name is not None: return ContainerEngineHandler(**kw) else: return CloudLoggingHandler(self, **kw)
[ "def", "get_default_handler", "(", "self", ",", "*", "*", "kw", ")", ":", "gke_cluster_name", "=", "retrieve_metadata_server", "(", "_GKE_CLUSTER_NAME", ")", "if", "(", "_APPENGINE_FLEXIBLE_ENV_VM", "in", "os", ".", "environ", "or", "_APPENGINE_INSTANCE_ID", "in", "os", ".", "environ", ")", ":", "return", "AppEngineHandler", "(", "self", ",", "*", "*", "kw", ")", "elif", "gke_cluster_name", "is", "not", "None", ":", "return", "ContainerEngineHandler", "(", "*", "*", "kw", ")", "else", ":", "return", "CloudLoggingHandler", "(", "self", ",", "*", "*", "kw", ")" ]
35.35
17.65
def dateint_range(first_dateint, last_dateint): """Returns all dateints in the given dateint range. Arguments --------- first_dateint : int An integer object decipting a specific calendaric day; e.g. 20161225. last_dateint : int An integer object decipting a specific calendaric day; e.g. 20170108. Returns ------- iterable An iterable of ints representing all days in the given dateint range. Example ------- >>> dateint_range(20170228, 20170301) [20170228, 20170301] >>> dateint_range(20170225, 20170301) [20170225, 20170226, 20170227, 20170228, 20170301] """ first_datetime = dateint_to_datetime(first_dateint) last_datetime = dateint_to_datetime(last_dateint) delta = last_datetime - first_datetime delta_in_hours = math.ceil(delta.total_seconds() / 3600) delta_in_days = math.ceil(delta_in_hours / 24) + 1 dateint_set = set() for delta_i in range(0, delta_in_days * 24, 24): datetime_i = first_datetime + timedelta(hours=delta_i) dateint_i = datetime_to_dateint(datetime_i) if dateint_i <= last_dateint: dateint_set.add(dateint_i) return sorted(dateint_set)
[ "def", "dateint_range", "(", "first_dateint", ",", "last_dateint", ")", ":", "first_datetime", "=", "dateint_to_datetime", "(", "first_dateint", ")", "last_datetime", "=", "dateint_to_datetime", "(", "last_dateint", ")", "delta", "=", "last_datetime", "-", "first_datetime", "delta_in_hours", "=", "math", ".", "ceil", "(", "delta", ".", "total_seconds", "(", ")", "/", "3600", ")", "delta_in_days", "=", "math", ".", "ceil", "(", "delta_in_hours", "/", "24", ")", "+", "1", "dateint_set", "=", "set", "(", ")", "for", "delta_i", "in", "range", "(", "0", ",", "delta_in_days", "*", "24", ",", "24", ")", ":", "datetime_i", "=", "first_datetime", "+", "timedelta", "(", "hours", "=", "delta_i", ")", "dateint_i", "=", "datetime_to_dateint", "(", "datetime_i", ")", "if", "dateint_i", "<=", "last_dateint", ":", "dateint_set", ".", "add", "(", "dateint_i", ")", "return", "sorted", "(", "dateint_set", ")" ]
34.823529
18.941176
def sample_from_discretized_mix_logistic(pred, seed=None): """Sampling from a discretized mixture of logistics. Args: pred: A [batch, height, width, num_mixtures*10] tensor of floats comprising one unconstrained mixture probability, three means (one per channel), three standard deviations (one per channel), and three coefficients which linearly parameterize dependence across channels. seed: Random seed. Returns: A tensor of shape [batch, height, width, 3] with real intensities scaled between -1 and 1. """ logits, locs, log_scales, coeffs = split_to_discretized_mix_logistic_params( pred) # Sample mixture indicator given logits using the gumbel max trick. num_mixtures = shape_list(logits)[-1] gumbel_noise = -tf.log(-tf.log( tf.random_uniform( tf.shape(logits), minval=1e-5, maxval=1. - 1e-5, seed=seed))) sel = tf.one_hot( tf.argmax(logits + gumbel_noise, -1), depth=num_mixtures, dtype=tf.float32) # Select mixture component's parameters. sel = tf.expand_dims(sel, -1) locs = tf.reduce_sum(locs * sel, 3) log_scales = tf.reduce_sum(log_scales * sel, 3) coeffs = tf.reduce_sum(coeffs * sel, 3) # Sample from 3-D logistic & clip to interval. Note we don't round to the # nearest 8-bit value when sampling. uniform_noise = tf.random_uniform( tf.shape(locs), minval=1e-5, maxval=1. - 1e-5, seed=seed) logistic_noise = tf.log(uniform_noise) - tf.log1p(-uniform_noise) x = locs + tf.exp(log_scales) * logistic_noise x0 = x[..., 0] x1 = x[..., 1] + coeffs[..., 0] * x0 x2 = x[..., 2] + coeffs[..., 1] * x0 + coeffs[..., 2] * x1 x = tf.stack([x0, x1, x2], axis=-1) x = tf.clip_by_value(x, -1., 1.) return x
[ "def", "sample_from_discretized_mix_logistic", "(", "pred", ",", "seed", "=", "None", ")", ":", "logits", ",", "locs", ",", "log_scales", ",", "coeffs", "=", "split_to_discretized_mix_logistic_params", "(", "pred", ")", "# Sample mixture indicator given logits using the gumbel max trick.", "num_mixtures", "=", "shape_list", "(", "logits", ")", "[", "-", "1", "]", "gumbel_noise", "=", "-", "tf", ".", "log", "(", "-", "tf", ".", "log", "(", "tf", ".", "random_uniform", "(", "tf", ".", "shape", "(", "logits", ")", ",", "minval", "=", "1e-5", ",", "maxval", "=", "1.", "-", "1e-5", ",", "seed", "=", "seed", ")", ")", ")", "sel", "=", "tf", ".", "one_hot", "(", "tf", ".", "argmax", "(", "logits", "+", "gumbel_noise", ",", "-", "1", ")", ",", "depth", "=", "num_mixtures", ",", "dtype", "=", "tf", ".", "float32", ")", "# Select mixture component's parameters.", "sel", "=", "tf", ".", "expand_dims", "(", "sel", ",", "-", "1", ")", "locs", "=", "tf", ".", "reduce_sum", "(", "locs", "*", "sel", ",", "3", ")", "log_scales", "=", "tf", ".", "reduce_sum", "(", "log_scales", "*", "sel", ",", "3", ")", "coeffs", "=", "tf", ".", "reduce_sum", "(", "coeffs", "*", "sel", ",", "3", ")", "# Sample from 3-D logistic & clip to interval. 
Note we don't round to the", "# nearest 8-bit value when sampling.", "uniform_noise", "=", "tf", ".", "random_uniform", "(", "tf", ".", "shape", "(", "locs", ")", ",", "minval", "=", "1e-5", ",", "maxval", "=", "1.", "-", "1e-5", ",", "seed", "=", "seed", ")", "logistic_noise", "=", "tf", ".", "log", "(", "uniform_noise", ")", "-", "tf", ".", "log1p", "(", "-", "uniform_noise", ")", "x", "=", "locs", "+", "tf", ".", "exp", "(", "log_scales", ")", "*", "logistic_noise", "x0", "=", "x", "[", "...", ",", "0", "]", "x1", "=", "x", "[", "...", ",", "1", "]", "+", "coeffs", "[", "...", ",", "0", "]", "*", "x0", "x2", "=", "x", "[", "...", ",", "2", "]", "+", "coeffs", "[", "...", ",", "1", "]", "*", "x0", "+", "coeffs", "[", "...", ",", "2", "]", "*", "x1", "x", "=", "tf", ".", "stack", "(", "[", "x0", ",", "x1", ",", "x2", "]", ",", "axis", "=", "-", "1", ")", "x", "=", "tf", ".", "clip_by_value", "(", "x", ",", "-", "1.", ",", "1.", ")", "return", "x" ]
36.234043
20.255319
def process_all_json_files(build_dir): """Return a list of pages to index""" html_files = [] for root, _, files in os.walk(build_dir): for filename in fnmatch.filter(files, '*.fjson'): if filename in ['search.fjson', 'genindex.fjson', 'py-modindex.fjson']: continue html_files.append(os.path.join(root, filename)) page_list = [] for filename in html_files: try: result = process_file(filename) if result: page_list.append(result) # we're unsure which exceptions can be raised except: # noqa pass return page_list
[ "def", "process_all_json_files", "(", "build_dir", ")", ":", "html_files", "=", "[", "]", "for", "root", ",", "_", ",", "files", "in", "os", ".", "walk", "(", "build_dir", ")", ":", "for", "filename", "in", "fnmatch", ".", "filter", "(", "files", ",", "'*.fjson'", ")", ":", "if", "filename", "in", "[", "'search.fjson'", ",", "'genindex.fjson'", ",", "'py-modindex.fjson'", "]", ":", "continue", "html_files", ".", "append", "(", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", ")", "page_list", "=", "[", "]", "for", "filename", "in", "html_files", ":", "try", ":", "result", "=", "process_file", "(", "filename", ")", "if", "result", ":", "page_list", ".", "append", "(", "result", ")", "# we're unsure which exceptions can be raised", "except", ":", "# noqa", "pass", "return", "page_list" ]
35.315789
13.894737
def parse(cls, fptr, offset, length): """Parse component definition box. Parameters ---------- fptr : file Open file object. offset : int Start position of box in bytes. length : int Length of the box in bytes. Returns ------- ComponentDefinitionBox Instance of the current component definition box. """ num_bytes = offset + length - fptr.tell() read_buffer = fptr.read(num_bytes) # Read the number of components. num_components, = struct.unpack_from('>H', read_buffer) data = struct.unpack_from('>' + 'HHH' * num_components, read_buffer, offset=2) index = data[0:num_components * 6:3] channel_type = data[1:num_components * 6:3] association = data[2:num_components * 6:3] return cls(index=tuple(index), channel_type=tuple(channel_type), association=tuple(association), length=length, offset=offset)
[ "def", "parse", "(", "cls", ",", "fptr", ",", "offset", ",", "length", ")", ":", "num_bytes", "=", "offset", "+", "length", "-", "fptr", ".", "tell", "(", ")", "read_buffer", "=", "fptr", ".", "read", "(", "num_bytes", ")", "# Read the number of components.", "num_components", ",", "=", "struct", ".", "unpack_from", "(", "'>H'", ",", "read_buffer", ")", "data", "=", "struct", ".", "unpack_from", "(", "'>'", "+", "'HHH'", "*", "num_components", ",", "read_buffer", ",", "offset", "=", "2", ")", "index", "=", "data", "[", "0", ":", "num_components", "*", "6", ":", "3", "]", "channel_type", "=", "data", "[", "1", ":", "num_components", "*", "6", ":", "3", "]", "association", "=", "data", "[", "2", ":", "num_components", "*", "6", ":", "3", "]", "return", "cls", "(", "index", "=", "tuple", "(", "index", ")", ",", "channel_type", "=", "tuple", "(", "channel_type", ")", ",", "association", "=", "tuple", "(", "association", ")", ",", "length", "=", "length", ",", "offset", "=", "offset", ")" ]
32.212121
16.181818
def format(self, subtitles): """Turn a string containing the subs xml document into the formatted subtitle string @param str|crunchyroll.models.StyledSubtitle sub_xml_text @return str """ logger.debug('Formatting subtitles (id=%s) with %s', subtitles.id, self.__class__.__name__) return self._format(subtitles).encode('utf-8')
[ "def", "format", "(", "self", ",", "subtitles", ")", ":", "logger", ".", "debug", "(", "'Formatting subtitles (id=%s) with %s'", ",", "subtitles", ".", "id", ",", "self", ".", "__class__", ".", "__name__", ")", "return", "self", ".", "_format", "(", "subtitles", ")", ".", "encode", "(", "'utf-8'", ")" ]
38.6
15.9
def ensure_topic(self): """Verify the pub/sub topic exists. Returns the topic qualified name. """ client = self.session.client('pubsub', 'v1', 'projects.topics') topic = self.get_topic_param() try: client.execute_command('get', {'topic': topic}) except HttpError as e: if e.resp.status != 404: raise else: return topic # bug in discovery doc.. apis say body must be empty but its required in the # discovery api for create. client.execute_command('create', {'name': topic, 'body': {}}) return topic
[ "def", "ensure_topic", "(", "self", ")", ":", "client", "=", "self", ".", "session", ".", "client", "(", "'pubsub'", ",", "'v1'", ",", "'projects.topics'", ")", "topic", "=", "self", ".", "get_topic_param", "(", ")", "try", ":", "client", ".", "execute_command", "(", "'get'", ",", "{", "'topic'", ":", "topic", "}", ")", "except", "HttpError", "as", "e", ":", "if", "e", ".", "resp", ".", "status", "!=", "404", ":", "raise", "else", ":", "return", "topic", "# bug in discovery doc.. apis say body must be empty but its required in the", "# discovery api for create.", "client", ".", "execute_command", "(", "'create'", ",", "{", "'name'", ":", "topic", ",", "'body'", ":", "{", "}", "}", ")", "return", "topic" ]
33.157895
18.526316
def dcmanonym( dcmpth, displayonly=False, patient='anonymised', physician='anonymised', dob='19800101', verbose=True): ''' Anonymise DICOM file(s) Arguments: > dcmpth: it can be passed as a single DICOM file, or a folder containing DICOM files, or a list of DICOM file paths. > patient: the name of the patient. > physician:the name of the referring physician. > dob: patient's date of birth. > verbose: display processing output. ''' #> check if a single DICOM file if isinstance(dcmpth, basestring) and os.path.isfile(dcmpth): dcmlst = [dcmpth] if verbose: print 'i> recognised the input argument as a single DICOM file.' #> check if a folder containing DICOM files elif isinstance(dcmpth, basestring) and os.path.isdir(dcmpth): dircontent = os.listdir(dcmpth) #> create a list of DICOM files inside the folder dcmlst = [os.path.join(dcmpth,d) for d in dircontent if os.path.isfile(os.path.join(dcmpth,d)) and d.endswith(dcmext)] if verbose: print 'i> recognised the input argument as the folder containing DICOM files.' #> check if a folder containing DICOM files elif isinstance(dcmpth, list): if not all([os.path.isfile(d) and d.endswith(dcmext) for d in dcmpth]): raise IOError('Not all files in the list are DICOM files.') dcmlst = dcmpth if verbose: print 'i> recognised the input argument as the list of DICOM file paths.' #> check if dictionary of data input <datain> elif isinstance(dcmpth, dict) and 'corepath' in dcmpth: dcmlst = list_dcm_datain(dcmpth) if verbose: print 'i> recognised the input argument as the dictionary of scanner data.' else: raise IOError('Unrecognised input!') for dcmf in dcmlst: #> read the file dhdr = dcm.dcmread(dcmf) #> get the basic info about the DICOM file dcmtype = dcminfo(dhdr, verbose=False) if verbose: print '-------------------------------' print 'i> the DICOM file is for:', dcmtype #> anonymise mMR data. 
if 'mmr' in dcmtype: if [0x029, 0x1120] in dhdr and dhdr[0x029, 0x1120].name=='[CSA Series Header Info]': csafield = dhdr[0x029, 0x1120] csa = csafield.value elif [0x029, 0x1020] in dhdr and dhdr[0x029, 0x1020].name=='[CSA Series Header Info]': csafield = dhdr[0x029, 0x1020] csa = csafield.value else: csa = '' # string length considered for replacement strlen = 200 idx = [m.start() for m in re.finditer(r'([Pp]atients{0,1}[Nn]ame)', csa)] if idx and verbose: print ' > found sensitive information deep in DICOM headers:', dcmtype #> run the anonymisation iupdate = 0 for i in idx: ci = i - iupdate if displayonly: print ' > sensitive info:' print ' ', csa[ci:ci+strlen] continue rplcmnt = re.sub( r'(\{\s*\"{1,2}\W*\w+\W*\w+\W*\"{1,2}\s*\})', '{ ""' +patient+ '"" }', csa[ci:ci+strlen] ) #> update string csa = csa[:ci] + rplcmnt + csa[ci+strlen:] print ' > removed sensitive information.' #> correct for the number of removed letters iupdate = strlen-len(rplcmnt) #> update DICOM if not displayonly and csa!='': csafield.value = csa #> Patient's name if [0x010,0x010] in dhdr: if displayonly: print ' > sensitive info:', dhdr[0x010,0x010].name print ' ', dhdr[0x010,0x010].value else: dhdr[0x010,0x010].value = patient if verbose: print ' > anonymised patients name' #> date of birth if [0x010,0x030] in dhdr: if displayonly: print ' > sensitive info:', dhdr[0x010,0x030].name print ' ', dhdr[0x010,0x030].value else: dhdr[0x010,0x030].value = dob if verbose: print ' > anonymised date of birh' #> physician's name if [0x008, 0x090] in dhdr: if displayonly: print ' > sensitive info:', dhdr[0x008,0x090].name print ' ', dhdr[0x008,0x090].value else: dhdr[0x008,0x090].value = physician if verbose: print ' > anonymised physician name' dhdr.save_as(dcmf)
[ "def", "dcmanonym", "(", "dcmpth", ",", "displayonly", "=", "False", ",", "patient", "=", "'anonymised'", ",", "physician", "=", "'anonymised'", ",", "dob", "=", "'19800101'", ",", "verbose", "=", "True", ")", ":", "#> check if a single DICOM file", "if", "isinstance", "(", "dcmpth", ",", "basestring", ")", "and", "os", ".", "path", ".", "isfile", "(", "dcmpth", ")", ":", "dcmlst", "=", "[", "dcmpth", "]", "if", "verbose", ":", "print", "'i> recognised the input argument as a single DICOM file.'", "#> check if a folder containing DICOM files", "elif", "isinstance", "(", "dcmpth", ",", "basestring", ")", "and", "os", ".", "path", ".", "isdir", "(", "dcmpth", ")", ":", "dircontent", "=", "os", ".", "listdir", "(", "dcmpth", ")", "#> create a list of DICOM files inside the folder", "dcmlst", "=", "[", "os", ".", "path", ".", "join", "(", "dcmpth", ",", "d", ")", "for", "d", "in", "dircontent", "if", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "dcmpth", ",", "d", ")", ")", "and", "d", ".", "endswith", "(", "dcmext", ")", "]", "if", "verbose", ":", "print", "'i> recognised the input argument as the folder containing DICOM files.'", "#> check if a folder containing DICOM files", "elif", "isinstance", "(", "dcmpth", ",", "list", ")", ":", "if", "not", "all", "(", "[", "os", ".", "path", ".", "isfile", "(", "d", ")", "and", "d", ".", "endswith", "(", "dcmext", ")", "for", "d", "in", "dcmpth", "]", ")", ":", "raise", "IOError", "(", "'Not all files in the list are DICOM files.'", ")", "dcmlst", "=", "dcmpth", "if", "verbose", ":", "print", "'i> recognised the input argument as the list of DICOM file paths.'", "#> check if dictionary of data input <datain>", "elif", "isinstance", "(", "dcmpth", ",", "dict", ")", "and", "'corepath'", "in", "dcmpth", ":", "dcmlst", "=", "list_dcm_datain", "(", "dcmpth", ")", "if", "verbose", ":", "print", "'i> recognised the input argument as the dictionary of scanner data.'", "else", ":", "raise", "IOError", 
"(", "'Unrecognised input!'", ")", "for", "dcmf", "in", "dcmlst", ":", "#> read the file", "dhdr", "=", "dcm", ".", "dcmread", "(", "dcmf", ")", "#> get the basic info about the DICOM file", "dcmtype", "=", "dcminfo", "(", "dhdr", ",", "verbose", "=", "False", ")", "if", "verbose", ":", "print", "'-------------------------------'", "print", "'i> the DICOM file is for:'", ",", "dcmtype", "#> anonymise mMR data.", "if", "'mmr'", "in", "dcmtype", ":", "if", "[", "0x029", ",", "0x1120", "]", "in", "dhdr", "and", "dhdr", "[", "0x029", ",", "0x1120", "]", ".", "name", "==", "'[CSA Series Header Info]'", ":", "csafield", "=", "dhdr", "[", "0x029", ",", "0x1120", "]", "csa", "=", "csafield", ".", "value", "elif", "[", "0x029", ",", "0x1020", "]", "in", "dhdr", "and", "dhdr", "[", "0x029", ",", "0x1020", "]", ".", "name", "==", "'[CSA Series Header Info]'", ":", "csafield", "=", "dhdr", "[", "0x029", ",", "0x1020", "]", "csa", "=", "csafield", ".", "value", "else", ":", "csa", "=", "''", "# string length considered for replacement", "strlen", "=", "200", "idx", "=", "[", "m", ".", "start", "(", ")", "for", "m", "in", "re", ".", "finditer", "(", "r'([Pp]atients{0,1}[Nn]ame)'", ",", "csa", ")", "]", "if", "idx", "and", "verbose", ":", "print", "' > found sensitive information deep in DICOM headers:'", ",", "dcmtype", "#> run the anonymisation ", "iupdate", "=", "0", "for", "i", "in", "idx", ":", "ci", "=", "i", "-", "iupdate", "if", "displayonly", ":", "print", "' > sensitive info:'", "print", "' '", ",", "csa", "[", "ci", ":", "ci", "+", "strlen", "]", "continue", "rplcmnt", "=", "re", ".", "sub", "(", "r'(\\{\\s*\\\"{1,2}\\W*\\w+\\W*\\w+\\W*\\\"{1,2}\\s*\\})'", ",", "'{ \"\"'", "+", "patient", "+", "'\"\" }'", ",", "csa", "[", "ci", ":", "ci", "+", "strlen", "]", ")", "#> update string", "csa", "=", "csa", "[", ":", "ci", "]", "+", "rplcmnt", "+", "csa", "[", "ci", "+", "strlen", ":", "]", "print", "' > removed sensitive information.'", "#> correct for the number of removed 
letters", "iupdate", "=", "strlen", "-", "len", "(", "rplcmnt", ")", "#> update DICOM", "if", "not", "displayonly", "and", "csa", "!=", "''", ":", "csafield", ".", "value", "=", "csa", "#> Patient's name", "if", "[", "0x010", ",", "0x010", "]", "in", "dhdr", ":", "if", "displayonly", ":", "print", "' > sensitive info:'", ",", "dhdr", "[", "0x010", ",", "0x010", "]", ".", "name", "print", "' '", ",", "dhdr", "[", "0x010", ",", "0x010", "]", ".", "value", "else", ":", "dhdr", "[", "0x010", ",", "0x010", "]", ".", "value", "=", "patient", "if", "verbose", ":", "print", "' > anonymised patients name'", "#> date of birth", "if", "[", "0x010", ",", "0x030", "]", "in", "dhdr", ":", "if", "displayonly", ":", "print", "' > sensitive info:'", ",", "dhdr", "[", "0x010", ",", "0x030", "]", ".", "name", "print", "' '", ",", "dhdr", "[", "0x010", ",", "0x030", "]", ".", "value", "else", ":", "dhdr", "[", "0x010", ",", "0x030", "]", ".", "value", "=", "dob", "if", "verbose", ":", "print", "' > anonymised date of birh'", "#> physician's name", "if", "[", "0x008", ",", "0x090", "]", "in", "dhdr", ":", "if", "displayonly", ":", "print", "' > sensitive info:'", ",", "dhdr", "[", "0x008", ",", "0x090", "]", ".", "name", "print", "' '", ",", "dhdr", "[", "0x008", ",", "0x090", "]", ".", "value", "else", ":", "dhdr", "[", "0x008", ",", "0x090", "]", ".", "value", "=", "physician", "if", "verbose", ":", "print", "' > anonymised physician name'", "dhdr", ".", "save_as", "(", "dcmf", ")" ]
35.242647
22.036765
def healthy(self): """Return 200 is healthy, else 500. Override is_healthy() to change the health check. """ try: if self.is_healthy(): return "OK", 200 else: return "FAIL", 500 except Exception as e: self.app.logger.exception(e) return str(e), 500
[ "def", "healthy", "(", "self", ")", ":", "try", ":", "if", "self", ".", "is_healthy", "(", ")", ":", "return", "\"OK\"", ",", "200", "else", ":", "return", "\"FAIL\"", ",", "500", "except", "Exception", "as", "e", ":", "self", ".", "app", ".", "logger", ".", "exception", "(", "e", ")", "return", "str", "(", "e", ")", ",", "500" ]
23.8
16.733333
def install_brew(target_path): """ Install brew to the target path """ if not os.path.exists(target_path): try: os.makedirs(target_path) except OSError: logger.warn("Unable to create directory %s for brew." % target_path) logger.warn("Skipping...") return extract_targz(HOMEBREW_URL, target_path, remove_common_prefix=True)
[ "def", "install_brew", "(", "target_path", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "target_path", ")", ":", "try", ":", "os", ".", "makedirs", "(", "target_path", ")", "except", "OSError", ":", "logger", ".", "warn", "(", "\"Unable to create directory %s for brew.\"", "%", "target_path", ")", "logger", ".", "warn", "(", "\"Skipping...\"", ")", "return", "extract_targz", "(", "HOMEBREW_URL", ",", "target_path", ",", "remove_common_prefix", "=", "True", ")" ]
39
15.5
def should_retry_on_error(self, error): """rules for retry :param error: ProtocolException that returns from Server """ if self.is_streaming_request: # not retry for streaming request return False retry_flag = self.headers.get('re', retry.DEFAULT) if retry_flag == retry.NEVER: return False if isinstance(error, StreamClosedError): return True if error.code in [ErrorCode.bad_request, ErrorCode.cancelled, ErrorCode.unhealthy]: return False elif error.code in [ErrorCode.busy, ErrorCode.declined]: return True elif error.code is ErrorCode.timeout: return retry_flag is not retry.CONNECTION_ERROR elif error.code in [ErrorCode.network_error, ErrorCode.fatal, ErrorCode.unexpected]: return retry_flag is not retry.TIMEOUT else: return False
[ "def", "should_retry_on_error", "(", "self", ",", "error", ")", ":", "if", "self", ".", "is_streaming_request", ":", "# not retry for streaming request", "return", "False", "retry_flag", "=", "self", ".", "headers", ".", "get", "(", "'re'", ",", "retry", ".", "DEFAULT", ")", "if", "retry_flag", "==", "retry", ".", "NEVER", ":", "return", "False", "if", "isinstance", "(", "error", ",", "StreamClosedError", ")", ":", "return", "True", "if", "error", ".", "code", "in", "[", "ErrorCode", ".", "bad_request", ",", "ErrorCode", ".", "cancelled", ",", "ErrorCode", ".", "unhealthy", "]", ":", "return", "False", "elif", "error", ".", "code", "in", "[", "ErrorCode", ".", "busy", ",", "ErrorCode", ".", "declined", "]", ":", "return", "True", "elif", "error", ".", "code", "is", "ErrorCode", ".", "timeout", ":", "return", "retry_flag", "is", "not", "retry", ".", "CONNECTION_ERROR", "elif", "error", ".", "code", "in", "[", "ErrorCode", ".", "network_error", ",", "ErrorCode", ".", "fatal", ",", "ErrorCode", ".", "unexpected", "]", ":", "return", "retry_flag", "is", "not", "retry", ".", "TIMEOUT", "else", ":", "return", "False" ]
31.59375
17.375
def process_download_path(self): """ Processes the download path. It checks if the path exists and the scraper has write permissions. """ if os.path.exists(self.download_path): if not os.access(self.download_path, os.W_OK): raise DirectoryAccessError elif os.access(os.path.dirname(self.download_path), os.W_OK): os.makedirs(self.download_path) else: raise DirectoryCreateError return True
[ "def", "process_download_path", "(", "self", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "download_path", ")", ":", "if", "not", "os", ".", "access", "(", "self", ".", "download_path", ",", "os", ".", "W_OK", ")", ":", "raise", "DirectoryAccessError", "elif", "os", ".", "access", "(", "os", ".", "path", ".", "dirname", "(", "self", ".", "download_path", ")", ",", "os", ".", "W_OK", ")", ":", "os", ".", "makedirs", "(", "self", ".", "download_path", ")", "else", ":", "raise", "DirectoryCreateError", "return", "True" ]
35.785714
13.285714
def getSegmentKeys(self, segment): """ Get the different keys for 1 defined segment :param segment: Segment to find. Ex : PV1, PID """ return list(filter(lambda x: x.startswith(segment), self.getAliasedKeys()))
[ "def", "getSegmentKeys", "(", "self", ",", "segment", ")", ":", "return", "list", "(", "filter", "(", "lambda", "x", ":", "x", ".", "startswith", "(", "segment", ")", ",", "self", ".", "getAliasedKeys", "(", ")", ")", ")" ]
42.166667
13.833333
def _add_scheme(): """ urllib.parse doesn't support the mongodb scheme, but it's easy to make it so. """ lists = [ urllib.parse.uses_relative, urllib.parse.uses_netloc, urllib.parse.uses_query, ] for l in lists: l.append('mongodb')
[ "def", "_add_scheme", "(", ")", ":", "lists", "=", "[", "urllib", ".", "parse", ".", "uses_relative", ",", "urllib", ".", "parse", ".", "uses_netloc", ",", "urllib", ".", "parse", ".", "uses_query", ",", "]", "for", "l", "in", "lists", ":", "l", ".", "append", "(", "'mongodb'", ")" ]
23.333333
15.5
def use(self, plugin, arguments={}): """Add plugin to use during compilation. plugin: Plugin to include. arguments: Dictionary of arguments to pass to the import. """ self.plugins[plugin] = dict(arguments) return self.plugins
[ "def", "use", "(", "self", ",", "plugin", ",", "arguments", "=", "{", "}", ")", ":", "self", ".", "plugins", "[", "plugin", "]", "=", "dict", "(", "arguments", ")", "return", "self", ".", "plugins" ]
30.375
11.75
def execute(self): """ Invoke the redispy pipeline.execute() method and take all the values returned in sequential order of commands and map them to the Future objects we returned when each command was queued inside the pipeline. Also invoke all the callback functions queued up. :param raise_on_error: boolean :return: None """ stack = self._stack callbacks = self._callbacks promises = [] if stack: def process(): """ take all the commands and pass them to redis. this closure has the context of the stack :return: None """ # get the connection to redis pipe = ConnectionManager.get(self.connection_name) # keep track of all the commands call_stack = [] # build a corresponding list of the futures futures = [] # we need to do this because we need to make sure # all of these are callable. # there shouldn't be any non-callables. for item, args, kwargs, future in stack: f = getattr(pipe, item) if callable(f): futures.append(future) call_stack.append((f, args, kwargs)) # here's where we actually pass the commands to the # underlying redis-py pipeline() object. for f, args, kwargs in call_stack: f(*args, **kwargs) # execute the redis-py pipeline. # map all of the results into the futures. for i, v in enumerate(pipe.execute()): futures[i].set(v) promises.append(process) # collect all the other pipelines for other named connections attached. promises += [p.execute for p in self._pipelines.values()] if len(promises) == 1: promises[0]() else: # if there are no promises, this is basically a no-op. TaskManager.wait(*[TaskManager.promise(p) for p in promises]) for cb in callbacks: cb()
[ "def", "execute", "(", "self", ")", ":", "stack", "=", "self", ".", "_stack", "callbacks", "=", "self", ".", "_callbacks", "promises", "=", "[", "]", "if", "stack", ":", "def", "process", "(", ")", ":", "\"\"\"\n take all the commands and pass them to redis.\n this closure has the context of the stack\n :return: None\n \"\"\"", "# get the connection to redis", "pipe", "=", "ConnectionManager", ".", "get", "(", "self", ".", "connection_name", ")", "# keep track of all the commands", "call_stack", "=", "[", "]", "# build a corresponding list of the futures", "futures", "=", "[", "]", "# we need to do this because we need to make sure", "# all of these are callable.", "# there shouldn't be any non-callables.", "for", "item", ",", "args", ",", "kwargs", ",", "future", "in", "stack", ":", "f", "=", "getattr", "(", "pipe", ",", "item", ")", "if", "callable", "(", "f", ")", ":", "futures", ".", "append", "(", "future", ")", "call_stack", ".", "append", "(", "(", "f", ",", "args", ",", "kwargs", ")", ")", "# here's where we actually pass the commands to the", "# underlying redis-py pipeline() object.", "for", "f", ",", "args", ",", "kwargs", "in", "call_stack", ":", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "# execute the redis-py pipeline.", "# map all of the results into the futures.", "for", "i", ",", "v", "in", "enumerate", "(", "pipe", ".", "execute", "(", ")", ")", ":", "futures", "[", "i", "]", ".", "set", "(", "v", ")", "promises", ".", "append", "(", "process", ")", "# collect all the other pipelines for other named connections attached.", "promises", "+=", "[", "p", ".", "execute", "for", "p", "in", "self", ".", "_pipelines", ".", "values", "(", ")", "]", "if", "len", "(", "promises", ")", "==", "1", ":", "promises", "[", "0", "]", "(", ")", "else", ":", "# if there are no promises, this is basically a no-op.", "TaskManager", ".", "wait", "(", "*", "[", "TaskManager", ".", "promise", "(", "p", ")", "for", "p", "in", "promises", "]", ")", "for", "cb", 
"in", "callbacks", ":", "cb", "(", ")" ]
35.83871
19.064516
def old(self): """Assess to the state value(s) at beginning of the time step, which has been processed most recently. When using *HydPy* in the normal manner. But it can be helpful for demonstration and debugging purposes. """ value = getattr(self.fastaccess_old, self.name, None) if value is None: raise RuntimeError( 'No value/values of sequence %s has/have ' 'not been defined so far.' % objecttools.elementphrase(self)) else: if self.NDIM: value = numpy.asarray(value) return value
[ "def", "old", "(", "self", ")", ":", "value", "=", "getattr", "(", "self", ".", "fastaccess_old", ",", "self", ".", "name", ",", "None", ")", "if", "value", "is", "None", ":", "raise", "RuntimeError", "(", "'No value/values of sequence %s has/have '", "'not been defined so far.'", "%", "objecttools", ".", "elementphrase", "(", "self", ")", ")", "else", ":", "if", "self", ".", "NDIM", ":", "value", "=", "numpy", ".", "asarray", "(", "value", ")", "return", "value" ]
39.75
15.6875
def get_profile_pic_from_id(self, id): """ Get full profile pic from an id The ID must be on your contact book to successfully get their profile picture. :param id: ID :type id: str """ profile_pic = self.wapi_functions.getProfilePicFromId(id) if profile_pic: return b64decode(profile_pic) else: return False
[ "def", "get_profile_pic_from_id", "(", "self", ",", "id", ")", ":", "profile_pic", "=", "self", ".", "wapi_functions", ".", "getProfilePicFromId", "(", "id", ")", "if", "profile_pic", ":", "return", "b64decode", "(", "profile_pic", ")", "else", ":", "return", "False" ]
28.571429
12.857143
def create_tags(filesystemid, tags, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Creates or overwrites tags associated with a file system. Each tag is a key-value pair. If a tag key specified in the request already exists on the file system, this operation overwrites its value with the value provided in the request. filesystemid (string) - ID of the file system for whose tags will be modified. tags (dict) - The tags to add to the file system CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.create_tags ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) new_tags = [] for k, v in six.iteritems(tags): new_tags.append({'Key': k, 'Value': v}) client.create_tags(FileSystemId=filesystemid, Tags=new_tags)
[ "def", "create_tags", "(", "filesystemid", ",", "tags", ",", "keyid", "=", "None", ",", "key", "=", "None", ",", "profile", "=", "None", ",", "region", "=", "None", ",", "*", "*", "kwargs", ")", ":", "client", "=", "_get_conn", "(", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ",", "region", "=", "region", ")", "new_tags", "=", "[", "]", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "tags", ")", ":", "new_tags", ".", "append", "(", "{", "'Key'", ":", "k", ",", "'Value'", ":", "v", "}", ")", "client", ".", "create_tags", "(", "FileSystemId", "=", "filesystemid", ",", "Tags", "=", "new_tags", ")" ]
27.909091
24.515152
def create(self, item, **kwargs): """ Creates a new item. You pass in an item containing initial values. Any attribute names defined in ``prototype`` that are missing from the item will be added using the default value defined in ``prototype``. """ response = self._new_response() if self._prototype_handler.check(item, 'create', response): self._encrypt(item) params = {'Item': item} self._call_ddb_method(self.table.put_item, params, response) if response.status == 'success': response.data = item response.prepare() return response
[ "def", "create", "(", "self", ",", "item", ",", "*", "*", "kwargs", ")", ":", "response", "=", "self", ".", "_new_response", "(", ")", "if", "self", ".", "_prototype_handler", ".", "check", "(", "item", ",", "'create'", ",", "response", ")", ":", "self", ".", "_encrypt", "(", "item", ")", "params", "=", "{", "'Item'", ":", "item", "}", "self", ".", "_call_ddb_method", "(", "self", ".", "table", ".", "put_item", ",", "params", ",", "response", ")", "if", "response", ".", "status", "==", "'success'", ":", "response", ".", "data", "=", "item", "response", ".", "prepare", "(", ")", "return", "response" ]
43.125
13.875
def add_text(self, text, position=None, font_size=50, color=None, font=None, shadow=False, name=None, loc=None): """ Adds text to plot object in the top left corner by default Parameters ---------- text : str The text to add the the rendering position : tuple(float) Length 2 tuple of the pixelwise position to place the bottom left corner of the text box. Default is to find the top left corner of the renderering window and place text box up there. font : string, optional Font name may be courier, times, or arial shadow : bool, optional Adds a black shadow to the text. Defaults to False name : str, optional The name for the added actor so that it can be easily updated. If an actor of this name already exists in the rendering window, it will be replaced by the new actor. loc : int, tuple, or list Index of the renderer to add the actor to. For example, ``loc=2`` or ``loc=(1, 1)``. Returns ------- textActor : vtk.vtkTextActor Text actor added to plot """ if font is None: font = rcParams['font']['family'] if font_size is None: font_size = rcParams['font']['size'] if color is None: color = rcParams['font']['color'] if position is None: # Set the position of the text to the top left corner window_size = self.window_size x = (window_size[0] * 0.02) / self.shape[0] y = (window_size[1] * 0.85) / self.shape[0] position = [x, y] self.textActor = vtk.vtkTextActor() self.textActor.SetPosition(position) self.textActor.GetTextProperty().SetFontSize(font_size) self.textActor.GetTextProperty().SetColor(parse_color(color)) self.textActor.GetTextProperty().SetFontFamily(FONT_KEYS[font]) self.textActor.GetTextProperty().SetShadow(shadow) self.textActor.SetInput(text) self.add_actor(self.textActor, reset_camera=False, name=name, loc=loc) return self.textActor
[ "def", "add_text", "(", "self", ",", "text", ",", "position", "=", "None", ",", "font_size", "=", "50", ",", "color", "=", "None", ",", "font", "=", "None", ",", "shadow", "=", "False", ",", "name", "=", "None", ",", "loc", "=", "None", ")", ":", "if", "font", "is", "None", ":", "font", "=", "rcParams", "[", "'font'", "]", "[", "'family'", "]", "if", "font_size", "is", "None", ":", "font_size", "=", "rcParams", "[", "'font'", "]", "[", "'size'", "]", "if", "color", "is", "None", ":", "color", "=", "rcParams", "[", "'font'", "]", "[", "'color'", "]", "if", "position", "is", "None", ":", "# Set the position of the text to the top left corner", "window_size", "=", "self", ".", "window_size", "x", "=", "(", "window_size", "[", "0", "]", "*", "0.02", ")", "/", "self", ".", "shape", "[", "0", "]", "y", "=", "(", "window_size", "[", "1", "]", "*", "0.85", ")", "/", "self", ".", "shape", "[", "0", "]", "position", "=", "[", "x", ",", "y", "]", "self", ".", "textActor", "=", "vtk", ".", "vtkTextActor", "(", ")", "self", ".", "textActor", ".", "SetPosition", "(", "position", ")", "self", ".", "textActor", ".", "GetTextProperty", "(", ")", ".", "SetFontSize", "(", "font_size", ")", "self", ".", "textActor", ".", "GetTextProperty", "(", ")", ".", "SetColor", "(", "parse_color", "(", "color", ")", ")", "self", ".", "textActor", ".", "GetTextProperty", "(", ")", ".", "SetFontFamily", "(", "FONT_KEYS", "[", "font", "]", ")", "self", ".", "textActor", ".", "GetTextProperty", "(", ")", ".", "SetShadow", "(", "shadow", ")", "self", ".", "textActor", ".", "SetInput", "(", "text", ")", "self", ".", "add_actor", "(", "self", ".", "textActor", ",", "reset_camera", "=", "False", ",", "name", "=", "name", ",", "loc", "=", "loc", ")", "return", "self", ".", "textActor" ]
37.810345
19.810345
def rouge(hypotheses, references): """Calculates average rouge scores for a list of hypotheses and references""" # Filter out hyps that are of 0 length # hyps_and_refs = zip(hypotheses, references) # hyps_and_refs = [_ for _ in hyps_and_refs if len(_[0]) > 0] # hypotheses, references = zip(*hyps_and_refs) # Calculate ROUGE-1 F1, precision, recall scores rouge_1 = [ rouge_n([hyp], [ref], 1) for hyp, ref in zip(hypotheses, references) ] rouge_1_f, rouge_1_p, rouge_1_r = map(np.mean, zip(*rouge_1)) # Calculate ROUGE-2 F1, precision, recall scores rouge_2 = [ rouge_n([hyp], [ref], 2) for hyp, ref in zip(hypotheses, references) ] rouge_2_f, rouge_2_p, rouge_2_r = map(np.mean, zip(*rouge_2)) # Calculate ROUGE-L F1, precision, recall scores rouge_l = [ rouge_l_sentence_level([hyp], [ref]) for hyp, ref in zip(hypotheses, references) ] rouge_l_f, rouge_l_p, rouge_l_r = map(np.mean, zip(*rouge_l)) return { "rouge_1/f_score": rouge_1_f, "rouge_1/r_score": rouge_1_r, "rouge_1/p_score": rouge_1_p, "rouge_2/f_score": rouge_2_f, "rouge_2/r_score": rouge_2_r, "rouge_2/p_score": rouge_2_p, "rouge_l/f_score": rouge_l_f, "rouge_l/r_score": rouge_l_r, "rouge_l/p_score": rouge_l_p, }
[ "def", "rouge", "(", "hypotheses", ",", "references", ")", ":", "# Filter out hyps that are of 0 length", "# hyps_and_refs = zip(hypotheses, references)", "# hyps_and_refs = [_ for _ in hyps_and_refs if len(_[0]) > 0]", "# hypotheses, references = zip(*hyps_and_refs)", "# Calculate ROUGE-1 F1, precision, recall scores", "rouge_1", "=", "[", "rouge_n", "(", "[", "hyp", "]", ",", "[", "ref", "]", ",", "1", ")", "for", "hyp", ",", "ref", "in", "zip", "(", "hypotheses", ",", "references", ")", "]", "rouge_1_f", ",", "rouge_1_p", ",", "rouge_1_r", "=", "map", "(", "np", ".", "mean", ",", "zip", "(", "*", "rouge_1", ")", ")", "# Calculate ROUGE-2 F1, precision, recall scores", "rouge_2", "=", "[", "rouge_n", "(", "[", "hyp", "]", ",", "[", "ref", "]", ",", "2", ")", "for", "hyp", ",", "ref", "in", "zip", "(", "hypotheses", ",", "references", ")", "]", "rouge_2_f", ",", "rouge_2_p", ",", "rouge_2_r", "=", "map", "(", "np", ".", "mean", ",", "zip", "(", "*", "rouge_2", ")", ")", "# Calculate ROUGE-L F1, precision, recall scores", "rouge_l", "=", "[", "rouge_l_sentence_level", "(", "[", "hyp", "]", ",", "[", "ref", "]", ")", "for", "hyp", ",", "ref", "in", "zip", "(", "hypotheses", ",", "references", ")", "]", "rouge_l_f", ",", "rouge_l_p", ",", "rouge_l_r", "=", "map", "(", "np", ".", "mean", ",", "zip", "(", "*", "rouge_l", ")", ")", "return", "{", "\"rouge_1/f_score\"", ":", "rouge_1_f", ",", "\"rouge_1/r_score\"", ":", "rouge_1_r", ",", "\"rouge_1/p_score\"", ":", "rouge_1_p", ",", "\"rouge_2/f_score\"", ":", "rouge_2_f", ",", "\"rouge_2/r_score\"", ":", "rouge_2_r", ",", "\"rouge_2/p_score\"", ":", "rouge_2_p", ",", "\"rouge_l/f_score\"", ":", "rouge_l_f", ",", "\"rouge_l/r_score\"", ":", "rouge_l_r", ",", "\"rouge_l/p_score\"", ":", "rouge_l_p", ",", "}" ]
34.153846
18.461538
def changes(self): """:class:`AuditLogChanges`: The list of changes this entry has.""" obj = AuditLogChanges(self, self._changes) del self._changes return obj
[ "def", "changes", "(", "self", ")", ":", "obj", "=", "AuditLogChanges", "(", "self", ",", "self", ".", "_changes", ")", "del", "self", ".", "_changes", "return", "obj" ]
37.2
13.8