text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def call_async(self, func: Callable, *args, **kwargs): """ Call the given callable in the event loop thread. This method lets you call asynchronous code from a worker thread. Do not use it from within the event loop thread. If the callable returns an awaitable, it is resolved before returning to the caller. :param func: a regular function or a coroutine function :param args: positional arguments to call the callable with :param kwargs: keyword arguments to call the callable with :return: the return value of the call """ return asyncio_extras.call_async(self.loop, func, *args, **kwargs)
[ "def", "call_async", "(", "self", ",", "func", ":", "Callable", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "asyncio_extras", ".", "call_async", "(", "self", ".", "loop", ",", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")...
41.8125
25.4375
def _ic_decode(self, msg): """IC: Send Valid Or Invalid User Code Format.""" code = msg[4:16] if re.match(r'(0\d){6}', code): code = re.sub(r'0(\d)', r'\1', code) return {'code': code, 'user': int(msg[16:19])-1, 'keypad': int(msg[19:21])-1}
[ "def", "_ic_decode", "(", "self", ",", "msg", ")", ":", "code", "=", "msg", "[", "4", ":", "16", "]", "if", "re", ".", "match", "(", "r'(0\\d){6}'", ",", "code", ")", ":", "code", "=", "re", ".", "sub", "(", "r'0(\\d)'", ",", "r'\\1'", ",", "co...
42
8.428571
def fits(self, current_count, current_size, max_size, new_span): """Checks if the new span fits in the max payload size.""" return current_size + len(new_span) <= max_size
[ "def", "fits", "(", "self", ",", "current_count", ",", "current_size", ",", "max_size", ",", "new_span", ")", ":", "return", "current_size", "+", "len", "(", "new_span", ")", "<=", "max_size" ]
61.666667
13
def report(self, output_file=sys.stdout): """Report analysis outcome in human readable form.""" max_perf = self.results['max_perf'] if self._args and self._args.verbose >= 3: print('{}'.format(pformat(self.results)), file=output_file) if self._args and self._args.verbose >= 1: print('{}'.format(pformat(self.results['verbose infos'])), file=output_file) print('Bottlenecks:', file=output_file) print(' level | a. intensity | performance | peak bandwidth | peak bandwidth kernel', file=output_file) print('--------+--------------+-----------------+-------------------+----------------------', file=output_file) print(' CPU | | {!s:>15} | |'.format( max_perf[self._args.unit]), file=output_file) for b in self.results['mem bottlenecks']: print('{level:>7} | {arithmetic intensity:>5.2} FLOP/B | {0!s:>15} |' ' {bandwidth!s:>17} | {bw kernel:<8}'.format( b['performance'][self._args.unit], **b), file=output_file) print('', file=output_file) if self.results['min performance']['FLOP/s'] > max_perf['FLOP/s']: # CPU bound print('CPU bound. {!s} due to CPU max. FLOP/s'.format(max_perf), file=output_file) else: # Cache or mem bound print('Cache or mem bound.', file=output_file) bottleneck = self.results['mem bottlenecks'][self.results['bottleneck level']] print('{!s} due to {} transfer bottleneck (with bw from {} benchmark)'.format( bottleneck['performance'][self._args.unit], bottleneck['level'], bottleneck['bw kernel']), file=output_file) print('Arithmetic Intensity: {:.2f} FLOP/B'.format(bottleneck['arithmetic intensity']), file=output_file)
[ "def", "report", "(", "self", ",", "output_file", "=", "sys", ".", "stdout", ")", ":", "max_perf", "=", "self", ".", "results", "[", "'max_perf'", "]", "if", "self", ".", "_args", "and", "self", ".", "_args", ".", "verbose", ">=", "3", ":", "print", ...
52.205128
23.923077
def compile(self, values): """ Compiles the tagset and returns a str containing the result """ def is_international(tag): return tag.endswith('_') def get_country_code(tag): return tag[-2:] def strip_country_code(tag): return tag[:-2] replacements = list(self.tagset.items()) str = "" domestic = [t for t in replacements if not is_international(t[0])] for key, replacement in domestic: try: str = str + replacement % values[key] except KeyError as e: if self.mandatory: raise e international = [t for t in replacements if is_international(t[0])] for key, replacement in international: try: x = [t for t in values.items() if strip_country_code(t[0]) == key] int_values_for_key = [(get_country_code(t[0]),t[1]) for t in x] for v in int_values_for_key: str = str + replacement % v except KeyError as e: if self.mandatory: raise e return str
[ "def", "compile", "(", "self", ",", "values", ")", ":", "def", "is_international", "(", "tag", ")", ":", "return", "tag", ".", "endswith", "(", "'_'", ")", "def", "get_country_code", "(", "tag", ")", ":", "return", "tag", "[", "-", "2", ":", "]", "...
32.714286
18.142857
def ns(self, value): """The ns property. Args: value (string). the property value. """ if value == self._defaults['ns'] and 'ns' in self._values: del self._values['ns'] else: self._values['ns'] = value
[ "def", "ns", "(", "self", ",", "value", ")", ":", "if", "value", "==", "self", ".", "_defaults", "[", "'ns'", "]", "and", "'ns'", "in", "self", ".", "_values", ":", "del", "self", ".", "_values", "[", "'ns'", "]", "else", ":", "self", ".", "_valu...
27.7
14.7
def random_subset(self, relative_size, balance_labels=False, label_list_ids=None): """ Create a subview of random utterances with a approximate size relative to the full corpus. By default x random utterances are selected with x equal to ``relative_size * corpus.num_utterances``. Args: relative_size (float): A value between 0 and 1. (0.5 will create a subset with approximately 50% of the full corpus size) balance_labels (bool): If True, the labels of the selected utterances are balanced as far as possible. So the count/duration of every label within the subset is equal. label_list_ids (list): List of label-list ids. If none is given, all label-lists are considered for balancing. Otherwise only the ones that are in the list are considered. Returns: Subview: The subview representing the subset. """ num_utterances_in_subset = round(relative_size * self.corpus.num_utterances) all_utterance_ids = sorted(list(self.corpus.utterances.keys())) if balance_labels: all_label_values = self.corpus.all_label_values(label_list_ids=label_list_ids) utterance_with_label_counts = collections.defaultdict(dict) for utterance_idx, utterance in self.corpus.utterances.items(): utterance_with_label_counts[utterance_idx] = utterance.label_count(label_list_ids=label_list_ids) subset_utterance_ids = utils.select_balanced_subset(utterance_with_label_counts, num_utterances_in_subset, list(all_label_values), seed=self.rand.random()) else: subset_utterance_ids = self.rand.sample(all_utterance_ids, num_utterances_in_subset) filter = subview.MatchingUtteranceIdxFilter(utterance_idxs=set(subset_utterance_ids)) return subview.Subview(self.corpus, filter_criteria=[filter])
[ "def", "random_subset", "(", "self", ",", "relative_size", ",", "balance_labels", "=", "False", ",", "label_list_ids", "=", "None", ")", ":", "num_utterances_in_subset", "=", "round", "(", "relative_size", "*", "self", ".", "corpus", ".", "num_utterances", ")", ...
57.973684
41.184211
def singleCalc(self, m={'Al2O3': 13.01, 'Alpha': 0.6, 'Ba': 188.0, 'Be': 0.85, 'CaO': 8.35, 'Ce': 28.2, 'Co': 45.2, 'Cr': 117.0, 'Cs': 0.83, 'Cu': 53.5, 'Dy': 5.58, 'Er': 2.96, 'Eu': 1.79, 'Fe2O3': 14.47, 'FeO': 5.51, 'Ga': 19.4, 'Gd': 5.24, 'Hf': 3.38, 'Ho': 1.1, 'K2O': 0.72, 'LOI': 5.05, 'La': 11.4, 'Label': 'ZhangSH2016', 'Li': 15.0, 'Lu': 0.39, 'Mg#': 41.9, 'MgO': 5.26, 'MnO': 0.21, 'Na2O': 1.88, 'Nb': 12.6, 'Nd': 18.4, 'Ni': 69.4, 'P2O5': 0.23, 'Pb': 3.17, 'Pr': 3.95, 'Rb': 18.4, 'Sc': 37.4, 'SiO2': 48.17, 'Size': 10, 'Sm': 5.08, 'Sr': 357, 'Ta': 0.77, 'Tb': 0.88, 'Th': 1.85, 'TiO2': 2.56, 'Tl': 0.06, 'Tm': 0.44, 'Total': 99.91, 'U': 0.41, 'V': 368.0, 'Y': 29.7, 'Yb': 2.68, 'Zn': 100.0, 'Zr': 130.0, }): DataResult={} DataWeight={} DataVolume={} DataCalced={} DataResult.update({'Label': m['Label']}) DataWeight.update({'Label': m['Label']}) DataVolume.update({'Label': m['Label']}) DataCalced.update({'Label': m['Label']}) DataResult.update({'Width': m['Width']}) DataWeight.update({'Width': m['Width']}) DataVolume.update({'Width': m['Width']}) DataCalced.update({'Width': m['Width']}) DataResult.update({'Style': m['Style']}) DataWeight.update({'Style': m['Style']}) DataVolume.update({'Style': m['Style']}) DataCalced.update({'Style': m['Style']}) DataResult.update({'Alpha': m['Alpha']}) DataWeight.update({'Alpha': m['Alpha']}) DataVolume.update({'Alpha': m['Alpha']}) DataCalced.update({'Alpha': m['Alpha']}) DataResult.update({'Size': m['Size']}) DataWeight.update({'Size': m['Size']}) DataVolume.update({'Size': m['Size']}) DataCalced.update({'Size': m['Size']}) DataResult.update({'Color': m['Color']}) DataWeight.update({'Color': m['Color']}) DataVolume.update({'Color': m['Color']}) DataCalced.update({'Color': m['Color']}) DataResult.update({'Marker': m['Marker']}) DataWeight.update({'Marker': m['Marker']}) DataVolume.update({'Marker': m['Marker']}) DataCalced.update({'Marker': m['Marker']}) WholeMass = 0 EachMole = {} for j in self.Elements: ''' Get the Whole Mole of 
the dataset ''' try: T_TMP = m[j] except(KeyError): T_TMP = 0 if j == 'Sr': TMP = T_TMP / (87.62 / 103.619 * 10000) elif j == 'Ba': TMP = T_TMP / (137.327 / 153.326 * 10000) elif j == 'Ni': TMP = T_TMP / (58.6934 / 74.69239999999999 * 10000) elif j == 'Cr': TMP = T_TMP / ((2 * 51.9961) / 151.98919999999998 * 10000) elif j == 'Zr': # Zr Multi 2 here TMP = T_TMP / ((2 * 91.224) / 123.22200000000001 * 10000) else: TMP = T_TMP V = TMP try: WholeMass += float(V) except ValueError: pass WeightCorrectionFactor = (100 / WholeMass) for j in self.Elements: ''' Get the Mole percentage of each element ''' try: T_TMP = m[j] except(KeyError): T_TMP = 0 if j == 'Sr': TMP = T_TMP / (87.62 / 103.619 * 10000) elif j == 'Ba': TMP = T_TMP / (137.327 / 153.326 * 10000) elif j == 'Ni': TMP = T_TMP / (58.6934 / 74.69239999999999 * 10000) elif j == 'Cr': TMP = T_TMP / ((2 * 51.9961) / 151.98919999999998 * 10000) elif j == 'Zr': # Zr not Multiple by 2 Here TMP = T_TMP / ((91.224) / 123.22200000000001 * 10000) else: TMP = T_TMP try: M = TMP / self.BaseMass[j] * WeightCorrectionFactor except TypeError: pass # M= TMP/NewMass(j) * WeightCorrectionFactor EachMole.update({j: M}) # self.DataMole.append(EachMole) DataCalculating = EachMole Fe3 = DataCalculating['Fe2O3'] Fe2 = DataCalculating['FeO'] Mg = DataCalculating['MgO'] Ca = DataCalculating['CaO'] Na = DataCalculating['Na2O'] try: DataCalced.update({'Fe3+/(Total Fe) in rock (Mole)': 100 * Fe3 * 2 / (Fe3 * 2 + Fe2)}) except(ZeroDivisionError): DataCalced.update({'Fe3+/(Total Fe) in rock (Mole)': 0}) pass try: DataCalced.update({'Mg/(Mg+Total Fe) in rock (Mole)': 100 * Mg / (Mg + Fe3 * 2 + Fe2)}) except(ZeroDivisionError): DataCalced.update({'Mg/(Mg+Total Fe) in rock (Mole)': 0}) pass try: DataCalced.update({'Mg/(Mg+Fe2+) in rock (Mole)': 100 * Mg / (Mg + Fe2)}) except(ZeroDivisionError): DataCalced.update({'Mg/(Mg+Fe2+) in rock (Mole)': 0}) pass try: DataCalced.update({'Ca/(Ca+Na) in rock (Mole)': 100 * Ca / (Ca + Na * 2)}) 
except(ZeroDivisionError): DataCalced.update({'Ca/(Ca+Na) in rock (Mole)': 0}) pass DataCalculating['CaO'] += DataCalculating['Sr'] DataCalculating['Sr'] = 0 DataCalculating['K2O'] += 2 * DataCalculating['Ba'] DataCalculating['Ba'] = 0 try: if DataCalculating['CaO'] >= 10 / 3 * DataCalculating['P2O5']: DataCalculating['CaO'] -= 10 / 3 * DataCalculating['P2O5'] else: DataCalculating['CaO'] = 0 except(ZeroDivisionError): pass DataCalculating['P2O5'] = DataCalculating['P2O5'] / 1.5 Apatite = DataCalculating['P2O5'] # IF(S19>=T15,S19-T15,0) if DataCalculating['F'] >= DataCalculating['P2O5']: DataCalculating['F'] -= DataCalculating['P2O5'] else: DataCalculating['F'] = 0 if DataCalculating['F'] >= DataCalculating['P2O5']: DataCalculating['F'] -= DataCalculating['P2O5'] else: DataCalculating['F'] = 0 if DataCalculating['Na2O'] >= DataCalculating['Cl']: DataCalculating['Na2O'] -= DataCalculating['Cl'] else: DataCalculating['Na2O'] = 0 Halite = DataCalculating['Cl'] # IF(U12>=(U19/2),U12-(U19/2),0) if DataCalculating['CaO'] >= 0.5 * DataCalculating['F']: DataCalculating['CaO'] -= 0.5 * DataCalculating['F'] else: DataCalculating['CaO'] = 0 DataCalculating['F'] *= 0.5 Fluorite = DataCalculating['F'] # =IF(V17>0,IF(V13>=V17,'Thenardite',IF(V13>0,'Both','Anhydrite')),'None') AorT = 0 if DataCalculating['SO3'] <= 0: AorT = 'None' else: if DataCalculating['Na2O'] >= DataCalculating['SO3']: AorT = 'Thenardite' else: if DataCalculating['Na2O'] > 0: AorT = 'Both' else: AorT = 'Anhydrite' # =IF(W26='Anhydrite',V17,IF(W26='Both',V12,0)) # =IF(W26='Thenardite',V17,IF(W26='Both',V17-W17,0)) if AorT == 'Anhydrite': DataCalculating['Sr'] = 0 elif AorT == 'Thenardite': DataCalculating['Sr'] = DataCalculating['SO3'] DataCalculating['SO3'] = 0 elif AorT == 'Both': DataCalculating['Sr'] = DataCalculating['SO3'] - DataCalculating['CaO'] DataCalculating['SO3'] = DataCalculating['CaO'] else: DataCalculating['SO3'] = 0 DataCalculating['Sr'] = 0 DataCalculating['CaO'] -= DataCalculating['SO3'] 
DataCalculating['Na2O'] -= DataCalculating['Sr'] Anhydrite = DataCalculating['SO3'] Thenardite = DataCalculating['Sr'] Pyrite = 0.5 * DataCalculating['S'] # =IF(W9>=(W18*0.5),W9-(W18*0.5),0) if DataCalculating['FeO'] >= DataCalculating['S'] * 0.5: DataCalculating['FeO'] -= DataCalculating['S'] * 0.5 else: DataCalculating['FeO'] = 0 # =IF(X24>0,IF(X9>=X24,'Chromite',IF(X9>0,'Both','Magnesiochromite')),'None') if DataCalculating['Cr'] > 0: if DataCalculating['FeO'] >= DataCalculating['Cr']: CorM = 'Chromite' elif DataCalculating['FeO'] > 0: CorM = 'Both' else: CorM = 'Magnesiochromite' else: CorM = 'None' # =IF(Y26='Chromite',X24,IF(Y26='Both',X9,0)) # =IF(Y26='Magnesiochromite',X24,IF(Y26='Both',X24-Y24,0)) if CorM == 'Chromite': DataCalculating['Cr'] = DataCalculating['Cr'] DataCalculating['Ni'] = 0 elif CorM == 'Magnesiochromite': DataCalculating['Ni'] = DataCalculating['Cr'] DataCalculating['Cr'] = 0 elif CorM == 'Both': DataCalculating['Ni'] = DataCalculating['Cr'] - DataCalculating['FeO'] DataCalculating['Cr'] = DataCalculating['FeO'] else: DataCalculating['Cr'] = 0 DataCalculating['Ni'] = 0 DataCalculating['MgO'] -= DataCalculating['Ni'] Magnesiochromite = DataCalculating['Ni'] Chromite = DataCalculating['Cr'] # =IF(X9>=Y24,X9-Y24,0) if DataCalculating['FeO'] >= DataCalculating['Cr']: DataCalculating['FeO'] -= DataCalculating['Cr'] else: DataCalculating['FeO'] = 0 # =IF(Y6>0,IF(Y9>=Y6,'Ilmenite',IF(Y9>0,'Both','Sphene')),'None') if DataCalculating['TiO2'] < 0: IorS = 'None' else: if DataCalculating['FeO'] >= DataCalculating['TiO2']: IorS = 'Ilmenite' else: if DataCalculating['FeO'] > 0: IorS = 'Both' else: IorS = 'Sphene' # =IF(Z26='Ilmenite',Y6,IF(Z26='Both',Y9,0)) # =IF(Z26='Sphene',Y6,IF(Z26='Both',Y6-Z6,0)) if IorS == 'Ilmenite': DataCalculating['TiO2'] = DataCalculating['TiO2'] DataCalculating['MnO'] = 0 elif IorS == 'Sphene': DataCalculating['MnO'] = DataCalculating['TiO2'] DataCalculating['TiO2'] = 0 elif IorS == 'Both': DataCalculating['MnO'] = 
DataCalculating['TiO2'] - DataCalculating['FeO'] DataCalculating['TiO2'] = DataCalculating['FeO'] else: DataCalculating['TiO2'] = 0 DataCalculating['MnO'] = 0 DataCalculating['FeO'] -= DataCalculating['TiO2'] Ilmenite = DataCalculating['TiO2'] # =IF(Z16>0,IF(Z12>=Z16,'Calcite',IF(Z12>0,'Both','Na2CO3')),'None') if DataCalculating['CO2'] <= 0: CorN = 'None' else: if DataCalculating['CaO'] >= DataCalculating['CO2']: CorN = 'Calcite' else: if DataCalculating['CaO'] > 0: CorN = 'Both' else: CorN = 'Na2CO3' # =IF(AA26='Calcite',Z16,IF(AA26='Both',Z12,0)) # =IF(AA26='Na2CO3',Z16,IF(AA26='Both',Z16-AA16,0)) if CorN == 'None': DataCalculating['CO2'] = 0 DataCalculating['SO3'] = 0 elif CorN == 'Calcite': DataCalculating['CO2'] = DataCalculating['CO2'] DataCalculating['SO3'] = 0 elif CorN == 'Na2CO3': DataCalculating['SO3'] = DataCalculating['SO3'] DataCalculating['CO2'] = 0 elif CorN == 'Both': DataCalculating['SO3'] = DataCalculating['CO2'] - DataCalculating['CaO'] DataCalculating['CO2'] = DataCalculating['CaO'] DataCalculating['CaO'] -= DataCalculating['CO2'] Calcite = DataCalculating['CO2'] Na2CO3 = DataCalculating['SO3'] # =IF(AA17>Z13,0,Z13-AA17) if DataCalculating['SO3'] > DataCalculating['Na2O']: DataCalculating['Na2O'] = 0 else: DataCalculating['Na2O'] -= DataCalculating['SO3'] DataCalculating['SiO2'] -= DataCalculating['Zr'] Zircon = DataCalculating['Zr'] # =IF(AB14>0,IF(AB7>=AB14,'Orthoclase',IF(AB7>0,'Both','K2SiO3')),'None') if DataCalculating['K2O'] <= 0: OorK = 'None' else: if DataCalculating['Al2O3'] >= DataCalculating['K2O']: OorK = 'Orthoclase' else: if DataCalculating['Al2O3'] > 0: OorK = 'Both' else: OorK = 'K2SiO3' # =IF(AC26='Orthoclase',AB14,IF(AC26='Both',AB7,0)) # =IF(AC26='K2SiO3',AB14,IF(AC26='Both',AB14-AB7,0)) if OorK == 'None': DataCalculating['K2O'] = 0 DataCalculating['P2O5'] = 0 elif OorK == 'Orthoclase': DataCalculating['K2O'] = DataCalculating['K2O'] DataCalculating['P2O5'] = 0 elif OorK == 'K2SiO3': DataCalculating['P2O5'] = 
DataCalculating['K2O'] DataCalculating['K2O'] = 0 elif OorK == 'Both': DataCalculating['P2O5'] = DataCalculating['K2O'] - DataCalculating['Al2O3'] DataCalculating['K2O'] = DataCalculating['Al2O3'] DataCalculating['Al2O3'] -= DataCalculating['K2O'] # =IF(AC13>0,IF(AC7>=AC13,'Albite',IF(AC7>0,'Both','Na2SiO3')),'None') if DataCalculating['Na2O'] <= 0: AorN = 'None' else: if DataCalculating['Al2O3'] >= DataCalculating['Na2O']: AorN = 'Albite' else: if DataCalculating['Al2O3'] > 0: AorN = 'Both' else: AorN = 'Na2SiO3' # =IF(AND(AC7>=AC13,AC7>0),AC7-AC13,0) if DataCalculating['Al2O3'] >= DataCalculating['Na2O'] and DataCalculating['Al2O3'] > 0: DataCalculating['Al2O3'] -= DataCalculating['Na2O'] else: DataCalculating['Al2O3'] = 0 # =IF(AD26='Albite',AC13,IF(AD26='Both',AC7,0)) # =IF(AD26='Na2SiO3',AC13,IF(AD26='Both',AC13-AD13,0)) if AorN == 'Albite': DataCalculating['Cl'] = 0 elif AorN == 'Both': DataCalculating['Cl'] = DataCalculating['Na2O'] - DataCalculating['Al2O3'] DataCalculating['Na2O'] = DataCalculating['Al2O3'] elif AorN == 'Na2SiO3': DataCalculating['Cl'] = DataCalculating['Na2O'] DataCalculating['Na2O'] = 0 elif AorN == 'None': DataCalculating['Na2O'] = 0 DataCalculating['Cl'] = 0 # =IF(AD7>0,IF(AD12>0,'Anorthite','None'),'None') ''' Seem like should be =IF(AD7>0,IF(AD12>AD7,'Anorthite','Corundum'),'None') If Al2O3 is left after alloting orthoclase and albite, then: Anorthite = Al2O3, CaO = CaO - Al2O3, SiO2 = SiO2 - 2 Al2O3, Al2O3 = 0 If Al2O3 exceeds CaO in the preceding calculation, then: Anorthite = CaO, Al2O3 = Al2O3 - CaO, SiO2 = SiO2 - 2 CaO Corundum = Al2O3, CaO =0, Al2O3 = 0 if DataCalculating['Al2O3']<=0: AorC='None' else: if DataCalculating['CaO']>DataCalculating['Al2O3']: AorC= 'Anorthite' else: Aorc='Corundum' ''' if DataCalculating['Al2O3'] <= 0: AorC = 'None' else: if DataCalculating['CaO'] > 0: AorC = 'Anorthite' else: Aorc = 'None' # =IF(AE26='Anorthite',IF(AD12>AD7,0,AD7-AD12),AD7) # =IF(AE26='Anorthite',IF(AD7>AD12,0,AD12-AD7),AD12) # 
=IF(AE26='Anorthite',IF(AD7>AD12,AD12,AD7),0) if AorC == 'Anorthite': if DataCalculating['Al2O3'] >= DataCalculating['CaO']: DataCalculating['Sr'] = DataCalculating['CaO'] DataCalculating['Al2O3'] -= DataCalculating['CaO'] DataCalculating['CaO'] = 0 else: DataCalculating['Sr'] = DataCalculating['Al2O3'] DataCalculating['CaO'] -= DataCalculating['Al2O3'] DataCalculating['Al2O3'] = 0 else: DataCalculating['Sr'] = 0 Corundum = DataCalculating['Al2O3'] Anorthite = DataCalculating['Sr'] # =IF(AE10>0,IF(AE12>=AE10,'Sphene',IF(AE12>0,'Both','Rutile')),'None') if DataCalculating['MnO'] <= 0: SorR = 'None' else: if DataCalculating['CaO'] >= DataCalculating['MnO']: SorR = 'Sphene' elif DataCalculating['CaO'] > 0: SorR = 'Both' else: SorR = 'Rutile' # =IF(AF26='Sphene',AE10,IF(AF26='Both',AE12,0)) # =IF(AF26='Rutile',AE10,IF(AF26='Both',AE10-AE12,0)) if SorR == 'Sphene': DataCalculating['MnO'] = DataCalculating['MnO'] DataCalculating['S'] = 0 elif SorR == 'Rutile': DataCalculating['S'] = DataCalculating['MnO'] DataCalculating['MnO'] = 0 elif SorR == 'Both': DataCalculating['S'] = DataCalculating['MnO'] - DataCalculating['CaO'] DataCalculating['MnO'] = DataCalculating['CaO'] elif SorR == 'None': DataCalculating['MnO'] = 0 DataCalculating['S'] = 0 DataCalculating['CaO'] -= DataCalculating['MnO'] Rutile = DataCalculating['S'] # =IF(AND(AF20>0),IF(AF8>=AF20,'Acmite',IF(AF8>0,'Both','Na2SiO3')),'None') if DataCalculating['Cl'] <= 0: ACorN = 'None' else: if DataCalculating['Fe2O3'] >= DataCalculating['Cl']: ACorN = 'Acmite' else: if DataCalculating['Fe2O3'] > 0: ACorN = 'Both' else: ACorN = 'Na2SiO3' # =IF(AG26='Acmite',AF20,IF(AG26='Both',AF8,0)) # =IF(AG26='Na2SiO3',AF20,IF(AG26='Both',AF20-AG19,0)) if ACorN == 'Acmite': DataCalculating['F'] = DataCalculating['Cl'] DataCalculating['Cl'] = 0 elif ACorN == 'Na2SiO3': DataCalculating['Cl'] = DataCalculating['Cl'] DataCalculating['F'] = 0 elif ACorN == 'Both': DataCalculating['F'] = DataCalculating['Fe2O3'] DataCalculating['Cl'] = 
DataCalculating['Cl'] - DataCalculating['F'] elif ACorN == 'None': DataCalculating['F'] = 0 DataCalculating['Cl'] = 0 DataCalculating['Fe2O3'] -= DataCalculating['F'] Acmite = DataCalculating['F'] # =IF(AG8>0,IF(AG9>=AG8,'Magnetite',IF(AG9>0,'Both','Hematite')),'None') if DataCalculating['Fe2O3'] <= 0: MorH = 'None' else: if DataCalculating['FeO'] >= DataCalculating['Fe2O3']: MorH = 'Magnetite' else: if DataCalculating['FeO'] > 0: MorH = 'Both' else: MorH = 'Hematite' # =IF(AH26='Magnetite',AG8,IF(AH26='Both',AG9,0)) # =IF(AH26='Hematite',AG8,IF(AH26='Both',AG8-AG9,0)) if MorH == 'Magnetite': DataCalculating['Fe2O3'] = DataCalculating['Fe2O3'] DataCalculating['Ba'] = 0 elif MorH == 'Hematite': DataCalculating['Fe2O3'] = 0 DataCalculating['Ba'] = DataCalculating['FeO'] elif MorH == 'Both': DataCalculating['Fe2O3'] = DataCalculating['FeO'] DataCalculating['Ba'] = DataCalculating['Fe2O3'] - DataCalculating['FeO'] elif MorH == 'None': DataCalculating['Fe2O3'] = 0 DataCalculating['Ba'] == 0 DataCalculating['FeO'] -= DataCalculating['Fe2O3'] Magnetite = DataCalculating['Fe2O3'] Hematite = DataCalculating['Ba'] # =IF(AH11>0,AH11/(AH11+AH9),0) Fe2 = DataCalculating['FeO'] Mg = DataCalculating['MgO'] if Mg > 0: DataCalced.update({'Mg/(Mg+Fe2+) in silicates': 100 * Mg / (Mg + Fe2)}) else: DataCalced.update({'Mg/(Mg+Fe2+) in silicates': 0}) DataCalculating['FeO'] += DataCalculating['MgO'] DataCalculating['MgO'] = 0 # =IF(AI12>0,IF(AI9>=AI12,'Diopside',IF(AI9>0,'Both','Wollastonite')),'None') if DataCalculating['CaO'] <= 0: DorW = 'None' else: if DataCalculating['FeO'] >= DataCalculating['CaO']: DorW = 'Diopside' else: if DataCalculating['FeO'] > 0: DorW = 'Both' else: DorW = 'Wollastonite' # =IF(AJ26='Diopside',AI12,IF(AJ26='Both',AI9,0)) # =IF(AJ26='Wollastonite',AI12,IF(AJ26='Both',AI12-AI9,0)) if DorW == 'Diopside': DataCalculating['CaO'] = DataCalculating['CaO'] DataCalculating['S'] = 0 elif DorW == 'Wollastonite': DataCalculating['S'] = DataCalculating['CaO'] 
DataCalculating['CaO'] = 0 elif DorW == 'Both': DataCalculating['S'] = DataCalculating['CaO'] - DataCalculating['FeO'] DataCalculating['CaO'] = DataCalculating['FeO'] elif DorW == 'None': DataCalculating['CaO'] = 0 DataCalculating['S'] = 0 DataCalculating['FeO'] -= DataCalculating['CaO'] Diopside = DataCalculating['CaO'] Quartz = DataCalculating['SiO2'] Zircon = DataCalculating['Zr'] K2SiO3 = DataCalculating['P2O5'] Na2SiO3 = DataCalculating['Cl'] Sphene = DataCalculating['MnO'] Hypersthene = DataCalculating['FeO'] Albite = DataCalculating['Na2O'] Orthoclase = DataCalculating['K2O'] Wollastonite = DataCalculating['S'] # =AJ5-(AL6)-(AL7)-(AL8*2)-(AL12)-(AL9)-(AL10*4)-(AL11*2)-(AL13)-(AL14*6)-(AL15*6)-(AL16) Quartz -= (Zircon + K2SiO3 + Anorthite * 2 + Na2SiO3 + Acmite * 4 + Diopside * 2 + Sphene + Hypersthene + Albite * 6 + Orthoclase * 6 + Wollastonite) # =IF(AL5>0,AL5,0) if Quartz > 0: Quartz = Quartz else: Quartz = 0 # =IF(AL13>0,IF(AL5>=0,'Hypersthene',IF(AL13+(2*AL5)>0,'Both','Olivine')),'None') if Hypersthene <= 0: HorO = 'None' else: if Quartz >= 0: HorO = 'Hypersthene' else: if Hypersthene + 2 * Quartz > 0: HorO = 'Both' else: HorO = 'Olivine' # =IF(AN26='Hypersthene',AL13,IF(AN26='Both',AL13+(2*AL5),0)) # =IF(AN26='Olivine',AL13*0.5,IF(AN26='Both',ABS(AL5),0)) Old_Hypersthene = Hypersthene if HorO == 'Hypersthene': Hypersthene = Hypersthene Olivine = 0 elif HorO == 'Both': Hypersthene = Hypersthene + Quartz * 2 Olivine = abs(Quartz) elif HorO == 'Olivine': Olivine = Hypersthene / 2 Hypersthene = 0 elif HorO == 'None': Hypersthene = 0 Olivine = 0 # =AL5+AL13-(AN13+AN17) Quartz += Old_Hypersthene - (Hypersthene + Olivine) # =IF(AL12>0,IF(AN5>=0,'Sphene',IF(AL12+AN5>0,'Both','Perovskite')),'None') if Sphene <= 0: SorP = 'None' else: if Quartz >= 0: SorP = 'Sphene' else: if Sphene + Quartz > 0: SorP = 'Both' else: SorP = 'Perovskite' # =IF(AO26='Sphene',AL12,IF(AO26='Both',AL12+AN5,0)) # =IF(AO26='Perovskite',AL12,IF(AO26='Both',AL12-AO12,0)) Old_Sphene = 
Sphene if SorP == 'Sphene': Sphene = Sphene Perovskite = 0 elif SorP == 'Perovskite': Perovskite = Sphene Sphene = 0 elif SorP == 'Both': Sphene += Quartz Perovskite = Old_Sphene - Sphene elif SorP == 'None': Sphene = 0 Perovskite = 0 Quartz += Old_Sphene - Sphene # =IF(AL14>0,IF(AO5>=0,'Albite',IF(AL14+(AO5/4)>0,'Both','Nepheline')),'None') if Albite <= 0: AlorNe = 'None' else: if Quartz >= 0: AlorNe = 'Albite' else: if Albite + (Quartz / 4) > 0: AlorNe = 'Both' else: AlorNe = 'Nepheline' # =AO5+(6*AL14)-(AP14*6)-(AP19*2) # =IF(AP26='Albite',AL14,IF(AP26='Both',AL14+(AO5/4),0)) # =IF(AP26='Nepheline',AL14,IF(AP26='Both',AL14-AP14,0)) Old_Albite = Albite if AlorNe == 'Albite': Albite = Albite Nepheline = 0 elif AlorNe == 'Nepheline': Nepheline = Albite Albite = 0 elif AlorNe == 'Both': Albite += Quartz / 4 Nepheline = Old_Albite - Albite elif AlorNe == 'None': Nepheline = 0 Albite = 0 Quartz += (6 * Old_Albite) - (Albite * 6) - (Nepheline * 2) # =IF(AL8=0,0,AL8/(AL8+(AP14*2))) if Anorthite == 0: DataCalced.update({'Plagioclase An content': 0}) else: DataCalced.update({'Plagioclase An content': 100 * Anorthite / (Anorthite + 2 * Albite)}) # =IF(AL15>0,IF(AP5>=0,'Orthoclase',IF(AL15+(AP5/2)>0,'Both','Leucite')),'None') if Orthoclase <= 0: OorL = 'None' else: if Quartz >= 0: OorL = 'Orthoclase' else: if Orthoclase + Quartz / 2 > 0: OorL = 'Both' else: OorL = 'Leucite' # =IF(AQ26='Orthoclase',AL15,IF(AQ26='Both',AL15+(AP5/2),0)) # =IF(AQ26='Leucite',AL15,IF(AQ26='Both',AL15-AQ15,0)) Old_Orthoclase = Orthoclase if OorL == 'Orthoclase': Orthoclase = Orthoclase Leucite = 0 elif OorL == 'Leucite': Leucite = Orthoclase Orthoclase = 0 elif OorL == 'Both': Orthoclase += Quartz / 2 Leucite = Old_Orthoclase - Orthoclase elif OorL == 'None': Orthoclase = 0 Leucite = 0 # =AP5+(AL15*6)-(AQ15*6)-(AQ20*4) Quartz += (Old_Orthoclase * 6) - (Orthoclase * 6) - (Leucite * 4) # =IF(AL16>0,IF(AQ5>=0,'Wollastonite',IF(AL16+(AQ5*2)>0,'Both','Larnite')),'None') if Wollastonite <= 0: WorB = 
'None' else: if Quartz >= 0: WorB = 'Wollastonite' else: if Wollastonite + Quartz / 2 > 0: WorB = 'Both' else: WorB = 'Larnite' # =IF(AR26='Wollastonite',AL16,IF(AR26='Both',AL16+(2*AQ5),0)) # =IF(AR26='Larnite',AL16/2,IF(AR26='Both',(AL16-AR16)/2,0)) Old_Wollastonite = Wollastonite if WorB == 'Wollastonite': Wollastonite = Wollastonite Larnite = 0 elif WorB == 'Larnite': Larnite = Wollastonite / 2 Wollastonite = 0 elif WorB == 'Both': Wollastonite += Quartz * 2 Larnite = (Old_Wollastonite - Wollastonite) / 2 elif WorB == 'None': Wollastonite = 0 Larnite = 0 # =AQ5+AL16-AR16-AR21 Quartz += Old_Wollastonite - Wollastonite - Larnite # =IF(AL11>0,IF(AR5>=0,'Diopside',IF(AL11+AR5>0,'Both','LarniteOlivine')),'None') if Diopside <= 0: DorL = 'None' else: if Quartz >= 0: DorL = 'Diopside' else: if Diopside + Quartz > 0: DorL = 'Both' else: DorL = 'LarniteOlivine' # =IF(AS26='Diopside',AL11,IF(AS26='Both',AL11+AR5,0)) # =(IF(AS26='LarniteOlivine',AL11/2,IF(AS26='Both',(AL11-AS11)/2,0)))+AN17 # =(IF(AS26='LarniteOlivine',AL11/2,IF(AS26='Both',(AL11-AS11)/2,0)))+AR21 Old_Diopside = Diopside Old_Larnite = Larnite Old_Olivine = Olivine if DorL == 'Diopside': Diopside = Diopside elif DorL == 'LarniteOlivine': Larnite += Diopside / 2 Olivine += Diopside / 2 Diopside = 0 elif DorL == 'Both': Diopside += Quartz Larnite += Old_Diopside - Diopside Olivine += Old_Diopside - Diopside elif DorL == 'None': Diopside = 0 # =AR5+(AL11*2)+AN17+AR21-AS21-(AS11*2)-AS17 Quartz += (Old_Diopside * 2) + Old_Olivine + Old_Larnite - Larnite - (Diopside * 2) - Olivine # =IF(AQ20>0,IF(AS5>=0,'Leucite',IF(AQ20+(AS5/2)>0,'Both','Kalsilite')),'None') if Leucite <= 0: LorK = 'None' else: if Quartz >= 0: LorK = 'Leucite' else: if Leucite + Quartz / 2 > 0: LorK = 'Both' else: LorK = 'Kalsilite' # =IF(AT26='Leucite',AQ20,IF(AT26='Both',AQ20+(AS5/2),0)) # =IF(AT26='Kalsilite',AQ20,IF(AT26='Both',AQ20-AT20,0)) Old_Leucite = Leucite if LorK == 'Leucite': Leucite = Leucite Kalsilite = 0 elif LorK == 
'Kalsilite': Kalsilite = Leucite Leucite = 0 elif LorK == 'Both': Leucite += Quartz / 2 Kalsilite = Old_Leucite - Leucite elif LorK == 'None': Leucite = 0 Kalsilite = 0 # =AS5+(AQ20*4)-(AT20*4)-(AT22*2) Quartz += Old_Leucite * 4 - Leucite * 4 - Kalsilite * 2 Q = Quartz A = Orthoclase P = Anorthite + Albite F = Nepheline + Leucite + Kalsilite DataResult.update({'Quartz': Quartz}) DataResult.update({'Zircon': Zircon}) DataResult.update({'K2SiO3': K2SiO3}) DataResult.update({'Anorthite': Anorthite}) DataResult.update({'Na2SiO3': Na2SiO3}) DataResult.update({'Acmite': Acmite}) DataResult.update({'Diopside': Diopside}) DataResult.update({'Sphene': Sphene}) DataResult.update({'Hypersthene': Hypersthene}) DataResult.update({'Albite': Albite}) DataResult.update({'Orthoclase': Orthoclase}) DataResult.update({'Wollastonite': Wollastonite}) DataResult.update({'Olivine': Olivine}) DataResult.update({'Perovskite': Perovskite}) DataResult.update({'Nepheline': Nepheline}) DataResult.update({'Leucite': Leucite}) DataResult.update({'Larnite': Larnite}) DataResult.update({'Kalsilite': Kalsilite}) DataResult.update({'Apatite': Apatite}) DataResult.update({'Halite': Halite}) DataResult.update({'Fluorite': Fluorite}) DataResult.update({'Anhydrite': Anhydrite}) DataResult.update({'Thenardite': Thenardite}) DataResult.update({'Pyrite': Pyrite}) DataResult.update({'Magnesiochromite': Magnesiochromite}) DataResult.update({'Chromite': Chromite}) DataResult.update({'Ilmenite': Ilmenite}) DataResult.update({'Calcite': Calcite}) DataResult.update({'Na2CO3': Na2CO3}) DataResult.update({'Corundum': Corundum}) DataResult.update({'Rutile': Rutile}) DataResult.update({'Magnetite': Magnetite}) DataResult.update({'Hematite': Hematite}) DataResult.update({'Q Mole': Q}) DataResult.update({'A Mole': A}) DataResult.update({'P Mole': P}) DataResult.update({'F Mole': F}) DataWeight.update({'Quartz': Quartz * self.DataBase['Quartz'][0]}) DataWeight.update({'Zircon': Zircon * self.DataBase['Zircon'][0]}) 
DataWeight.update({'K2SiO3': K2SiO3 * self.DataBase['K2SiO3'][0]}) DataWeight.update({'Anorthite': Anorthite * self.DataBase['Anorthite'][0]}) DataWeight.update({'Na2SiO3': Na2SiO3 * self.DataBase['Na2SiO3'][0]}) DataWeight.update({'Acmite': Acmite * self.DataBase['Acmite'][0]}) DataWeight.update({'Diopside': Diopside * self.DataBase['Diopside'][0]}) DataWeight.update({'Sphene': Sphene * self.DataBase['Sphene'][0]}) DataWeight.update({'Hypersthene': Hypersthene * self.DataBase['Hypersthene'][0]}) DataWeight.update({'Albite': Albite * self.DataBase['Albite'][0]}) DataWeight.update({'Orthoclase': Orthoclase * self.DataBase['Orthoclase'][0]}) DataWeight.update({'Wollastonite': Wollastonite * self.DataBase['Wollastonite'][0]}) DataWeight.update({'Olivine': Olivine * self.DataBase['Olivine'][0]}) DataWeight.update({'Perovskite': Perovskite * self.DataBase['Perovskite'][0]}) DataWeight.update({'Nepheline': Nepheline * self.DataBase['Nepheline'][0]}) DataWeight.update({'Leucite': Leucite * self.DataBase['Leucite'][0]}) DataWeight.update({'Larnite': Larnite * self.DataBase['Larnite'][0]}) DataWeight.update({'Kalsilite': Kalsilite * self.DataBase['Kalsilite'][0]}) DataWeight.update({'Apatite': Apatite * self.DataBase['Apatite'][0]}) DataWeight.update({'Halite': Halite * self.DataBase['Halite'][0]}) DataWeight.update({'Fluorite': Fluorite * self.DataBase['Fluorite'][0]}) DataWeight.update({'Anhydrite': Anhydrite * self.DataBase['Anhydrite'][0]}) DataWeight.update({'Thenardite': Thenardite * self.DataBase['Thenardite'][0]}) DataWeight.update({'Pyrite': Pyrite * self.DataBase['Pyrite'][0]}) DataWeight.update({'Magnesiochromite': Magnesiochromite * self.DataBase['Magnesiochromite'][0]}) DataWeight.update({'Chromite': Chromite * self.DataBase['Chromite'][0]}) DataWeight.update({'Ilmenite': Ilmenite * self.DataBase['Ilmenite'][0]}) DataWeight.update({'Calcite': Calcite * self.DataBase['Calcite'][0]}) DataWeight.update({'Na2CO3': Na2CO3 * self.DataBase['Na2CO3'][0]}) 
DataWeight.update({'Corundum': Corundum * self.DataBase['Corundum'][0]}) DataWeight.update({'Rutile': Rutile * self.DataBase['Rutile'][0]}) DataWeight.update({'Magnetite': Magnetite * self.DataBase['Magnetite'][0]}) DataWeight.update({'Hematite': Hematite * self.DataBase['Hematite'][0]}) DataWeight.update({'Q Weight': Quartz * self.DataBase['Quartz'][0]}) DataWeight.update({'A Weight': Orthoclase * self.DataBase['Orthoclase'][0]}) DataWeight.update({'P Weight': Anorthite * self.DataBase['Anorthite'][0] + Albite * self.DataBase['Albite'][0]}) DataWeight.update({'F Weight': Nepheline * self.DataBase['Nepheline'][0] + Leucite * self.DataBase['Leucite'][0] + Kalsilite * self.DataBase['Kalsilite'][0]}) WholeVolume = 0 WholeMole = 0 tmpVolume = [] tmpVolume.append(Quartz * self.DataBase['Quartz'][0] / self.DataBase['Quartz'][1]) tmpVolume.append(Zircon * self.DataBase['Zircon'][0] / self.DataBase['Zircon'][1]) tmpVolume.append(K2SiO3 * self.DataBase['K2SiO3'][0] / self.DataBase['K2SiO3'][1]) tmpVolume.append(Anorthite * self.DataBase['Anorthite'][0] / self.DataBase['Anorthite'][1]) tmpVolume.append(Na2SiO3 * self.DataBase['Na2SiO3'][0] / self.DataBase['Na2SiO3'][1]) tmpVolume.append(Acmite * self.DataBase['Acmite'][0] / self.DataBase['Acmite'][1]) tmpVolume.append(Diopside * self.DataBase['Diopside'][0] / self.DataBase['Diopside'][1]) tmpVolume.append(Sphene * self.DataBase['Sphene'][0] / self.DataBase['Sphene'][1]) tmpVolume.append(Hypersthene * self.DataBase['Hypersthene'][0] / self.DataBase['Hypersthene'][1]) tmpVolume.append(Albite * self.DataBase['Albite'][0] / self.DataBase['Albite'][1]) tmpVolume.append(Orthoclase * self.DataBase['Orthoclase'][0] / self.DataBase['Orthoclase'][1]) tmpVolume.append(Wollastonite * self.DataBase['Wollastonite'][0] / self.DataBase['Wollastonite'][1]) tmpVolume.append(Olivine * self.DataBase['Olivine'][0] / self.DataBase['Olivine'][1]) tmpVolume.append(Perovskite * self.DataBase['Perovskite'][0] / self.DataBase['Perovskite'][1]) 
tmpVolume.append(Nepheline * self.DataBase['Nepheline'][0] / self.DataBase['Nepheline'][1]) tmpVolume.append(Leucite * self.DataBase['Leucite'][0] / self.DataBase['Leucite'][1]) tmpVolume.append(Larnite * self.DataBase['Larnite'][0] / self.DataBase['Larnite'][1]) tmpVolume.append(Kalsilite * self.DataBase['Kalsilite'][0] / self.DataBase['Kalsilite'][1]) tmpVolume.append(Apatite * self.DataBase['Apatite'][0] / self.DataBase['Apatite'][1]) tmpVolume.append(Halite * self.DataBase['Halite'][0] / self.DataBase['Halite'][1]) tmpVolume.append(Fluorite * self.DataBase['Fluorite'][0] / self.DataBase['Fluorite'][1]) tmpVolume.append(Anhydrite * self.DataBase['Anhydrite'][0] / self.DataBase['Anhydrite'][1]) tmpVolume.append(Thenardite * self.DataBase['Thenardite'][0] / self.DataBase['Thenardite'][1]) tmpVolume.append(Pyrite * self.DataBase['Pyrite'][0] / self.DataBase['Pyrite'][1]) tmpVolume.append(Magnesiochromite * self.DataBase['Magnesiochromite'][0] / self.DataBase['Magnesiochromite'][1]) tmpVolume.append(Chromite * self.DataBase['Chromite'][0] / self.DataBase['Chromite'][1]) tmpVolume.append(Ilmenite * self.DataBase['Ilmenite'][0] / self.DataBase['Ilmenite'][1]) tmpVolume.append(Calcite * self.DataBase['Calcite'][0] / self.DataBase['Calcite'][1]) tmpVolume.append(Na2CO3 * self.DataBase['Na2CO3'][0] / self.DataBase['Na2CO3'][1]) tmpVolume.append(Corundum * self.DataBase['Corundum'][0] / self.DataBase['Corundum'][1]) tmpVolume.append(Rutile * self.DataBase['Rutile'][0] / self.DataBase['Rutile'][1]) tmpVolume.append(Magnetite * self.DataBase['Magnetite'][0] / self.DataBase['Magnetite'][1]) tmpVolume.append(Hematite * self.DataBase['Hematite'][0] / self.DataBase['Hematite'][1]) WholeVolume = sum(tmpVolume) DataVolume.update( {'Quartz': (Quartz * self.DataBase['Quartz'][0] / self.DataBase['Quartz'][1]) / WholeVolume * 100}) DataVolume.update( {'Zircon': (Zircon * self.DataBase['Zircon'][0] / self.DataBase['Zircon'][1]) / WholeVolume * 100}) DataVolume.update( {'K2SiO3': 
(K2SiO3 * self.DataBase['K2SiO3'][0] / self.DataBase['K2SiO3'][1]) / WholeVolume * 100}) DataVolume.update({'Anorthite': (Anorthite * self.DataBase['Anorthite'][0] / self.DataBase['Anorthite'][ 1]) / WholeVolume * 100}) DataVolume.update( {'Na2SiO3': (Na2SiO3 * self.DataBase['Na2SiO3'][0] / self.DataBase['Na2SiO3'][1]) / WholeVolume * 100}) DataVolume.update( {'Acmite': (Acmite * self.DataBase['Acmite'][0] / self.DataBase['Acmite'][1]) / WholeVolume * 100}) DataVolume.update( {'Diopside': (Diopside * self.DataBase['Diopside'][0] / self.DataBase['Diopside'][1]) / WholeVolume * 100}) DataVolume.update( {'Sphene': (Sphene * self.DataBase['Sphene'][0] / self.DataBase['Sphene'][1]) / WholeVolume * 100}) DataVolume.update({'Hypersthene': (Hypersthene * self.DataBase['Hypersthene'][0] / self.DataBase['Hypersthene'][ 1]) / WholeVolume * 100}) DataVolume.update( {'Albite': (Albite * self.DataBase['Albite'][0] / self.DataBase['Albite'][1]) / WholeVolume * 100}) DataVolume.update({'Orthoclase': (Orthoclase * self.DataBase['Orthoclase'][0] / self.DataBase['Orthoclase'][ 1]) / WholeVolume * 100}) DataVolume.update({'Wollastonite': (Wollastonite * self.DataBase['Wollastonite'][0] / self.DataBase['Wollastonite'][1]) / WholeVolume * 100}) DataVolume.update( {'Olivine': (Olivine * self.DataBase['Olivine'][0] / self.DataBase['Olivine'][1]) / WholeVolume * 100}) DataVolume.update({'Perovskite': (Perovskite * self.DataBase['Perovskite'][0] / self.DataBase['Perovskite'][ 1]) / WholeVolume * 100}) DataVolume.update({'Nepheline': (Nepheline * self.DataBase['Nepheline'][0] / self.DataBase['Nepheline'][ 1]) / WholeVolume * 100}) DataVolume.update( {'Leucite': (Leucite * self.DataBase['Leucite'][0] / self.DataBase['Leucite'][1]) / WholeVolume * 100}) DataVolume.update( {'Larnite': (Larnite * self.DataBase['Larnite'][0] / self.DataBase['Larnite'][1]) / WholeVolume * 100}) DataVolume.update({'Kalsilite': (Kalsilite * self.DataBase['Kalsilite'][0] / self.DataBase['Kalsilite'][ 1]) / 
WholeVolume * 100}) DataVolume.update( {'Apatite': (Apatite * self.DataBase['Apatite'][0] / self.DataBase['Apatite'][1]) / WholeVolume * 100}) DataVolume.update( {'Halite': (Halite * self.DataBase['Halite'][0] / self.DataBase['Halite'][1]) / WholeVolume * 100}) DataVolume.update( {'Fluorite': (Fluorite * self.DataBase['Fluorite'][0] / self.DataBase['Fluorite'][1]) / WholeVolume * 100}) DataVolume.update({'Anhydrite': (Anhydrite * self.DataBase['Anhydrite'][0] / self.DataBase['Anhydrite'][ 1]) / WholeVolume * 100}) DataVolume.update({'Thenardite': (Thenardite * self.DataBase['Thenardite'][0] / self.DataBase['Thenardite'][ 1]) / WholeVolume * 100}) DataVolume.update( {'Pyrite': (Pyrite * self.DataBase['Pyrite'][0] / self.DataBase['Pyrite'][1]) / WholeVolume * 100}) DataVolume.update({'Magnesiochromite': (Magnesiochromite * self.DataBase['Magnesiochromite'][0] / self.DataBase['Magnesiochromite'][1]) / WholeVolume * 100}) DataVolume.update( {'Chromite': (Chromite * self.DataBase['Chromite'][0] / self.DataBase['Chromite'][1]) / WholeVolume * 100}) DataVolume.update( {'Ilmenite': (Ilmenite * self.DataBase['Ilmenite'][0] / self.DataBase['Ilmenite'][1]) / WholeVolume * 100}) DataVolume.update( {'Calcite': (Calcite * self.DataBase['Calcite'][0] / self.DataBase['Calcite'][1]) / WholeVolume * 100}) DataVolume.update( {'Na2CO3': (Na2CO3 * self.DataBase['Na2CO3'][0] / self.DataBase['Na2CO3'][1]) / WholeVolume * 100}) DataVolume.update( {'Corundum': (Corundum * self.DataBase['Corundum'][0] / self.DataBase['Corundum'][1]) / WholeVolume * 100}) DataVolume.update( {'Rutile': (Rutile * self.DataBase['Rutile'][0] / self.DataBase['Rutile'][1]) / WholeVolume * 100}) DataVolume.update({'Magnetite': (Magnetite * self.DataBase['Magnetite'][0] / self.DataBase['Magnetite'][ 1]) / WholeVolume * 100}) DataVolume.update( {'Hematite': (Hematite * self.DataBase['Hematite'][0] / self.DataBase['Hematite'][1]) / WholeVolume * 100}) DataVolume.update({'Q': DataVolume['Quartz']}) 
DataVolume.update({'A': DataVolume['Orthoclase']}) DataVolume.update({'P': DataVolume['Anorthite'] + DataVolume['Albite']}) DataVolume.update({'F': DataVolume['Nepheline'] + DataVolume['Leucite'] + DataVolume['Kalsilite']}) DI = 0 # for i in ['Quartz', 'Anorthite', 'Albite', 'Orthoclase', 'Nepheline', 'Leucite', 'Kalsilite']: # exec('DI+=' + i + '*self.DataBase[\'' + i + '\'][0]') DI = Quartz + Anorthite + Albite + Orthoclase + Nepheline + Leucite + Kalsilite DiWeight=0 DiVolume=0 DiWeight = DataWeight['Quartz']+DataWeight['Anorthite']+DataWeight['Albite']+DataWeight['Orthoclase']+DataWeight['Nepheline']+DataWeight['Leucite']+DataWeight['Kalsilite'] DiVolume = DataVolume['Quartz']+DataVolume['Anorthite']+DataVolume['Albite']+DataVolume['Orthoclase']+DataVolume['Nepheline']+DataVolume['Leucite']+DataVolume['Kalsilite'] # print('\n\n DI is\n',DI,'\n\n') DataCalced.update({'Differentiation Index Weight': DiWeight}) DataCalced.update({'Differentiation Index Volume': DiVolume}) return (DataResult, DataWeight, DataVolume, DataCalced)
[ "def", "singleCalc", "(", "self", ",", "m", "=", "{", "'Al2O3'", ":", "13.01", ",", "'Alpha'", ":", "0.6", ",", "'Ba'", ":", "188.0", ",", "'Be'", ":", "0.85", ",", "'CaO'", ":", "8.35", ",", "'Ce'", ":", "28.2", ",", "'Co'", ":", "45.2", ",", ...
37.005059
24.504216
def _convert_hdxobjects(self, hdxobjects): # type: (List[HDXObjectUpperBound]) -> List[HDXObjectUpperBound] """Helper function to convert supplied list of HDX objects to a list of dict Args: hdxobjects (List[T <= HDXObject]): List of HDX objects to convert Returns: List[Dict]: List of HDX objects converted to simple dictionaries """ newhdxobjects = list() for hdxobject in hdxobjects: newhdxobjects.append(hdxobject.data) return newhdxobjects
[ "def", "_convert_hdxobjects", "(", "self", ",", "hdxobjects", ")", ":", "# type: (List[HDXObjectUpperBound]) -> List[HDXObjectUpperBound]", "newhdxobjects", "=", "list", "(", ")", "for", "hdxobject", "in", "hdxobjects", ":", "newhdxobjects", ".", "append", "(", "hdxobje...
38.071429
19.428571
def from_dict(d):
    """
    Re-create the Specs from a dictionary representation.

    :param Dict[str, Any] d: The dictionary representation.
    :return: The restored Specs.
    :rtype: Specs
    """
    def _qubit_spec(qid, qspecs):
        # Build the per-qubit spec record from its raw dictionary.
        return QubitSpecs(id=int(qid),
                          fRO=qspecs.get('fRO'),
                          f1QRB=qspecs.get('f1QRB'),
                          T1=qspecs.get('T1'),
                          T2=qspecs.get('T2'),
                          fActiveReset=qspecs.get('fActiveReset'))

    def _edge_spec(pair, especs):
        # Edge keys look like "0-1"; split into integer qubit ids.
        return EdgeSpecs(targets=[int(q) for q in pair.split('-')],
                         fBellState=especs.get('fBellState'),
                         fCZ=especs.get('fCZ'),
                         fCZ_std_err=especs.get('fCZ_std_err'),
                         fCPHASE=especs.get('fCPHASE'))

    qubits = [_qubit_spec(q, qs) for q, qs in d["1Q"].items()]
    edges = [_edge_spec(e, es) for e, es in d["2Q"].items()]
    return Specs(
        qubits_specs=sorted(qubits, key=lambda qs: qs.id),
        edges_specs=sorted(edges, key=lambda es: es.targets),
    )
[ "def", "from_dict", "(", "d", ")", ":", "return", "Specs", "(", "qubits_specs", "=", "sorted", "(", "[", "QubitSpecs", "(", "id", "=", "int", "(", "q", ")", ",", "fRO", "=", "qspecs", ".", "get", "(", "'fRO'", ")", ",", "f1QRB", "=", "qspecs", "....
52.2
25.4
def insult(rest):
    """Generate a random insult from datahamster.

    :param rest: optional name of the person to insult; if given, the
        insult is personalized and the target's karma is decremented.
    :return: the insult string, or None if no insult could be extracted.
    """
    # not supplying any style will automatically redirect to a random
    url = 'http://autoinsult.datahamster.com/'
    ins_type = random.randrange(4)
    ins_url = url + "?style={ins_type}".format(**locals())
    insre = re.compile('<div class="insult" id="insult">(.*?)</div>')
    resp = requests.get(ins_url)
    resp.raise_for_status()
    match = insre.search(resp.text)
    # Fix: previously .group(1) was called on a possibly-None match,
    # raising AttributeError whenever the page markup changes.
    if match is None:
        return
    insult = match.group(1)
    if not insult:
        return
    if rest:
        insultee = rest.strip()
        karma.Karma.store.change(insultee, -1)
        if ins_type in (0, 2):
            # Second-person styles: replace "your" with "<name>'s".
            cinsre = re.compile(r'\b(your)\b', re.IGNORECASE)
            insult = cinsre.sub("%s's" % insultee, insult)
        elif ins_type in (1, 3):
            # Declarative styles: prefix the insultee's name.
            cinsre = re.compile(r'^([TY])')
            insult = cinsre.sub(
                lambda m: "%s, %s" % (
                    insultee, m.group(1).lower()), insult)
    return insult
[ "def", "insult", "(", "rest", ")", ":", "# not supplying any style will automatically redirect to a random", "url", "=", "'http://autoinsult.datahamster.com/'", "ins_type", "=", "random", ".", "randrange", "(", "4", ")", "ins_url", "=", "url", "+", "\"?style={ins_type}\""...
33.541667
14.791667
def build_reduce_code(self, result, select, reduce):
    """ Builds a reduce operation on the selected target range.

    Returns a list of Python source lines that, when executed in the
    context of ``self``, accumulate the selected values into
    ``self.<result>`` and ``self.<result>_shadow``.

    :param result: name of the attribute to store the reduction into
    :param select: target expression, e.g. ``a/b`` or ``arr[*]x`` or
        ``arr[field=value]x`` ('/' separators are converted to '.')
    :param reduce: 'add' for a sum (identity 0); anything else multiplies
        (identity 1)
    # NOTE: the parameter name 'reduce' shadows the Python 2 builtin;
    # kept for interface compatibility.
    """
    select = select.replace('/', '.')
    select = select.replace(' ', '')
    if reduce == 'add':
        reduce_op = '+'
        acc_start = 0
    else:
        reduce_op = '*'
        acc_start = 1
    #bits = select.split('[*]')
    # Split the selector around an optional bracketed filter, keeping the
    # bracket contents separately in ``seps``.
    bits = re.split('\[.*\]', select)
    seps = re.findall('\[.*\]', select)
    # Initialize both the result and its shadow to the identity element.
    code = ['self.{0} = {1}'.format(result, acc_start)]
    code += ['self.{0}_shadow = {1}'.format(result, acc_start)]
    code += ['try:']
    if len(bits) == 1:
        # No brackets: a direct attribute copy.
        target = select
        code += [' self.{0} = self.{1}'.format(result, target)]
        code += [' self.{0}_shadow = self.{1}'.format(result, target)]
    elif len(bits) == 2:
        # One bracketed segment: either '[*]' (reduce over all elements)
        # or '[field=value]' (reduce over matching elements).
        sep = seps[0][1:-1]
        if sep == '*':
            array = bits[0]
            ref = bits[1]
            code += [' acc = {0}'.format(acc_start)]
            code += [' for o in self.{0}:'.format(array)]
            code += [' acc = acc {0} o{1}'.format(reduce_op, ref)]
            code += [' self.{0} = acc'.format(result)]
            code += [' self.{0}_shadow = acc'.format(result)]
        else:
            bits2 = sep.split('=')
            if len(bits2) > 1:
                # Filtered reduction: only elements whose attribute
                # matches the given value contribute.
                array = bits[0]
                ref = bits[1]
                code += [' acc = {0}'.format(acc_start)]
                code += [' for o in self.{0}:'.format(array)]
                code += [' if o.{0} == {1}:'.format(bits2[0], bits2[1])]
                code += [' acc = acc {0} o{1}'.format(reduce_op, ref)]
                code += [' self.{0} = acc'.format(result)]
                code += [' self.{0}_shadow = acc'.format(result)]
            else:
                raise SimbuildError("Invalid reduce target - '{0}'".format(select))
    else:
        raise SimbuildError("Invalid reduce target - '{0}'".format(select))
    # Generated code swallows all runtime errors deliberately: a missing
    # target leaves the identity value in place.
    code += ['except:']
    code += [' pass']
    return code
[ "def", "build_reduce_code", "(", "self", ",", "result", ",", "select", ",", "reduce", ")", ":", "select", "=", "select", ".", "replace", "(", "'/'", ",", "'.'", ")", "select", "=", "select", ".", "replace", "(", "' '", ",", "''", ")", "if", "reduce",...
36.2
21.633333
def update(self, group_id, name=None, order=None, collapsed=None):
    """Update a Component Group

    :param int group_id: Component Group ID
    :param str name: Name of the component group
    :param int order: Order of the group
    :param int collapsed: Collapse the group?
    :return: Updated component group data (:class:`dict`)

    .. seealso:: https://docs.cachethq.io/reference#put-component-group
    """
    payload = ApiParams()
    payload['group'] = group_id
    payload['name'] = name
    payload['order'] = order
    payload['collapsed'] = collapsed
    endpoint = 'components/groups/%s' % group_id
    return self._put(endpoint, data=payload)['data']
[ "def", "update", "(", "self", ",", "group_id", ",", "name", "=", "None", ",", "order", "=", "None", ",", "collapsed", "=", "None", ")", ":", "data", "=", "ApiParams", "(", ")", "data", "[", "'group'", "]", "=", "group_id", "data", "[", "'name'", "]...
39.352941
16.529412
def _invoke_submit(self, iterobj, is_dict, is_itmcoll, mres, global_kw):
    """
    Internal function to invoke the actual submit_single function
    :param iterobj: The raw object returned as the next item of the iterator
    :param is_dict: True if iterator is a dictionary
    :param is_itmcoll: True if the iterator contains Item objects
    :param mres: The multi result object
    :param global_kw: The global settings
    :return: The return value of :meth:`submit_single`
    """
    if is_itmcoll:
        # Item collections yield (item, per-key options) pairs; the item
        # itself doubles as the result object.
        item, key_options = next(iterobj)
        key = item.key
        value = item.value
        result = item
    else:
        if is_dict:
            key, value = next(iterobj)
            if not self.VALUES_ALLOWED and not is_itmcoll:
                raise ArgumentError.pyexc(
                    'Values not allowed for this command', obj=value)
        else:
            key = next(iterobj)
            value = None
        key_options = {}
        item = None
        result = self.make_result(key, value)
    # -1 marks the result as not-yet-completed; the real rc arrives via
    # the multi-result callback.
    # NOTE(review): placement inferred — the source formatting was
    # ambiguous about whether this applies to both branches; confirm.
    result.rc = -1
    # Attempt to get the encoded key:
    key, value, key_options = self.make_entry_params(key, value,
                                                     key_options)
    c_key, c_len = create_key(self.parent._tc, key)
    rc = self.submit_single(c_key, c_len, value, item, key_options,
                            global_kw, mres)
    if rc:
        raise pycbc_exc_lcb(rc)
    try:
        if key in mres and not self.DUPKEY_OK:
            # For tests:
            self.parent._warn_dupkey(key)
        mres[key] = result
    except TypeError:
        # Unhashable key: surface as an encoding error.
        raise pycbc_exc_enc(obj=key)
[ "def", "_invoke_submit", "(", "self", ",", "iterobj", ",", "is_dict", ",", "is_itmcoll", ",", "mres", ",", "global_kw", ")", ":", "if", "is_itmcoll", ":", "item", ",", "key_options", "=", "next", "(", "iterobj", ")", "key", "=", "item", ".", "key", "va...
36.26087
18.565217
def to_string_short(self):
    """Return an abbreviated string representation of the parameter.

    Numpy print options are narrowed (threshold=8, edgeitems=3, reduced
    linewidth) for the duration of the call and restored afterwards.

    see also :meth:`to_string`

    :return: a shorter abreviated string reprentation of the parameter
    """
    saved_opts = np.get_printoptions()
    width = saved_opts['linewidth'] - len(self.uniquetwig) - 2
    np.set_printoptions(threshold=8, edgeitems=3, linewidth=width)
    text = super(FloatArrayParameter, self).to_string_short()
    np.set_printoptions(**saved_opts)
    return text
[ "def", "to_string_short", "(", "self", ")", ":", "opt", "=", "np", ".", "get_printoptions", "(", ")", "np", ".", "set_printoptions", "(", "threshold", "=", "8", ",", "edgeitems", "=", "3", ",", "linewidth", "=", "opt", "[", "'linewidth'", "]", "-", "le...
37.545455
19.545455
def njsd(network, ref_gene_expression_dict, query_gene_expression_dict, gene_set):
    """Calculate Jensen-Shannon divergence between query and reference gene
    expression profiles, averaged over the genes in gene_set.

    :param network: graph with a ``.nodes`` attribute; a gene's neighborhood
        defines its expression vector
    :param ref_gene_expression_dict: gene -> expression for the reference
    :param query_gene_expression_dict: gene -> expression for the query
    :param gene_set: genes to evaluate
    :return: mean per-gene JSD
    """
    gene_jsd_dict = dict()
    reference_genes = ref_gene_expression_dict.keys()
    # Fix: the original asserted ``len(...) != '<message>'`` which compares
    # an int to a str and is always True, so the emptiness check never
    # fired.  Assert non-emptiness and attach the message properly.
    assert len(reference_genes) != 0, \
        'Reference gene expression profile should have > 0 genes.'

    for gene in gene_set:
        # Genes absent from the network have no neighborhood to compare.
        if gene not in network.nodes:
            continue
        neighbors = find_neighbors(network, gene)
        query_expression_vec = get_neighbor_expression_vector(
            neighbors, query_gene_expression_dict)
        ref_expression_vec = get_neighbor_expression_vector(
            neighbors, ref_gene_expression_dict)
        assert len(query_expression_vec) == len(ref_expression_vec), \
            'Topology of reference network and query network differs. Please check.'

        # A gene which has non-expressed neighbors is ignored.
        if np.sum(query_expression_vec) == 0 and np.sum(ref_expression_vec) == 0:
            continue

        query_p_vec = exp2prob(query_expression_vec)
        ref_p_vec = exp2prob(ref_expression_vec)
        gene_jsd_dict[gene] = jsd(query_p_vec, ref_p_vec)

    return np.mean(list(gene_jsd_dict.values()))
[ "def", "njsd", "(", "network", ",", "ref_gene_expression_dict", ",", "query_gene_expression_dict", ",", "gene_set", ")", ":", "gene_jsd_dict", "=", "dict", "(", ")", "reference_genes", "=", "ref_gene_expression_dict", ".", "keys", "(", ")", "assert", "len", "(", ...
45.111111
29.407407
def generate(str, alg):
    """Generates an PIL image avatar based on the given input String.
    Acts as the main accessor to pagan."""
    # NOTE: the parameter name shadows the builtin ``str``; kept unchanged
    # so existing keyword callers remain compatible.
    avatar = Image.new(IMAGE_MODE, IMAGE_SIZE, BACKGROUND_COLOR)
    digest = hash_input(str, alg)
    draw_image(setup_pixelmap(digest), avatar)
    return avatar
[ "def", "generate", "(", "str", ",", "alg", ")", ":", "img", "=", "Image", ".", "new", "(", "IMAGE_MODE", ",", "IMAGE_SIZE", ",", "BACKGROUND_COLOR", ")", "hashcode", "=", "hash_input", "(", "str", ",", "alg", ")", "pixelmap", "=", "setup_pixelmap", "(", ...
39
10.125
def add_reverse_arcs(graph, capac=None):
    """Utility function for flow algorithms that need for every arc (u,v),
    the existence of an (v,u) arc, by default with zero capacity.
    graph can be in adjacency list, possibly with capacity matrix capac.
    or graph can be in adjacency dictionary, then capac parameter is ignored.

    :param capac: arc capacity matrix
    :param graph: in listlist representation, or in listdict representation,
        in this case capac is ignored
    :complexity: linear
    :returns: nothing, but graph is modified
    """
    for u, successors in enumerate(graph):
        for v in successors:
            if u in graph[v]:
                continue  # reverse arc already present
            if type(graph[v]) is list:
                graph[v].append(u)
                if capac:
                    capac[v][u] = 0
            else:
                assert type(graph[v]) is dict
                # adjacency dictionary stores the capacity inline
                graph[v][u] = 0
[ "def", "add_reverse_arcs", "(", "graph", ",", "capac", "=", "None", ")", ":", "for", "u", "in", "range", "(", "len", "(", "graph", ")", ")", ":", "for", "v", "in", "graph", "[", "u", "]", ":", "if", "u", "not", "in", "graph", "[", "v", "]", "...
42.285714
14.428571
def process_user_input(self):
    """Read the next input token and act on it.

    Non-numeric input is ignored.  A number selecting a valid menu entry
    (1-based) updates ``current_option`` and triggers :meth:`select`.

    :return: the raw input string, or None when it was not numeric
    """
    user_input = self.get_input()
    try:
        choice = int(user_input)
    except Exception:
        return None
    if 1 <= choice <= len(self.items):
        self.current_option = choice - 1
        self.select()
    return user_input
[ "def", "process_user_input", "(", "self", ")", ":", "user_input", "=", "self", ".", "get_input", "(", ")", "try", ":", "num", "=", "int", "(", "user_input", ")", "except", "Exception", ":", "return", "if", "0", "<", "num", "<", "len", "(", "self", "....
25.133333
15.133333
def assert_has_permission(self, scope_required):
    """
    Raise unless the required scope is among the scopes granted to the
    currently authenticated client; on failure the mismatch is logged
    first.

    ::

        # The admin user should have client admin permissions
        uaa.assert_has_permission('admin', 'clients.admin')

    :param scope_required: the scope string that must be present
    :return: True when the scope is granted
    :raises ValueError: when not authenticated or the scope is missing
    """
    if not self.authenticated:
        raise ValueError("Must first authenticate()")
    if scope_required in self.get_scopes():
        return True
    # Log enough context to diagnose the missing permission.
    logging.warning("Authenticated as %s" % (self.client['id']))
    logging.warning("Have scopes: %s" % (str.join(',', self.get_scopes())))
    logging.warning("Insufficient scope %s for operation" % (scope_required))
    raise ValueError("Client does not have permission.")
[ "def", "assert_has_permission", "(", "self", ",", "scope_required", ")", ":", "if", "not", "self", ".", "authenticated", ":", "raise", "ValueError", "(", "\"Must first authenticate()\"", ")", "if", "scope_required", "not", "in", "self", ".", "get_scopes", "(", "...
35.818182
25.454545
def tags(cls, filename, namespace=None):
    """Extract tags from file.

    :param filename: path of the raster file to read
    :param namespace: optional tag namespace to restrict the lookup
    :return: the tags reported by the opened raster
    """
    raster = cls._raster_opener(filename)
    return raster.tags(ns=namespace)
[ "def", "tags", "(", "cls", ",", "filename", ",", "namespace", "=", "None", ")", ":", "return", "cls", ".", "_raster_opener", "(", "filename", ")", ".", "tags", "(", "ns", "=", "namespace", ")" ]
46.333333
7.333333
def zremrangebyscore(self, name, min, max):
    """
    Remove a range of element by between score ``min_value`` and
    ``max_value`` both included.

    :param name: str     the name of the redis key
    :param min: lower score bound (inclusive)
    :param max: upper score bound (inclusive)
    :return: Future()
    """
    # NOTE: the parameter names shadow the builtins min/max; kept so
    # keyword callers remain compatible.
    with self.pipe as pipe:
        full_key = self.redis_key(name)
        return pipe.zremrangebyscore(full_key, min, max)
[ "def", "zremrangebyscore", "(", "self", ",", "name", ",", "min", ",", "max", ")", ":", "with", "self", ".", "pipe", "as", "pipe", ":", "return", "pipe", ".", "zremrangebyscore", "(", "self", ".", "redis_key", "(", "name", ")", ",", "min", ",", "max",...
32.416667
15.583333
def playToneList(self, playList=None):
    """Play a list of tones sequentially.

    :param playList: list of tone dicts, each with keys ``freq``,
        ``reps``, ``delay`` and ``muteDelay``, e.g.::

            [
                {"freq": 440, "reps": 1, "delay": 0.08, "muteDelay": 0.15},
                {"freq": 567, "reps": 3, "delay": 0.08, "muteDelay": 0.15},
            ]

        ``delay`` >= 0 (s): 0 means the tone stops playing immediately.
        ``muteDelay`` >= 0 (s): 0 means no pause after a note; the next
        note plays immediately.
    :return: True if the list was played, False if playList was None
    """
    # Fix: identity comparison with None (PEP 8) instead of ``== None``.
    if playList is None:
        return False
    for tone in playList:
        self.playTone(tone["freq"], tone["reps"], tone["delay"], tone["muteDelay"])
    self.stopTone()
    return True
[ "def", "playToneList", "(", "self", ",", "playList", "=", "None", ")", ":", "if", "playList", "==", "None", ":", "return", "False", "for", "t", "in", "playList", ":", "self", ".", "playTone", "(", "t", "[", "\"freq\"", "]", ",", "t", "[", "\"reps\"",...
32.666667
21.181818
def wait(*coros_or_futures, limit=0, timeout=None, loop=None, return_exceptions=False, return_when='ALL_COMPLETED'): """ Wait for the Futures and coroutine objects given by the sequence futures to complete, with optional concurrency limit. Coroutines will be wrapped in Tasks. ``timeout`` can be used to control the maximum number of seconds to wait before returning. timeout can be an int or float. If timeout is not specified or None, there is no limit to the wait time. If ``return_exceptions`` is True, exceptions in the tasks are treated the same as successful results, and gathered in the result list; otherwise, the first raised exception will be immediately propagated to the returned future. ``return_when`` indicates when this function should return. It must be one of the following constants of the concurrent.futures module. All futures must share the same event loop. This functions is mostly compatible with Python standard ``asyncio.wait()``. Arguments: *coros_or_futures (iter|list): an iterable collection yielding coroutines functions. limit (int): optional concurrency execution limit. Use ``0`` for no limit. timeout (int/float): maximum number of seconds to wait before returning. return_exceptions (bool): exceptions in the tasks are treated the same as successful results, instead of raising them. return_when (str): indicates when this function should return. loop (asyncio.BaseEventLoop): optional event loop to use. *args (mixed): optional variadic argument to pass to the coroutines function. Returns: tuple: Returns two sets of Future: (done, pending). Raises: TypeError: in case of invalid coroutine object. ValueError: in case of empty set of coroutines or futures. TimeoutError: if execution takes more than expected. 
Usage:: async def sum(x, y): return x + y done, pending = await paco.wait( sum(1, 2), sum(3, 4)) [task.result() for task in done] # => [3, 7] """ # Support iterable as first argument for better interoperability if len(coros_or_futures) == 1 and isiter(coros_or_futures[0]): coros_or_futures = coros_or_futures[0] # If no coroutines to schedule, return empty list # Mimics asyncio behaviour. if len(coros_or_futures) == 0: raise ValueError('paco: set of coroutines/futures is empty') # Create concurrent executor pool = ConcurrentExecutor(limit=limit, loop=loop, coros=coros_or_futures) # Wait until all the tasks finishes return (yield from pool.run(timeout=timeout, return_when=return_when, return_exceptions=return_exceptions))
[ "def", "wait", "(", "*", "coros_or_futures", ",", "limit", "=", "0", ",", "timeout", "=", "None", ",", "loop", "=", "None", ",", "return_exceptions", "=", "False", ",", "return_when", "=", "'ALL_COMPLETED'", ")", ":", "# Support iterable as first argument for be...
37.753247
21.961039
def verify_is(self, first, second, msg=None):
    """
    Soft assert for whether the parameters evaluate to the same object.

    A failure is recorded instead of raised, so the caller can collect
    several verification failures in one run.

    :params first: the object to compare against
    :params second: the object to compare with
    :params msg: (Optional) msg explaining the difference
    """
    try:
        self.assert_is(first, second, msg)
    except AssertionError as e:  # fixed: Python-2-only 'except X, e' syntax
        if msg:
            m = "%s:\n%s" % (msg, str(e))
        else:
            m = str(e)
        # NOTE(review): 'verification_erorrs' looks like a typo for
        # 'verification_errors', but the attribute is defined elsewhere
        # in the class, so it is kept for compatibility.
        self.verification_erorrs.append(m)
[ "def", "verify_is", "(", "self", ",", "first", ",", "second", ",", "msg", "=", "None", ")", ":", "try", ":", "self", ".", "assert_is", "(", "first", ",", "second", ",", "msg", ")", "except", "AssertionError", ",", "e", ":", "if", "msg", ":", "m", ...
34.1875
14.4375
def sources(self):
    """
    Get the sources for a given experience_id, which is tied to a
    specific language.

    :return: sources dict (parsed JSON response)
    """
    url = self.sources_api_url.format(experience_id=self.experience_id)
    params = {"pinst_id": self.pinst_id}
    response = self.get(url, params=params)
    return self.session.http.json(response)
[ "def", "sources", "(", "self", ")", ":", "api_url", "=", "self", ".", "sources_api_url", ".", "format", "(", "experience_id", "=", "self", ".", "experience_id", ")", "res", "=", "self", ".", "get", "(", "api_url", ",", "params", "=", "{", "\"pinst_id\"",...
43.888889
17.666667
def intelligently_find_filenames(line, TeX=False, ext=False, commas_okay=False):
    """Intelligently find filenames.

    Find the filename in the line. We don't support all filenames! Just eps
    and ps for now.

    :param: line (string): the line we want to get a filename out of
    :param: TeX (bool): also allow TeX-ish suffixes in the filename
    :param: ext (bool): require an eps/ps-style extension
    :param: commas_okay (bool): allow commas inside candidate filenames

    :return: filename ([string, ...]): what is probably the name of the
        file(s)
    """
    files_included = ['ERROR']

    if commas_okay:
        valid_for_filename = '\\s*[A-Za-z0-9\\-\\=\\+/\\\\_\\.,%#]+'
    else:
        valid_for_filename = '\\s*[A-Za-z0-9\\-\\=\\+/\\\\_\\.%#]+'

    if ext:
        valid_for_filename += '\.e*ps[texfi2]*'

    if TeX:
        valid_for_filename += '[\.latex]*'

    # case: '=FILENAME,' or '=FILENAME '
    file_inclusion = re.findall('=' + valid_for_filename + '[ ,]', line)
    if len(file_inclusion) > 0:
        for file_included in file_inclusion:
            files_included.append(file_included[1:-1])

    # case: 'file=...', 'psfile=...', 'figure=...' -- still has the '='
    file_inclusion = re.findall('(?:[ps]*file=|figure=)' +
                                valid_for_filename + '[,\\]} ]*', line)
    if len(file_inclusion) > 0:
        for file_included in file_inclusion:
            part_before_equals = file_included.split('=')[0]
            # Fix: the original compared an int to a string here
            # (``len(part_before_equals) != file_included``), which is
            # always True; compare the lengths as intended.
            if len(part_before_equals) != len(file_included):
                file_included = file_included[
                    len(part_before_equals) + 1:].strip()
            if file_included not in files_included:
                files_included.append(file_included)

    # case: the name is wrapped in {}, [], "" or ''
    file_inclusion = re.findall(
        '["\'{\\[]' + valid_for_filename + '[}\\],"\']', line)
    if len(file_inclusion) > 0:
        for file_included in file_inclusion:
            file_included = file_included[1:-1]
            file_included = file_included.strip()
            if file_included not in files_included:
                files_included.append(file_included)

    # case: the whole line is a filename
    file_inclusion = re.findall('^' + valid_for_filename + '$', line)
    if len(file_inclusion) > 0:
        for file_included in file_inclusion:
            file_included = file_included.strip()
            if file_included not in files_included:
                files_included.append(file_included)

    # case: the line starts with a filename followed by ',', '}' or ' '
    file_inclusion = re.findall('^' + valid_for_filename + '[,\\} $]', line)
    if len(file_inclusion) > 0:
        for file_included in file_inclusion:
            file_included = file_included.strip()
            if file_included not in files_included:
                files_included.append(file_included)

    # case: the line ends with a filename
    file_inclusion = re.findall('\\s*' + valid_for_filename + '\\s*$', line)
    if len(file_inclusion) > 0:
        for file_included in file_inclusion:
            file_included = file_included.strip()
            if file_included not in files_included:
                files_included.append(file_included)

    if files_included != ['ERROR']:
        files_included = files_included[1:]  # cut off the dummy

    # Post-process: drop empties and break space/comma-joined candidates
    # apart (the list is deliberately extended while iterating so the
    # newly appended sub-names are visited too).
    for file_included in files_included:
        if file_included == '':
            files_included.remove(file_included)
        if ' ' in file_included:
            for subfile in file_included.split(' '):
                if subfile not in files_included:
                    files_included.append(subfile)
        if ',' in file_included:
            # Fix: the original split on ' ' here, so comma-separated
            # filenames were never actually broken apart.
            for subfile in file_included.split(','):
                if subfile not in files_included:
                    files_included.append(subfile)

    return files_included
[ "def", "intelligently_find_filenames", "(", "line", ",", "TeX", "=", "False", ",", "ext", "=", "False", ",", "commas_okay", "=", "False", ")", ":", "files_included", "=", "[", "'ERROR'", "]", "if", "commas_okay", ":", "valid_for_filename", "=", "'\\\\s*[A-Za-z...
36.15625
20.083333
def nlmsg_type(self, value):
    """Message content setter."""
    # Encode as a native-endian 16-bit unsigned int; None becomes 0.
    encoded = bytearray(c_uint16(value or 0))
    self.bytearray[self._get_slicers(1)] = encoded
[ "def", "nlmsg_type", "(", "self", ",", "value", ")", ":", "self", ".", "bytearray", "[", "self", ".", "_get_slicers", "(", "1", ")", "]", "=", "bytearray", "(", "c_uint16", "(", "value", "or", "0", ")", ")" ]
47.666667
16.666667
def _dynamic_operation(self, map_obj):
    """
    Generate function to dynamically apply the operation.
    Wraps an existing HoloMap or DynamicMap.
    """
    if not isinstance(map_obj, DynamicMap):
        # Static input: index into a HoloMap on demand, or pass a plain
        # element straight through.
        def dynamic_operation(*key, **kwargs):
            # Evaluated kwargs are merged fresh on every invocation.
            kwargs = dict(self._eval_kwargs(), **kwargs)
            obj = map_obj[key] if isinstance(map_obj, HoloMap) else map_obj
            return self._process(obj, key, kwargs)
    else:
        # Dynamic input: always index, deferring evaluation to map_obj.
        def dynamic_operation(*key, **kwargs):
            kwargs = dict(self._eval_kwargs(), **kwargs)
            return self._process(map_obj[key], key, kwargs)
    # Wrap the closure so downstream machinery can trace its inputs; an
    # Operation gets the richer OperationCallable wrapper.
    if isinstance(self.p.operation, Operation):
        return OperationCallable(dynamic_operation, inputs=[map_obj],
                                 link_inputs=self.p.link_inputs,
                                 operation=self.p.operation)
    else:
        return Callable(dynamic_operation, inputs=[map_obj],
                        link_inputs=self.p.link_inputs)
[ "def", "_dynamic_operation", "(", "self", ",", "map_obj", ")", ":", "if", "not", "isinstance", "(", "map_obj", ",", "DynamicMap", ")", ":", "def", "dynamic_operation", "(", "*", "key", ",", "*", "*", "kwargs", ")", ":", "kwargs", "=", "dict", "(", "sel...
49.380952
17.47619
def get_batch_by_transaction(self, transaction_id): """ Check to see if the requested transaction_id is in the current chain. If so, find the batch that has the transaction referenced by the transaction_id and return the batch. This is done by finding the block and searching for the batch. :param transaction_id (string): The id of the transaction that is being requested. :return: The batch that has the transaction. """ payload = self._get_data_by_id( transaction_id, 'commit_store_get_batch_by_transaction') batch = Batch() batch.ParseFromString(payload) return batch
[ "def", "get_batch_by_transaction", "(", "self", ",", "transaction_id", ")", ":", "payload", "=", "self", ".", "_get_data_by_id", "(", "transaction_id", ",", "'commit_store_get_batch_by_transaction'", ")", "batch", "=", "Batch", "(", ")", "batch", ".", "ParseFromStri...
36
20.736842
def mouse_move_event(self, event): """ Forward mouse cursor position events to the example """ self.example.mouse_position_event(event.x(), event.y())
[ "def", "mouse_move_event", "(", "self", ",", "event", ")", ":", "self", ".", "example", ".", "mouse_position_event", "(", "event", ".", "x", "(", ")", ",", "event", ".", "y", "(", ")", ")" ]
36.4
9.6
def fork(self): """Fork this gist. :returns: :class:`Gist <Gist>` if successful, ``None`` otherwise """ url = self._build_url('forks', base_url=self._api) json = self._json(self._post(url), 201) return Gist(json, self) if json else None
[ "def", "fork", "(", "self", ")", ":", "url", "=", "self", ".", "_build_url", "(", "'forks'", ",", "base_url", "=", "self", ".", "_api", ")", "json", "=", "self", ".", "_json", "(", "self", ".", "_post", "(", "url", ")", ",", "201", ")", "return",...
30.888889
19
def parse(self,type_regex=None): """ Each line of the frame cache file is like the following: /frames/E13/LHO/frames/hoftMon_H1/H-H1_DMT_C00_L2-9246,H,H1_DMT_C00_L2,1,16 1240664820 6231 {924600000 924646720 924646784 924647472 924647712 924700000} The description is as follows: 1.1) Directory path of files 1.2) Site 1.3) Type 1.4) Number of frames in the files (assumed to be 1) 1.5) Duration of the frame files. 2) UNIX timestamp for directory modification time. 3) Number of files that that match the above pattern in the directory. 4) List of time range or segments [start, stop) We store the cache for each site and frameType combination as a dictionary where the keys are (directory, duration) tuples and the values are segment lists. Since the cache file is already coalesced we do not have to call the coalesce method on the segment lists. """ path = self.__path cache = self.cache if type_regex: type_filter = re.compile(type_regex) else: type_filter = None f = open(path, 'r') # holds this iteration of the cache gwfDict = {} # parse each line in the cache file for line in f: # ignore lines that don't match the regex if type_filter and type_filter.search(line) is None: continue # split on spaces and then comma to get the parts header, modTime, fileCount, times = line.strip().split(' ', 3) dir, site, frameType, frameCount, duration = header.split(',') duration = int(duration) # times string has form { t1 t2 t3 t4 t5 t6 ... 
tN t(N+1) } # where the (ti, t(i+1)) represent segments # # first turn the times string into a list of integers times = [ int(s) for s in times[1:-1].split(' ') ] # group the integers by two and turn those tuples into segments segments = [ pycbc_glue.segments.segment(a) for a in self.group(times, 2) ] # initialize if necessary for this site if not gwfDict.has_key(site): gwfDict[site] = {} # initialize if necessary for this frame type if not gwfDict[site].has_key(frameType): gwfDict[site][frameType] = {} # record segment list as value indexed by the (directory, duration) tuple key = (dir, duration) if gwfDict[site][frameType].has_key(key): msg = "The combination %s is not unique in the frame cache file" \ % str(key) raise RuntimeError, msg gwfDict[site][frameType][key] = pycbc_glue.segments.segmentlist(segments) f.close() cache['gwf'] = gwfDict
[ "def", "parse", "(", "self", ",", "type_regex", "=", "None", ")", ":", "path", "=", "self", ".", "__path", "cache", "=", "self", ".", "cache", "if", "type_regex", ":", "type_filter", "=", "re", ".", "compile", "(", "type_regex", ")", "else", ":", "ty...
32.589744
23.717949
def add_dependency(self, p_from_todo, p_to_todo): """ Adds a dependency from task 1 to task 2. """ def find_next_id(): """ Find a new unused ID. Unused means that no task has it as an 'id' value or as a 'p' value. """ def id_exists(p_id): """ Returns True if there exists a todo with the given parent ID. """ for todo in self._todos: number = str(p_id) if todo.has_tag('id', number) or todo.has_tag('p', number): return True return False new_id = 1 while id_exists(new_id): new_id += 1 return str(new_id) def append_projects_to_subtodo(): """ Appends projects in the parent todo item that are not present in the sub todo item. """ if config().append_parent_projects(): for project in p_from_todo.projects() - p_to_todo.projects(): self.append(p_to_todo, "+{}".format(project)) def append_contexts_to_subtodo(): """ Appends contexts in the parent todo item that are not present in the sub todo item. """ if config().append_parent_contexts(): for context in p_from_todo.contexts() - p_to_todo.contexts(): self.append(p_to_todo, "@{}".format(context)) if p_from_todo != p_to_todo and not self._depgraph.has_edge( hash(p_from_todo), hash(p_to_todo)): dep_id = None if p_from_todo.has_tag('id'): dep_id = p_from_todo.tag_value('id') else: dep_id = find_next_id() p_from_todo.set_tag('id', dep_id) p_to_todo.add_tag('p', dep_id) self._add_edge(p_from_todo, p_to_todo, dep_id) append_projects_to_subtodo() append_contexts_to_subtodo() self.dirty = True
[ "def", "add_dependency", "(", "self", ",", "p_from_todo", ",", "p_to_todo", ")", ":", "def", "find_next_id", "(", ")", ":", "\"\"\"\n Find a new unused ID.\n Unused means that no task has it as an 'id' value or as a 'p'\n value.\n \"\"\"", ...
35.448276
15.862069
def process_data(self, data, cloud_cover='total_clouds', **kwargs): """ Defines the steps needed to convert raw forecast data into processed forecast data. Parameters ---------- data: DataFrame Raw forecast data cloud_cover: str, default 'total_clouds' The type of cloud cover used to infer the irradiance. Returns ------- data: DataFrame Processed forecast data. """ data = super(GFS, self).process_data(data, **kwargs) data['temp_air'] = self.kelvin_to_celsius(data['temp_air']) data['wind_speed'] = self.uv_to_speed(data) irrads = self.cloud_cover_to_irradiance(data[cloud_cover], **kwargs) data = data.join(irrads, how='outer') return data[self.output_variables]
[ "def", "process_data", "(", "self", ",", "data", ",", "cloud_cover", "=", "'total_clouds'", ",", "*", "*", "kwargs", ")", ":", "data", "=", "super", "(", "GFS", ",", "self", ")", ".", "process_data", "(", "data", ",", "*", "*", "kwargs", ")", "data",...
35.565217
17.73913
def check_base_suggested_attributes(self, dataset): ''' Check the global suggested attributes for 2.0 templates. These go an extra step besides just checking that they exist. :param netCDF4.Dataset dataset: An open netCDF dataset :creator_type = "" ; //........................................ SUGGESTED - Specifies type of creator with one of the following: 'person', 'group', 'institution', or 'position'. (ACDD) :creator_institution = "" ; //................................. SUGGESTED - The institution of the creator; should uniquely identify the creator's institution. (ACDD) :publisher_type = "" ; //...................................... SUGGESTED - Specifies type of publisher with one of the following: 'person', 'group', 'institution', or 'position'. (ACDD) :publisher_institution = "" ; //............................... SUGGESTED - The institution that presented the data file or equivalent product to users; should uniquely identify the institution. (ACDD) :program = "" ; //............................................. SUGGESTED - The overarching program(s) of which the dataset is a part. (ACDD) :contributor_name = "" ; //.................................... SUGGESTED - The name of any individuals, projects, or institutions that contributed to the creation of this data. (ACDD) :contributor_role = "" ; //.................................... SUGGESTED - The role of any individuals, projects, or institutions that contributed to the creation of this data. (ACDD) :geospatial_lat_units = "degrees_north" ; //.................. SUGGESTED - Units for the latitude axis described in "geospatial_lat_min" and "geospatial_lat_max" attributes. Use UDUNITS compatible units. (ACDD) :geospatial_lon_units = "degrees_east"; //..................... SUGGESTED - Units for the longitude axis described in "geospatial_lon_min" and "geospatial_lon_max" attributes. Use UDUNITS compatible units. (ACDD) :geospatial_vertical_units = "" ; //........................... 
SUGGESTED - Units for the vertical axis described in "geospatial_vertical_min" and "geospatial_vertical_max" attributes. The default is EPSG:4979. (ACDD) :date_modified = "" ; //....................................... SUGGESTED - The date on which the data was last modified. Note that this applies just to the data, not the metadata. Use ISO 8601:2004 for date and time. (ACDD) :date_issued = "" ; //......................................... SUGGESTED - The date on which this data (including all modifications) was formally issued (i.e., made available to a wider audience). Note that these apply just to the data, not the metadata. Use ISO 8601:2004 for date and time. (ACDD) :date_metadata_modified = "" ; //.............................. SUGGESTED - The date on which the metadata was last modified. Use ISO 8601:2004 for date and time. (ACDD) :product_version = "" ; //..................................... SUGGESTED - Version identifier of the data file or product as assigned by the data creator. (ACDD) :keywords_vocabulary = "" ; //................................. SUGGESTED - Identifies the controlled keyword vocabulary used to specify the values within the attribute "keywords". Example: 'GCMD:GCMD Keywords' ACDD) :platform = "" ; //............................................ SUGGESTED - Name of the platform(s) that supported the sensor data used to create this data set or product. Platforms can be of any type, including satellite, ship, station, aircraft or other. (ACDD) :platform_vocabulary = "" ; //................................. SUGGESTED - Controlled vocabulary for the names used in the "platform" attribute . Example: ‘NASA/GCMD Platform Keywords Version 8.1’ (ACDD) :instrument = "" ; //.......................................... SUGGESTED - Name of the contributing instrument(s) or sensor(s) used to create this data set or product. (ACDD) :instrument_vocabulary = "" ; //............................... 
SUGGESTED - Controlled vocabulary for the names used in the "instrument" attribute. Example: ‘NASA/GCMD Instrument Keywords Version 8.1’ (ACDD) :cdm_data_type = "Point" ; //.................................. SUGGESTED - The data type, as derived from Unidata's Common Data Model Scientific Data types and understood by THREDDS. (ACDD) :metadata_link = "" ; //....................................... SUGGESTED - A URL that gives the location of more complete metadata. A persistent URL is recommended for this attribute. (ACDD) :references = "" ; //.......................................... SUGGESTED - Published or web-based references that describe the data or methods used to produce it. Recommend URIs (such as a URL or DOI) for papers or other references. (CF) ''' suggested_ctx = TestCtx(BaseCheck.LOW, 'Suggested global attributes') # Do any of the variables define platform ? platform_name = getattr(dataset, 'platform', '') suggested_ctx.assert_true(platform_name != '', 'platform should exist and point to a term in :platform_vocabulary.') cdm_data_type = getattr(dataset, 'cdm_data_type', '') suggested_ctx.assert_true(cdm_data_type.lower() in ['grid', 'image', 'point', 'radial', 'station', 'swath', 'trajectory'], 'cdm_data_type must be one of Grid, Image, Point, Radial, Station, Swath, Trajectory: {}'.format(cdm_data_type)) # Parse dates, check for ISO 8601 for attr in ['date_modified', 'date_issued', 'date_metadata_modified']: attr_value = getattr(dataset, attr, '') try: parse_datetime(attr_value) suggested_ctx.assert_true(True, '') # Score it True! 
except ISO8601Error: suggested_ctx.assert_true(False, '{} should exist and be ISO-8601 format (example: PT1M30S), currently: {}'.format(attr, attr_value)) units = getattr(dataset, 'geospatial_lat_units', '').lower() suggested_ctx.assert_true(units == 'degrees_north', 'geospatial_lat_units attribute should be degrees_north: {}'.format(units)) units = getattr(dataset, 'geospatial_lon_units', '').lower() suggested_ctx.assert_true(units == 'degrees_east', 'geospatial_lon_units attribute should be degrees_east: {}'.format(units)) contributor_name = getattr(dataset, 'contributor_name', '') contributor_role = getattr(dataset, 'contributor_role', '') names = contributor_role.split(',') roles = contributor_role.split(',') suggested_ctx.assert_true(contributor_name != '', 'contributor_name should exist and not be empty.') suggested_ctx.assert_true(len(names) == len(roles), 'length of contributor names matches length of roles') suggested_ctx.assert_true(contributor_role != '', 'contributor_role should exist and not be empty.') suggested_ctx.assert_true(len(names) == len(roles), 'length of contributor names matches length of roles') return suggested_ctx.to_result()
[ "def", "check_base_suggested_attributes", "(", "self", ",", "dataset", ")", ":", "suggested_ctx", "=", "TestCtx", "(", "BaseCheck", ".", "LOW", ",", "'Suggested global attributes'", ")", "# Do any of the variables define platform ?", "platform_name", "=", "getattr", "(", ...
109.030769
82.938462
def send_video_note(self, chat_id, data, duration=None, length=None, reply_to_message_id=None, reply_markup=None, disable_notification=None, timeout=None): """ Use this method to send video files, Telegram clients support mp4 videos. :param chat_id: Integer : Unique identifier for the message recipient — User or GroupChat id :param data: InputFile or String : Video note to send. You can either pass a file_id as String to resend a video that is already on the Telegram server :param duration: Integer : Duration of sent video in seconds :param length: Integer : Video width and height, Can't be None and should be in range of (0, 640) :param reply_to_message_id: :param reply_markup: :return: """ return types.Message.de_json( apihelper.send_video_note(self.token, chat_id, data, duration, length, reply_to_message_id, reply_markup, disable_notification, timeout))
[ "def", "send_video_note", "(", "self", ",", "chat_id", ",", "data", ",", "duration", "=", "None", ",", "length", "=", "None", ",", "reply_to_message_id", "=", "None", ",", "reply_markup", "=", "None", ",", "disable_notification", "=", "None", ",", "timeout",...
67.666667
37.4
def read_feather(cls, path, columns=None, use_threads=True): """Read a pandas.DataFrame from Feather format. Ray DataFrame only supports pyarrow engine for now. Args: path: The filepath of the feather file. We only support local files for now. multi threading is set to True by default columns: not supported by pandas api, but can be passed here to read only specific columns use_threads: Whether or not to use threads when reading Notes: pyarrow feather is used. Please refer to the documentation here https://arrow.apache.org/docs/python/api.html#feather-format """ if cls.read_feather_remote_task is None: return super(RayIO, cls).read_feather( path, columns=columns, use_threads=use_threads ) if columns is None: from pyarrow.feather import FeatherReader fr = FeatherReader(path) columns = [fr.get_column_name(i) for i in range(fr.num_columns)] num_partitions = cls.frame_mgr_cls._compute_num_partitions() num_splits = min(len(columns), num_partitions) # Each item in this list will be a list of column names of the original df column_splits = ( len(columns) // num_partitions if len(columns) % num_partitions == 0 else len(columns) // num_partitions + 1 ) col_partitions = [ columns[i : i + column_splits] for i in range(0, len(columns), column_splits) ] blk_partitions = np.array( [ cls.read_feather_remote_task._remote( args=(path, cols, num_splits), num_return_vals=num_splits + 1 ) for cols in col_partitions ] ).T remote_partitions = np.array( [ [cls.frame_partition_cls(obj) for obj in row] for row in blk_partitions[:-1] ] ) index_len = ray.get(blk_partitions[-1][0]) index = pandas.RangeIndex(index_len) new_query_compiler = cls.query_compiler_cls( cls.frame_mgr_cls(remote_partitions), index, columns ) return new_query_compiler
[ "def", "read_feather", "(", "cls", ",", "path", ",", "columns", "=", "None", ",", "use_threads", "=", "True", ")", ":", "if", "cls", ".", "read_feather_remote_task", "is", "None", ":", "return", "super", "(", "RayIO", ",", "cls", ")", ".", "read_feather"...
38.694915
20.423729
def add_perfdata(self, *args, **kwargs): """ add a perfdata to the internal perfdata list arguments: the same arguments as for Perfdata() """ self._perfdata.append(Perfdata(*args, **kwargs))
[ "def", "add_perfdata", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_perfdata", ".", "append", "(", "Perfdata", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")" ]
29.5
12.25
def get_gene2binvec(self): """Return a boolean vector for each gene representing GO section membership.""" _sec2chr = self.sec2chr return {g:[s in s2gos for s in _sec2chr] for g, s2gos in self.gene2section2gos.items()}
[ "def", "get_gene2binvec", "(", "self", ")", ":", "_sec2chr", "=", "self", ".", "sec2chr", "return", "{", "g", ":", "[", "s", "in", "s2gos", "for", "s", "in", "_sec2chr", "]", "for", "g", ",", "s2gos", "in", "self", ".", "gene2section2gos", ".", "item...
59.75
19.5
def get_bits( self, count ): """Get an integer containing the next [count] bits from the source.""" result = 0 for i in range( count ): if self.bits_remaining <= 0: self._fill_buffer() if self.bits_reverse: bit = (1 if (self.current_bits & (0x80 << 8*(self.bytes_to_cache-1))) else 0) self.current_bits <<= 1 self.current_bits &= 0xff else: bit = (self.current_bits & 1) self.current_bits >>= 1 self.bits_remaining -= 1 if self.output_reverse: result <<= 1 result |= bit else: result |= bit << i return result
[ "def", "get_bits", "(", "self", ",", "count", ")", ":", "result", "=", "0", "for", "i", "in", "range", "(", "count", ")", ":", "if", "self", ".", "bits_remaining", "<=", "0", ":", "self", ".", "_fill_buffer", "(", ")", "if", "self", ".", "bits_reve...
33.545455
13.545455
def _filenames_to_modulenames(filenames: Iterable[str], modulename_prefix: str, filename_prefix: str = '') -> Iterable[str]: '''Convert given filenames to module names. Any filename that does not have a corresponding module name will be dropped from the result (i.e. __init__.py). Parameters ---------- :``filename_prefix``: a prefix to drop from all filenames (typically a common directory); defaults to '' :``filenames``: the filenames to transform into module names :``modulename_prefix``: a prefix to add to all module names Return Value(s) --------------- A list of modulenames corresponding to all filenames (for legal module names). ''' modulenames = [] # type: Iterable[str] for filename in filenames: if not filename.endswith('.py'): continue name = filename name = name.replace(filename_prefix, '') name = name.replace('__init__.py', '') name = name.replace('.py', '') name = name.replace('/', '.') name = name.strip('.') if not len(name): continue if not modulename_prefix.endswith('.'): modulename_prefix += '.' name = modulename_prefix + name known_symbols = set() name = '.'.join([ _ for _ in name.split('.') if _ not in known_symbols and not known_symbols.add(_) ]) if len(name): modulenames.append(name) return modulenames
[ "def", "_filenames_to_modulenames", "(", "filenames", ":", "Iterable", "[", "str", "]", ",", "modulename_prefix", ":", "str", ",", "filename_prefix", ":", "str", "=", "''", ")", "->", "Iterable", "[", "str", "]", ":", "modulenames", "=", "[", "]", "# type:...
28.54902
26.27451
def isCollapsed( self ): """ Returns whether or not this group box is collapsed. :return <bool> """ if not self.isCollapsible(): return False if self._inverted: return self.isChecked() return not self.isChecked()
[ "def", "isCollapsed", "(", "self", ")", ":", "if", "not", "self", ".", "isCollapsible", "(", ")", ":", "return", "False", "if", "self", ".", "_inverted", ":", "return", "self", ".", "isChecked", "(", ")", "return", "not", "self", ".", "isChecked", "(",...
26.166667
12.5
def size(col): """ Collection function: returns the length of the array or map stored in the column. :param col: name of column or expression >>> df = spark.createDataFrame([([1, 2, 3],),([1],),([],)], ['data']) >>> df.select(size(df.data)).collect() [Row(size(data)=3), Row(size(data)=1), Row(size(data)=0)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.size(_to_java_column(col)))
[ "def", "size", "(", "col", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "size", "(", "_to_java_column", "(", "col", ")", ")", ")" ]
36.583333
19.75
def absolute(value): """Return the absolute value.""" try: return abs(valid_numeric(value)) except (ValueError, TypeError): try: return abs(value) except Exception: return ''
[ "def", "absolute", "(", "value", ")", ":", "try", ":", "return", "abs", "(", "valid_numeric", "(", "value", ")", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "try", ":", "return", "abs", "(", "value", ")", "except", "Exception", ":", ...
25.111111
14.444444
def parse_from_string( root_processor, # type: RootProcessor xml_string # type: Text ): # type: (...) -> Any """ Parse the XML string using the processor starting from the root of the document. :param xml_string: XML string to parse. See also :func:`declxml.parse_from_file` """ if not _is_valid_root_processor(root_processor): raise InvalidRootProcessor('Invalid root processor') parseable_xml_string = xml_string # type: Union[Text, bytes] if _PY2 and isinstance(xml_string, Text): parseable_xml_string = xml_string.encode('utf-8') root = ET.fromstring(parseable_xml_string) _xml_namespace_strip(root) state = _ProcessorState() state.push_location(root_processor.element_path) return root_processor.parse_at_root(root, state)
[ "def", "parse_from_string", "(", "root_processor", ",", "# type: RootProcessor", "xml_string", "# type: Text", ")", ":", "# type: (...) -> Any", "if", "not", "_is_valid_root_processor", "(", "root_processor", ")", ":", "raise", "InvalidRootProcessor", "(", "'Invalid root pr...
31.96
18.68
def guess_type(self, path, allow_directory=True): """ Guess the type of a file. If allow_directory is False, don't consider the possibility that the file is a directory. """ if path.endswith('.ipynb'): return 'notebook' elif allow_directory and self.dir_exists(path): return 'directory' else: return 'file'
[ "def", "guess_type", "(", "self", ",", "path", ",", "allow_directory", "=", "True", ")", ":", "if", "path", ".", "endswith", "(", "'.ipynb'", ")", ":", "return", "'notebook'", "elif", "allow_directory", "and", "self", ".", "dir_exists", "(", "path", ")", ...
30.384615
14.384615
def is_parent_of_log(self, id_, log_id): """Tests if an ``Id`` is a direct parent of a log. arg: id (osid.id.Id): an ``Id`` arg: log_id (osid.id.Id): the ``Id`` of a log return: (boolean) - ``true`` if this ``id`` is a parent of ``log_id,`` ``false`` otherwise raise: NotFound - ``log_id`` is not found raise: NullArgument - ``id`` or ``log_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` not found return ``false``. """ # Implemented from template for # osid.resource.BinHierarchySession.is_parent_of_bin if self._catalog_session is not None: return self._catalog_session.is_parent_of_catalog(id_=id_, catalog_id=log_id) return self._hierarchy_session.is_parent(id_=log_id, parent_id=id_)
[ "def", "is_parent_of_log", "(", "self", ",", "id_", ",", "log_id", ")", ":", "# Implemented from template for", "# osid.resource.BinHierarchySession.is_parent_of_bin", "if", "self", ".", "_catalog_session", "is", "not", "None", ":", "return", "self", ".", "_catalog_sess...
49.7
18.35
def _getLayer(self, name, **kwargs): """ This is the environment implementation of :meth:`BaseFont.getLayer`. **name** will be a :ref:`type-string`. It will have been normalized with :func:`normalizers.normalizeLayerName` and it will have been verified as an existing layer. This must return an instance of :class:`BaseLayer`. Subclasses may override this method. """ for layer in self.layers: if layer.name == name: return layer
[ "def", "_getLayer", "(", "self", ",", "name", ",", "*", "*", "kwargs", ")", ":", "for", "layer", "in", "self", ".", "layers", ":", "if", "layer", ".", "name", "==", "name", ":", "return", "layer" ]
37.5
11.5
def enkf(self): """ Loop over time windows and apply da :return: """ for cycle_index, time_point in enumerate(self.timeline): if cycle_index >= len(self.timeline) - 1: # Logging : Last Update cycle has finished break print("Print information about this assimilation Cycle ???") # should be handeled in Logger # each cycle should have a dictionary of template files and instruction files to update the model inout # files # get current cycle update information current_cycle_files = self.cycle_update_files[cycle_index] # (1) update model input files for this cycle self.model_temporal_evolotion(cycle_index, current_cycle_files) # (2) generate new Pst object for the current time cycle current_pst = copy.deepcopy(self.pst) # update observation dataframe # update parameter dataframe # update in/out files if needed # At this stage the problem is equivalent to smoother problem self.smoother(current_pst)
[ "def", "enkf", "(", "self", ")", ":", "for", "cycle_index", ",", "time_point", "in", "enumerate", "(", "self", ".", "timeline", ")", ":", "if", "cycle_index", ">=", "len", "(", "self", ".", "timeline", ")", "-", "1", ":", "# Logging : Last Update cycle has...
39.206897
24.034483
def split_all_edges_between_two_vertices(self, vertex1, vertex2, guidance=None, sorted_guidance=False, account_for_colors_multiplicity_in_guidance=True): """ Splits all edges between two supplied vertices in current :class:`BreakpointGraph` instance with respect to the provided guidance. Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__split_all_edges_between_two_vertices` method. :param vertex1: a first out of two vertices edges between which are to be split :type vertex1: any python hashable object. :class:`bg.vertex.BGVertex` is expected :param vertex2: a second out of two vertices edges between which are to be split :type vertex2: any python hashable object. :class:`bg.vertex.BGVertex` is expected :param guidance: a guidance for underlying :class:`bg.multicolor.Multicolor` objects to be split :type guidance: iterable where each entry is iterable with colors entries :return: ``None``, performs inplace changes """ self.__split_all_edges_between_two_vertices(vertex1=vertex1, vertex2=vertex2, guidance=guidance, sorted_guidance=sorted_guidance, account_for_colors_multiplicity_in_guidance=account_for_colors_multiplicity_in_guidance)
[ "def", "split_all_edges_between_two_vertices", "(", "self", ",", "vertex1", ",", "vertex2", ",", "guidance", "=", "None", ",", "sorted_guidance", "=", "False", ",", "account_for_colors_multiplicity_in_guidance", "=", "True", ")", ":", "self", ".", "__split_all_edges_b...
81.235294
46.352941
def debugTreePrint(node,pfx="->"): """Purely a debugging aid: Ascii-art picture of a tree descended from node""" print pfx,node.item for c in node.children: debugTreePrint(c," "+pfx)
[ "def", "debugTreePrint", "(", "node", ",", "pfx", "=", "\"->\"", ")", ":", "print", "pfx", ",", "node", ".", "item", "for", "c", "in", "node", ".", "children", ":", "debugTreePrint", "(", "c", ",", "\" \"", "+", "pfx", ")" ]
37.8
10
def disconnect_entry_signals(): """ Disconnect all the signals on Entry model. """ post_save.disconnect( sender=Entry, dispatch_uid=ENTRY_PS_PING_DIRECTORIES) post_save.disconnect( sender=Entry, dispatch_uid=ENTRY_PS_PING_EXTERNAL_URLS) post_save.disconnect( sender=Entry, dispatch_uid=ENTRY_PS_FLUSH_SIMILAR_CACHE) post_delete.disconnect( sender=Entry, dispatch_uid=ENTRY_PD_FLUSH_SIMILAR_CACHE)
[ "def", "disconnect_entry_signals", "(", ")", ":", "post_save", ".", "disconnect", "(", "sender", "=", "Entry", ",", "dispatch_uid", "=", "ENTRY_PS_PING_DIRECTORIES", ")", "post_save", ".", "disconnect", "(", "sender", "=", "Entry", ",", "dispatch_uid", "=", "ENT...
29.5625
11.5625
def modifier_id(self, modifier_id): """ Sets the modifier_id of this CatalogModifierOverride. The ID of the [CatalogModifier](#type-catalogmodifier) whose default behavior is being overridden. :param modifier_id: The modifier_id of this CatalogModifierOverride. :type: str """ if modifier_id is None: raise ValueError("Invalid value for `modifier_id`, must not be `None`") if len(modifier_id) < 1: raise ValueError("Invalid value for `modifier_id`, length must be greater than or equal to `1`") self._modifier_id = modifier_id
[ "def", "modifier_id", "(", "self", ",", "modifier_id", ")", ":", "if", "modifier_id", "is", "None", ":", "raise", "ValueError", "(", "\"Invalid value for `modifier_id`, must not be `None`\"", ")", "if", "len", "(", "modifier_id", ")", "<", "1", ":", "raise", "Va...
40.733333
26.6
def _replies(self, *args, **kwargs): """Overridable method.""" reply_msg = make_reply(*args, **kwargs) if self._server: self._server._log('\t%d\t<-- %r' % (self.client_port, reply_msg)) reply_bytes = reply_msg.reply_bytes(self) self._client.sendall(reply_bytes)
[ "def", "_replies", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "reply_msg", "=", "make_reply", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "self", ".", "_server", ":", "self", ".", "_server", ".", "_log", "(", "'\\...
43.857143
10.571429
def INDEX_OF_CP(string_expression, substring_expression, start=None, end=None): """ Searches a string for an occurence of a substring and returns the UTF-8 code point index (zero-based) of the first occurence. If the substring is not found, returns -1. https://docs.mongodb.com/manual/reference/operator/aggregation/indexOfCP/ for more details :param string_expression: The string or expression of string :param substring_expression: The string or expression of substring :param start: A number that can be represented as integers (or expression), that specifies the starting index position for the search. :param end: A number that can be represented as integers (or expression), that specifies the ending index position for the search. :return: Aggregation operator """ res = [string_expression, substring_expression] if start is not None: res.append(start) if end is not None: res.append(end) return {'$indexOfCP': res}
[ "def", "INDEX_OF_CP", "(", "string_expression", ",", "substring_expression", ",", "start", "=", "None", ",", "end", "=", "None", ")", ":", "res", "=", "[", "string_expression", ",", "substring_expression", "]", "if", "start", "is", "not", "None", ":", "res",...
54.5
29.388889
def remove_child_book(self, book_id, child_id): """Removes a child from a book. arg: book_id (osid.id.Id): the ``Id`` of a book arg: child_id (osid.id.Id): the ``Id`` of the new child raise: NotFound - ``book_id`` not a parent of ``child_id`` raise: NullArgument - ``book_id`` or ``child_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinHierarchyDesignSession.remove_child_bin_template if self._catalog_session is not None: return self._catalog_session.remove_child_catalog(catalog_id=book_id, child_id=child_id) return self._hierarchy_session.remove_child(id_=book_id, child_id=child_id)
[ "def", "remove_child_book", "(", "self", ",", "book_id", ",", "child_id", ")", ":", "# Implemented from template for", "# osid.resource.BinHierarchyDesignSession.remove_child_bin_template", "if", "self", ".", "_catalog_session", "is", "not", "None", ":", "return", "self", ...
51.941176
23.235294
def reindex_like_indexers(target, other): """Extract indexers to align target with other. Not public API. Parameters ---------- target : Dataset or DataArray Object to be aligned. other : Dataset or DataArray Object to be aligned with. Returns ------- Dict[Any, pandas.Index] providing indexes for reindex keyword arguments. Raises ------ ValueError If any dimensions without labels have different sizes. """ indexers = {k: v for k, v in other.indexes.items() if k in target.dims} for dim in other.dims: if dim not in indexers and dim in target.dims: other_size = other.sizes[dim] target_size = target.sizes[dim] if other_size != target_size: raise ValueError('different size for unlabeled ' 'dimension on argument %r: %r vs %r' % (dim, other_size, target_size)) return indexers
[ "def", "reindex_like_indexers", "(", "target", ",", "other", ")", ":", "indexers", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "other", ".", "indexes", ".", "items", "(", ")", "if", "k", "in", "target", ".", "dims", "}", "for", "dim", "i...
30.1875
21.125
def snmp_server_engineID_drop_engineID_local(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") snmp_server = ET.SubElement(config, "snmp-server", xmlns="urn:brocade.com:mgmt:brocade-snmp") engineID_drop = ET.SubElement(snmp_server, "engineID-drop") engineID = ET.SubElement(engineID_drop, "engineID") local = ET.SubElement(engineID, "local") local.text = kwargs.pop('local') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "snmp_server_engineID_drop_engineID_local", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "snmp_server", "=", "ET", ".", "SubElement", "(", "config", ",", "\"snmp-server\"", ",", "xmlns", ...
45.166667
17.083333
def delete_editor(userid): """ :param userid: a string representing the user's UW NetID :return: True if request is successful, False otherwise. raise DataFailureException or a corresponding TrumbaException if the request failed or an error code has been returned. """ url = _make_del_account_url(userid) return _process_resp(url, get_sea_resource(url), _is_editor_deleted )
[ "def", "delete_editor", "(", "userid", ")", ":", "url", "=", "_make_del_account_url", "(", "userid", ")", "return", "_process_resp", "(", "url", ",", "get_sea_resource", "(", "url", ")", ",", "_is_editor_deleted", ")" ]
39.166667
11.333333
def machine_usage(self, hall_no): """Returns the average usage of laundry machines every hour for a given hall. The usages are returned in a dictionary, with the key being the day of the week, and the value being an array listing the usages per hour. :param hall_no: integer corresponding to the id number for the hall. Thus number is returned as part of the all_status call. >>> english_house = l.machine_usage(2) """ try: num = int(hall_no) except ValueError: raise ValueError("Room Number must be integer") r = requests.get(USAGE_BASE_URL + str(num), timeout=60) parsed = BeautifulSoup(r.text, 'html5lib') usage_table = parsed.find_all('table', width='504px')[0] rows = usage_table.find_all('tr') usages = {} for i, row in enumerate(rows): day = [] hours = row.find_all('td') for hour in hours: day.append(self.busy_dict[str(hour['class'][0])]) usages[self.days[i]] = day return usages
[ "def", "machine_usage", "(", "self", ",", "hall_no", ")", ":", "try", ":", "num", "=", "int", "(", "hall_no", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"Room Number must be integer\"", ")", "r", "=", "requests", ".", "get", "(", "USAG...
35.870968
18.516129
def _get_files(self): """ Walk the project directory for tests and returns a list. :return: list """ excludes = [ '.git', '.tox', '.vagrant', '.venv', os.path.basename(self._config.verifier.directory), ] generators = [ util.os_walk(self._config.project_directory, '*.yml', excludes), util.os_walk(self._config.project_directory, '*.yaml', excludes), ] return [f for g in generators for f in g]
[ "def", "_get_files", "(", "self", ")", ":", "excludes", "=", "[", "'.git'", ",", "'.tox'", ",", "'.vagrant'", ",", "'.venv'", ",", "os", ".", "path", ".", "basename", "(", "self", ".", "_config", ".", "verifier", ".", "directory", ")", ",", "]", "gen...
28.052632
22.368421
def rm(i): """ Input: { (repo_uoa) - repo UOA (where to delete entry about repository) uoa - data UOA (force) - if 'yes', force removal (with_files) or (all) - if 'yes', remove files as well } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 } """ # Check if global writing is allowed r=ck.check_writing({}) if r['return']>0: return r global cache_repo_uoa, cache_repo_info ruoa=i.get('repo_uoa','') uoa=i.get('data_uoa','') o=i.get('out','') if uoa=='': return {'return':1, 'error':'UOA of the repository is not defined'} wf=i.get('with_files','') if wf=='': wf=i.get('all','') force=i.get('force','') r=ck.access({'action':'load', 'repo_uoa':ruoa, 'module_uoa':work['self_module_uoa'], 'data_uoa':uoa, 'common_func':'yes'}) if r['return']>0: return r duid=r['data_uid'] duoa=r['data_uoa'] d=r['dict'] p=d.get('path','') to_delete=True if o=='con' and force!='yes': r=ck.inp({'text':'Are you sure to delete information about repository '+duoa+' (y/N): '}) c=r['string'].lower() if c!='yes' and c!='y': to_delete=False if to_delete and o=='con' and force!='yes' and wf=='yes': r=ck.inp({'text':'You indicated that you want to DELETE ALL ENTRIES IN THE REPOSITORY! 
Are you sure (y/N): '}) x=r['string'].lower() if x!='yes' and x!='y': wf='' if to_delete: if o=='con': ck.out('') ck.out('Reloading repo cache ...') r=ck.reload_repo_cache({}) # Ignore errors if r['return']>0: return r if o=='con': ck.out('Removing from cache ...') if duoa in ck.cache_repo_uoa: del (ck.cache_repo_uoa[duoa]) if duid in ck.cache_repo_info: del (ck.cache_repo_info[duid]) if o=='con': ck.out('Rewriting repo cache ...') r=ck.save_repo_cache({}) if r['return']>0: return r if o=='con': ck.out('Removing entry ...') r=ck.access({'action':'remove', 'repo_uoa':ruoa, 'module_uoa':work['self_module_uoa'], 'data_uoa':uoa, 'common_func':'yes'}) if r['return']>0: return r if wf=='yes' and p!='': if o=='con': ck.out('Removing entries from the repository ...') import shutil if os.path.isdir(p): shutil.rmtree(p, onerror=ck.rm_read_only) if o=='con': ck.out('') ck.out('Information about repository was removed successfully!') if wf!='yes': ck.out('Note: repository itself was not removed!') return {'return':0}
[ "def", "rm", "(", "i", ")", ":", "# Check if global writing is allowed", "r", "=", "ck", ".", "check_writing", "(", "{", "}", ")", "if", "r", "[", "'return'", "]", ">", "0", ":", "return", "r", "global", "cache_repo_uoa", ",", "cache_repo_info", "ruoa", ...
30.585106
21.138298
def find_kernel_specs(self): """Returns a dict mapping kernel names to resource directories.""" # let real installed kernels overwrite envs with the same name: # this is the same order as the get_kernel_spec way, which also prefers # kernels from the jupyter dir over env kernels. specs = self.find_kernel_specs_for_envs() specs.update(super(EnvironmentKernelSpecManager, self).find_kernel_specs()) return specs
[ "def", "find_kernel_specs", "(", "self", ")", ":", "# let real installed kernels overwrite envs with the same name:", "# this is the same order as the get_kernel_spec way, which also prefers", "# kernels from the jupyter dir over env kernels.", "specs", "=", "self", ".", "find_kernel_specs...
54
17.333333
def are_imaging_dicoms(dicom_input): """ This function will check the dicom headers to see which type of series it is Possibilities are fMRI, DTI, Anatomical (if no clear type is found anatomical is used) :param dicom_input: directory with dicom files or a list of dicom objects """ # if it is philips and multiframe dicom then we assume it is ok if common.is_philips(dicom_input): if common.is_multiframe_dicom(dicom_input): return True # for all others if there is image position patient we assume it is ok header = dicom_input[0] return Tag(0x0020, 0x0037) in header
[ "def", "are_imaging_dicoms", "(", "dicom_input", ")", ":", "# if it is philips and multiframe dicom then we assume it is ok", "if", "common", ".", "is_philips", "(", "dicom_input", ")", ":", "if", "common", ".", "is_multiframe_dicom", "(", "dicom_input", ")", ":", "retu...
38.5625
22.1875
def make_digest_file(data_file, digest_file): '''Create a file containing the hex digest string of a data file.''' hexdigest = get_file_hexdigest(data_file) fd = open(digest_file, 'w') fd.write(hexdigest) fd.close()
[ "def", "make_digest_file", "(", "data_file", ",", "digest_file", ")", ":", "hexdigest", "=", "get_file_hexdigest", "(", "data_file", ")", "fd", "=", "open", "(", "digest_file", ",", "'w'", ")", "fd", ".", "write", "(", "hexdigest", ")", "fd", ".", "close",...
32.857143
19.142857
def people_per_project(self, project_id, company_id): """ This will return all of the people in the given company that can access the given project. """ path = '/projects/%u/contacts/people/%u' % (project_id, company_id) return self._request(path)
[ "def", "people_per_project", "(", "self", ",", "project_id", ",", "company_id", ")", ":", "path", "=", "'/projects/%u/contacts/people/%u'", "%", "(", "project_id", ",", "company_id", ")", "return", "self", ".", "_request", "(", "path", ")" ]
41.285714
13.285714
def get_project_config_path(path=None): """Return project configuration folder if exist.""" project_path = Path(path or '.').absolute().joinpath(RENKU_HOME) if project_path.exists() and project_path.is_dir(): return str(project_path)
[ "def", "get_project_config_path", "(", "path", "=", "None", ")", ":", "project_path", "=", "Path", "(", "path", "or", "'.'", ")", ".", "absolute", "(", ")", ".", "joinpath", "(", "RENKU_HOME", ")", "if", "project_path", ".", "exists", "(", ")", "and", ...
49.8
10.4
def build_loss(model_logits, sparse_targets): """Compute the log loss given predictions and targets.""" time_major_shape = [FLAGS.unroll_steps, FLAGS.batch_size] flat_batch_shape = [FLAGS.unroll_steps * FLAGS.batch_size, -1] xent = tf.nn.sparse_softmax_cross_entropy_with_logits( logits=tf.reshape(model_logits, flat_batch_shape), labels=tf.reshape(sparse_targets, flat_batch_shape[:-1])) xent = tf.reshape(xent, time_major_shape) # Sum over the sequence. sequence_neg_log_prob = tf.reduce_sum(xent, axis=0) # Average over the batch. return tf.reduce_mean(sequence_neg_log_prob, axis=0)
[ "def", "build_loss", "(", "model_logits", ",", "sparse_targets", ")", ":", "time_major_shape", "=", "[", "FLAGS", ".", "unroll_steps", ",", "FLAGS", ".", "batch_size", "]", "flat_batch_shape", "=", "[", "FLAGS", ".", "unroll_steps", "*", "FLAGS", ".", "batch_s...
50.416667
13.333333
def smoothMLS3D(actors, neighbours=10): """ A time sequence of actors is being smoothed in 4D using a `MLS (Moving Least Squares)` variant. The time associated to an actor must be specified in advance with ``actor.time()`` method. Data itself can suggest a meaningful time separation based on the spatial distribution of points. :param int neighbours: fixed nr. of neighbours in space-time to take into account in the fit. .. hint:: |moving_least_squares3D| |moving_least_squares3D.py|_ """ from scipy.spatial import KDTree coords4d = [] for a in actors: # build the list of 4d coordinates coords3d = a.coordinates() n = len(coords3d) pttimes = [[a.time()]] * n coords4d += np.append(coords3d, pttimes, axis=1).tolist() avedt = float(actors[-1].time() - actors[0].time()) / len(actors) print("Average time separation between actors dt =", round(avedt, 3)) coords4d = np.array(coords4d) newcoords4d = [] kd = KDTree(coords4d, leafsize=neighbours) suggest = "" pb = vio.ProgressBar(0, len(coords4d)) for i in pb.range(): mypt = coords4d[i] # dr = np.sqrt(3*dx**2+dt**2) # iclosest = kd.query_ball_Point(mypt, r=dr) # dists, iclosest = kd.query(mypt, k=None, distance_upper_bound=dr) dists, iclosest = kd.query(mypt, k=neighbours) closest = coords4d[iclosest] nc = len(closest) if nc >= neighbours and nc > 5: m = np.linalg.lstsq(closest, [1.0] * nc)[0] # needs python3 vers = m / np.linalg.norm(m) hpcenter = np.mean(closest, axis=0) # hyperplane center dist = np.dot(mypt - hpcenter, vers) projpt = mypt - dist * vers newcoords4d.append(projpt) if not i % 1000: # work out some stats v = np.std(closest, axis=0) vx = round((v[0] + v[1] + v[2]) / 3, 3) suggest = "data suggest dt=" + str(vx) pb.print(suggest) newcoords4d = np.array(newcoords4d) ctimes = newcoords4d[:, 3] ccoords3d = np.delete(newcoords4d, 3, axis=1) # get rid of time act = vs.Points(ccoords3d) act.pointColors(ctimes, cmap="jet") # use a colormap to associate a color to time return act
[ "def", "smoothMLS3D", "(", "actors", ",", "neighbours", "=", "10", ")", ":", "from", "scipy", ".", "spatial", "import", "KDTree", "coords4d", "=", "[", "]", "for", "a", "in", "actors", ":", "# build the list of 4d coordinates", "coords3d", "=", "a", ".", "...
36.901639
20.180328
def editFolder(self, description, webEncrypted=False): """ This operation allows you to change the description of an existing folder or change the web encrypted property. The web encrypted property indicates if all the services contained in the folder are only accessible over a secure channel (SSL). When setting this property to true, you also need to enable the virtual directory security in the security configuration. Inputs: description - a description of the folder webEncrypted - boolean to indicate if the services are accessible over SSL only. """ url = self._url + "/editFolder" params = { "f" : "json", "webEncrypted" : webEncrypted, "description" : "%s" % description } return self._post(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
[ "def", "editFolder", "(", "self", ",", "description", ",", "webEncrypted", "=", "False", ")", ":", "url", "=", "self", ".", "_url", "+", "\"/editFolder\"", "params", "=", "{", "\"f\"", ":", "\"json\"", ",", "\"webEncrypted\"", ":", "webEncrypted", ",", "\"...
43.6
17.36
def acquire_authorization_header(self): """Acquire tokens from AAD.""" try: return self._acquire_authorization_header() except AdalError as error: if self._authentication_method is AuthenticationMethod.aad_username_password: kwargs = {"username": self._username, "client_id": self._client_id} elif self._authentication_method is AuthenticationMethod.aad_application_key: kwargs = {"client_id": self._client_id} elif self._authentication_method is AuthenticationMethod.aad_device_login: kwargs = {"client_id": self._client_id} elif self._authentication_method is AuthenticationMethod.aad_application_certificate: kwargs = {"client_id": self._client_id, "thumbprint": self._thumbprint} else: raise error kwargs["resource"] = self._kusto_cluster kwargs["authority"] = self._adal_context.authority.url raise KustoAuthenticationError(self._authentication_method.value, error, **kwargs)
[ "def", "acquire_authorization_header", "(", "self", ")", ":", "try", ":", "return", "self", ".", "_acquire_authorization_header", "(", ")", "except", "AdalError", "as", "error", ":", "if", "self", ".", "_authentication_method", "is", "AuthenticationMethod", ".", "...
53.75
28.95
def get_file_version_info(cls, filename): """ Get the program version from an executable file, if available. @type filename: str @param filename: Pathname to the executable file to query. @rtype: tuple(str, str, bool, bool, str, str) @return: Tuple with version information extracted from the executable file metadata, containing the following: - File version number (C{"major.minor"}). - Product version number (C{"major.minor"}). - C{True} for debug builds, C{False} for production builds. - C{True} for legacy OS builds (DOS, OS/2, Win16), C{False} for modern OS builds. - Binary file type. May be one of the following values: - "application" - "dynamic link library" - "static link library" - "font" - "raster font" - "TrueType font" - "vector font" - "driver" - "communications driver" - "display driver" - "installable driver" - "keyboard driver" - "language driver" - "legacy driver" - "mouse driver" - "network driver" - "printer driver" - "sound driver" - "system driver" - "versioned printer driver" - Binary creation timestamp. Any of the fields may be C{None} if not available. @raise WindowsError: Raises an exception on error. """ # Get the file version info structure. pBlock = win32.GetFileVersionInfo(filename) pBuffer, dwLen = win32.VerQueryValue(pBlock, "\\") if dwLen != ctypes.sizeof(win32.VS_FIXEDFILEINFO): raise ctypes.WinError(win32.ERROR_BAD_LENGTH) pVersionInfo = ctypes.cast(pBuffer, ctypes.POINTER(win32.VS_FIXEDFILEINFO)) VersionInfo = pVersionInfo.contents if VersionInfo.dwSignature != 0xFEEF04BD: raise ctypes.WinError(win32.ERROR_BAD_ARGUMENTS) # File and product versions. FileVersion = "%d.%d" % (VersionInfo.dwFileVersionMS, VersionInfo.dwFileVersionLS) ProductVersion = "%d.%d" % (VersionInfo.dwProductVersionMS, VersionInfo.dwProductVersionLS) # Debug build? if VersionInfo.dwFileFlagsMask & win32.VS_FF_DEBUG: DebugBuild = (VersionInfo.dwFileFlags & win32.VS_FF_DEBUG) != 0 else: DebugBuild = None # Legacy OS build? 
LegacyBuild = (VersionInfo.dwFileOS != win32.VOS_NT_WINDOWS32) # File type. FileType = cls.__binary_types.get(VersionInfo.dwFileType) if VersionInfo.dwFileType == win32.VFT_DRV: FileType = cls.__driver_types.get(VersionInfo.dwFileSubtype) elif VersionInfo.dwFileType == win32.VFT_FONT: FileType = cls.__font_types.get(VersionInfo.dwFileSubtype) # Timestamp, ex: "Monday, July 7, 2013 (12:20:50.126)". # FIXME: how do we know the time zone? FileDate = (VersionInfo.dwFileDateMS << 32) + VersionInfo.dwFileDateLS if FileDate: CreationTime = win32.FileTimeToSystemTime(FileDate) CreationTimestamp = "%s, %s %d, %d (%d:%d:%d.%d)" % ( cls.__days_of_the_week[CreationTime.wDayOfWeek], cls.__months[CreationTime.wMonth], CreationTime.wDay, CreationTime.wYear, CreationTime.wHour, CreationTime.wMinute, CreationTime.wSecond, CreationTime.wMilliseconds, ) else: CreationTimestamp = None # Return the file version info. return ( FileVersion, ProductVersion, DebugBuild, LegacyBuild, FileType, CreationTimestamp, )
[ "def", "get_file_version_info", "(", "cls", ",", "filename", ")", ":", "# Get the file version info structure.", "pBlock", "=", "win32", ".", "GetFileVersionInfo", "(", "filename", ")", "pBuffer", ",", "dwLen", "=", "win32", ".", "VerQueryValue", "(", "pBlock", ",...
39.07767
16.84466
def fit_theta(self): """use least squares to fit all default curves parameter seperately Returns ------- None """ x = range(1, self.point_num + 1) y = self.trial_history for i in range(NUM_OF_FUNCTIONS): model = curve_combination_models[i] try: # The maximum number of iterations to fit is 100*(N+1), where N is the number of elements in `x0`. if model_para_num[model] == 2: a, b = optimize.curve_fit(all_models[model], x, y)[0] model_para[model][0] = a model_para[model][1] = b elif model_para_num[model] == 3: a, b, c = optimize.curve_fit(all_models[model], x, y)[0] model_para[model][0] = a model_para[model][1] = b model_para[model][2] = c elif model_para_num[model] == 4: a, b, c, d = optimize.curve_fit(all_models[model], x, y)[0] model_para[model][0] = a model_para[model][1] = b model_para[model][2] = c model_para[model][3] = d except (RuntimeError, FloatingPointError, OverflowError, ZeroDivisionError): # Ignore exceptions caused by numerical calculations pass except Exception as exception: logger.critical("Exceptions in fit_theta:", exception)
[ "def", "fit_theta", "(", "self", ")", ":", "x", "=", "range", "(", "1", ",", "self", ".", "point_num", "+", "1", ")", "y", "=", "self", ".", "trial_history", "for", "i", "in", "range", "(", "NUM_OF_FUNCTIONS", ")", ":", "model", "=", "curve_combinati...
45.393939
16.363636
def run_netsh_command(netsh_args): """Execute a netsh command and return the output.""" devnull = open(os.devnull, 'w') command_raw = 'netsh interface ipv4 ' + netsh_args return int(subprocess.call(command_raw, stdout=devnull))
[ "def", "run_netsh_command", "(", "netsh_args", ")", ":", "devnull", "=", "open", "(", "os", ".", "devnull", ",", "'w'", ")", "command_raw", "=", "'netsh interface ipv4 '", "+", "netsh_args", "return", "int", "(", "subprocess", ".", "call", "(", "command_raw", ...
47.8
9
def make_carrier_tone(freq, db, dur, samplerate, caldb=100, calv=0.1): """ Produce a pure tone signal :param freq: Frequency of the tone to be produced (Hz) :type freq: int :param db: Intensity of the tone in dB SPL :type db: int :param dur: duration (seconds) :type dur: float :param samplerate: generation frequency of tone (Hz) :type samplerate: int :param caldb: Reference intensity (dB SPL). Together with calv, provides a reference point for what intensity equals what output voltage level :type caldb: int :param calv: Reference voltage (V). Together with caldb, provides a reference point for what intensity equals what output voltage level :type calv: float :returns: tone, timevals -- the signal and the time index values """ if samplerate <= 0: raise ValueError("Samplerate must be greater than 0") if caldb <= 0: raise ValueError("Calibration dB SPL must be greater than 0") npts = int(dur * samplerate) amp = (10 ** ((db - caldb) / 20) * calv) if USE_RMS: amp *= 1.414213562373 if VERBOSE: print( "current dB: {}, fs: {}, current frequency: {} kHz, AO Amp: {:.6f}".format(db, samplerate, freq / 1000, amp)) print("cal dB: {}, V at cal dB: {}".format(caldb, calv)) tone = amp * np.sin((freq * dur) * np.linspace(0, 2 * np.pi, npts)) timevals = np.arange(npts) / samplerate return tone, timevals
[ "def", "make_carrier_tone", "(", "freq", ",", "db", ",", "dur", ",", "samplerate", ",", "caldb", "=", "100", ",", "calv", "=", "0.1", ")", ":", "if", "samplerate", "<=", "0", ":", "raise", "ValueError", "(", "\"Samplerate must be greater than 0\"", ")", "i...
37.447368
26.710526
def mapReduce(mapFunc, reductionFunc, *iterables, **kwargs): """Exectues the :meth:`~scoop.futures.map` function and then applies a reduction function to its result. The reduction function will cumulatively merge the results of the map function in order to get a single final value. This call is blocking. :param mapFunc: Any picklable callable object (function or class object with *__call__* method); this object will be called to execute the Futures. The callable must return a value. :param reductionFunc: Any picklable callable object (function or class object with *__call__* method); this object will be called to reduce pairs of Futures results. The callable must support two parameters and return a single value. :param iterables: Iterable objects; each will be zipped to form an iterable of arguments tuples that will be passed to the callable object as a separate Future. :param timeout: The maximum number of seconds to wait. If None, then there is no limit on the wait time. :returns: A single value.""" return submit( _recursiveReduce, mapFunc, reductionFunc, False, *iterables ).result()
[ "def", "mapReduce", "(", "mapFunc", ",", "reductionFunc", ",", "*", "iterables", ",", "*", "*", "kwargs", ")", ":", "return", "submit", "(", "_recursiveReduce", ",", "mapFunc", ",", "reductionFunc", ",", "False", ",", "*", "iterables", ")", ".", "result", ...
45.407407
24.888889
def search_call_sets(self, variant_set_id, name=None, biosample_id=None): """ Returns an iterator over the CallSets fulfilling the specified conditions from the specified VariantSet. :param str variant_set_id: Find callsets belonging to the provided variant set. :param str name: Only CallSets matching the specified name will be returned. :param str biosample_id: Only CallSets matching this id will be returned. :return: An iterator over the :class:`ga4gh.protocol.CallSet` objects defined by the query parameters. """ request = protocol.SearchCallSetsRequest() request.variant_set_id = variant_set_id request.name = pb.string(name) request.biosample_id = pb.string(biosample_id) request.page_size = pb.int(self._page_size) return self._run_search_request( request, "callsets", protocol.SearchCallSetsResponse)
[ "def", "search_call_sets", "(", "self", ",", "variant_set_id", ",", "name", "=", "None", ",", "biosample_id", "=", "None", ")", ":", "request", "=", "protocol", ".", "SearchCallSetsRequest", "(", ")", "request", ".", "variant_set_id", "=", "variant_set_id", "r...
45.952381
16.428571
def fgm(self, x, labels, targeted=False): """ TensorFlow Eager implementation of the Fast Gradient Method. :param x: the input variable :param targeted: Is the attack targeted or untargeted? Untargeted, the default, will try to make the label incorrect. Targeted will instead try to move in the direction of being more like y. :return: a tensor for the adversarial example """ # Compute loss with tf.GradientTape() as tape: # input should be watched because it may be # combination of trainable and non-trainable variables tape.watch(x) loss_obj = LossCrossEntropy(self.model, smoothing=0.) loss = loss_obj.fprop(x=x, y=labels) if targeted: loss = -loss # Define gradient of loss wrt input grad = tape.gradient(loss, x) optimal_perturbation = attacks.optimize_linear(grad, self.eps, self.ord) # Add perturbation to original example to obtain adversarial example adv_x = x + optimal_perturbation # If clipping is needed # reset all values outside of [clip_min, clip_max] if (self.clip_min is not None) and (self.clip_max is not None): adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max) return adv_x
[ "def", "fgm", "(", "self", ",", "x", ",", "labels", ",", "targeted", "=", "False", ")", ":", "# Compute loss", "with", "tf", ".", "GradientTape", "(", ")", "as", "tape", ":", "# input should be watched because it may be", "# combination of trainable and non-trainabl...
39.40625
18.15625
def recursive_processing(self, base_dir, target_dir, it): """Method to recursivly process the notebooks in the `base_dir` Parameters ---------- base_dir: str Path to the base example directory (see the `examples_dir` parameter for the :class:`Gallery` class) target_dir: str Path to the output directory for the rst files (see the `gallery_dirs` parameter for the :class:`Gallery` class) it: iterable The iterator over the subdirectories and files in `base_dir` generated by the :func:`os.walk` function""" try: file_dir, dirs, files = next(it) except StopIteration: return '', [] readme_files = {'README.md', 'README.rst', 'README.txt'} if readme_files.intersection(files): foutdir = file_dir.replace(base_dir, target_dir) create_dirs(foutdir) this_nbps = [ NotebookProcessor( infile=f, outfile=os.path.join(foutdir, os.path.basename(f)), disable_warnings=self.disable_warnings, preprocess=( (self.preprocess is True or f in self.preprocess) and not (self.dont_preprocess is True or f in self.dont_preprocess)), clear=((self.clear is True or f in self.clear) and not (self.dont_clear is True or f in self.dont_clear)), code_example=self.code_examples.get(f), supplementary_files=self.supplementary_files.get(f), other_supplementary_files=self.osf.get(f), thumbnail_figure=self.thumbnail_figures.get(f), url=self.get_url(f.replace(base_dir, '')), **self._nbp_kws) for f in map(lambda f: os.path.join(file_dir, f), filter(self.pattern.match, files))] readme_file = next(iter(readme_files.intersection(files))) else: return '', [] labels = OrderedDict() this_label = 'gallery_' + foutdir.replace(os.path.sep, '_') if this_label.endswith('_'): this_label = this_label[:-1] for d in dirs: label, nbps = self.recursive_processing( base_dir, target_dir, it) if label: labels[label] = nbps s = ".. _%s:\n\n" % this_label with open(os.path.join(file_dir, readme_file)) as f: s += f.read().rstrip() + '\n\n' s += "\n\n.. 
toctree::\n\n" s += ''.join(' %s\n' % os.path.splitext(os.path.basename( nbp.get_out_file()))[0] for nbp in this_nbps) for d in dirs: findex = os.path.join(d, 'index.rst') if os.path.exists(os.path.join(foutdir, findex)): s += ' %s\n' % os.path.splitext(findex)[0] s += '\n' for nbp in this_nbps: code_div = nbp.code_div if code_div is not None: s += code_div + '\n' else: s += nbp.thumbnail_div + '\n' s += "\n.. raw:: html\n\n <div style='clear:both'></div>\n" for label, nbps in labels.items(): s += '\n.. only:: html\n\n .. rubric:: :ref:`%s`\n\n' % ( label) for nbp in nbps: code_div = nbp.code_div if code_div is not None: s += code_div + '\n' else: s += nbp.thumbnail_div + '\n' s += "\n.. raw:: html\n\n <div style='clear:both'></div>\n" s += '\n' with open(os.path.join(foutdir, 'index.rst'), 'w') as f: f.write(s) return this_label, list(chain(this_nbps, *labels.values()))
[ "def", "recursive_processing", "(", "self", ",", "base_dir", ",", "target_dir", ",", "it", ")", ":", "try", ":", "file_dir", ",", "dirs", ",", "files", "=", "next", "(", "it", ")", "except", "StopIteration", ":", "return", "''", ",", "[", "]", "readme_...
42.622222
18.3
def sample(self, ctrs, rstate=None, return_q=False, kdtree=None): """ Sample a point uniformly distributed within the *union* of cubes. Uses a K-D Tree to perform the search if provided. Returns ------- x : `~numpy.ndarray` with shape (ndim,) A coordinate within the set of cubes. q : int, optional The number of cubes `x` falls within. """ if rstate is None: rstate = np.random nctrs = len(ctrs) # number of cubes # If there is only one cube, sample from it. if nctrs == 1: dx = self.hside * (2. * rstate.rand(self.n) - 1.) x = ctrs[0] + dx if return_q: return x, 1 else: return x # Select a cube at random. idx = rstate.randint(nctrs) # Select a point from the chosen cube. dx = self.hside * (2. * rstate.rand(self.n) - 1.) x = ctrs[idx] + dx # Check how many cubes the point lies within, passing over # the `idx`-th cube `x` was sampled from. q = self.overlap(x, ctrs, kdtree=kdtree) if return_q: # If `q` is being returned, assume the user wants to # explicitly apply the `1. / q` acceptance criterion to # properly sample from the union of balls. return x, q else: # If `q` is not being returned, assume the user wants this # done internally. while rstate.rand() > (1. / q): idx = rstate.randint(nctrs) dx = self.hside * (2. * rstate.rand(self.n) - 1.) x = ctrs[idx] + dx q = self.overlap(x, ctrs, kdtree=kdtree) return x
[ "def", "sample", "(", "self", ",", "ctrs", ",", "rstate", "=", "None", ",", "return_q", "=", "False", ",", "kdtree", "=", "None", ")", ":", "if", "rstate", "is", "None", ":", "rstate", "=", "np", ".", "random", "nctrs", "=", "len", "(", "ctrs", "...
32.148148
19.592593
def cnst_A1T(self, Y1): r"""Compute :math:`A_1^T \mathbf{y}_1` component of :math:`A^T \mathbf{y}`. In this case :math:`A_1^T \mathbf{y}_1 = (\Gamma_0^T \;\; \Gamma_1^T \;\; \ldots) \mathbf{y}_1`. """ Y1f = sl.rfftn(Y1, None, axes=self.cri.axisN) return sl.irfftn(np.conj(self.GDf) * Y1f, self.cri.Nv, self.cri.axisN)
[ "def", "cnst_A1T", "(", "self", ",", "Y1", ")", ":", "Y1f", "=", "sl", ".", "rfftn", "(", "Y1", ",", "None", ",", "axes", "=", "self", ".", "cri", ".", "axisN", ")", "return", "sl", ".", "irfftn", "(", "np", ".", "conj", "(", "self", ".", "GD...
42.555556
16.333333
def generate_dataset(self, cdl_path): ''' Use ncgen to generate a netCDF file from a .cdl file Returns the path to the generated netcdf file :param str cdl_path: Absolute path to cdl file that is used to generate netCDF file ''' if '.cdl' in cdl_path: # it's possible the filename doesn't have the .cdl extension ds_str = cdl_path.replace('.cdl', '.nc') else: ds_str = cdl_path + '.nc' subprocess.call(['ncgen', '-o', ds_str, cdl_path]) return ds_str
[ "def", "generate_dataset", "(", "self", ",", "cdl_path", ")", ":", "if", "'.cdl'", "in", "cdl_path", ":", "# it's possible the filename doesn't have the .cdl extension", "ds_str", "=", "cdl_path", ".", "replace", "(", "'.cdl'", ",", "'.nc'", ")", "else", ":", "ds_...
41.230769
24.307692
def get_handler(self, *args, **options): """ Entry point to plug the LiveReload feature. """ handler = super(Command, self).get_handler(*args, **options) if options['use_livereload']: threading.Timer(1, self.livereload_request, kwargs=options).start() return handler
[ "def", "get_handler", "(", "self", ",", "*", "args", ",", "*", "*", "options", ")", ":", "handler", "=", "super", "(", "Command", ",", "self", ")", ".", "get_handler", "(", "*", "args", ",", "*", "*", "options", ")", "if", "options", "[", "'use_liv...
39.875
12.375
def swo_enable(self, cpu_speed, swo_speed=9600, port_mask=0x01): """Enables SWO output on the target device. Configures the output protocol, the SWO output speed, and enables any ITM & stimulus ports. This is equivalent to calling ``.swo_start()``. Note: If SWO is already enabled, it will first stop SWO before enabling it again. Args: self (JLink): the ``JLink`` instance cpu_speed (int): the target CPU frequency in Hz swo_speed (int): the frequency in Hz used by the target to communicate port_mask (int): port mask specifying which stimulus ports to enable Returns: ``None`` Raises: JLinkException: on error """ if self.swo_enabled(): self.swo_stop() res = self._dll.JLINKARM_SWO_EnableTarget(cpu_speed, swo_speed, enums.JLinkSWOInterfaces.UART, port_mask) if res != 0: raise errors.JLinkException(res) self._swo_enabled = True return None
[ "def", "swo_enable", "(", "self", ",", "cpu_speed", ",", "swo_speed", "=", "9600", ",", "port_mask", "=", "0x01", ")", ":", "if", "self", ".", "swo_enabled", "(", ")", ":", "self", ".", "swo_stop", "(", ")", "res", "=", "self", ".", "_dll", ".", "J...
31.972973
24.783784
def convert_value(self, v): """ convert the expression that is in the term to something that is accepted by pytables """ def stringify(value): if self.encoding is not None: encoder = partial(pprint_thing_encoded, encoding=self.encoding) else: encoder = pprint_thing return encoder(value) kind = _ensure_decoded(self.kind) meta = _ensure_decoded(self.meta) if kind == 'datetime64' or kind == 'datetime': if isinstance(v, (int, float)): v = stringify(v) v = _ensure_decoded(v) v = Timestamp(v) if v.tz is not None: v = v.tz_convert('UTC') return TermValue(v, v.value, kind) elif kind == 'timedelta64' or kind == 'timedelta': v = Timedelta(v, unit='s').value return TermValue(int(v), v, kind) elif meta == 'category': metadata = com.values_from_object(self.metadata) result = metadata.searchsorted(v, side='left') # result returns 0 if v is first element or if v is not in metadata # check that metadata contains v if not result and v not in metadata: result = -1 return TermValue(result, result, 'integer') elif kind == 'integer': v = int(float(v)) return TermValue(v, v, kind) elif kind == 'float': v = float(v) return TermValue(v, v, kind) elif kind == 'bool': if isinstance(v, str): v = not v.strip().lower() in ['false', 'f', 'no', 'n', 'none', '0', '[]', '{}', ''] else: v = bool(v) return TermValue(v, v, kind) elif isinstance(v, str): # string quoting return TermValue(v, stringify(v), 'string') else: raise TypeError("Cannot compare {v} of type {typ} to {kind} column" .format(v=v, typ=type(v), kind=kind))
[ "def", "convert_value", "(", "self", ",", "v", ")", ":", "def", "stringify", "(", "value", ")", ":", "if", "self", ".", "encoding", "is", "not", "None", ":", "encoder", "=", "partial", "(", "pprint_thing_encoded", ",", "encoding", "=", "self", ".", "en...
39.703704
13.277778
def backup(self, paths=None): """Backup method driver.""" if not paths: paths = self._get_paths() try: self._backup_compresslevel(paths) except TypeError: try: self._backup_pb_gui(paths) except ImportError: self._backup_pb_tqdm(paths) # Delete source if specified if self.delete_source: shutil.rmtree(self.source) return self.zip_filename
[ "def", "backup", "(", "self", ",", "paths", "=", "None", ")", ":", "if", "not", "paths", ":", "paths", "=", "self", ".", "_get_paths", "(", ")", "try", ":", "self", ".", "_backup_compresslevel", "(", "paths", ")", "except", "TypeError", ":", "try", "...
27.764706
13.117647
def postinit(self, func=None, args=None, keywords=None): """Do some setup after initialisation. :param func: What is being called. :type func: NodeNG or None :param args: The positional arguments being given to the call. :type args: list(NodeNG) or None :param keywords: The keyword arguments being given to the call. :type keywords: list(NodeNG) or None """ self.func = func self.args = args self.keywords = keywords
[ "def", "postinit", "(", "self", ",", "func", "=", "None", ",", "args", "=", "None", ",", "keywords", "=", "None", ")", ":", "self", ".", "func", "=", "func", "self", ".", "args", "=", "args", "self", ".", "keywords", "=", "keywords" ]
32.933333
16.6
def updateRPYText(self): 'Updates the displayed Roll, Pitch, Yaw Text' self.rollText.set_text('Roll: %.2f' % self.roll) self.pitchText.set_text('Pitch: %.2f' % self.pitch) self.yawText.set_text('Yaw: %.2f' % self.yaw)
[ "def", "updateRPYText", "(", "self", ")", ":", "self", ".", "rollText", ".", "set_text", "(", "'Roll: %.2f'", "%", "self", ".", "roll", ")", "self", ".", "pitchText", ".", "set_text", "(", "'Pitch: %.2f'", "%", "self", ".", "pitch", ")", "self", ".", ...
49.8
16.2
def eigenvalues_rev(T, k, ncv=None, mu=None): r"""Compute the eigenvalues of a reversible, sparse transition matrix. Parameters ---------- T : (M, M) scipy.sparse matrix Transition matrix k : int Number of eigenvalues to compute. ncv : int, optional The number of Lanczos vectors generated, `ncv` must be greater than k; it is recommended that ncv > 2*k mu : (M,) ndarray, optional Stationary distribution of T Returns ------- v : (k,) ndarray Eigenvalues of T Raises ------ ValueError If stationary distribution is nonpositive. Notes ----- The first k eigenvalues of largest magnitude are computed. """ """compute stationary distribution if not given""" if mu is None: mu = stationary_distribution(T) if np.any(mu <= 0): raise ValueError('Cannot symmetrize transition matrix') """ symmetrize T """ smu = np.sqrt(mu) D = diags(smu, 0) Dinv = diags(1.0/smu, 0) S = (D.dot(T)).dot(Dinv) """Compute eigenvalues using a solver for symmetric/hermititan eigenproblems""" evals = scipy.sparse.linalg.eigsh(S, k=k, ncv=ncv, which='LM', return_eigenvectors=False) return evals
[ "def", "eigenvalues_rev", "(", "T", ",", "k", ",", "ncv", "=", "None", ",", "mu", "=", "None", ")", ":", "\"\"\"compute stationary distribution if not given\"\"\"", "if", "mu", "is", "None", ":", "mu", "=", "stationary_distribution", "(", "T", ")", "if", "np...
27.590909
20.272727
def sa_indices(num_states, num_actions): """ Generate `s_indices` and `a_indices` for `DiscreteDP`, for the case where all the actions are feasible at every state. Parameters ---------- num_states : scalar(int) Number of states. num_actions : scalar(int) Number of actions. Returns ------- s_indices : ndarray(int, ndim=1) Array containing the state indices. a_indices : ndarray(int, ndim=1) Array containing the action indices. Examples -------- >>> s_indices, a_indices = qe.markov.sa_indices(4, 3) >>> s_indices array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]) >>> a_indices array([0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2]) """ L = num_states * num_actions dtype = np.int_ s_indices = np.empty(L, dtype=dtype) a_indices = np.empty(L, dtype=dtype) i = 0 for s in range(num_states): for a in range(num_actions): s_indices[i] = s a_indices[i] = a i += 1 return s_indices, a_indices
[ "def", "sa_indices", "(", "num_states", ",", "num_actions", ")", ":", "L", "=", "num_states", "*", "num_actions", "dtype", "=", "np", ".", "int_", "s_indices", "=", "np", ".", "empty", "(", "L", ",", "dtype", "=", "dtype", ")", "a_indices", "=", "np", ...
23.651163
18.674419
def merge_from_list(self, list_args): """find any matching parser_args from list_args and merge them into this instance list_args -- list -- an array of (args, kwargs) tuples """ def xs(name, parser_args, list_args): """build the generator of matching list_args""" for args, kwargs in list_args: if len(set(args) & parser_args) > 0: yield args, kwargs else: if 'dest' in kwargs: if kwargs['dest'] == name: yield args, kwargs for args, kwargs in xs(self.name, self.parser_args, list_args): self.merge_args(args) self.merge_kwargs(kwargs)
[ "def", "merge_from_list", "(", "self", ",", "list_args", ")", ":", "def", "xs", "(", "name", ",", "parser_args", ",", "list_args", ")", ":", "\"\"\"build the generator of matching list_args\"\"\"", "for", "args", ",", "kwargs", "in", "list_args", ":", "if", "len...
37
13.3
def uploadFile(uploadfunc, fileindex, existing, uf, skip_broken=False): """Update a file object so that the location is a reference to the toil file store, writing it to the file store if necessary. """ if uf["location"].startswith("toilfs:") or uf["location"].startswith("_:"): return if uf["location"] in fileindex: uf["location"] = fileindex[uf["location"]] return if not uf["location"] and uf["path"]: uf["location"] = schema_salad.ref_resolver.file_uri(uf["path"]) if uf["location"].startswith("file://") and not os.path.isfile(uf["location"][7:]): if skip_broken: return else: raise cwltool.errors.WorkflowException( "File is missing: %s" % uf["location"]) uf["location"] = write_file( uploadfunc, fileindex, existing, uf["location"])
[ "def", "uploadFile", "(", "uploadfunc", ",", "fileindex", ",", "existing", ",", "uf", ",", "skip_broken", "=", "False", ")", ":", "if", "uf", "[", "\"location\"", "]", ".", "startswith", "(", "\"toilfs:\"", ")", "or", "uf", "[", "\"location\"", "]", ".",...
40.47619
20.238095
def get_dates(raw_table) -> "list of dates": """ Goes through the first column of input table and returns the first sequence of dates it finds. """ dates = [] found_first = False for i, dstr in enumerate([raw_table[i][0] for i in range(0, len(raw_table))]): if dstr: if len(dstr.split("/")) == 3: d = datetime.datetime.strptime(dstr, '%m/%d/%Y') elif len(dstr.split("-")) == 3: d = datetime.datetime.strptime(dstr, '%Y-%m-%d') else: # Not necessarily an error, could just be a non-date cell logging.debug("unknown date-format: {}".format(dstr)) continue dates.append(d) if not found_first: found_first = True logging.debug("Found first date: '{}' at i: {}".format(d.isoformat(), i)) elif found_first: logging.debug("Last date: {}".format(d)) break return dates
[ "def", "get_dates", "(", "raw_table", ")", "->", "\"list of dates\"", ":", "dates", "=", "[", "]", "found_first", "=", "False", "for", "i", ",", "dstr", "in", "enumerate", "(", "[", "raw_table", "[", "i", "]", "[", "0", "]", "for", "i", "in", "range"...
43.36
17.84
def export_gpx_file(self): """Generate GPX element tree from ``Trackpoints``. Returns: etree.ElementTree: GPX element tree depicting ``Trackpoints`` objects """ gpx = create_elem('gpx', GPX_ELEM_ATTRIB) if not self.metadata.bounds: self.metadata.bounds = [j for i in self for j in i] gpx.append(self.metadata.togpx()) track = create_elem('trk') gpx.append(track) for segment in self: chunk = create_elem('trkseg') track.append(chunk) for place in segment: chunk.append(place.togpx()) return etree.ElementTree(gpx)
[ "def", "export_gpx_file", "(", "self", ")", ":", "gpx", "=", "create_elem", "(", "'gpx'", ",", "GPX_ELEM_ATTRIB", ")", "if", "not", "self", ".", "metadata", ".", "bounds", ":", "self", ".", "metadata", ".", "bounds", "=", "[", "j", "for", "i", "in", ...
33.4
13.05
def update_video(video_data): """ Called on to update Video objects in the database update_video is used to update Video objects by the given edx_video_id in the video_data. Args: video_data (dict): { url: api url to the video edx_video_id: ID of the video duration: Length of video in seconds client_video_id: client ID of video encoded_video: a list of EncodedVideo dicts url: url of the video file_size: size of the video in bytes profile: ID of the profile courses: Courses associated with this video } Raises: Raises ValVideoNotFoundError if the video cannot be retrieved. Raises ValCannotUpdateError if the video cannot be updated. Returns the successfully updated Video object """ try: video = _get_video(video_data.get("edx_video_id")) except Video.DoesNotExist: error_message = u"Video not found when trying to update video with edx_video_id: {0}".format(video_data.get("edx_video_id")) raise ValVideoNotFoundError(error_message) serializer = VideoSerializer(video, data=video_data) if serializer.is_valid(): serializer.save() return video_data.get("edx_video_id") else: raise ValCannotUpdateError(serializer.errors)
[ "def", "update_video", "(", "video_data", ")", ":", "try", ":", "video", "=", "_get_video", "(", "video_data", ".", "get", "(", "\"edx_video_id\"", ")", ")", "except", "Video", ".", "DoesNotExist", ":", "error_message", "=", "u\"Video not found when trying to upda...
35.564103
22.076923
def write_csv(path, data): """This function writes comma-separated <data> to <path>. Parameter <path> is either a pathname or a file-like object that supports the |write()| method.""" fd = _try_open_file(path, 'w', 'The first argument must be a pathname or an object that supports write() method') for v in data: fd.write(",".join([str(x) for x in v])) fd.write("\n") _try_close_file(fd, path)
[ "def", "write_csv", "(", "path", ",", "data", ")", ":", "fd", "=", "_try_open_file", "(", "path", ",", "'w'", ",", "'The first argument must be a pathname or an object that supports write() method'", ")", "for", "v", "in", "data", ":", "fd", ".", "write", "(", "...
40.727273
18.909091
def calc_transform(src, dst_crs=None, resolution=None, dimensions=None, src_bounds=None, dst_bounds=None, target_aligned_pixels=False): """Output dimensions and transform for a reprojection. Parameters ------------ src: rasterio.io.DatasetReader Data source. dst_crs: rasterio.crs.CRS, optional Target coordinate reference system. resolution: tuple (x resolution, y resolution) or float, optional Target resolution, in units of target coordinate reference system. dimensions: tuple (width, height), optional Output file size in pixels and lines. src_bounds: tuple (xmin, ymin, xmax, ymax), optional Georeferenced extent of output file from source bounds (in source georeferenced units). dst_bounds: tuple (xmin, ymin, xmax, ymax), optional Georeferenced extent of output file from destination bounds (in destination georeferenced units). target_aligned_pixels: bool, optional Align the output bounds based on the resolution. Default is `False`. Returns ------- dst_crs: rasterio.crs.CRS Output crs transform: Affine Output affine transformation matrix width, height: int Output dimensions """ if resolution is not None: if isinstance(resolution, (float, int)): resolution = (float(resolution), float(resolution)) if target_aligned_pixels: if not resolution: raise ValueError('target_aligned_pixels cannot be used without resolution') if src_bounds or dst_bounds: raise ValueError('target_aligned_pixels cannot be used with src_bounds or dst_bounds') elif dimensions: invalid_combos = (dst_bounds, resolution) if any(p for p in invalid_combos if p is not None): raise ValueError('dimensions cannot be used with dst_bounds or resolution') if src_bounds and dst_bounds: raise ValueError('src_bounds and dst_bounds may not be specified simultaneously') if dst_crs is not None: if dimensions: # Calculate resolution appropriate for dimensions # in target. 
dst_width, dst_height = dimensions bounds = src_bounds or src.bounds xmin, ymin, xmax, ymax = transform_bounds( src.crs, dst_crs, *bounds) dst_transform = Affine( (xmax - xmin) / float(dst_width), 0, xmin, 0, (ymin - ymax) / float(dst_height), ymax ) elif src_bounds or dst_bounds: if not resolution: raise ValueError('resolution is required when using src_bounds or dst_bounds') if src_bounds: xmin, ymin, xmax, ymax = transform_bounds( src.crs, dst_crs, *src_bounds) else: xmin, ymin, xmax, ymax = dst_bounds dst_transform = Affine(resolution[0], 0, xmin, 0, -resolution[1], ymax) dst_width = max(int(ceil((xmax - xmin) / resolution[0])), 1) dst_height = max(int(ceil((ymax - ymin) / resolution[1])), 1) else: if src.transform.is_identity and src.gcps: src_crs = src.gcps[1] kwargs = {'gcps': src.gcps[0]} else: src_crs = src.crs kwargs = src.bounds._asdict() dst_transform, dst_width, dst_height = calcdt( src_crs, dst_crs, src.width, src.height, resolution=resolution, **kwargs) elif dimensions: # Same projection, different dimensions, calculate resolution. dst_crs = src.crs dst_width, dst_height = dimensions l, b, r, t = src_bounds or src.bounds dst_transform = Affine( (r - l) / float(dst_width), 0, l, 0, (b - t) / float(dst_height), t ) elif src_bounds or dst_bounds: # Same projection, different dimensions and possibly # different resolution. if not resolution: resolution = (src.transform.a, -src.transform.e) dst_crs = src.crs xmin, ymin, xmax, ymax = (src_bounds or dst_bounds) dst_transform = Affine(resolution[0], 0, xmin, 0, -resolution[1], ymax) dst_width = max(int(ceil((xmax - xmin) / resolution[0])), 1) dst_height = max(int(ceil((ymax - ymin) / resolution[1])), 1) elif resolution: # Same projection, different resolution. 
dst_crs = src.crs l, b, r, t = src.bounds dst_transform = Affine(resolution[0], 0, l, 0, -resolution[1], t) dst_width = max(int(ceil((r - l) / resolution[0])), 1) dst_height = max(int(ceil((t - b) / resolution[1])), 1) else: dst_crs = src.crs dst_transform = src.transform dst_width = src.width dst_height = src.height if target_aligned_pixels: dst_transform, dst_width, dst_height = aligned_target( dst_transform, dst_width, dst_height, resolution) return dst_crs, dst_transform, dst_width, dst_height
[ "def", "calc_transform", "(", "src", ",", "dst_crs", "=", "None", ",", "resolution", "=", "None", ",", "dimensions", "=", "None", ",", "src_bounds", "=", "None", ",", "dst_bounds", "=", "None", ",", "target_aligned_pixels", "=", "False", ")", ":", "if", ...
37.132353
20.125
def add(A, b, offset=0): """ Add b to the view of A in place (!). Returns modified A. Broadcasting is allowed, thus b can be scalar. if offset is not zero, make sure b is of right shape! :param ndarray A: 2 dimensional array :param ndarray-like b: either one dimensional or scalar :param int offset: same as in view. :rtype: view of A, which is adjusted inplace """ return _diag_ufunc(A, b, offset, np.add)
[ "def", "add", "(", "A", ",", "b", ",", "offset", "=", "0", ")", ":", "return", "_diag_ufunc", "(", "A", ",", "b", ",", "offset", ",", "np", ".", "add", ")" ]
31.357143
12.357143
def computeFunctional(x, cooP): ''' Compute value of functional J(X) = ||PX - PA||^2_F, where P is projector into index subspace of known elements, X is our approximation, A is original tensor. Parameters: :tt.vector: x current approximation [X] :dict: cooP dictionary with two records - 'indices': numpy.array of P x d shape, contains index subspace of P known elements; each string is an index of one element. - 'values': numpy array of size P, contains P known values. Returns: :float: result value of functional ''' indices = cooP['indices'] values = cooP['values'] [P, d] = indices.shape assert P == len(values) result = 0 for p in xrange(P): index = tuple(indices[p, :]) result += (x[index] - values[p])**2 result *= 0.5 return result
[ "def", "computeFunctional", "(", "x", ",", "cooP", ")", ":", "indices", "=", "cooP", "[", "'indices'", "]", "values", "=", "cooP", "[", "'values'", "]", "[", "P", ",", "d", "]", "=", "indices", ".", "shape", "assert", "P", "==", "len", "(", "values...
28.382353
17.617647