code
stringlengths
75
104k
code_sememe
stringlengths
47
309k
token_type
stringlengths
215
214k
code_dependency
stringlengths
75
155k
def molspec(x): """ Parse a string for a lipid or a solvent as given on the command line (MOLECULE[=NUMBER|:NUMBER]); where `=NUMBER` sets an absolute number of the molecule, and `:NUMBER` sets a relative number of it. If both absolute and relative number are set False, then relative count is 1. """ lip = x.split(":") abn = lip[0].split("=") names = abn[0] if len(abn) > 1: nrel = 0 nabs = int(abn[1]) else: nabs = 0 if len(lip) > 1: nrel = float(lip[1]) else: nrel = 1 return abn[0], nabs, nrel
def function[molspec, parameter[x]]: constant[ Parse a string for a lipid or a solvent as given on the command line (MOLECULE[=NUMBER|:NUMBER]); where `=NUMBER` sets an absolute number of the molecule, and `:NUMBER` sets a relative number of it. If both absolute and relative number are set False, then relative count is 1. ] variable[lip] assign[=] call[name[x].split, parameter[constant[:]]] variable[abn] assign[=] call[call[name[lip]][constant[0]].split, parameter[constant[=]]] variable[names] assign[=] call[name[abn]][constant[0]] if compare[call[name[len], parameter[name[abn]]] greater[>] constant[1]] begin[:] variable[nrel] assign[=] constant[0] variable[nabs] assign[=] call[name[int], parameter[call[name[abn]][constant[1]]]] return[tuple[[<ast.Subscript object at 0x7da2054a51b0>, <ast.Name object at 0x7da2054a5d50>, <ast.Name object at 0x7da2054a5060>]]]
keyword[def] identifier[molspec] ( identifier[x] ): literal[string] identifier[lip] = identifier[x] . identifier[split] ( literal[string] ) identifier[abn] = identifier[lip] [ literal[int] ]. identifier[split] ( literal[string] ) identifier[names] = identifier[abn] [ literal[int] ] keyword[if] identifier[len] ( identifier[abn] )> literal[int] : identifier[nrel] = literal[int] identifier[nabs] = identifier[int] ( identifier[abn] [ literal[int] ]) keyword[else] : identifier[nabs] = literal[int] keyword[if] identifier[len] ( identifier[lip] )> literal[int] : identifier[nrel] = identifier[float] ( identifier[lip] [ literal[int] ]) keyword[else] : identifier[nrel] = literal[int] keyword[return] identifier[abn] [ literal[int] ], identifier[nabs] , identifier[nrel]
def molspec(x): """ Parse a string for a lipid or a solvent as given on the command line (MOLECULE[=NUMBER|:NUMBER]); where `=NUMBER` sets an absolute number of the molecule, and `:NUMBER` sets a relative number of it. If both absolute and relative number are set False, then relative count is 1. """ lip = x.split(':') abn = lip[0].split('=') names = abn[0] if len(abn) > 1: nrel = 0 nabs = int(abn[1]) # depends on [control=['if'], data=[]] else: nabs = 0 if len(lip) > 1: nrel = float(lip[1]) # depends on [control=['if'], data=[]] else: nrel = 1 return (abn[0], nabs, nrel)
def _load_image_set_index(self, shuffle): """ find out which indexes correspond to given image set (train or val) Parameters: ---------- shuffle : boolean whether to shuffle the image list Returns: ---------- entire list of images specified in the setting """ image_set_index_file = os.path.join(self.data_path, 'ImageSets', 'Main', self.image_set + '.txt') assert os.path.exists(image_set_index_file), 'Path does not exist: {}'.format(image_set_index_file) with open(image_set_index_file) as f: image_set_index = [x.strip() for x in f.readlines()] if shuffle: np.random.shuffle(image_set_index) return image_set_index
def function[_load_image_set_index, parameter[self, shuffle]]: constant[ find out which indexes correspond to given image set (train or val) Parameters: ---------- shuffle : boolean whether to shuffle the image list Returns: ---------- entire list of images specified in the setting ] variable[image_set_index_file] assign[=] call[name[os].path.join, parameter[name[self].data_path, constant[ImageSets], constant[Main], binary_operation[name[self].image_set + constant[.txt]]]] assert[call[name[os].path.exists, parameter[name[image_set_index_file]]]] with call[name[open], parameter[name[image_set_index_file]]] begin[:] variable[image_set_index] assign[=] <ast.ListComp object at 0x7da18fe91c90> if name[shuffle] begin[:] call[name[np].random.shuffle, parameter[name[image_set_index]]] return[name[image_set_index]]
keyword[def] identifier[_load_image_set_index] ( identifier[self] , identifier[shuffle] ): literal[string] identifier[image_set_index_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[data_path] , literal[string] , literal[string] , identifier[self] . identifier[image_set] + literal[string] ) keyword[assert] identifier[os] . identifier[path] . identifier[exists] ( identifier[image_set_index_file] ), literal[string] . identifier[format] ( identifier[image_set_index_file] ) keyword[with] identifier[open] ( identifier[image_set_index_file] ) keyword[as] identifier[f] : identifier[image_set_index] =[ identifier[x] . identifier[strip] () keyword[for] identifier[x] keyword[in] identifier[f] . identifier[readlines] ()] keyword[if] identifier[shuffle] : identifier[np] . identifier[random] . identifier[shuffle] ( identifier[image_set_index] ) keyword[return] identifier[image_set_index]
def _load_image_set_index(self, shuffle): """ find out which indexes correspond to given image set (train or val) Parameters: ---------- shuffle : boolean whether to shuffle the image list Returns: ---------- entire list of images specified in the setting """ image_set_index_file = os.path.join(self.data_path, 'ImageSets', 'Main', self.image_set + '.txt') assert os.path.exists(image_set_index_file), 'Path does not exist: {}'.format(image_set_index_file) with open(image_set_index_file) as f: image_set_index = [x.strip() for x in f.readlines()] # depends on [control=['with'], data=['f']] if shuffle: np.random.shuffle(image_set_index) # depends on [control=['if'], data=[]] return image_set_index
def nth_day_of_month(n, weekday, month, year): """ Return (year, month, day) tuple that represents nth weekday of month in year. If n==0, returns last weekday of month. Weekdays: Monday=0 """ if not (0 <= n <= 5): raise IndexError("Nth day of month must be 0-5. Received: {}".format(n)) if not (0 <= weekday <= 6): raise IndexError("Weekday must be 0-6") firstday, daysinmonth = calendar.monthrange(year, month) # Get first WEEKDAY of month first_weekday_of_kind = 1 + (weekday - firstday) % 7 if n == 0: # find last weekday of kind, which is 5 if these conditions are met, else 4 if first_weekday_of_kind in [1, 2, 3] and first_weekday_of_kind + 28 < daysinmonth: n = 5 else: n = 4 day = first_weekday_of_kind + ((n - 1) * 7) if day > daysinmonth: raise IndexError("No {}th day of month {}".format(n, month)) return (year, month, day)
def function[nth_day_of_month, parameter[n, weekday, month, year]]: constant[ Return (year, month, day) tuple that represents nth weekday of month in year. If n==0, returns last weekday of month. Weekdays: Monday=0 ] if <ast.UnaryOp object at 0x7da1b0fac7f0> begin[:] <ast.Raise object at 0x7da1b0fac220> if <ast.UnaryOp object at 0x7da1b0faeb60> begin[:] <ast.Raise object at 0x7da1b0fac5e0> <ast.Tuple object at 0x7da1b0facb50> assign[=] call[name[calendar].monthrange, parameter[name[year], name[month]]] variable[first_weekday_of_kind] assign[=] binary_operation[constant[1] + binary_operation[binary_operation[name[weekday] - name[firstday]] <ast.Mod object at 0x7da2590d6920> constant[7]]] if compare[name[n] equal[==] constant[0]] begin[:] if <ast.BoolOp object at 0x7da1b0faee00> begin[:] variable[n] assign[=] constant[5] variable[day] assign[=] binary_operation[name[first_weekday_of_kind] + binary_operation[binary_operation[name[n] - constant[1]] * constant[7]]] if compare[name[day] greater[>] name[daysinmonth]] begin[:] <ast.Raise object at 0x7da1b0faf6a0> return[tuple[[<ast.Name object at 0x7da1b0faf370>, <ast.Name object at 0x7da1b0faf100>, <ast.Name object at 0x7da1b0fac400>]]]
keyword[def] identifier[nth_day_of_month] ( identifier[n] , identifier[weekday] , identifier[month] , identifier[year] ): literal[string] keyword[if] keyword[not] ( literal[int] <= identifier[n] <= literal[int] ): keyword[raise] identifier[IndexError] ( literal[string] . identifier[format] ( identifier[n] )) keyword[if] keyword[not] ( literal[int] <= identifier[weekday] <= literal[int] ): keyword[raise] identifier[IndexError] ( literal[string] ) identifier[firstday] , identifier[daysinmonth] = identifier[calendar] . identifier[monthrange] ( identifier[year] , identifier[month] ) identifier[first_weekday_of_kind] = literal[int] +( identifier[weekday] - identifier[firstday] )% literal[int] keyword[if] identifier[n] == literal[int] : keyword[if] identifier[first_weekday_of_kind] keyword[in] [ literal[int] , literal[int] , literal[int] ] keyword[and] identifier[first_weekday_of_kind] + literal[int] < identifier[daysinmonth] : identifier[n] = literal[int] keyword[else] : identifier[n] = literal[int] identifier[day] = identifier[first_weekday_of_kind] +(( identifier[n] - literal[int] )* literal[int] ) keyword[if] identifier[day] > identifier[daysinmonth] : keyword[raise] identifier[IndexError] ( literal[string] . identifier[format] ( identifier[n] , identifier[month] )) keyword[return] ( identifier[year] , identifier[month] , identifier[day] )
def nth_day_of_month(n, weekday, month, year): """ Return (year, month, day) tuple that represents nth weekday of month in year. If n==0, returns last weekday of month. Weekdays: Monday=0 """ if not 0 <= n <= 5: raise IndexError('Nth day of month must be 0-5. Received: {}'.format(n)) # depends on [control=['if'], data=[]] if not 0 <= weekday <= 6: raise IndexError('Weekday must be 0-6') # depends on [control=['if'], data=[]] (firstday, daysinmonth) = calendar.monthrange(year, month) # Get first WEEKDAY of month first_weekday_of_kind = 1 + (weekday - firstday) % 7 if n == 0: # find last weekday of kind, which is 5 if these conditions are met, else 4 if first_weekday_of_kind in [1, 2, 3] and first_weekday_of_kind + 28 < daysinmonth: n = 5 # depends on [control=['if'], data=[]] else: n = 4 # depends on [control=['if'], data=['n']] day = first_weekday_of_kind + (n - 1) * 7 if day > daysinmonth: raise IndexError('No {}th day of month {}'.format(n, month)) # depends on [control=['if'], data=[]] return (year, month, day)
def recover_annotation(): """ Recover the last annotated SFrame. If you annotate an SFrame and forget to assign it to a variable, this function allows you to recover the last annotated SFrame. Returns ------- out : SFrame A new SFrame that contains the recovered annotation data. Examples -------- >> annotations = tc.image_classifier.recover_annotation() >> print(annotations) Columns: images Image labels int annotations str Rows: 400 Data: +----------------------+-------------+ | images | annotations | +----------------------+-------------+ | Height: 28 Width: 28 | Cat | | Height: 28 Width: 28 | Dog | | Height: 28 Width: 28 | Mouse | | Height: 28 Width: 28 | Feather | | Height: 28 Width: 28 | Bird | | Height: 28 Width: 28 | Cat | | Height: 28 Width: 28 | Cat | | Height: 28 Width: 28 | Dog | | Height: 28 Width: 28 | Cat | | Height: 28 Width: 28 | Bird | +----------------------+-------------+ [400 rows x 3 columns] """ empty_instance = __tc.extensions.ImageClassification() annotation_wrapper = empty_instance.get_annotation_registry() return annotation_wrapper.annotation_sframe
def function[recover_annotation, parameter[]]: constant[ Recover the last annotated SFrame. If you annotate an SFrame and forget to assign it to a variable, this function allows you to recover the last annotated SFrame. Returns ------- out : SFrame A new SFrame that contains the recovered annotation data. Examples -------- >> annotations = tc.image_classifier.recover_annotation() >> print(annotations) Columns: images Image labels int annotations str Rows: 400 Data: +----------------------+-------------+ | images | annotations | +----------------------+-------------+ | Height: 28 Width: 28 | Cat | | Height: 28 Width: 28 | Dog | | Height: 28 Width: 28 | Mouse | | Height: 28 Width: 28 | Feather | | Height: 28 Width: 28 | Bird | | Height: 28 Width: 28 | Cat | | Height: 28 Width: 28 | Cat | | Height: 28 Width: 28 | Dog | | Height: 28 Width: 28 | Cat | | Height: 28 Width: 28 | Bird | +----------------------+-------------+ [400 rows x 3 columns] ] variable[empty_instance] assign[=] call[name[__tc].extensions.ImageClassification, parameter[]] variable[annotation_wrapper] assign[=] call[name[empty_instance].get_annotation_registry, parameter[]] return[name[annotation_wrapper].annotation_sframe]
keyword[def] identifier[recover_annotation] (): literal[string] identifier[empty_instance] = identifier[__tc] . identifier[extensions] . identifier[ImageClassification] () identifier[annotation_wrapper] = identifier[empty_instance] . identifier[get_annotation_registry] () keyword[return] identifier[annotation_wrapper] . identifier[annotation_sframe]
def recover_annotation(): """ Recover the last annotated SFrame. If you annotate an SFrame and forget to assign it to a variable, this function allows you to recover the last annotated SFrame. Returns ------- out : SFrame A new SFrame that contains the recovered annotation data. Examples -------- >> annotations = tc.image_classifier.recover_annotation() >> print(annotations) Columns: images Image labels int annotations str Rows: 400 Data: +----------------------+-------------+ | images | annotations | +----------------------+-------------+ | Height: 28 Width: 28 | Cat | | Height: 28 Width: 28 | Dog | | Height: 28 Width: 28 | Mouse | | Height: 28 Width: 28 | Feather | | Height: 28 Width: 28 | Bird | | Height: 28 Width: 28 | Cat | | Height: 28 Width: 28 | Cat | | Height: 28 Width: 28 | Dog | | Height: 28 Width: 28 | Cat | | Height: 28 Width: 28 | Bird | +----------------------+-------------+ [400 rows x 3 columns] """ empty_instance = __tc.extensions.ImageClassification() annotation_wrapper = empty_instance.get_annotation_registry() return annotation_wrapper.annotation_sframe
def use(self, id): """ Use a particular Virtual Server instance @param id: Virtual Server ID @type id: int """ if self._connected and id > 0: self.send_command('use', keys={'sid': id})
def function[use, parameter[self, id]]: constant[ Use a particular Virtual Server instance @param id: Virtual Server ID @type id: int ] if <ast.BoolOp object at 0x7da20e9b1990> begin[:] call[name[self].send_command, parameter[constant[use]]]
keyword[def] identifier[use] ( identifier[self] , identifier[id] ): literal[string] keyword[if] identifier[self] . identifier[_connected] keyword[and] identifier[id] > literal[int] : identifier[self] . identifier[send_command] ( literal[string] , identifier[keys] ={ literal[string] : identifier[id] })
def use(self, id): """ Use a particular Virtual Server instance @param id: Virtual Server ID @type id: int """ if self._connected and id > 0: self.send_command('use', keys={'sid': id}) # depends on [control=['if'], data=[]]
def wait(self, timeout=None): # type: (Optional[int]) -> None """Wait on the long running operation for a specified length of time. You can check if this call as ended with timeout with the "done()" method. :param int timeout: Period of time to wait for the long running operation to complete (in seconds). :raises CloudError: Server problem with the query. """ if self._thread is None: return self._thread.join(timeout=timeout) try: # Let's handle possible None in forgiveness here raise self._exception # type: ignore except TypeError: # Was None pass
def function[wait, parameter[self, timeout]]: constant[Wait on the long running operation for a specified length of time. You can check if this call as ended with timeout with the "done()" method. :param int timeout: Period of time to wait for the long running operation to complete (in seconds). :raises CloudError: Server problem with the query. ] if compare[name[self]._thread is constant[None]] begin[:] return[None] call[name[self]._thread.join, parameter[]] <ast.Try object at 0x7da18f58e410>
keyword[def] identifier[wait] ( identifier[self] , identifier[timeout] = keyword[None] ): literal[string] keyword[if] identifier[self] . identifier[_thread] keyword[is] keyword[None] : keyword[return] identifier[self] . identifier[_thread] . identifier[join] ( identifier[timeout] = identifier[timeout] ) keyword[try] : keyword[raise] identifier[self] . identifier[_exception] keyword[except] identifier[TypeError] : keyword[pass]
def wait(self, timeout=None): # type: (Optional[int]) -> None 'Wait on the long running operation for a specified length\n of time. You can check if this call as ended with timeout with the\n "done()" method.\n\n :param int timeout: Period of time to wait for the long running\n operation to complete (in seconds).\n :raises CloudError: Server problem with the query.\n ' if self._thread is None: return # depends on [control=['if'], data=[]] self._thread.join(timeout=timeout) try: # Let's handle possible None in forgiveness here raise self._exception # type: ignore # depends on [control=['try'], data=[]] except TypeError: # Was None pass # depends on [control=['except'], data=[]]
def contains(container, item): """Extends ``operator.contains`` by trying very hard to find ``item`` inside container.""" # equality counts as containment and is usually non destructive if container == item: return True # testing mapping containment is usually non destructive if isinstance(container, abc.Mapping) and mapping_contains(container, item): return True # standard containment except special cases if isinstance(container, str): # str __contains__ includes substring match that we don't count as containment if strict_contains(container, item): return True else: try: if item in container: return True except Exception: pass # search matches in generic instances return instance_contains(container, item)
def function[contains, parameter[container, item]]: constant[Extends ``operator.contains`` by trying very hard to find ``item`` inside container.] if compare[name[container] equal[==] name[item]] begin[:] return[constant[True]] if <ast.BoolOp object at 0x7da1b1588c70> begin[:] return[constant[True]] if call[name[isinstance], parameter[name[container], name[str]]] begin[:] if call[name[strict_contains], parameter[name[container], name[item]]] begin[:] return[constant[True]] return[call[name[instance_contains], parameter[name[container], name[item]]]]
keyword[def] identifier[contains] ( identifier[container] , identifier[item] ): literal[string] keyword[if] identifier[container] == identifier[item] : keyword[return] keyword[True] keyword[if] identifier[isinstance] ( identifier[container] , identifier[abc] . identifier[Mapping] ) keyword[and] identifier[mapping_contains] ( identifier[container] , identifier[item] ): keyword[return] keyword[True] keyword[if] identifier[isinstance] ( identifier[container] , identifier[str] ): keyword[if] identifier[strict_contains] ( identifier[container] , identifier[item] ): keyword[return] keyword[True] keyword[else] : keyword[try] : keyword[if] identifier[item] keyword[in] identifier[container] : keyword[return] keyword[True] keyword[except] identifier[Exception] : keyword[pass] keyword[return] identifier[instance_contains] ( identifier[container] , identifier[item] )
def contains(container, item): """Extends ``operator.contains`` by trying very hard to find ``item`` inside container.""" # equality counts as containment and is usually non destructive if container == item: return True # depends on [control=['if'], data=[]] # testing mapping containment is usually non destructive if isinstance(container, abc.Mapping) and mapping_contains(container, item): return True # depends on [control=['if'], data=[]] # standard containment except special cases if isinstance(container, str): # str __contains__ includes substring match that we don't count as containment if strict_contains(container, item): return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: try: if item in container: return True # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except Exception: pass # depends on [control=['except'], data=[]] # search matches in generic instances return instance_contains(container, item)
def reference_fasta(self): """Absolute path to the fasta file with EricScript reference data.""" if self._db_location: ref_files = glob.glob(os.path.join(self._db_location, "*", self._REF_FASTA)) if ref_files: return ref_files[0]
def function[reference_fasta, parameter[self]]: constant[Absolute path to the fasta file with EricScript reference data.] if name[self]._db_location begin[:] variable[ref_files] assign[=] call[name[glob].glob, parameter[call[name[os].path.join, parameter[name[self]._db_location, constant[*], name[self]._REF_FASTA]]]] if name[ref_files] begin[:] return[call[name[ref_files]][constant[0]]]
keyword[def] identifier[reference_fasta] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_db_location] : identifier[ref_files] = identifier[glob] . identifier[glob] ( identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[_db_location] , literal[string] , identifier[self] . identifier[_REF_FASTA] )) keyword[if] identifier[ref_files] : keyword[return] identifier[ref_files] [ literal[int] ]
def reference_fasta(self): """Absolute path to the fasta file with EricScript reference data.""" if self._db_location: ref_files = glob.glob(os.path.join(self._db_location, '*', self._REF_FASTA)) if ref_files: return ref_files[0] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
def disconnect(self, receiver): """ Disconnect a receiver from a signal. Signal and receiver must exist, otherwise an exception is thrown. :param receiver: Name of the receiver """ if receiver not in self.receivers.keys(): raise Exception("No receiver %s was registered" % receiver) self.receivers[receiver].disconnect() del(self.receivers[receiver]) self.__log.debug("Receiver %s disconnected" % receiver)
def function[disconnect, parameter[self, receiver]]: constant[ Disconnect a receiver from a signal. Signal and receiver must exist, otherwise an exception is thrown. :param receiver: Name of the receiver ] if compare[name[receiver] <ast.NotIn object at 0x7da2590d7190> call[name[self].receivers.keys, parameter[]]] begin[:] <ast.Raise object at 0x7da1b2455bd0> call[call[name[self].receivers][name[receiver]].disconnect, parameter[]] <ast.Delete object at 0x7da1b24573d0> call[name[self].__log.debug, parameter[binary_operation[constant[Receiver %s disconnected] <ast.Mod object at 0x7da2590d6920> name[receiver]]]]
keyword[def] identifier[disconnect] ( identifier[self] , identifier[receiver] ): literal[string] keyword[if] identifier[receiver] keyword[not] keyword[in] identifier[self] . identifier[receivers] . identifier[keys] (): keyword[raise] identifier[Exception] ( literal[string] % identifier[receiver] ) identifier[self] . identifier[receivers] [ identifier[receiver] ]. identifier[disconnect] () keyword[del] ( identifier[self] . identifier[receivers] [ identifier[receiver] ]) identifier[self] . identifier[__log] . identifier[debug] ( literal[string] % identifier[receiver] )
def disconnect(self, receiver): """ Disconnect a receiver from a signal. Signal and receiver must exist, otherwise an exception is thrown. :param receiver: Name of the receiver """ if receiver not in self.receivers.keys(): raise Exception('No receiver %s was registered' % receiver) # depends on [control=['if'], data=['receiver']] self.receivers[receiver].disconnect() del self.receivers[receiver] self.__log.debug('Receiver %s disconnected' % receiver)
def phase_by(val: Any, phase_turns: float, qubit_index: int, default: TDefault = RaiseTypeErrorIfNotProvided): """Returns a phased version of the effect. For example, an X gate phased by 90 degrees would be a Y gate. This works by calling `val`'s _phase_by_ method and returning the result. Args: val: The value to describe with a unitary matrix. phase_turns: The amount to phase the gate, in fractions of a whole turn. Divide by 2pi to get radians. qubit_index: The index of the target qubit the phasing applies to. For operations this is the index of the qubit within the operation's qubit list. For gates it's the index of the qubit within the tuple of qubits taken by the gate's `on` method. default: The default value to return if `val` can't be phased. If not specified, an error is raised when `val` can't be phased. Returns: If `val` has a _phase_by_ method and its result is not NotImplemented, that result is returned. Otherwise, the function will return the default value provided or raise a TypeError if none was provided. Raises: TypeError: `val` doesn't have a _phase_by_ method (or that method returned NotImplemented) and no `default` was specified. """ getter = getattr(val, '_phase_by_', None) result = NotImplemented if getter is None else getter( phase_turns, qubit_index) if result is not NotImplemented: return result if default is not RaiseTypeErrorIfNotProvided: return default if getter is None: raise TypeError("object of type '{}' " "has no _phase_by_ method.".format(type(val))) raise TypeError("object of type '{}' does have a _phase_by_ method, " "but it returned NotImplemented.".format(type(val)))
def function[phase_by, parameter[val, phase_turns, qubit_index, default]]: constant[Returns a phased version of the effect. For example, an X gate phased by 90 degrees would be a Y gate. This works by calling `val`'s _phase_by_ method and returning the result. Args: val: The value to describe with a unitary matrix. phase_turns: The amount to phase the gate, in fractions of a whole turn. Divide by 2pi to get radians. qubit_index: The index of the target qubit the phasing applies to. For operations this is the index of the qubit within the operation's qubit list. For gates it's the index of the qubit within the tuple of qubits taken by the gate's `on` method. default: The default value to return if `val` can't be phased. If not specified, an error is raised when `val` can't be phased. Returns: If `val` has a _phase_by_ method and its result is not NotImplemented, that result is returned. Otherwise, the function will return the default value provided or raise a TypeError if none was provided. Raises: TypeError: `val` doesn't have a _phase_by_ method (or that method returned NotImplemented) and no `default` was specified. ] variable[getter] assign[=] call[name[getattr], parameter[name[val], constant[_phase_by_], constant[None]]] variable[result] assign[=] <ast.IfExp object at 0x7da1b217ff10> if compare[name[result] is_not name[NotImplemented]] begin[:] return[name[result]] if compare[name[default] is_not name[RaiseTypeErrorIfNotProvided]] begin[:] return[name[default]] if compare[name[getter] is constant[None]] begin[:] <ast.Raise object at 0x7da1b217c820> <ast.Raise object at 0x7da1b217e9b0>
keyword[def] identifier[phase_by] ( identifier[val] : identifier[Any] , identifier[phase_turns] : identifier[float] , identifier[qubit_index] : identifier[int] , identifier[default] : identifier[TDefault] = identifier[RaiseTypeErrorIfNotProvided] ): literal[string] identifier[getter] = identifier[getattr] ( identifier[val] , literal[string] , keyword[None] ) identifier[result] = identifier[NotImplemented] keyword[if] identifier[getter] keyword[is] keyword[None] keyword[else] identifier[getter] ( identifier[phase_turns] , identifier[qubit_index] ) keyword[if] identifier[result] keyword[is] keyword[not] identifier[NotImplemented] : keyword[return] identifier[result] keyword[if] identifier[default] keyword[is] keyword[not] identifier[RaiseTypeErrorIfNotProvided] : keyword[return] identifier[default] keyword[if] identifier[getter] keyword[is] keyword[None] : keyword[raise] identifier[TypeError] ( literal[string] literal[string] . identifier[format] ( identifier[type] ( identifier[val] ))) keyword[raise] identifier[TypeError] ( literal[string] literal[string] . identifier[format] ( identifier[type] ( identifier[val] )))
def phase_by(val: Any, phase_turns: float, qubit_index: int, default: TDefault=RaiseTypeErrorIfNotProvided): """Returns a phased version of the effect. For example, an X gate phased by 90 degrees would be a Y gate. This works by calling `val`'s _phase_by_ method and returning the result. Args: val: The value to describe with a unitary matrix. phase_turns: The amount to phase the gate, in fractions of a whole turn. Divide by 2pi to get radians. qubit_index: The index of the target qubit the phasing applies to. For operations this is the index of the qubit within the operation's qubit list. For gates it's the index of the qubit within the tuple of qubits taken by the gate's `on` method. default: The default value to return if `val` can't be phased. If not specified, an error is raised when `val` can't be phased. Returns: If `val` has a _phase_by_ method and its result is not NotImplemented, that result is returned. Otherwise, the function will return the default value provided or raise a TypeError if none was provided. Raises: TypeError: `val` doesn't have a _phase_by_ method (or that method returned NotImplemented) and no `default` was specified. """ getter = getattr(val, '_phase_by_', None) result = NotImplemented if getter is None else getter(phase_turns, qubit_index) if result is not NotImplemented: return result # depends on [control=['if'], data=['result']] if default is not RaiseTypeErrorIfNotProvided: return default # depends on [control=['if'], data=['default']] if getter is None: raise TypeError("object of type '{}' has no _phase_by_ method.".format(type(val))) # depends on [control=['if'], data=[]] raise TypeError("object of type '{}' does have a _phase_by_ method, but it returned NotImplemented.".format(type(val)))
def _data_graph_add_edge(self, src, dst, **edge_labels): """ Add an edge in the data dependence graph. :param ProgramVariable src: Source node. :param ProgramVariable dst: Destination node. :param edge_labels: All labels associated with the edge. :return: None """ if src in self._data_graph and dst in self._data_graph[src]: return self._data_graph.add_edge(src, dst, **edge_labels) self._simplified_data_graph = None
def function[_data_graph_add_edge, parameter[self, src, dst]]: constant[ Add an edge in the data dependence graph. :param ProgramVariable src: Source node. :param ProgramVariable dst: Destination node. :param edge_labels: All labels associated with the edge. :return: None ] if <ast.BoolOp object at 0x7da18dc98bb0> begin[:] return[None] call[name[self]._data_graph.add_edge, parameter[name[src], name[dst]]] name[self]._simplified_data_graph assign[=] constant[None]
keyword[def] identifier[_data_graph_add_edge] ( identifier[self] , identifier[src] , identifier[dst] ,** identifier[edge_labels] ): literal[string] keyword[if] identifier[src] keyword[in] identifier[self] . identifier[_data_graph] keyword[and] identifier[dst] keyword[in] identifier[self] . identifier[_data_graph] [ identifier[src] ]: keyword[return] identifier[self] . identifier[_data_graph] . identifier[add_edge] ( identifier[src] , identifier[dst] ,** identifier[edge_labels] ) identifier[self] . identifier[_simplified_data_graph] = keyword[None]
def _data_graph_add_edge(self, src, dst, **edge_labels): """ Add an edge in the data dependence graph. :param ProgramVariable src: Source node. :param ProgramVariable dst: Destination node. :param edge_labels: All labels associated with the edge. :return: None """ if src in self._data_graph and dst in self._data_graph[src]: return # depends on [control=['if'], data=[]] self._data_graph.add_edge(src, dst, **edge_labels) self._simplified_data_graph = None
def writeWarp(self, warpDict): """ Write a list of (in, out) values for a warpmap """ warpElement = ET.Element("warp") axisNames = sorted(warpDict.keys()) for name in axisNames: axisElement = ET.Element("axis") axisElement.attrib['name'] = name for a, b in warpDict[name]: warpPt = ET.Element("map") warpPt.attrib['input'] = str(a) warpPt.attrib['output'] = str(b) axisElement.append(warpPt) warpElement.append(axisElement) self.root.append(warpElement)
def function[writeWarp, parameter[self, warpDict]]: constant[ Write a list of (in, out) values for a warpmap ] variable[warpElement] assign[=] call[name[ET].Element, parameter[constant[warp]]] variable[axisNames] assign[=] call[name[sorted], parameter[call[name[warpDict].keys, parameter[]]]] for taget[name[name]] in starred[name[axisNames]] begin[:] variable[axisElement] assign[=] call[name[ET].Element, parameter[constant[axis]]] call[name[axisElement].attrib][constant[name]] assign[=] name[name] for taget[tuple[[<ast.Name object at 0x7da18fe93820>, <ast.Name object at 0x7da18fe91870>]]] in starred[call[name[warpDict]][name[name]]] begin[:] variable[warpPt] assign[=] call[name[ET].Element, parameter[constant[map]]] call[name[warpPt].attrib][constant[input]] assign[=] call[name[str], parameter[name[a]]] call[name[warpPt].attrib][constant[output]] assign[=] call[name[str], parameter[name[b]]] call[name[axisElement].append, parameter[name[warpPt]]] call[name[warpElement].append, parameter[name[axisElement]]] call[name[self].root.append, parameter[name[warpElement]]]
keyword[def] identifier[writeWarp] ( identifier[self] , identifier[warpDict] ): literal[string] identifier[warpElement] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[axisNames] = identifier[sorted] ( identifier[warpDict] . identifier[keys] ()) keyword[for] identifier[name] keyword[in] identifier[axisNames] : identifier[axisElement] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[axisElement] . identifier[attrib] [ literal[string] ]= identifier[name] keyword[for] identifier[a] , identifier[b] keyword[in] identifier[warpDict] [ identifier[name] ]: identifier[warpPt] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[warpPt] . identifier[attrib] [ literal[string] ]= identifier[str] ( identifier[a] ) identifier[warpPt] . identifier[attrib] [ literal[string] ]= identifier[str] ( identifier[b] ) identifier[axisElement] . identifier[append] ( identifier[warpPt] ) identifier[warpElement] . identifier[append] ( identifier[axisElement] ) identifier[self] . identifier[root] . identifier[append] ( identifier[warpElement] )
def writeWarp(self, warpDict): """ Write a list of (in, out) values for a warpmap """ warpElement = ET.Element('warp') axisNames = sorted(warpDict.keys()) for name in axisNames: axisElement = ET.Element('axis') axisElement.attrib['name'] = name for (a, b) in warpDict[name]: warpPt = ET.Element('map') warpPt.attrib['input'] = str(a) warpPt.attrib['output'] = str(b) axisElement.append(warpPt) # depends on [control=['for'], data=[]] warpElement.append(axisElement) # depends on [control=['for'], data=['name']] self.root.append(warpElement)
def __get_value_from_data(self, key, data): """Find value from json data :@pram key :@type: string :@pram data :@type data: dict :@return object :@throws KeyError """ if key.isdigit(): return data[int(key)] if key not in data: raise KeyError("Key not exists") return data.get(key)
def function[__get_value_from_data, parameter[self, key, data]]: constant[Find value from json data :@pram key :@type: string :@pram data :@type data: dict :@return object :@throws KeyError ] if call[name[key].isdigit, parameter[]] begin[:] return[call[name[data]][call[name[int], parameter[name[key]]]]] if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[data]] begin[:] <ast.Raise object at 0x7da1b0525990> return[call[name[data].get, parameter[name[key]]]]
keyword[def] identifier[__get_value_from_data] ( identifier[self] , identifier[key] , identifier[data] ): literal[string] keyword[if] identifier[key] . identifier[isdigit] (): keyword[return] identifier[data] [ identifier[int] ( identifier[key] )] keyword[if] identifier[key] keyword[not] keyword[in] identifier[data] : keyword[raise] identifier[KeyError] ( literal[string] ) keyword[return] identifier[data] . identifier[get] ( identifier[key] )
def __get_value_from_data(self, key, data): """Find value from json data :@pram key :@type: string :@pram data :@type data: dict :@return object :@throws KeyError """ if key.isdigit(): return data[int(key)] # depends on [control=['if'], data=[]] if key not in data: raise KeyError('Key not exists') # depends on [control=['if'], data=[]] return data.get(key)
def cached_request(self, request): """ Return a cached response if it exists in the cache, otherwise return False. """ cache_url = self.cache_url(request.url) cc = self.parse_cache_control(request.headers) # non-caching states no_cache = True if 'no-cache' in cc else False if 'max-age' in cc and cc['max-age'] == 0: no_cache = True # Bail out if no-cache was set if no_cache: return False # It is in the cache, so lets see if it is going to be # fresh enough resp = self.serializer.loads(request, self.cache.get(cache_url)) # Check to see if we have a cached object if not resp: return False # If we have a cached 301, return it immediately. We don't # need to test our response for other headers b/c it is # intrinsically "cacheable" as it is Permanent. # See: # https://tools.ietf.org/html/rfc7231#section-6.4.2 # # Client can try to refresh the value by repeating the request # with cache busting headers as usual (ie no-cache). if resp.status == 301: return resp headers = CaseInsensitiveDict(resp.headers) if not headers or 'date' not in headers: # With date or etag, the cached response can never be used # and should be deleted. if 'etag' not in headers: self.cache.delete(cache_url) return False now = time.time() date = calendar.timegm( parsedate_tz(headers['date']) ) current_age = max(0, now - date) # TODO: There is an assumption that the result will be a # urllib3 response object. This may not be best since we # could probably avoid instantiating or constructing the # response until we know we need it. 
resp_cc = self.parse_cache_control(headers) # determine freshness freshness_lifetime = 0 # Check the max-age pragma in the cache control header if 'max-age' in resp_cc and resp_cc['max-age'].isdigit(): freshness_lifetime = int(resp_cc['max-age']) # If there isn't a max-age, check for an expires header elif 'expires' in headers: expires = parsedate_tz(headers['expires']) if expires is not None: expire_time = calendar.timegm(expires) - date freshness_lifetime = max(0, expire_time) # determine if we are setting freshness limit in the req if 'max-age' in cc: try: freshness_lifetime = int(cc['max-age']) except ValueError: freshness_lifetime = 0 if 'min-fresh' in cc: try: min_fresh = int(cc['min-fresh']) except ValueError: min_fresh = 0 # adjust our current age by our min fresh current_age += min_fresh # see how fresh we actually are fresh = (freshness_lifetime > current_age) if fresh: return resp # we're not fresh. If we don't have an Etag, clear it out if 'etag' not in headers: self.cache.delete(cache_url) # return the original handler return False
def function[cached_request, parameter[self, request]]: constant[ Return a cached response if it exists in the cache, otherwise return False. ] variable[cache_url] assign[=] call[name[self].cache_url, parameter[name[request].url]] variable[cc] assign[=] call[name[self].parse_cache_control, parameter[name[request].headers]] variable[no_cache] assign[=] <ast.IfExp object at 0x7da20e960c40> if <ast.BoolOp object at 0x7da20e961d80> begin[:] variable[no_cache] assign[=] constant[True] if name[no_cache] begin[:] return[constant[False]] variable[resp] assign[=] call[name[self].serializer.loads, parameter[name[request], call[name[self].cache.get, parameter[name[cache_url]]]]] if <ast.UnaryOp object at 0x7da20e962da0> begin[:] return[constant[False]] if compare[name[resp].status equal[==] constant[301]] begin[:] return[name[resp]] variable[headers] assign[=] call[name[CaseInsensitiveDict], parameter[name[resp].headers]] if <ast.BoolOp object at 0x7da20e961cc0> begin[:] if compare[constant[etag] <ast.NotIn object at 0x7da2590d7190> name[headers]] begin[:] call[name[self].cache.delete, parameter[name[cache_url]]] return[constant[False]] variable[now] assign[=] call[name[time].time, parameter[]] variable[date] assign[=] call[name[calendar].timegm, parameter[call[name[parsedate_tz], parameter[call[name[headers]][constant[date]]]]]] variable[current_age] assign[=] call[name[max], parameter[constant[0], binary_operation[name[now] - name[date]]]] variable[resp_cc] assign[=] call[name[self].parse_cache_control, parameter[name[headers]]] variable[freshness_lifetime] assign[=] constant[0] if <ast.BoolOp object at 0x7da1b26af6a0> begin[:] variable[freshness_lifetime] assign[=] call[name[int], parameter[call[name[resp_cc]][constant[max-age]]]] if compare[constant[max-age] in name[cc]] begin[:] <ast.Try object at 0x7da1b26adae0> if compare[constant[min-fresh] in name[cc]] begin[:] <ast.Try object at 0x7da1b26adc60> <ast.AugAssign object at 0x7da1b26ad870> variable[fresh] assign[=] 
compare[name[freshness_lifetime] greater[>] name[current_age]] if name[fresh] begin[:] return[name[resp]] if compare[constant[etag] <ast.NotIn object at 0x7da2590d7190> name[headers]] begin[:] call[name[self].cache.delete, parameter[name[cache_url]]] return[constant[False]]
keyword[def] identifier[cached_request] ( identifier[self] , identifier[request] ): literal[string] identifier[cache_url] = identifier[self] . identifier[cache_url] ( identifier[request] . identifier[url] ) identifier[cc] = identifier[self] . identifier[parse_cache_control] ( identifier[request] . identifier[headers] ) identifier[no_cache] = keyword[True] keyword[if] literal[string] keyword[in] identifier[cc] keyword[else] keyword[False] keyword[if] literal[string] keyword[in] identifier[cc] keyword[and] identifier[cc] [ literal[string] ]== literal[int] : identifier[no_cache] = keyword[True] keyword[if] identifier[no_cache] : keyword[return] keyword[False] identifier[resp] = identifier[self] . identifier[serializer] . identifier[loads] ( identifier[request] , identifier[self] . identifier[cache] . identifier[get] ( identifier[cache_url] )) keyword[if] keyword[not] identifier[resp] : keyword[return] keyword[False] keyword[if] identifier[resp] . identifier[status] == literal[int] : keyword[return] identifier[resp] identifier[headers] = identifier[CaseInsensitiveDict] ( identifier[resp] . identifier[headers] ) keyword[if] keyword[not] identifier[headers] keyword[or] literal[string] keyword[not] keyword[in] identifier[headers] : keyword[if] literal[string] keyword[not] keyword[in] identifier[headers] : identifier[self] . identifier[cache] . identifier[delete] ( identifier[cache_url] ) keyword[return] keyword[False] identifier[now] = identifier[time] . identifier[time] () identifier[date] = identifier[calendar] . identifier[timegm] ( identifier[parsedate_tz] ( identifier[headers] [ literal[string] ]) ) identifier[current_age] = identifier[max] ( literal[int] , identifier[now] - identifier[date] ) identifier[resp_cc] = identifier[self] . identifier[parse_cache_control] ( identifier[headers] ) identifier[freshness_lifetime] = literal[int] keyword[if] literal[string] keyword[in] identifier[resp_cc] keyword[and] identifier[resp_cc] [ literal[string] ]. 
identifier[isdigit] (): identifier[freshness_lifetime] = identifier[int] ( identifier[resp_cc] [ literal[string] ]) keyword[elif] literal[string] keyword[in] identifier[headers] : identifier[expires] = identifier[parsedate_tz] ( identifier[headers] [ literal[string] ]) keyword[if] identifier[expires] keyword[is] keyword[not] keyword[None] : identifier[expire_time] = identifier[calendar] . identifier[timegm] ( identifier[expires] )- identifier[date] identifier[freshness_lifetime] = identifier[max] ( literal[int] , identifier[expire_time] ) keyword[if] literal[string] keyword[in] identifier[cc] : keyword[try] : identifier[freshness_lifetime] = identifier[int] ( identifier[cc] [ literal[string] ]) keyword[except] identifier[ValueError] : identifier[freshness_lifetime] = literal[int] keyword[if] literal[string] keyword[in] identifier[cc] : keyword[try] : identifier[min_fresh] = identifier[int] ( identifier[cc] [ literal[string] ]) keyword[except] identifier[ValueError] : identifier[min_fresh] = literal[int] identifier[current_age] += identifier[min_fresh] identifier[fresh] =( identifier[freshness_lifetime] > identifier[current_age] ) keyword[if] identifier[fresh] : keyword[return] identifier[resp] keyword[if] literal[string] keyword[not] keyword[in] identifier[headers] : identifier[self] . identifier[cache] . identifier[delete] ( identifier[cache_url] ) keyword[return] keyword[False]
def cached_request(self, request): """ Return a cached response if it exists in the cache, otherwise return False. """ cache_url = self.cache_url(request.url) cc = self.parse_cache_control(request.headers) # non-caching states no_cache = True if 'no-cache' in cc else False if 'max-age' in cc and cc['max-age'] == 0: no_cache = True # depends on [control=['if'], data=[]] # Bail out if no-cache was set if no_cache: return False # depends on [control=['if'], data=[]] # It is in the cache, so lets see if it is going to be # fresh enough resp = self.serializer.loads(request, self.cache.get(cache_url)) # Check to see if we have a cached object if not resp: return False # depends on [control=['if'], data=[]] # If we have a cached 301, return it immediately. We don't # need to test our response for other headers b/c it is # intrinsically "cacheable" as it is Permanent. # See: # https://tools.ietf.org/html/rfc7231#section-6.4.2 # # Client can try to refresh the value by repeating the request # with cache busting headers as usual (ie no-cache). if resp.status == 301: return resp # depends on [control=['if'], data=[]] headers = CaseInsensitiveDict(resp.headers) if not headers or 'date' not in headers: # With date or etag, the cached response can never be used # and should be deleted. if 'etag' not in headers: self.cache.delete(cache_url) # depends on [control=['if'], data=[]] return False # depends on [control=['if'], data=[]] now = time.time() date = calendar.timegm(parsedate_tz(headers['date'])) current_age = max(0, now - date) # TODO: There is an assumption that the result will be a # urllib3 response object. This may not be best since we # could probably avoid instantiating or constructing the # response until we know we need it. 
resp_cc = self.parse_cache_control(headers) # determine freshness freshness_lifetime = 0 # Check the max-age pragma in the cache control header if 'max-age' in resp_cc and resp_cc['max-age'].isdigit(): freshness_lifetime = int(resp_cc['max-age']) # depends on [control=['if'], data=[]] # If there isn't a max-age, check for an expires header elif 'expires' in headers: expires = parsedate_tz(headers['expires']) if expires is not None: expire_time = calendar.timegm(expires) - date freshness_lifetime = max(0, expire_time) # depends on [control=['if'], data=['expires']] # depends on [control=['if'], data=['headers']] # determine if we are setting freshness limit in the req if 'max-age' in cc: try: freshness_lifetime = int(cc['max-age']) # depends on [control=['try'], data=[]] except ValueError: freshness_lifetime = 0 # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['cc']] if 'min-fresh' in cc: try: min_fresh = int(cc['min-fresh']) # depends on [control=['try'], data=[]] except ValueError: min_fresh = 0 # depends on [control=['except'], data=[]] # adjust our current age by our min fresh current_age += min_fresh # depends on [control=['if'], data=['cc']] # see how fresh we actually are fresh = freshness_lifetime > current_age if fresh: return resp # depends on [control=['if'], data=[]] # we're not fresh. If we don't have an Etag, clear it out if 'etag' not in headers: self.cache.delete(cache_url) # depends on [control=['if'], data=[]] # return the original handler return False
def detectAllKws(self): """ Detect all keyword from infile, return as a list USAGE: kwslist = detectAllKws() """ kwslist = [] for line in self.file_lines: # if line.strip() == '': continue line = ''.join(line.strip().split()) if line.startswith("!"): continue # if ':' in line and not "line" in line: if ':' in line: kw_name = line.split(':')[0] if set(kw_name).difference({'=', '-', '*', '/', '+'}) == set(kw_name): kwslist.append(kw_name) return kwslist
def function[detectAllKws, parameter[self]]: constant[ Detect all keyword from infile, return as a list USAGE: kwslist = detectAllKws() ] variable[kwslist] assign[=] list[[]] for taget[name[line]] in starred[name[self].file_lines] begin[:] variable[line] assign[=] call[constant[].join, parameter[call[call[name[line].strip, parameter[]].split, parameter[]]]] if call[name[line].startswith, parameter[constant[!]]] begin[:] continue if compare[constant[:] in name[line]] begin[:] variable[kw_name] assign[=] call[call[name[line].split, parameter[constant[:]]]][constant[0]] if compare[call[call[name[set], parameter[name[kw_name]]].difference, parameter[<ast.Set object at 0x7da1b09e8760>]] equal[==] call[name[set], parameter[name[kw_name]]]] begin[:] call[name[kwslist].append, parameter[name[kw_name]]] return[name[kwslist]]
keyword[def] identifier[detectAllKws] ( identifier[self] ): literal[string] identifier[kwslist] =[] keyword[for] identifier[line] keyword[in] identifier[self] . identifier[file_lines] : identifier[line] = literal[string] . identifier[join] ( identifier[line] . identifier[strip] (). identifier[split] ()) keyword[if] identifier[line] . identifier[startswith] ( literal[string] ): keyword[continue] keyword[if] literal[string] keyword[in] identifier[line] : identifier[kw_name] = identifier[line] . identifier[split] ( literal[string] )[ literal[int] ] keyword[if] identifier[set] ( identifier[kw_name] ). identifier[difference] ({ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] })== identifier[set] ( identifier[kw_name] ): identifier[kwslist] . identifier[append] ( identifier[kw_name] ) keyword[return] identifier[kwslist]
def detectAllKws(self): """ Detect all keyword from infile, return as a list USAGE: kwslist = detectAllKws() """ kwslist = [] for line in self.file_lines: # if line.strip() == '': continue line = ''.join(line.strip().split()) if line.startswith('!'): continue # depends on [control=['if'], data=[]] # if ':' in line and not "line" in line: if ':' in line: kw_name = line.split(':')[0] if set(kw_name).difference({'=', '-', '*', '/', '+'}) == set(kw_name): kwslist.append(kw_name) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['line']] # depends on [control=['for'], data=['line']] return kwslist
def get_selection(available, selection, base='/scif/apps'): '''we compare the basename (the exp_id) of the selection and available, regardless of parent directories''' if isinstance(selection, str): selection = selection.split(',') available = [os.path.basename(x) for x in available] selection = [os.path.basename(x) for x in selection] finalset = [x for x in selection if x in available] if len(finalset) == 0: bot.warning("No user experiments selected, providing all %s" %(len(available))) finalset = available return ["%s/%s" %(base,x) for x in finalset]
def function[get_selection, parameter[available, selection, base]]: constant[we compare the basename (the exp_id) of the selection and available, regardless of parent directories] if call[name[isinstance], parameter[name[selection], name[str]]] begin[:] variable[selection] assign[=] call[name[selection].split, parameter[constant[,]]] variable[available] assign[=] <ast.ListComp object at 0x7da2041da650> variable[selection] assign[=] <ast.ListComp object at 0x7da2041da770> variable[finalset] assign[=] <ast.ListComp object at 0x7da2041d8f40> if compare[call[name[len], parameter[name[finalset]]] equal[==] constant[0]] begin[:] call[name[bot].warning, parameter[binary_operation[constant[No user experiments selected, providing all %s] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[available]]]]]] variable[finalset] assign[=] name[available] return[<ast.ListComp object at 0x7da2041db5b0>]
keyword[def] identifier[get_selection] ( identifier[available] , identifier[selection] , identifier[base] = literal[string] ): literal[string] keyword[if] identifier[isinstance] ( identifier[selection] , identifier[str] ): identifier[selection] = identifier[selection] . identifier[split] ( literal[string] ) identifier[available] =[ identifier[os] . identifier[path] . identifier[basename] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[available] ] identifier[selection] =[ identifier[os] . identifier[path] . identifier[basename] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[selection] ] identifier[finalset] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[selection] keyword[if] identifier[x] keyword[in] identifier[available] ] keyword[if] identifier[len] ( identifier[finalset] )== literal[int] : identifier[bot] . identifier[warning] ( literal[string] %( identifier[len] ( identifier[available] ))) identifier[finalset] = identifier[available] keyword[return] [ literal[string] %( identifier[base] , identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[finalset] ]
def get_selection(available, selection, base='/scif/apps'): """we compare the basename (the exp_id) of the selection and available, regardless of parent directories""" if isinstance(selection, str): selection = selection.split(',') # depends on [control=['if'], data=[]] available = [os.path.basename(x) for x in available] selection = [os.path.basename(x) for x in selection] finalset = [x for x in selection if x in available] if len(finalset) == 0: bot.warning('No user experiments selected, providing all %s' % len(available)) finalset = available # depends on [control=['if'], data=[]] return ['%s/%s' % (base, x) for x in finalset]
def access_service_descriptor(price, consume_endpoint, service_endpoint, timeout, template_id): """ Access service descriptor. :param price: Asset price, int :param consume_endpoint: url of the service provider, str :param service_endpoint: identifier of the service inside the asset DDO, str :param timeout: amount of time in seconds before the agreement expires, int :param template_id: id of the template use to create the service, str :return: Service descriptor. """ return (ServiceTypes.ASSET_ACCESS, {'price': price, 'consumeEndpoint': consume_endpoint, 'serviceEndpoint': service_endpoint, 'timeout': timeout, 'templateId': template_id})
def function[access_service_descriptor, parameter[price, consume_endpoint, service_endpoint, timeout, template_id]]: constant[ Access service descriptor. :param price: Asset price, int :param consume_endpoint: url of the service provider, str :param service_endpoint: identifier of the service inside the asset DDO, str :param timeout: amount of time in seconds before the agreement expires, int :param template_id: id of the template use to create the service, str :return: Service descriptor. ] return[tuple[[<ast.Attribute object at 0x7da18fe90eb0>, <ast.Dict object at 0x7da18fe939a0>]]]
keyword[def] identifier[access_service_descriptor] ( identifier[price] , identifier[consume_endpoint] , identifier[service_endpoint] , identifier[timeout] , identifier[template_id] ): literal[string] keyword[return] ( identifier[ServiceTypes] . identifier[ASSET_ACCESS] , { literal[string] : identifier[price] , literal[string] : identifier[consume_endpoint] , literal[string] : identifier[service_endpoint] , literal[string] : identifier[timeout] , literal[string] : identifier[template_id] })
def access_service_descriptor(price, consume_endpoint, service_endpoint, timeout, template_id): """ Access service descriptor. :param price: Asset price, int :param consume_endpoint: url of the service provider, str :param service_endpoint: identifier of the service inside the asset DDO, str :param timeout: amount of time in seconds before the agreement expires, int :param template_id: id of the template use to create the service, str :return: Service descriptor. """ return (ServiceTypes.ASSET_ACCESS, {'price': price, 'consumeEndpoint': consume_endpoint, 'serviceEndpoint': service_endpoint, 'timeout': timeout, 'templateId': template_id})
def get_physical_plan(cluster, environ, topology, role=None): ''' Get the physical plan state of a topology in a cluster from tracker :param cluster: :param environ: :param topology: :param role: :return: ''' params = dict(cluster=cluster, environ=environ, topology=topology) if role is not None: params['role'] = role request_url = tornado.httputil.url_concat( create_url(PHYSICALPLAN_URL_FMT), params) raise tornado.gen.Return((yield fetch_url_as_json(request_url)))
def function[get_physical_plan, parameter[cluster, environ, topology, role]]: constant[ Get the physical plan state of a topology in a cluster from tracker :param cluster: :param environ: :param topology: :param role: :return: ] variable[params] assign[=] call[name[dict], parameter[]] if compare[name[role] is_not constant[None]] begin[:] call[name[params]][constant[role]] assign[=] name[role] variable[request_url] assign[=] call[name[tornado].httputil.url_concat, parameter[call[name[create_url], parameter[name[PHYSICALPLAN_URL_FMT]]], name[params]]] <ast.Raise object at 0x7da18fe906a0>
keyword[def] identifier[get_physical_plan] ( identifier[cluster] , identifier[environ] , identifier[topology] , identifier[role] = keyword[None] ): literal[string] identifier[params] = identifier[dict] ( identifier[cluster] = identifier[cluster] , identifier[environ] = identifier[environ] , identifier[topology] = identifier[topology] ) keyword[if] identifier[role] keyword[is] keyword[not] keyword[None] : identifier[params] [ literal[string] ]= identifier[role] identifier[request_url] = identifier[tornado] . identifier[httputil] . identifier[url_concat] ( identifier[create_url] ( identifier[PHYSICALPLAN_URL_FMT] ), identifier[params] ) keyword[raise] identifier[tornado] . identifier[gen] . identifier[Return] (( keyword[yield] identifier[fetch_url_as_json] ( identifier[request_url] )))
def get_physical_plan(cluster, environ, topology, role=None): """ Get the physical plan state of a topology in a cluster from tracker :param cluster: :param environ: :param topology: :param role: :return: """ params = dict(cluster=cluster, environ=environ, topology=topology) if role is not None: params['role'] = role # depends on [control=['if'], data=['role']] request_url = tornado.httputil.url_concat(create_url(PHYSICALPLAN_URL_FMT), params) raise tornado.gen.Return((yield fetch_url_as_json(request_url)))
def _post_xml(self, *args, **kwargs): """Wrapper around Requests for POST requests Returns: Response: A Requests Response object """ if 'timeout' not in kwargs: kwargs['timeout'] = self.timeout req = self.session_xml.post(*args, **kwargs) return req
def function[_post_xml, parameter[self]]: constant[Wrapper around Requests for POST requests Returns: Response: A Requests Response object ] if compare[constant[timeout] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:] call[name[kwargs]][constant[timeout]] assign[=] name[self].timeout variable[req] assign[=] call[name[self].session_xml.post, parameter[<ast.Starred object at 0x7da1b0f42d10>]] return[name[req]]
keyword[def] identifier[_post_xml] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] : identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[timeout] identifier[req] = identifier[self] . identifier[session_xml] . identifier[post] (* identifier[args] ,** identifier[kwargs] ) keyword[return] identifier[req]
def _post_xml(self, *args, **kwargs): """Wrapper around Requests for POST requests Returns: Response: A Requests Response object """ if 'timeout' not in kwargs: kwargs['timeout'] = self.timeout # depends on [control=['if'], data=['kwargs']] req = self.session_xml.post(*args, **kwargs) return req
def can_view(self, user): """ Returns True if user has permission to render this view. At minimum this requires an active staff user. If the required_groups attribute is not empty then the user must be a member of at least one of those groups. If there are no required groups set for the view but required groups are set for the bundle then the user must be a member of at least one of those groups. If there are no groups to check this will return True. """ if user.is_staff and user.is_active: if user.is_superuser: return True elif self.required_groups: return self._user_in_groups(user, self.required_groups) elif self.bundle.required_groups: return self._user_in_groups(user, self.bundle.required_groups) else: return True return False
def function[can_view, parameter[self, user]]: constant[ Returns True if user has permission to render this view. At minimum this requires an active staff user. If the required_groups attribute is not empty then the user must be a member of at least one of those groups. If there are no required groups set for the view but required groups are set for the bundle then the user must be a member of at least one of those groups. If there are no groups to check this will return True. ] if <ast.BoolOp object at 0x7da1b0bd0b20> begin[:] if name[user].is_superuser begin[:] return[constant[True]] return[constant[False]]
keyword[def] identifier[can_view] ( identifier[self] , identifier[user] ): literal[string] keyword[if] identifier[user] . identifier[is_staff] keyword[and] identifier[user] . identifier[is_active] : keyword[if] identifier[user] . identifier[is_superuser] : keyword[return] keyword[True] keyword[elif] identifier[self] . identifier[required_groups] : keyword[return] identifier[self] . identifier[_user_in_groups] ( identifier[user] , identifier[self] . identifier[required_groups] ) keyword[elif] identifier[self] . identifier[bundle] . identifier[required_groups] : keyword[return] identifier[self] . identifier[_user_in_groups] ( identifier[user] , identifier[self] . identifier[bundle] . identifier[required_groups] ) keyword[else] : keyword[return] keyword[True] keyword[return] keyword[False]
def can_view(self, user): """ Returns True if user has permission to render this view. At minimum this requires an active staff user. If the required_groups attribute is not empty then the user must be a member of at least one of those groups. If there are no required groups set for the view but required groups are set for the bundle then the user must be a member of at least one of those groups. If there are no groups to check this will return True. """ if user.is_staff and user.is_active: if user.is_superuser: return True # depends on [control=['if'], data=[]] elif self.required_groups: return self._user_in_groups(user, self.required_groups) # depends on [control=['if'], data=[]] elif self.bundle.required_groups: return self._user_in_groups(user, self.bundle.required_groups) # depends on [control=['if'], data=[]] else: return True # depends on [control=['if'], data=[]] return False
def cumulative_sum(self): """ Return the cumulative sum of the elements in the SArray. Returns an SArray where each element in the output corresponds to the sum of all the elements preceding and including it. The SArray is expected to be of numeric type (int, float), or a numeric vector type. Returns ------- out : sarray[int, float, array.array] Notes ----- - Missing values are ignored while performing the cumulative aggregate operation. - For SArray's of type array.array, all entries are expected to be of the same size. Examples -------- >>> sa = SArray([1, 2, 3, 4, 5]) >>> sa.cumulative_sum() dtype: int rows: 3 [1, 3, 6, 10, 15] """ from .. import extensions agg_op = "__builtin__cum_sum__" return SArray(_proxy = self.__proxy__.builtin_cumulative_aggregate(agg_op))
def function[cumulative_sum, parameter[self]]: constant[ Return the cumulative sum of the elements in the SArray. Returns an SArray where each element in the output corresponds to the sum of all the elements preceding and including it. The SArray is expected to be of numeric type (int, float), or a numeric vector type. Returns ------- out : sarray[int, float, array.array] Notes ----- - Missing values are ignored while performing the cumulative aggregate operation. - For SArray's of type array.array, all entries are expected to be of the same size. Examples -------- >>> sa = SArray([1, 2, 3, 4, 5]) >>> sa.cumulative_sum() dtype: int rows: 3 [1, 3, 6, 10, 15] ] from relative_module[None] import module[extensions] variable[agg_op] assign[=] constant[__builtin__cum_sum__] return[call[name[SArray], parameter[]]]
keyword[def] identifier[cumulative_sum] ( identifier[self] ): literal[string] keyword[from] .. keyword[import] identifier[extensions] identifier[agg_op] = literal[string] keyword[return] identifier[SArray] ( identifier[_proxy] = identifier[self] . identifier[__proxy__] . identifier[builtin_cumulative_aggregate] ( identifier[agg_op] ))
def cumulative_sum(self): """ Return the cumulative sum of the elements in the SArray. Returns an SArray where each element in the output corresponds to the sum of all the elements preceding and including it. The SArray is expected to be of numeric type (int, float), or a numeric vector type. Returns ------- out : sarray[int, float, array.array] Notes ----- - Missing values are ignored while performing the cumulative aggregate operation. - For SArray's of type array.array, all entries are expected to be of the same size. Examples -------- >>> sa = SArray([1, 2, 3, 4, 5]) >>> sa.cumulative_sum() dtype: int rows: 3 [1, 3, 6, 10, 15] """ from .. import extensions agg_op = '__builtin__cum_sum__' return SArray(_proxy=self.__proxy__.builtin_cumulative_aggregate(agg_op))
def GetVolumeByIndex(self, volume_index): """Retrieves a specific volume based on the index. Args: volume_index (int): index of the volume. Returns: Volume: a volume or None if not available. """ if not self._is_parsed: self._Parse() self._is_parsed = True if volume_index < 0 or volume_index >= len(self._volume_identifiers): return None volume_identifier = self._volume_identifiers[volume_index] return self._volumes[volume_identifier]
def function[GetVolumeByIndex, parameter[self, volume_index]]: constant[Retrieves a specific volume based on the index. Args: volume_index (int): index of the volume. Returns: Volume: a volume or None if not available. ] if <ast.UnaryOp object at 0x7da1b064ccd0> begin[:] call[name[self]._Parse, parameter[]] name[self]._is_parsed assign[=] constant[True] if <ast.BoolOp object at 0x7da1b064e110> begin[:] return[constant[None]] variable[volume_identifier] assign[=] call[name[self]._volume_identifiers][name[volume_index]] return[call[name[self]._volumes][name[volume_identifier]]]
keyword[def] identifier[GetVolumeByIndex] ( identifier[self] , identifier[volume_index] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[_is_parsed] : identifier[self] . identifier[_Parse] () identifier[self] . identifier[_is_parsed] = keyword[True] keyword[if] identifier[volume_index] < literal[int] keyword[or] identifier[volume_index] >= identifier[len] ( identifier[self] . identifier[_volume_identifiers] ): keyword[return] keyword[None] identifier[volume_identifier] = identifier[self] . identifier[_volume_identifiers] [ identifier[volume_index] ] keyword[return] identifier[self] . identifier[_volumes] [ identifier[volume_identifier] ]
def GetVolumeByIndex(self, volume_index): """Retrieves a specific volume based on the index. Args: volume_index (int): index of the volume. Returns: Volume: a volume or None if not available. """ if not self._is_parsed: self._Parse() self._is_parsed = True # depends on [control=['if'], data=[]] if volume_index < 0 or volume_index >= len(self._volume_identifiers): return None # depends on [control=['if'], data=[]] volume_identifier = self._volume_identifiers[volume_index] return self._volumes[volume_identifier]
def step2(expnums, ccd, version, prefix=None, dry_run=False, default="WCS"): """run the actual step2 on the given exp/ccd combo""" jmp_trans = ['step2ajmp'] jmp_args = ['step2bjmp'] matt_args = ['step2matt_jmp'] idx = 0 for expnum in expnums: jmp_args.append( storage.get_file(expnum, ccd=ccd, version=version, ext='obj.jmp', prefix=prefix)[0:-8] ) jmp_trans.append( storage.get_file(expnum, ccd=ccd, version=version, ext='obj.jmp', prefix=prefix)[0:-8] ) idx += 1 matt_args.append('-f%d' % idx) matt_args.append( storage.get_file(expnum, ccd=ccd, version=version, ext='obj.matt', prefix=prefix)[0:-9] ) logging.info(util.exec_prog(jmp_trans)) if default == "WCS": logging.info(compute_trans(expnums, ccd, version, prefix, default=default)) logging.info(util.exec_prog(jmp_args)) logging.info(util.exec_prog(matt_args)) ## check that the shifts from step2 are rational check_args = ['checktrans'] if os.access('proc-these-files', os.R_OK): os.unlink('proc-these-files') ptf = open('proc-these-files', 'w') ptf.write("# A dummy file that is created so checktrans could run.\n") ptf.write("# Frame FWHM PSF?\n") for expnum in expnums: filename = os.path.splitext(storage.get_image(expnum, ccd, version=version, prefix=prefix))[0] if not os.access(filename + ".bright.psf", os.R_OK): os.link(filename + ".bright.jmp", filename + ".bright.psf") if not os.access(filename + ".obj.psf", os.R_OK): os.link(filename + ".obj.jmp", filename + ".obj.psf") ptf.write("{:>19s}{:>10.1f}{:>5s}\n".format(filename, _FWHM, "NO")) ptf.close() if os.access('BAD_TRANS', os.F_OK): os.unlink('BAD_TRANS') logging.info(util.exec_prog(check_args)) if os.access('BAD_TRANS', os.F_OK): raise OSError(errno.EBADMSG, 'BAD_TRANS') if os.access('proc-these-files', os.F_OK): os.unlink('proc-these-files') if dry_run: return for expnum in expnums: for ext in ['unid.jmp', 'unid.matt', 'trans.jmp']: uri = storage.dbimages_uri(expnum, ccd=ccd, version=version, ext=ext, prefix=prefix) filename = os.path.basename(uri) 
storage.copy(filename, uri) return
def function[step2, parameter[expnums, ccd, version, prefix, dry_run, default]]: constant[run the actual step2 on the given exp/ccd combo] variable[jmp_trans] assign[=] list[[<ast.Constant object at 0x7da1b19c3c40>]] variable[jmp_args] assign[=] list[[<ast.Constant object at 0x7da1b19c3b80>]] variable[matt_args] assign[=] list[[<ast.Constant object at 0x7da1b19c3ac0>]] variable[idx] assign[=] constant[0] for taget[name[expnum]] in starred[name[expnums]] begin[:] call[name[jmp_args].append, parameter[call[call[name[storage].get_file, parameter[name[expnum]]]][<ast.Slice object at 0x7da1b19c3610>]]] call[name[jmp_trans].append, parameter[call[call[name[storage].get_file, parameter[name[expnum]]]][<ast.Slice object at 0x7da1b19c31f0>]]] <ast.AugAssign object at 0x7da1b19c3100> call[name[matt_args].append, parameter[binary_operation[constant[-f%d] <ast.Mod object at 0x7da2590d6920> name[idx]]]] call[name[matt_args].append, parameter[call[call[name[storage].get_file, parameter[name[expnum]]]][<ast.Slice object at 0x7da1b19c2bc0>]]] call[name[logging].info, parameter[call[name[util].exec_prog, parameter[name[jmp_trans]]]]] if compare[name[default] equal[==] constant[WCS]] begin[:] call[name[logging].info, parameter[call[name[compute_trans], parameter[name[expnums], name[ccd], name[version], name[prefix]]]]] call[name[logging].info, parameter[call[name[util].exec_prog, parameter[name[jmp_args]]]]] call[name[logging].info, parameter[call[name[util].exec_prog, parameter[name[matt_args]]]]] variable[check_args] assign[=] list[[<ast.Constant object at 0x7da1b19c2200>]] if call[name[os].access, parameter[constant[proc-these-files], name[os].R_OK]] begin[:] call[name[os].unlink, parameter[constant[proc-these-files]]] variable[ptf] assign[=] call[name[open], parameter[constant[proc-these-files], constant[w]]] call[name[ptf].write, parameter[constant[# A dummy file that is created so checktrans could run. ]]] call[name[ptf].write, parameter[constant[# Frame FWHM PSF? 
]]] for taget[name[expnum]] in starred[name[expnums]] begin[:] variable[filename] assign[=] call[call[name[os].path.splitext, parameter[call[name[storage].get_image, parameter[name[expnum], name[ccd]]]]]][constant[0]] if <ast.UnaryOp object at 0x7da1b19c1810> begin[:] call[name[os].link, parameter[binary_operation[name[filename] + constant[.bright.jmp]], binary_operation[name[filename] + constant[.bright.psf]]]] if <ast.UnaryOp object at 0x7da1b19c1420> begin[:] call[name[os].link, parameter[binary_operation[name[filename] + constant[.obj.jmp]], binary_operation[name[filename] + constant[.obj.psf]]]] call[name[ptf].write, parameter[call[constant[{:>19s}{:>10.1f}{:>5s} ].format, parameter[name[filename], name[_FWHM], constant[NO]]]]] call[name[ptf].close, parameter[]] if call[name[os].access, parameter[constant[BAD_TRANS], name[os].F_OK]] begin[:] call[name[os].unlink, parameter[constant[BAD_TRANS]]] call[name[logging].info, parameter[call[name[util].exec_prog, parameter[name[check_args]]]]] if call[name[os].access, parameter[constant[BAD_TRANS], name[os].F_OK]] begin[:] <ast.Raise object at 0x7da1b1950850> if call[name[os].access, parameter[constant[proc-these-files], name[os].F_OK]] begin[:] call[name[os].unlink, parameter[constant[proc-these-files]]] if name[dry_run] begin[:] return[None] for taget[name[expnum]] in starred[name[expnums]] begin[:] for taget[name[ext]] in starred[list[[<ast.Constant object at 0x7da1b1953df0>, <ast.Constant object at 0x7da1b1952200>, <ast.Constant object at 0x7da1b1952b60>]]] begin[:] variable[uri] assign[=] call[name[storage].dbimages_uri, parameter[name[expnum]]] variable[filename] assign[=] call[name[os].path.basename, parameter[name[uri]]] call[name[storage].copy, parameter[name[filename], name[uri]]] return[None]
keyword[def] identifier[step2] ( identifier[expnums] , identifier[ccd] , identifier[version] , identifier[prefix] = keyword[None] , identifier[dry_run] = keyword[False] , identifier[default] = literal[string] ): literal[string] identifier[jmp_trans] =[ literal[string] ] identifier[jmp_args] =[ literal[string] ] identifier[matt_args] =[ literal[string] ] identifier[idx] = literal[int] keyword[for] identifier[expnum] keyword[in] identifier[expnums] : identifier[jmp_args] . identifier[append] ( identifier[storage] . identifier[get_file] ( identifier[expnum] , identifier[ccd] = identifier[ccd] , identifier[version] = identifier[version] , identifier[ext] = literal[string] , identifier[prefix] = identifier[prefix] )[ literal[int] :- literal[int] ] ) identifier[jmp_trans] . identifier[append] ( identifier[storage] . identifier[get_file] ( identifier[expnum] , identifier[ccd] = identifier[ccd] , identifier[version] = identifier[version] , identifier[ext] = literal[string] , identifier[prefix] = identifier[prefix] )[ literal[int] :- literal[int] ] ) identifier[idx] += literal[int] identifier[matt_args] . identifier[append] ( literal[string] % identifier[idx] ) identifier[matt_args] . identifier[append] ( identifier[storage] . identifier[get_file] ( identifier[expnum] , identifier[ccd] = identifier[ccd] , identifier[version] = identifier[version] , identifier[ext] = literal[string] , identifier[prefix] = identifier[prefix] )[ literal[int] :- literal[int] ] ) identifier[logging] . identifier[info] ( identifier[util] . identifier[exec_prog] ( identifier[jmp_trans] )) keyword[if] identifier[default] == literal[string] : identifier[logging] . identifier[info] ( identifier[compute_trans] ( identifier[expnums] , identifier[ccd] , identifier[version] , identifier[prefix] , identifier[default] = identifier[default] )) identifier[logging] . identifier[info] ( identifier[util] . identifier[exec_prog] ( identifier[jmp_args] )) identifier[logging] . 
identifier[info] ( identifier[util] . identifier[exec_prog] ( identifier[matt_args] )) identifier[check_args] =[ literal[string] ] keyword[if] identifier[os] . identifier[access] ( literal[string] , identifier[os] . identifier[R_OK] ): identifier[os] . identifier[unlink] ( literal[string] ) identifier[ptf] = identifier[open] ( literal[string] , literal[string] ) identifier[ptf] . identifier[write] ( literal[string] ) identifier[ptf] . identifier[write] ( literal[string] ) keyword[for] identifier[expnum] keyword[in] identifier[expnums] : identifier[filename] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[storage] . identifier[get_image] ( identifier[expnum] , identifier[ccd] , identifier[version] = identifier[version] , identifier[prefix] = identifier[prefix] ))[ literal[int] ] keyword[if] keyword[not] identifier[os] . identifier[access] ( identifier[filename] + literal[string] , identifier[os] . identifier[R_OK] ): identifier[os] . identifier[link] ( identifier[filename] + literal[string] , identifier[filename] + literal[string] ) keyword[if] keyword[not] identifier[os] . identifier[access] ( identifier[filename] + literal[string] , identifier[os] . identifier[R_OK] ): identifier[os] . identifier[link] ( identifier[filename] + literal[string] , identifier[filename] + literal[string] ) identifier[ptf] . identifier[write] ( literal[string] . identifier[format] ( identifier[filename] , identifier[_FWHM] , literal[string] )) identifier[ptf] . identifier[close] () keyword[if] identifier[os] . identifier[access] ( literal[string] , identifier[os] . identifier[F_OK] ): identifier[os] . identifier[unlink] ( literal[string] ) identifier[logging] . identifier[info] ( identifier[util] . identifier[exec_prog] ( identifier[check_args] )) keyword[if] identifier[os] . identifier[access] ( literal[string] , identifier[os] . identifier[F_OK] ): keyword[raise] identifier[OSError] ( identifier[errno] . 
identifier[EBADMSG] , literal[string] ) keyword[if] identifier[os] . identifier[access] ( literal[string] , identifier[os] . identifier[F_OK] ): identifier[os] . identifier[unlink] ( literal[string] ) keyword[if] identifier[dry_run] : keyword[return] keyword[for] identifier[expnum] keyword[in] identifier[expnums] : keyword[for] identifier[ext] keyword[in] [ literal[string] , literal[string] , literal[string] ]: identifier[uri] = identifier[storage] . identifier[dbimages_uri] ( identifier[expnum] , identifier[ccd] = identifier[ccd] , identifier[version] = identifier[version] , identifier[ext] = identifier[ext] , identifier[prefix] = identifier[prefix] ) identifier[filename] = identifier[os] . identifier[path] . identifier[basename] ( identifier[uri] ) identifier[storage] . identifier[copy] ( identifier[filename] , identifier[uri] ) keyword[return]
def step2(expnums, ccd, version, prefix=None, dry_run=False, default='WCS'): """run the actual step2 on the given exp/ccd combo""" jmp_trans = ['step2ajmp'] jmp_args = ['step2bjmp'] matt_args = ['step2matt_jmp'] idx = 0 for expnum in expnums: jmp_args.append(storage.get_file(expnum, ccd=ccd, version=version, ext='obj.jmp', prefix=prefix)[0:-8]) jmp_trans.append(storage.get_file(expnum, ccd=ccd, version=version, ext='obj.jmp', prefix=prefix)[0:-8]) idx += 1 matt_args.append('-f%d' % idx) matt_args.append(storage.get_file(expnum, ccd=ccd, version=version, ext='obj.matt', prefix=prefix)[0:-9]) # depends on [control=['for'], data=['expnum']] logging.info(util.exec_prog(jmp_trans)) if default == 'WCS': logging.info(compute_trans(expnums, ccd, version, prefix, default=default)) # depends on [control=['if'], data=['default']] logging.info(util.exec_prog(jmp_args)) logging.info(util.exec_prog(matt_args)) ## check that the shifts from step2 are rational check_args = ['checktrans'] if os.access('proc-these-files', os.R_OK): os.unlink('proc-these-files') # depends on [control=['if'], data=[]] ptf = open('proc-these-files', 'w') ptf.write('# A dummy file that is created so checktrans could run.\n') ptf.write('# Frame FWHM PSF?\n') for expnum in expnums: filename = os.path.splitext(storage.get_image(expnum, ccd, version=version, prefix=prefix))[0] if not os.access(filename + '.bright.psf', os.R_OK): os.link(filename + '.bright.jmp', filename + '.bright.psf') # depends on [control=['if'], data=[]] if not os.access(filename + '.obj.psf', os.R_OK): os.link(filename + '.obj.jmp', filename + '.obj.psf') # depends on [control=['if'], data=[]] ptf.write('{:>19s}{:>10.1f}{:>5s}\n'.format(filename, _FWHM, 'NO')) # depends on [control=['for'], data=['expnum']] ptf.close() if os.access('BAD_TRANS', os.F_OK): os.unlink('BAD_TRANS') # depends on [control=['if'], data=[]] logging.info(util.exec_prog(check_args)) if os.access('BAD_TRANS', os.F_OK): raise OSError(errno.EBADMSG, 'BAD_TRANS') # 
depends on [control=['if'], data=[]] if os.access('proc-these-files', os.F_OK): os.unlink('proc-these-files') # depends on [control=['if'], data=[]] if dry_run: return # depends on [control=['if'], data=[]] for expnum in expnums: for ext in ['unid.jmp', 'unid.matt', 'trans.jmp']: uri = storage.dbimages_uri(expnum, ccd=ccd, version=version, ext=ext, prefix=prefix) filename = os.path.basename(uri) storage.copy(filename, uri) # depends on [control=['for'], data=['ext']] # depends on [control=['for'], data=['expnum']] return
def _make_parser(**kwargs): """ :return: (keyword args to be used, parser object) """ # Optional arguements for configparser.SafeConfigParser{,readfp} kwargs_0 = filter_options(("defaults", "dict_type", "allow_no_value"), kwargs) kwargs_1 = filter_options(("filename", ), kwargs) try: parser = configparser.SafeConfigParser(**kwargs_0) except TypeError: # .. note:: # It seems ConfigParser.*ConfigParser in python 2.6 does not support # 'allow_no_value' option parameter, and TypeError will be thrown. kwargs_0 = filter_options(("defaults", "dict_type"), kwargs) parser = configparser.SafeConfigParser(**kwargs_0) return (kwargs_1, parser)
def function[_make_parser, parameter[]]: constant[ :return: (keyword args to be used, parser object) ] variable[kwargs_0] assign[=] call[name[filter_options], parameter[tuple[[<ast.Constant object at 0x7da18fe90820>, <ast.Constant object at 0x7da18fe91ba0>, <ast.Constant object at 0x7da18fe90580>]], name[kwargs]]] variable[kwargs_1] assign[=] call[name[filter_options], parameter[tuple[[<ast.Constant object at 0x7da18fe90190>]], name[kwargs]]] <ast.Try object at 0x7da1b0544370> return[tuple[[<ast.Name object at 0x7da18fe90430>, <ast.Name object at 0x7da18fe935b0>]]]
keyword[def] identifier[_make_parser] (** identifier[kwargs] ): literal[string] identifier[kwargs_0] = identifier[filter_options] (( literal[string] , literal[string] , literal[string] ), identifier[kwargs] ) identifier[kwargs_1] = identifier[filter_options] (( literal[string] ,), identifier[kwargs] ) keyword[try] : identifier[parser] = identifier[configparser] . identifier[SafeConfigParser] (** identifier[kwargs_0] ) keyword[except] identifier[TypeError] : identifier[kwargs_0] = identifier[filter_options] (( literal[string] , literal[string] ), identifier[kwargs] ) identifier[parser] = identifier[configparser] . identifier[SafeConfigParser] (** identifier[kwargs_0] ) keyword[return] ( identifier[kwargs_1] , identifier[parser] )
def _make_parser(**kwargs): """ :return: (keyword args to be used, parser object) """ # Optional arguements for configparser.SafeConfigParser{,readfp} kwargs_0 = filter_options(('defaults', 'dict_type', 'allow_no_value'), kwargs) kwargs_1 = filter_options(('filename',), kwargs) try: parser = configparser.SafeConfigParser(**kwargs_0) # depends on [control=['try'], data=[]] except TypeError: # .. note:: # It seems ConfigParser.*ConfigParser in python 2.6 does not support # 'allow_no_value' option parameter, and TypeError will be thrown. kwargs_0 = filter_options(('defaults', 'dict_type'), kwargs) parser = configparser.SafeConfigParser(**kwargs_0) # depends on [control=['except'], data=[]] return (kwargs_1, parser)
def mount(self, prefix, app, **options): ''' Mount an application (:class:`Bottle` or plain WSGI) to a specific URL prefix. Example:: root_app.mount('/admin/', admin_app) :param prefix: path prefix or `mount-point`. If it ends in a slash, that slash is mandatory. :param app: an instance of :class:`Bottle` or a WSGI application. All other parameters are passed to the underlying :meth:`route` call. ''' if isinstance(app, basestring): depr('Parameter order of Bottle.mount() changed.', True) # 0.10 segments = [p for p in prefix.split('/') if p] if not segments: raise ValueError('Empty path prefix.') path_depth = len(segments) def mountpoint_wrapper(): try: request.path_shift(path_depth) rs = HTTPResponse([]) def start_response(status, headerlist, exc_info=None): if exc_info: try: _raise(*exc_info) finally: exc_info = None rs.status = status for name, value in headerlist: rs.add_header(name, value) return rs.body.append body = app(request.environ, start_response) if body and rs.body: body = itertools.chain(rs.body, body) rs.body = body or rs.body return rs finally: request.path_shift(-path_depth) options.setdefault('skip', True) options.setdefault('method', 'PROXY') options.setdefault('mountpoint', {'prefix': prefix, 'target': app}) options['callback'] = mountpoint_wrapper self.route('/%s/<:re:.*>' % '/'.join(segments), **options) if not prefix.endswith('/'): self.route('/' + '/'.join(segments), **options)
def function[mount, parameter[self, prefix, app]]: constant[ Mount an application (:class:`Bottle` or plain WSGI) to a specific URL prefix. Example:: root_app.mount('/admin/', admin_app) :param prefix: path prefix or `mount-point`. If it ends in a slash, that slash is mandatory. :param app: an instance of :class:`Bottle` or a WSGI application. All other parameters are passed to the underlying :meth:`route` call. ] if call[name[isinstance], parameter[name[app], name[basestring]]] begin[:] call[name[depr], parameter[constant[Parameter order of Bottle.mount() changed.], constant[True]]] variable[segments] assign[=] <ast.ListComp object at 0x7da1b18393c0> if <ast.UnaryOp object at 0x7da1b2344070> begin[:] <ast.Raise object at 0x7da1b2345ba0> variable[path_depth] assign[=] call[name[len], parameter[name[segments]]] def function[mountpoint_wrapper, parameter[]]: <ast.Try object at 0x7da1b2345540> call[name[options].setdefault, parameter[constant[skip], constant[True]]] call[name[options].setdefault, parameter[constant[method], constant[PROXY]]] call[name[options].setdefault, parameter[constant[mountpoint], dictionary[[<ast.Constant object at 0x7da1b183b910>, <ast.Constant object at 0x7da1b183b8e0>], [<ast.Name object at 0x7da1b1839ea0>, <ast.Name object at 0x7da1b1839f30>]]]] call[name[options]][constant[callback]] assign[=] name[mountpoint_wrapper] call[name[self].route, parameter[binary_operation[constant[/%s/<:re:.*>] <ast.Mod object at 0x7da2590d6920> call[constant[/].join, parameter[name[segments]]]]]] if <ast.UnaryOp object at 0x7da1b1838d30> begin[:] call[name[self].route, parameter[binary_operation[constant[/] + call[constant[/].join, parameter[name[segments]]]]]]
keyword[def] identifier[mount] ( identifier[self] , identifier[prefix] , identifier[app] ,** identifier[options] ): literal[string] keyword[if] identifier[isinstance] ( identifier[app] , identifier[basestring] ): identifier[depr] ( literal[string] , keyword[True] ) identifier[segments] =[ identifier[p] keyword[for] identifier[p] keyword[in] identifier[prefix] . identifier[split] ( literal[string] ) keyword[if] identifier[p] ] keyword[if] keyword[not] identifier[segments] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[path_depth] = identifier[len] ( identifier[segments] ) keyword[def] identifier[mountpoint_wrapper] (): keyword[try] : identifier[request] . identifier[path_shift] ( identifier[path_depth] ) identifier[rs] = identifier[HTTPResponse] ([]) keyword[def] identifier[start_response] ( identifier[status] , identifier[headerlist] , identifier[exc_info] = keyword[None] ): keyword[if] identifier[exc_info] : keyword[try] : identifier[_raise] (* identifier[exc_info] ) keyword[finally] : identifier[exc_info] = keyword[None] identifier[rs] . identifier[status] = identifier[status] keyword[for] identifier[name] , identifier[value] keyword[in] identifier[headerlist] : identifier[rs] . identifier[add_header] ( identifier[name] , identifier[value] ) keyword[return] identifier[rs] . identifier[body] . identifier[append] identifier[body] = identifier[app] ( identifier[request] . identifier[environ] , identifier[start_response] ) keyword[if] identifier[body] keyword[and] identifier[rs] . identifier[body] : identifier[body] = identifier[itertools] . identifier[chain] ( identifier[rs] . identifier[body] , identifier[body] ) identifier[rs] . identifier[body] = identifier[body] keyword[or] identifier[rs] . identifier[body] keyword[return] identifier[rs] keyword[finally] : identifier[request] . identifier[path_shift] (- identifier[path_depth] ) identifier[options] . identifier[setdefault] ( literal[string] , keyword[True] ) identifier[options] . 
identifier[setdefault] ( literal[string] , literal[string] ) identifier[options] . identifier[setdefault] ( literal[string] ,{ literal[string] : identifier[prefix] , literal[string] : identifier[app] }) identifier[options] [ literal[string] ]= identifier[mountpoint_wrapper] identifier[self] . identifier[route] ( literal[string] % literal[string] . identifier[join] ( identifier[segments] ),** identifier[options] ) keyword[if] keyword[not] identifier[prefix] . identifier[endswith] ( literal[string] ): identifier[self] . identifier[route] ( literal[string] + literal[string] . identifier[join] ( identifier[segments] ),** identifier[options] )
def mount(self, prefix, app, **options): """ Mount an application (:class:`Bottle` or plain WSGI) to a specific URL prefix. Example:: root_app.mount('/admin/', admin_app) :param prefix: path prefix or `mount-point`. If it ends in a slash, that slash is mandatory. :param app: an instance of :class:`Bottle` or a WSGI application. All other parameters are passed to the underlying :meth:`route` call. """ if isinstance(app, basestring): depr('Parameter order of Bottle.mount() changed.', True) # 0.10 # depends on [control=['if'], data=[]] segments = [p for p in prefix.split('/') if p] if not segments: raise ValueError('Empty path prefix.') # depends on [control=['if'], data=[]] path_depth = len(segments) def mountpoint_wrapper(): try: request.path_shift(path_depth) rs = HTTPResponse([]) def start_response(status, headerlist, exc_info=None): if exc_info: try: _raise(*exc_info) # depends on [control=['try'], data=[]] finally: exc_info = None # depends on [control=['if'], data=[]] rs.status = status for (name, value) in headerlist: rs.add_header(name, value) # depends on [control=['for'], data=[]] return rs.body.append body = app(request.environ, start_response) if body and rs.body: body = itertools.chain(rs.body, body) # depends on [control=['if'], data=[]] rs.body = body or rs.body return rs # depends on [control=['try'], data=[]] finally: request.path_shift(-path_depth) options.setdefault('skip', True) options.setdefault('method', 'PROXY') options.setdefault('mountpoint', {'prefix': prefix, 'target': app}) options['callback'] = mountpoint_wrapper self.route('/%s/<:re:.*>' % '/'.join(segments), **options) if not prefix.endswith('/'): self.route('/' + '/'.join(segments), **options) # depends on [control=['if'], data=[]]
def exec_module(self, module): """Execute the module using the old imp.""" path = [os.path.dirname(module.__file__)] # file should have been resolved before (module creation) file = None try: file, pathname, description = imp.find_module(module.__name__.rpartition('.')[-1], path) module = imp.load_module(module.__name__, file, pathname, description) finally: if file: file.close()
def function[exec_module, parameter[self, module]]: constant[Execute the module using the old imp.] variable[path] assign[=] list[[<ast.Call object at 0x7da18c4cddb0>]] variable[file] assign[=] constant[None] <ast.Try object at 0x7da18c4ce950>
keyword[def] identifier[exec_module] ( identifier[self] , identifier[module] ): literal[string] identifier[path] =[ identifier[os] . identifier[path] . identifier[dirname] ( identifier[module] . identifier[__file__] )] identifier[file] = keyword[None] keyword[try] : identifier[file] , identifier[pathname] , identifier[description] = identifier[imp] . identifier[find_module] ( identifier[module] . identifier[__name__] . identifier[rpartition] ( literal[string] )[- literal[int] ], identifier[path] ) identifier[module] = identifier[imp] . identifier[load_module] ( identifier[module] . identifier[__name__] , identifier[file] , identifier[pathname] , identifier[description] ) keyword[finally] : keyword[if] identifier[file] : identifier[file] . identifier[close] ()
def exec_module(self, module): """Execute the module using the old imp.""" path = [os.path.dirname(module.__file__)] # file should have been resolved before (module creation) file = None try: (file, pathname, description) = imp.find_module(module.__name__.rpartition('.')[-1], path) module = imp.load_module(module.__name__, file, pathname, description) # depends on [control=['try'], data=[]] finally: if file: file.close() # depends on [control=['if'], data=[]]
def _read_byte(self, address): """Read a byte from memory. """ # Initialize memory location with a random value. if address not in self._memory: self._memory[address] = random.randint(0x00, 0xff) return self._memory[address]
def function[_read_byte, parameter[self, address]]: constant[Read a byte from memory. ] if compare[name[address] <ast.NotIn object at 0x7da2590d7190> name[self]._memory] begin[:] call[name[self]._memory][name[address]] assign[=] call[name[random].randint, parameter[constant[0], constant[255]]] return[call[name[self]._memory][name[address]]]
keyword[def] identifier[_read_byte] ( identifier[self] , identifier[address] ): literal[string] keyword[if] identifier[address] keyword[not] keyword[in] identifier[self] . identifier[_memory] : identifier[self] . identifier[_memory] [ identifier[address] ]= identifier[random] . identifier[randint] ( literal[int] , literal[int] ) keyword[return] identifier[self] . identifier[_memory] [ identifier[address] ]
def _read_byte(self, address): """Read a byte from memory. """ # Initialize memory location with a random value. if address not in self._memory: self._memory[address] = random.randint(0, 255) # depends on [control=['if'], data=['address']] return self._memory[address]
def _find_symbol(self, module, name, fallback=None): """ Find the symbol of the specified name inside the module or raise an exception. """ if not hasattr(module, name) and fallback: return self._find_symbol(module, fallback, None) return getattr(module, name)
def function[_find_symbol, parameter[self, module, name, fallback]]: constant[ Find the symbol of the specified name inside the module or raise an exception. ] if <ast.BoolOp object at 0x7da18ede75b0> begin[:] return[call[name[self]._find_symbol, parameter[name[module], name[fallback], constant[None]]]] return[call[name[getattr], parameter[name[module], name[name]]]]
keyword[def] identifier[_find_symbol] ( identifier[self] , identifier[module] , identifier[name] , identifier[fallback] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[hasattr] ( identifier[module] , identifier[name] ) keyword[and] identifier[fallback] : keyword[return] identifier[self] . identifier[_find_symbol] ( identifier[module] , identifier[fallback] , keyword[None] ) keyword[return] identifier[getattr] ( identifier[module] , identifier[name] )
def _find_symbol(self, module, name, fallback=None): """ Find the symbol of the specified name inside the module or raise an exception. """ if not hasattr(module, name) and fallback: return self._find_symbol(module, fallback, None) # depends on [control=['if'], data=[]] return getattr(module, name)
def get_upgrade(self, using=None, **kwargs): """ Monitor how much of the index is upgraded. Any additional keyword arguments will be passed to ``Elasticsearch.indices.get_upgrade`` unchanged. """ return self._get_connection(using).indices.get_upgrade(index=self._name, **kwargs)
def function[get_upgrade, parameter[self, using]]: constant[ Monitor how much of the index is upgraded. Any additional keyword arguments will be passed to ``Elasticsearch.indices.get_upgrade`` unchanged. ] return[call[call[name[self]._get_connection, parameter[name[using]]].indices.get_upgrade, parameter[]]]
keyword[def] identifier[get_upgrade] ( identifier[self] , identifier[using] = keyword[None] ,** identifier[kwargs] ): literal[string] keyword[return] identifier[self] . identifier[_get_connection] ( identifier[using] ). identifier[indices] . identifier[get_upgrade] ( identifier[index] = identifier[self] . identifier[_name] ,** identifier[kwargs] )
def get_upgrade(self, using=None, **kwargs): """ Monitor how much of the index is upgraded. Any additional keyword arguments will be passed to ``Elasticsearch.indices.get_upgrade`` unchanged. """ return self._get_connection(using).indices.get_upgrade(index=self._name, **kwargs)
def _parse_body(self, body): """ For just call a deserializer for FORMAT""" if is_python3(): return json.loads(body.decode('UTF-8')) else: return json.loads(body)
def function[_parse_body, parameter[self, body]]: constant[ For just call a deserializer for FORMAT] if call[name[is_python3], parameter[]] begin[:] return[call[name[json].loads, parameter[call[name[body].decode, parameter[constant[UTF-8]]]]]]
keyword[def] identifier[_parse_body] ( identifier[self] , identifier[body] ): literal[string] keyword[if] identifier[is_python3] (): keyword[return] identifier[json] . identifier[loads] ( identifier[body] . identifier[decode] ( literal[string] )) keyword[else] : keyword[return] identifier[json] . identifier[loads] ( identifier[body] )
def _parse_body(self, body): """ For just call a deserializer for FORMAT""" if is_python3(): return json.loads(body.decode('UTF-8')) # depends on [control=['if'], data=[]] else: return json.loads(body)
def get_container_file_download_url(cluster, environ, topology, container, path, role=None): ''' :param cluster: :param environ: :param topology: :param container: :param path: :param role: :return: ''' params = dict( cluster=cluster, environ=environ, topology=topology, container=container, path=path) if role is not None: params['role'] = role request_url = tornado.httputil.url_concat( create_url(FILE_DOWNLOAD_URL_FMT), params) if role is not None: request_url = tornado.httputil.url_concat(request_url, dict(role=role)) return request_url
def function[get_container_file_download_url, parameter[cluster, environ, topology, container, path, role]]: constant[ :param cluster: :param environ: :param topology: :param container: :param path: :param role: :return: ] variable[params] assign[=] call[name[dict], parameter[]] if compare[name[role] is_not constant[None]] begin[:] call[name[params]][constant[role]] assign[=] name[role] variable[request_url] assign[=] call[name[tornado].httputil.url_concat, parameter[call[name[create_url], parameter[name[FILE_DOWNLOAD_URL_FMT]]], name[params]]] if compare[name[role] is_not constant[None]] begin[:] variable[request_url] assign[=] call[name[tornado].httputil.url_concat, parameter[name[request_url], call[name[dict], parameter[]]]] return[name[request_url]]
keyword[def] identifier[get_container_file_download_url] ( identifier[cluster] , identifier[environ] , identifier[topology] , identifier[container] , identifier[path] , identifier[role] = keyword[None] ): literal[string] identifier[params] = identifier[dict] ( identifier[cluster] = identifier[cluster] , identifier[environ] = identifier[environ] , identifier[topology] = identifier[topology] , identifier[container] = identifier[container] , identifier[path] = identifier[path] ) keyword[if] identifier[role] keyword[is] keyword[not] keyword[None] : identifier[params] [ literal[string] ]= identifier[role] identifier[request_url] = identifier[tornado] . identifier[httputil] . identifier[url_concat] ( identifier[create_url] ( identifier[FILE_DOWNLOAD_URL_FMT] ), identifier[params] ) keyword[if] identifier[role] keyword[is] keyword[not] keyword[None] : identifier[request_url] = identifier[tornado] . identifier[httputil] . identifier[url_concat] ( identifier[request_url] , identifier[dict] ( identifier[role] = identifier[role] )) keyword[return] identifier[request_url]
def get_container_file_download_url(cluster, environ, topology, container, path, role=None): """ :param cluster: :param environ: :param topology: :param container: :param path: :param role: :return: """ params = dict(cluster=cluster, environ=environ, topology=topology, container=container, path=path) if role is not None: params['role'] = role # depends on [control=['if'], data=['role']] request_url = tornado.httputil.url_concat(create_url(FILE_DOWNLOAD_URL_FMT), params) if role is not None: request_url = tornado.httputil.url_concat(request_url, dict(role=role)) # depends on [control=['if'], data=['role']] return request_url
def send_recv(self, message, timeout=10.0): """Send a message to a PandABox and wait for the response Args: message (str): The message to send timeout (float): How long to wait before raising queue.Empty Returns: str: The response """ response_queue = self.send(message) response = self.recv(response_queue, timeout) return response
def function[send_recv, parameter[self, message, timeout]]: constant[Send a message to a PandABox and wait for the response Args: message (str): The message to send timeout (float): How long to wait before raising queue.Empty Returns: str: The response ] variable[response_queue] assign[=] call[name[self].send, parameter[name[message]]] variable[response] assign[=] call[name[self].recv, parameter[name[response_queue], name[timeout]]] return[name[response]]
keyword[def] identifier[send_recv] ( identifier[self] , identifier[message] , identifier[timeout] = literal[int] ): literal[string] identifier[response_queue] = identifier[self] . identifier[send] ( identifier[message] ) identifier[response] = identifier[self] . identifier[recv] ( identifier[response_queue] , identifier[timeout] ) keyword[return] identifier[response]
def send_recv(self, message, timeout=10.0): """Send a message to a PandABox and wait for the response Args: message (str): The message to send timeout (float): How long to wait before raising queue.Empty Returns: str: The response """ response_queue = self.send(message) response = self.recv(response_queue, timeout) return response
def config_string(self): """ Build the storable string corresponding to this configuration object. :rtype: string """ return (gc.CONFIG_STRING_SEPARATOR_SYMBOL).join( [u"%s%s%s" % (fn, gc.CONFIG_STRING_ASSIGNMENT_SYMBOL, self.data[fn]) for fn in sorted(self.data.keys()) if self.data[fn] is not None] )
def function[config_string, parameter[self]]: constant[ Build the storable string corresponding to this configuration object. :rtype: string ] return[call[name[gc].CONFIG_STRING_SEPARATOR_SYMBOL.join, parameter[<ast.ListComp object at 0x7da1b18fb2b0>]]]
keyword[def] identifier[config_string] ( identifier[self] ): literal[string] keyword[return] ( identifier[gc] . identifier[CONFIG_STRING_SEPARATOR_SYMBOL] ). identifier[join] ( [ literal[string] %( identifier[fn] , identifier[gc] . identifier[CONFIG_STRING_ASSIGNMENT_SYMBOL] , identifier[self] . identifier[data] [ identifier[fn] ]) keyword[for] identifier[fn] keyword[in] identifier[sorted] ( identifier[self] . identifier[data] . identifier[keys] ()) keyword[if] identifier[self] . identifier[data] [ identifier[fn] ] keyword[is] keyword[not] keyword[None] ] )
def config_string(self): """ Build the storable string corresponding to this configuration object. :rtype: string """ return gc.CONFIG_STRING_SEPARATOR_SYMBOL.join([u'%s%s%s' % (fn, gc.CONFIG_STRING_ASSIGNMENT_SYMBOL, self.data[fn]) for fn in sorted(self.data.keys()) if self.data[fn] is not None])
def is_fifo(self): """ Whether this path is a FIFO. """ try: return S_ISFIFO(self.stat().st_mode) except OSError as e: if e.errno not in (ENOENT, ENOTDIR): raise # Path doesn't exist or is a broken symlink # (see https://bitbucket.org/pitrou/pathlib/issue/12/) return False
def function[is_fifo, parameter[self]]: constant[ Whether this path is a FIFO. ] <ast.Try object at 0x7da2041d8a90>
keyword[def] identifier[is_fifo] ( identifier[self] ): literal[string] keyword[try] : keyword[return] identifier[S_ISFIFO] ( identifier[self] . identifier[stat] (). identifier[st_mode] ) keyword[except] identifier[OSError] keyword[as] identifier[e] : keyword[if] identifier[e] . identifier[errno] keyword[not] keyword[in] ( identifier[ENOENT] , identifier[ENOTDIR] ): keyword[raise] keyword[return] keyword[False]
def is_fifo(self): """ Whether this path is a FIFO. """ try: return S_ISFIFO(self.stat().st_mode) # depends on [control=['try'], data=[]] except OSError as e: if e.errno not in (ENOENT, ENOTDIR): raise # depends on [control=['if'], data=[]] # Path doesn't exist or is a broken symlink # (see https://bitbucket.org/pitrou/pathlib/issue/12/) return False # depends on [control=['except'], data=['e']]
def load_tf_weights_in_openai_gpt(model, openai_checkpoint_folder_path): """ Load tf pre-trained weights in a pytorch model (from NumPy arrays here) """ import re import numpy as np print("Loading weights...") names = json.load(open(openai_checkpoint_folder_path + '/parameters_names.json', "r", encoding='utf-8')) shapes = json.load(open(openai_checkpoint_folder_path + '/params_shapes.json', "r", encoding='utf-8')) offsets = np.cumsum([np.prod(shape) for shape in shapes]) init_params = [np.load(openai_checkpoint_folder_path + '/params_{}.npy'.format(n)) for n in range(10)] init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1] init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)] # This was used when we had a single embedding matrix for positions and tokens # init_params[0] = np.concatenate([init_params[1], init_params[0]], 0) # del init_params[1] init_params = [arr.squeeze() for arr in init_params] try: assert model.tokens_embed.weight.shape == init_params[1].shape assert model.positions_embed.weight.shape == init_params[0].shape except AssertionError as e: e.args += (model.tokens_embed.weight.shape, init_params[1].shape) e.args += (model.positions_embed.weight.shape, init_params[0].shape) raise model.tokens_embed.weight.data = torch.from_numpy(init_params[1]) model.positions_embed.weight.data = torch.from_numpy(init_params[0]) names.pop(0) # Pop position and token embedding arrays init_params.pop(0) init_params.pop(0) for name, array in zip(names, init_params): # names[1:n_transfer], init_params[1:n_transfer]): name = name[6:] # skip "model/" assert name[-2:] == ":0" name = name[:-2] name = name.split('/') pointer = model for m_name in name: if re.fullmatch(r'[A-Za-z]+\d+', m_name): l = re.split(r'(\d+)', m_name) else: l = [m_name] if l[0] == 'g': pointer = getattr(pointer, 'weight') elif l[0] == 'b': pointer = getattr(pointer, 'bias') elif l[0] == 'w': pointer = getattr(pointer, 'weight') else: pointer = getattr(pointer, 
l[0]) if len(l) >= 2: num = int(l[1]) pointer = pointer[num] try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise print("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array) return model
def function[load_tf_weights_in_openai_gpt, parameter[model, openai_checkpoint_folder_path]]: constant[ Load tf pre-trained weights in a pytorch model (from NumPy arrays here) ] import module[re] import module[numpy] as alias[np] call[name[print], parameter[constant[Loading weights...]]] variable[names] assign[=] call[name[json].load, parameter[call[name[open], parameter[binary_operation[name[openai_checkpoint_folder_path] + constant[/parameters_names.json]], constant[r]]]]] variable[shapes] assign[=] call[name[json].load, parameter[call[name[open], parameter[binary_operation[name[openai_checkpoint_folder_path] + constant[/params_shapes.json]], constant[r]]]]] variable[offsets] assign[=] call[name[np].cumsum, parameter[<ast.ListComp object at 0x7da207f98ac0>]] variable[init_params] assign[=] <ast.ListComp object at 0x7da207f9ae60> variable[init_params] assign[=] call[call[name[np].split, parameter[call[name[np].concatenate, parameter[name[init_params], constant[0]]], name[offsets]]]][<ast.Slice object at 0x7da18eb57430>] variable[init_params] assign[=] <ast.ListComp object at 0x7da18eb57fd0> variable[init_params] assign[=] <ast.ListComp object at 0x7da18eb56380> <ast.Try object at 0x7da18eb55ff0> name[model].tokens_embed.weight.data assign[=] call[name[torch].from_numpy, parameter[call[name[init_params]][constant[1]]]] name[model].positions_embed.weight.data assign[=] call[name[torch].from_numpy, parameter[call[name[init_params]][constant[0]]]] call[name[names].pop, parameter[constant[0]]] call[name[init_params].pop, parameter[constant[0]]] call[name[init_params].pop, parameter[constant[0]]] for taget[tuple[[<ast.Name object at 0x7da18eb56920>, <ast.Name object at 0x7da18eb55a20>]]] in starred[call[name[zip], parameter[name[names], name[init_params]]]] begin[:] variable[name] assign[=] call[name[name]][<ast.Slice object at 0x7da18eb55750>] assert[compare[call[name[name]][<ast.Slice object at 0x7da18eb551b0>] equal[==] constant[:0]]] variable[name] assign[=] 
call[name[name]][<ast.Slice object at 0x7da18eb57dc0>] variable[name] assign[=] call[name[name].split, parameter[constant[/]]] variable[pointer] assign[=] name[model] for taget[name[m_name]] in starred[name[name]] begin[:] if call[name[re].fullmatch, parameter[constant[[A-Za-z]+\d+], name[m_name]]] begin[:] variable[l] assign[=] call[name[re].split, parameter[constant[(\d+)], name[m_name]]] if compare[call[name[l]][constant[0]] equal[==] constant[g]] begin[:] variable[pointer] assign[=] call[name[getattr], parameter[name[pointer], constant[weight]]] if compare[call[name[len], parameter[name[l]]] greater_or_equal[>=] constant[2]] begin[:] variable[num] assign[=] call[name[int], parameter[call[name[l]][constant[1]]]] variable[pointer] assign[=] call[name[pointer]][name[num]] <ast.Try object at 0x7da20e955ea0> <ast.Try object at 0x7da20e9549d0> call[name[print], parameter[call[constant[Initialize PyTorch weight {}].format, parameter[name[name]]]]] name[pointer].data assign[=] call[name[torch].from_numpy, parameter[name[array]]] return[name[model]]
keyword[def] identifier[load_tf_weights_in_openai_gpt] ( identifier[model] , identifier[openai_checkpoint_folder_path] ): literal[string] keyword[import] identifier[re] keyword[import] identifier[numpy] keyword[as] identifier[np] identifier[print] ( literal[string] ) identifier[names] = identifier[json] . identifier[load] ( identifier[open] ( identifier[openai_checkpoint_folder_path] + literal[string] , literal[string] , identifier[encoding] = literal[string] )) identifier[shapes] = identifier[json] . identifier[load] ( identifier[open] ( identifier[openai_checkpoint_folder_path] + literal[string] , literal[string] , identifier[encoding] = literal[string] )) identifier[offsets] = identifier[np] . identifier[cumsum] ([ identifier[np] . identifier[prod] ( identifier[shape] ) keyword[for] identifier[shape] keyword[in] identifier[shapes] ]) identifier[init_params] =[ identifier[np] . identifier[load] ( identifier[openai_checkpoint_folder_path] + literal[string] . identifier[format] ( identifier[n] )) keyword[for] identifier[n] keyword[in] identifier[range] ( literal[int] )] identifier[init_params] = identifier[np] . identifier[split] ( identifier[np] . identifier[concatenate] ( identifier[init_params] , literal[int] ), identifier[offsets] )[:- literal[int] ] identifier[init_params] =[ identifier[param] . identifier[reshape] ( identifier[shape] ) keyword[for] identifier[param] , identifier[shape] keyword[in] identifier[zip] ( identifier[init_params] , identifier[shapes] )] identifier[init_params] =[ identifier[arr] . identifier[squeeze] () keyword[for] identifier[arr] keyword[in] identifier[init_params] ] keyword[try] : keyword[assert] identifier[model] . identifier[tokens_embed] . identifier[weight] . identifier[shape] == identifier[init_params] [ literal[int] ]. identifier[shape] keyword[assert] identifier[model] . identifier[positions_embed] . identifier[weight] . identifier[shape] == identifier[init_params] [ literal[int] ]. 
identifier[shape] keyword[except] identifier[AssertionError] keyword[as] identifier[e] : identifier[e] . identifier[args] +=( identifier[model] . identifier[tokens_embed] . identifier[weight] . identifier[shape] , identifier[init_params] [ literal[int] ]. identifier[shape] ) identifier[e] . identifier[args] +=( identifier[model] . identifier[positions_embed] . identifier[weight] . identifier[shape] , identifier[init_params] [ literal[int] ]. identifier[shape] ) keyword[raise] identifier[model] . identifier[tokens_embed] . identifier[weight] . identifier[data] = identifier[torch] . identifier[from_numpy] ( identifier[init_params] [ literal[int] ]) identifier[model] . identifier[positions_embed] . identifier[weight] . identifier[data] = identifier[torch] . identifier[from_numpy] ( identifier[init_params] [ literal[int] ]) identifier[names] . identifier[pop] ( literal[int] ) identifier[init_params] . identifier[pop] ( literal[int] ) identifier[init_params] . identifier[pop] ( literal[int] ) keyword[for] identifier[name] , identifier[array] keyword[in] identifier[zip] ( identifier[names] , identifier[init_params] ): identifier[name] = identifier[name] [ literal[int] :] keyword[assert] identifier[name] [- literal[int] :]== literal[string] identifier[name] = identifier[name] [:- literal[int] ] identifier[name] = identifier[name] . identifier[split] ( literal[string] ) identifier[pointer] = identifier[model] keyword[for] identifier[m_name] keyword[in] identifier[name] : keyword[if] identifier[re] . identifier[fullmatch] ( literal[string] , identifier[m_name] ): identifier[l] = identifier[re] . 
identifier[split] ( literal[string] , identifier[m_name] ) keyword[else] : identifier[l] =[ identifier[m_name] ] keyword[if] identifier[l] [ literal[int] ]== literal[string] : identifier[pointer] = identifier[getattr] ( identifier[pointer] , literal[string] ) keyword[elif] identifier[l] [ literal[int] ]== literal[string] : identifier[pointer] = identifier[getattr] ( identifier[pointer] , literal[string] ) keyword[elif] identifier[l] [ literal[int] ]== literal[string] : identifier[pointer] = identifier[getattr] ( identifier[pointer] , literal[string] ) keyword[else] : identifier[pointer] = identifier[getattr] ( identifier[pointer] , identifier[l] [ literal[int] ]) keyword[if] identifier[len] ( identifier[l] )>= literal[int] : identifier[num] = identifier[int] ( identifier[l] [ literal[int] ]) identifier[pointer] = identifier[pointer] [ identifier[num] ] keyword[try] : keyword[assert] identifier[pointer] . identifier[shape] == identifier[array] . identifier[shape] keyword[except] identifier[AssertionError] keyword[as] identifier[e] : identifier[e] . identifier[args] +=( identifier[pointer] . identifier[shape] , identifier[array] . identifier[shape] ) keyword[raise] keyword[try] : keyword[assert] identifier[pointer] . identifier[shape] == identifier[array] . identifier[shape] keyword[except] identifier[AssertionError] keyword[as] identifier[e] : identifier[e] . identifier[args] +=( identifier[pointer] . identifier[shape] , identifier[array] . identifier[shape] ) keyword[raise] identifier[print] ( literal[string] . identifier[format] ( identifier[name] )) identifier[pointer] . identifier[data] = identifier[torch] . identifier[from_numpy] ( identifier[array] ) keyword[return] identifier[model]
def load_tf_weights_in_openai_gpt(model, openai_checkpoint_folder_path): """ Load tf pre-trained weights in a pytorch model (from NumPy arrays here) """ import re import numpy as np print('Loading weights...') names = json.load(open(openai_checkpoint_folder_path + '/parameters_names.json', 'r', encoding='utf-8')) shapes = json.load(open(openai_checkpoint_folder_path + '/params_shapes.json', 'r', encoding='utf-8')) offsets = np.cumsum([np.prod(shape) for shape in shapes]) init_params = [np.load(openai_checkpoint_folder_path + '/params_{}.npy'.format(n)) for n in range(10)] init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1] init_params = [param.reshape(shape) for (param, shape) in zip(init_params, shapes)] # This was used when we had a single embedding matrix for positions and tokens # init_params[0] = np.concatenate([init_params[1], init_params[0]], 0) # del init_params[1] init_params = [arr.squeeze() for arr in init_params] try: assert model.tokens_embed.weight.shape == init_params[1].shape assert model.positions_embed.weight.shape == init_params[0].shape # depends on [control=['try'], data=[]] except AssertionError as e: e.args += (model.tokens_embed.weight.shape, init_params[1].shape) e.args += (model.positions_embed.weight.shape, init_params[0].shape) raise # depends on [control=['except'], data=['e']] model.tokens_embed.weight.data = torch.from_numpy(init_params[1]) model.positions_embed.weight.data = torch.from_numpy(init_params[0]) names.pop(0) # Pop position and token embedding arrays init_params.pop(0) init_params.pop(0) for (name, array) in zip(names, init_params): # names[1:n_transfer], init_params[1:n_transfer]): name = name[6:] # skip "model/" assert name[-2:] == ':0' name = name[:-2] name = name.split('/') pointer = model for m_name in name: if re.fullmatch('[A-Za-z]+\\d+', m_name): l = re.split('(\\d+)', m_name) # depends on [control=['if'], data=[]] else: l = [m_name] if l[0] == 'g': pointer = getattr(pointer, 'weight') # depends on 
[control=['if'], data=[]] elif l[0] == 'b': pointer = getattr(pointer, 'bias') # depends on [control=['if'], data=[]] elif l[0] == 'w': pointer = getattr(pointer, 'weight') # depends on [control=['if'], data=[]] else: pointer = getattr(pointer, l[0]) if len(l) >= 2: num = int(l[1]) pointer = pointer[num] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['m_name']] try: assert pointer.shape == array.shape # depends on [control=['try'], data=[]] except AssertionError as e: e.args += (pointer.shape, array.shape) raise # depends on [control=['except'], data=['e']] try: assert pointer.shape == array.shape # depends on [control=['try'], data=[]] except AssertionError as e: e.args += (pointer.shape, array.shape) raise # depends on [control=['except'], data=['e']] print('Initialize PyTorch weight {}'.format(name)) pointer.data = torch.from_numpy(array) # depends on [control=['for'], data=[]] return model
def dispatch_command(self, command, params=None): """Dispatch device commands to the appropriate handler.""" try: if command in self.handlers: self.handlers[command](**params) else: logging.warning('Unsupported command: %s: %s', command, params) except Exception as e: logging.warning('Error during command execution', exc_info=sys.exc_info()) raise e
def function[dispatch_command, parameter[self, command, params]]: constant[Dispatch device commands to the appropriate handler.] <ast.Try object at 0x7da18bc73e50>
keyword[def] identifier[dispatch_command] ( identifier[self] , identifier[command] , identifier[params] = keyword[None] ): literal[string] keyword[try] : keyword[if] identifier[command] keyword[in] identifier[self] . identifier[handlers] : identifier[self] . identifier[handlers] [ identifier[command] ](** identifier[params] ) keyword[else] : identifier[logging] . identifier[warning] ( literal[string] , identifier[command] , identifier[params] ) keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[logging] . identifier[warning] ( literal[string] , identifier[exc_info] = identifier[sys] . identifier[exc_info] ()) keyword[raise] identifier[e]
def dispatch_command(self, command, params=None): """Dispatch device commands to the appropriate handler.""" try: if command in self.handlers: self.handlers[command](**params) # depends on [control=['if'], data=['command']] else: logging.warning('Unsupported command: %s: %s', command, params) # depends on [control=['try'], data=[]] except Exception as e: logging.warning('Error during command execution', exc_info=sys.exc_info()) raise e # depends on [control=['except'], data=['e']]
def show_limit(entries, **kwargs): """Shows a menu but limits the number of entries shown at a time. Functionally equivalent to `show_menu()` with the `limit` parameter set.""" limit = kwargs.pop('limit', 5) if limit <= 0: return show_menu(entries, **kwargs) istart = 0 # Index of group start. iend = limit # Index of group end. dft = kwargs.pop('dft', None) if type(dft) == int: dft = str(dft) while True: if iend > len(entries): iend = len(entries) istart = iend - limit if istart < 0: istart = 0 iend = limit unext = len(entries) - iend # Number of next entries. uprev = istart # Number of previous entries. nnext = "" # Name of 'next' menu entry. nprev = "" # Name of 'prev' menu entry. dnext = "" # Description of 'next' menu entry. dprev = "" # Description of 'prev' menu entry. group = copy.deepcopy(entries[istart:iend]) names = [i.name for i in group] if unext > 0: for i in ["n", "N", "next", "NEXT", "->", ">>", ">>>"]: if i not in names: nnext = i dnext = "Next %u of %u entries" % (unext, len(entries)) group.append(MenuEntry(nnext, dnext, None, None, None)) names.append("n") break if uprev > 0: for i in ["p", "P", "prev", "PREV", "<-", "<<", "<<<"]: if i not in names: nprev = i dprev = "Previous %u of %u entries" % (uprev, len(entries)) group.append(MenuEntry(nprev, dprev, None, None, None)) names.append("p") break tmpdft = None if dft != None: if dft not in names: if "n" in names: tmpdft = "n" else: tmpdft = dft result = show_menu(group, dft=tmpdft, **kwargs) if result == nnext or result == dnext: istart += limit iend += limit elif result == nprev or result == dprev: istart -= limit iend -= limit else: return result
def function[show_limit, parameter[entries]]: constant[Shows a menu but limits the number of entries shown at a time. Functionally equivalent to `show_menu()` with the `limit` parameter set.] variable[limit] assign[=] call[name[kwargs].pop, parameter[constant[limit], constant[5]]] if compare[name[limit] less_or_equal[<=] constant[0]] begin[:] return[call[name[show_menu], parameter[name[entries]]]] variable[istart] assign[=] constant[0] variable[iend] assign[=] name[limit] variable[dft] assign[=] call[name[kwargs].pop, parameter[constant[dft], constant[None]]] if compare[call[name[type], parameter[name[dft]]] equal[==] name[int]] begin[:] variable[dft] assign[=] call[name[str], parameter[name[dft]]] while constant[True] begin[:] if compare[name[iend] greater[>] call[name[len], parameter[name[entries]]]] begin[:] variable[iend] assign[=] call[name[len], parameter[name[entries]]] variable[istart] assign[=] binary_operation[name[iend] - name[limit]] if compare[name[istart] less[<] constant[0]] begin[:] variable[istart] assign[=] constant[0] variable[iend] assign[=] name[limit] variable[unext] assign[=] binary_operation[call[name[len], parameter[name[entries]]] - name[iend]] variable[uprev] assign[=] name[istart] variable[nnext] assign[=] constant[] variable[nprev] assign[=] constant[] variable[dnext] assign[=] constant[] variable[dprev] assign[=] constant[] variable[group] assign[=] call[name[copy].deepcopy, parameter[call[name[entries]][<ast.Slice object at 0x7da20e957c70>]]] variable[names] assign[=] <ast.ListComp object at 0x7da20e9569b0> if compare[name[unext] greater[>] constant[0]] begin[:] for taget[name[i]] in starred[list[[<ast.Constant object at 0x7da20e956ce0>, <ast.Constant object at 0x7da20e954eb0>, <ast.Constant object at 0x7da20e956e30>, <ast.Constant object at 0x7da20e956020>, <ast.Constant object at 0x7da20e9547c0>, <ast.Constant object at 0x7da20e9563e0>, <ast.Constant object at 0x7da20e955a20>]]] begin[:] if compare[name[i] <ast.NotIn object at 
0x7da2590d7190> name[names]] begin[:] variable[nnext] assign[=] name[i] variable[dnext] assign[=] binary_operation[constant[Next %u of %u entries] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20e955a80>, <ast.Call object at 0x7da20e954b50>]]] call[name[group].append, parameter[call[name[MenuEntry], parameter[name[nnext], name[dnext], constant[None], constant[None], constant[None]]]]] call[name[names].append, parameter[constant[n]]] break if compare[name[uprev] greater[>] constant[0]] begin[:] for taget[name[i]] in starred[list[[<ast.Constant object at 0x7da20e955450>, <ast.Constant object at 0x7da20e957d00>, <ast.Constant object at 0x7da20e9577f0>, <ast.Constant object at 0x7da20e9542e0>, <ast.Constant object at 0x7da20e956aa0>, <ast.Constant object at 0x7da20e9549d0>, <ast.Constant object at 0x7da20e957e50>]]] begin[:] if compare[name[i] <ast.NotIn object at 0x7da2590d7190> name[names]] begin[:] variable[nprev] assign[=] name[i] variable[dprev] assign[=] binary_operation[constant[Previous %u of %u entries] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20e957ee0>, <ast.Call object at 0x7da20e954f10>]]] call[name[group].append, parameter[call[name[MenuEntry], parameter[name[nprev], name[dprev], constant[None], constant[None], constant[None]]]]] call[name[names].append, parameter[constant[p]]] break variable[tmpdft] assign[=] constant[None] if compare[name[dft] not_equal[!=] constant[None]] begin[:] if compare[name[dft] <ast.NotIn object at 0x7da2590d7190> name[names]] begin[:] if compare[constant[n] in name[names]] begin[:] variable[tmpdft] assign[=] constant[n] variable[result] assign[=] call[name[show_menu], parameter[name[group]]] if <ast.BoolOp object at 0x7da20c76e020> begin[:] <ast.AugAssign object at 0x7da20c76d5d0> <ast.AugAssign object at 0x7da20c76cf40>
keyword[def] identifier[show_limit] ( identifier[entries] ,** identifier[kwargs] ): literal[string] identifier[limit] = identifier[kwargs] . identifier[pop] ( literal[string] , literal[int] ) keyword[if] identifier[limit] <= literal[int] : keyword[return] identifier[show_menu] ( identifier[entries] ,** identifier[kwargs] ) identifier[istart] = literal[int] identifier[iend] = identifier[limit] identifier[dft] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] ) keyword[if] identifier[type] ( identifier[dft] )== identifier[int] : identifier[dft] = identifier[str] ( identifier[dft] ) keyword[while] keyword[True] : keyword[if] identifier[iend] > identifier[len] ( identifier[entries] ): identifier[iend] = identifier[len] ( identifier[entries] ) identifier[istart] = identifier[iend] - identifier[limit] keyword[if] identifier[istart] < literal[int] : identifier[istart] = literal[int] identifier[iend] = identifier[limit] identifier[unext] = identifier[len] ( identifier[entries] )- identifier[iend] identifier[uprev] = identifier[istart] identifier[nnext] = literal[string] identifier[nprev] = literal[string] identifier[dnext] = literal[string] identifier[dprev] = literal[string] identifier[group] = identifier[copy] . identifier[deepcopy] ( identifier[entries] [ identifier[istart] : identifier[iend] ]) identifier[names] =[ identifier[i] . identifier[name] keyword[for] identifier[i] keyword[in] identifier[group] ] keyword[if] identifier[unext] > literal[int] : keyword[for] identifier[i] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]: keyword[if] identifier[i] keyword[not] keyword[in] identifier[names] : identifier[nnext] = identifier[i] identifier[dnext] = literal[string] %( identifier[unext] , identifier[len] ( identifier[entries] )) identifier[group] . 
identifier[append] ( identifier[MenuEntry] ( identifier[nnext] , identifier[dnext] , keyword[None] , keyword[None] , keyword[None] )) identifier[names] . identifier[append] ( literal[string] ) keyword[break] keyword[if] identifier[uprev] > literal[int] : keyword[for] identifier[i] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]: keyword[if] identifier[i] keyword[not] keyword[in] identifier[names] : identifier[nprev] = identifier[i] identifier[dprev] = literal[string] %( identifier[uprev] , identifier[len] ( identifier[entries] )) identifier[group] . identifier[append] ( identifier[MenuEntry] ( identifier[nprev] , identifier[dprev] , keyword[None] , keyword[None] , keyword[None] )) identifier[names] . identifier[append] ( literal[string] ) keyword[break] identifier[tmpdft] = keyword[None] keyword[if] identifier[dft] != keyword[None] : keyword[if] identifier[dft] keyword[not] keyword[in] identifier[names] : keyword[if] literal[string] keyword[in] identifier[names] : identifier[tmpdft] = literal[string] keyword[else] : identifier[tmpdft] = identifier[dft] identifier[result] = identifier[show_menu] ( identifier[group] , identifier[dft] = identifier[tmpdft] ,** identifier[kwargs] ) keyword[if] identifier[result] == identifier[nnext] keyword[or] identifier[result] == identifier[dnext] : identifier[istart] += identifier[limit] identifier[iend] += identifier[limit] keyword[elif] identifier[result] == identifier[nprev] keyword[or] identifier[result] == identifier[dprev] : identifier[istart] -= identifier[limit] identifier[iend] -= identifier[limit] keyword[else] : keyword[return] identifier[result]
def show_limit(entries, **kwargs): """Shows a menu but limits the number of entries shown at a time. Functionally equivalent to `show_menu()` with the `limit` parameter set.""" limit = kwargs.pop('limit', 5) if limit <= 0: return show_menu(entries, **kwargs) # depends on [control=['if'], data=[]] istart = 0 # Index of group start. iend = limit # Index of group end. dft = kwargs.pop('dft', None) if type(dft) == int: dft = str(dft) # depends on [control=['if'], data=[]] while True: if iend > len(entries): iend = len(entries) istart = iend - limit # depends on [control=['if'], data=['iend']] if istart < 0: istart = 0 iend = limit # depends on [control=['if'], data=['istart']] unext = len(entries) - iend # Number of next entries. uprev = istart # Number of previous entries. nnext = '' # Name of 'next' menu entry. nprev = '' # Name of 'prev' menu entry. dnext = '' # Description of 'next' menu entry. dprev = '' # Description of 'prev' menu entry. group = copy.deepcopy(entries[istart:iend]) names = [i.name for i in group] if unext > 0: for i in ['n', 'N', 'next', 'NEXT', '->', '>>', '>>>']: if i not in names: nnext = i dnext = 'Next %u of %u entries' % (unext, len(entries)) group.append(MenuEntry(nnext, dnext, None, None, None)) names.append('n') break # depends on [control=['if'], data=['i', 'names']] # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=['unext']] if uprev > 0: for i in ['p', 'P', 'prev', 'PREV', '<-', '<<', '<<<']: if i not in names: nprev = i dprev = 'Previous %u of %u entries' % (uprev, len(entries)) group.append(MenuEntry(nprev, dprev, None, None, None)) names.append('p') break # depends on [control=['if'], data=['i', 'names']] # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=['uprev']] tmpdft = None if dft != None: if dft not in names: if 'n' in names: tmpdft = 'n' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['names']] else: tmpdft = dft # depends on [control=['if'], 
data=['dft']] result = show_menu(group, dft=tmpdft, **kwargs) if result == nnext or result == dnext: istart += limit iend += limit # depends on [control=['if'], data=[]] elif result == nprev or result == dprev: istart -= limit iend -= limit # depends on [control=['if'], data=[]] else: return result # depends on [control=['while'], data=[]]
def setOverlayInputMethod(self, ulOverlayHandle, eInputMethod):
    """Sets the input settings for the specified overlay.

    Delegates directly to the underlying OpenVR function table and
    returns whatever the native call returns (an error/status code).
    """
    return self.function_table.setOverlayInputMethod(ulOverlayHandle, eInputMethod)
def function[setOverlayInputMethod, parameter[self, ulOverlayHandle, eInputMethod]]: constant[Sets the input settings for the specified overlay.] variable[fn] assign[=] name[self].function_table.setOverlayInputMethod variable[result] assign[=] call[name[fn], parameter[name[ulOverlayHandle], name[eInputMethod]]] return[name[result]]
keyword[def] identifier[setOverlayInputMethod] ( identifier[self] , identifier[ulOverlayHandle] , identifier[eInputMethod] ): literal[string] identifier[fn] = identifier[self] . identifier[function_table] . identifier[setOverlayInputMethod] identifier[result] = identifier[fn] ( identifier[ulOverlayHandle] , identifier[eInputMethod] ) keyword[return] identifier[result]
def setOverlayInputMethod(self, ulOverlayHandle, eInputMethod): """Sets the input settings for the specified overlay.""" fn = self.function_table.setOverlayInputMethod result = fn(ulOverlayHandle, eInputMethod) return result
def register_magics(store_name='_ampl_cells', ampl_object=None):
    """
    Register jupyter notebook magics ``%%ampl`` and ``%%ampl_eval``.

    Args:
        store_name: Name of the store where ``%%ampl cells`` will be stored.

        ampl_object: Object used to evaluate ``%%ampl_eval`` cells.
    """
    # Imported lazily so this module can be imported outside of IPython;
    # the function itself only works inside a running IPython/Jupyter shell.
    from IPython.core.magic import (
        Magics, magics_class, cell_magic, line_magic
    )

    @magics_class
    class StoreAMPL(Magics):
        def __init__(self, shell=None, **kwargs):
            Magics.__init__(self, shell=shell, **kwargs)
            self._store = []
            # Expose the (mutable) store in the user namespace under
            # `store_name`, so notebook code can read accumulated cells.
            shell.user_ns[store_name] = self._store

        @cell_magic
        def ampl(self, line, cell):
            """Store the cell in the store"""
            self._store.append(cell)

        @cell_magic
        def ampl_eval(self, line, cell):
            """Evaluate the cell"""
            # `ampl_object` is captured from the enclosing call and must
            # provide an ``eval`` method that accepts the cell text.
            ampl_object.eval(cell)

        @line_magic
        def get_ampl(self, line):
            """Retrieve the store"""
            return self._store

    # ``get_ipython`` is injected by the IPython runtime; registering the
    # class activates the magics for the current session.
    get_ipython().register_magics(StoreAMPL)
def function[register_magics, parameter[store_name, ampl_object]]: constant[ Register jupyter notebook magics ``%%ampl`` and ``%%ampl_eval``. Args: store_name: Name of the store where ``%%ampl cells`` will be stored. ampl_object: Object used to evaluate ``%%ampl_eval`` cells. ] from relative_module[IPython.core.magic] import module[Magics], module[magics_class], module[cell_magic], module[line_magic] class class[StoreAMPL, parameter[]] begin[:] def function[__init__, parameter[self, shell]]: call[name[Magics].__init__, parameter[name[self]]] name[self]._store assign[=] list[[]] call[name[shell].user_ns][name[store_name]] assign[=] name[self]._store def function[ampl, parameter[self, line, cell]]: constant[Store the cell in the store] call[name[self]._store.append, parameter[name[cell]]] def function[ampl_eval, parameter[self, line, cell]]: constant[Evaluate the cell] call[name[ampl_object].eval, parameter[name[cell]]] def function[get_ampl, parameter[self, line]]: constant[Retrieve the store] return[name[self]._store] call[call[name[get_ipython], parameter[]].register_magics, parameter[name[StoreAMPL]]]
keyword[def] identifier[register_magics] ( identifier[store_name] = literal[string] , identifier[ampl_object] = keyword[None] ): literal[string] keyword[from] identifier[IPython] . identifier[core] . identifier[magic] keyword[import] ( identifier[Magics] , identifier[magics_class] , identifier[cell_magic] , identifier[line_magic] ) @ identifier[magics_class] keyword[class] identifier[StoreAMPL] ( identifier[Magics] ): keyword[def] identifier[__init__] ( identifier[self] , identifier[shell] = keyword[None] ,** identifier[kwargs] ): identifier[Magics] . identifier[__init__] ( identifier[self] , identifier[shell] = identifier[shell] ,** identifier[kwargs] ) identifier[self] . identifier[_store] =[] identifier[shell] . identifier[user_ns] [ identifier[store_name] ]= identifier[self] . identifier[_store] @ identifier[cell_magic] keyword[def] identifier[ampl] ( identifier[self] , identifier[line] , identifier[cell] ): literal[string] identifier[self] . identifier[_store] . identifier[append] ( identifier[cell] ) @ identifier[cell_magic] keyword[def] identifier[ampl_eval] ( identifier[self] , identifier[line] , identifier[cell] ): literal[string] identifier[ampl_object] . identifier[eval] ( identifier[cell] ) @ identifier[line_magic] keyword[def] identifier[get_ampl] ( identifier[self] , identifier[line] ): literal[string] keyword[return] identifier[self] . identifier[_store] identifier[get_ipython] (). identifier[register_magics] ( identifier[StoreAMPL] )
def register_magics(store_name='_ampl_cells', ampl_object=None): """ Register jupyter notebook magics ``%%ampl`` and ``%%ampl_eval``. Args: store_name: Name of the store where ``%%ampl cells`` will be stored. ampl_object: Object used to evaluate ``%%ampl_eval`` cells. """ from IPython.core.magic import Magics, magics_class, cell_magic, line_magic @magics_class class StoreAMPL(Magics): def __init__(self, shell=None, **kwargs): Magics.__init__(self, shell=shell, **kwargs) self._store = [] shell.user_ns[store_name] = self._store @cell_magic def ampl(self, line, cell): """Store the cell in the store""" self._store.append(cell) @cell_magic def ampl_eval(self, line, cell): """Evaluate the cell""" ampl_object.eval(cell) @line_magic def get_ampl(self, line): """Retrieve the store""" return self._store get_ipython().register_magics(StoreAMPL)
def _error_message(self): """Try and extract the error message from a HTTP error response. :rtype: str """ body = self._deserialize() return body.get('message', body) if isinstance(body, dict) else body
def function[_error_message, parameter[self]]: constant[Try and extract the error message from a HTTP error response. :rtype: str ] variable[body] assign[=] call[name[self]._deserialize, parameter[]] return[<ast.IfExp object at 0x7da20c7caec0>]
keyword[def] identifier[_error_message] ( identifier[self] ): literal[string] identifier[body] = identifier[self] . identifier[_deserialize] () keyword[return] identifier[body] . identifier[get] ( literal[string] , identifier[body] ) keyword[if] identifier[isinstance] ( identifier[body] , identifier[dict] ) keyword[else] identifier[body]
def _error_message(self): """Try and extract the error message from a HTTP error response. :rtype: str """ body = self._deserialize() return body.get('message', body) if isinstance(body, dict) else body
def do_show_logical_switch(self, line):
    """show_logical_switch <peer> <logical switch>

    Prints the id, datapath-id, queues and ports of the named logical
    switch on the given peer, via ``self._request``.
    """

    def f(p, args):
        # args must be exactly a 1-tuple: (logical-switch-id,).
        # Catch only the unpacking failures; the previous bare ``except``
        # swallowed every exception (including KeyboardInterrupt).
        try:
            (lsw,) = args
        except (TypeError, ValueError):
            print("argument error")
            return
        o = p.get()
        for s in o.logical_switches.switch:
            if s.id != lsw:
                continue
            print(s.id)
            print('datapath-id %s' % s.datapath_id)
            if s.resources.queue:
                print('queues:')
                for q in s.resources.queue:
                    print('\t %s' % q)
            if s.resources.port:
                print('ports:')
                # renamed from ``p`` to avoid shadowing the peer argument
                for prt in s.resources.port:
                    print('\t %s' % prt)

    self._request(line, f)
def function[do_show_logical_switch, parameter[self, line]]: constant[show_logical_switch <peer> <logical switch> ] def function[f, parameter[p, args]]: <ast.Try object at 0x7da1b1b0c8b0> variable[o] assign[=] call[name[p].get, parameter[]] for taget[name[s]] in starred[name[o].logical_switches.switch] begin[:] if compare[name[s].id not_equal[!=] name[lsw]] begin[:] continue call[name[print], parameter[name[s].id]] call[name[print], parameter[binary_operation[constant[datapath-id %s] <ast.Mod object at 0x7da2590d6920> name[s].datapath_id]]] if name[s].resources.queue begin[:] call[name[print], parameter[constant[queues:]]] for taget[name[q]] in starred[name[s].resources.queue] begin[:] call[name[print], parameter[binary_operation[constant[ %s] <ast.Mod object at 0x7da2590d6920> name[q]]]] if name[s].resources.port begin[:] call[name[print], parameter[constant[ports:]]] for taget[name[p]] in starred[name[s].resources.port] begin[:] call[name[print], parameter[binary_operation[constant[ %s] <ast.Mod object at 0x7da2590d6920> name[p]]]] call[name[self]._request, parameter[name[line], name[f]]]
keyword[def] identifier[do_show_logical_switch] ( identifier[self] , identifier[line] ): literal[string] keyword[def] identifier[f] ( identifier[p] , identifier[args] ): keyword[try] : ( identifier[lsw] ,)= identifier[args] keyword[except] : identifier[print] ( literal[string] ) keyword[return] identifier[o] = identifier[p] . identifier[get] () keyword[for] identifier[s] keyword[in] identifier[o] . identifier[logical_switches] . identifier[switch] : keyword[if] identifier[s] . identifier[id] != identifier[lsw] : keyword[continue] identifier[print] ( identifier[s] . identifier[id] ) identifier[print] ( literal[string] % identifier[s] . identifier[datapath_id] ) keyword[if] identifier[s] . identifier[resources] . identifier[queue] : identifier[print] ( literal[string] ) keyword[for] identifier[q] keyword[in] identifier[s] . identifier[resources] . identifier[queue] : identifier[print] ( literal[string] % identifier[q] ) keyword[if] identifier[s] . identifier[resources] . identifier[port] : identifier[print] ( literal[string] ) keyword[for] identifier[p] keyword[in] identifier[s] . identifier[resources] . identifier[port] : identifier[print] ( literal[string] % identifier[p] ) identifier[self] . identifier[_request] ( identifier[line] , identifier[f] )
def do_show_logical_switch(self, line): """show_logical_switch <peer> <logical switch> """ def f(p, args): try: (lsw,) = args # depends on [control=['try'], data=[]] except: print('argument error') return # depends on [control=['except'], data=[]] o = p.get() for s in o.logical_switches.switch: if s.id != lsw: continue # depends on [control=['if'], data=[]] print(s.id) print('datapath-id %s' % s.datapath_id) if s.resources.queue: print('queues:') for q in s.resources.queue: print('\t %s' % q) # depends on [control=['for'], data=['q']] # depends on [control=['if'], data=[]] if s.resources.port: print('ports:') for p in s.resources.port: print('\t %s' % p) # depends on [control=['for'], data=['p']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['s']] self._request(line, f)
def _gen_md5_filehash(fname, *args):
    '''
    helper function to generate a md5 hash of the swagger definition file

    any extra argument passed to the function is converted to a string
    and participates in the hash calculation
    '''
    digest = hashlib.md5()
    # Read the file in 4 KiB chunks so large files are not slurped at once.
    with salt.utils.files.fopen(fname, 'rb') as fhandle:
        while True:
            chunk = fhandle.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    # Fold any extra arguments (stringified, then byte-encoded) into the hash.
    for extra_arg in args:
        digest.update(six.b(str(extra_arg)))
    return digest.hexdigest()
def function[_gen_md5_filehash, parameter[fname]]: constant[ helper function to generate a md5 hash of the swagger definition file any extra argument passed to the function is converted to a string and participates in the hash calculation ] variable[_hash] assign[=] call[name[hashlib].md5, parameter[]] with call[name[salt].utils.files.fopen, parameter[name[fname], constant[rb]]] begin[:] for taget[name[chunk]] in starred[call[name[iter], parameter[<ast.Lambda object at 0x7da1b217b2e0>, constant[b'']]]] begin[:] call[name[_hash].update, parameter[name[chunk]]] for taget[name[extra_arg]] in starred[name[args]] begin[:] call[name[_hash].update, parameter[call[name[six].b, parameter[call[name[str], parameter[name[extra_arg]]]]]]] return[call[name[_hash].hexdigest, parameter[]]]
keyword[def] identifier[_gen_md5_filehash] ( identifier[fname] ,* identifier[args] ): literal[string] identifier[_hash] = identifier[hashlib] . identifier[md5] () keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[fname] , literal[string] ) keyword[as] identifier[f] : keyword[for] identifier[chunk] keyword[in] identifier[iter] ( keyword[lambda] : identifier[f] . identifier[read] ( literal[int] ), literal[string] ): identifier[_hash] . identifier[update] ( identifier[chunk] ) keyword[for] identifier[extra_arg] keyword[in] identifier[args] : identifier[_hash] . identifier[update] ( identifier[six] . identifier[b] ( identifier[str] ( identifier[extra_arg] ))) keyword[return] identifier[_hash] . identifier[hexdigest] ()
def _gen_md5_filehash(fname, *args): """ helper function to generate a md5 hash of the swagger definition file any extra argument passed to the function is converted to a string and participates in the hash calculation """ _hash = hashlib.md5() with salt.utils.files.fopen(fname, 'rb') as f: for chunk in iter(lambda : f.read(4096), b''): _hash.update(chunk) # depends on [control=['for'], data=['chunk']] # depends on [control=['with'], data=['f']] for extra_arg in args: _hash.update(six.b(str(extra_arg))) # depends on [control=['for'], data=['extra_arg']] return _hash.hexdigest()
def get_option_by_id(cls, option_id, **kwargs):
    """Find Option

    Return single instance of Option by its ID.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.get_option_by_id(option_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str option_id: ID of option to return (required)
    :return: Option
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths delegate to the same helper; when the
    # ``async`` flag is truthy the helper returns the request thread.
    return cls._get_option_by_id_with_http_info(option_id, **kwargs)
def function[get_option_by_id, parameter[cls, option_id]]: constant[Find Option Return single instance of Option by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_option_by_id(option_id, async=True) >>> result = thread.get() :param async bool :param str option_id: ID of option to return (required) :return: Option If the method is called asynchronously, returns the request thread. ] call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True] if call[name[kwargs].get, parameter[constant[async]]] begin[:] return[call[name[cls]._get_option_by_id_with_http_info, parameter[name[option_id]]]]
keyword[def] identifier[get_option_by_id] ( identifier[cls] , identifier[option_id] ,** identifier[kwargs] ): literal[string] identifier[kwargs] [ literal[string] ]= keyword[True] keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ): keyword[return] identifier[cls] . identifier[_get_option_by_id_with_http_info] ( identifier[option_id] ,** identifier[kwargs] ) keyword[else] : ( identifier[data] )= identifier[cls] . identifier[_get_option_by_id_with_http_info] ( identifier[option_id] ,** identifier[kwargs] ) keyword[return] identifier[data]
def get_option_by_id(cls, option_id, **kwargs): """Find Option Return single instance of Option by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_option_by_id(option_id, async=True) >>> result = thread.get() :param async bool :param str option_id: ID of option to return (required) :return: Option If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_option_by_id_with_http_info(option_id, **kwargs) # depends on [control=['if'], data=[]] else: data = cls._get_option_by_id_with_http_info(option_id, **kwargs) return data
def _init_client(self, from_archive=False):
    """Init client

    Builds a :class:`RedmineClient` from this backend's connection
    settings and returns it.
    """
    client = RedmineClient(self.url, self.api_token,
                           self.archive, from_archive)
    return client
def function[_init_client, parameter[self, from_archive]]: constant[Init client] return[call[name[RedmineClient], parameter[name[self].url, name[self].api_token, name[self].archive, name[from_archive]]]]
keyword[def] identifier[_init_client] ( identifier[self] , identifier[from_archive] = keyword[False] ): literal[string] keyword[return] identifier[RedmineClient] ( identifier[self] . identifier[url] , identifier[self] . identifier[api_token] , identifier[self] . identifier[archive] , identifier[from_archive] )
def _init_client(self, from_archive=False): """Init client""" return RedmineClient(self.url, self.api_token, self.archive, from_archive)
def update(self, eid, data, token):
    """
    Update a given Library Entry.

    :param eid str: Entry ID
    :param data dict: Attributes
    :param token str: OAuth token
    :return: True or ServerError
    :rtype: Bool or Exception
    :raises ConnectionError: when the API responds with a non-200 status
    """
    final_dict = {"data": {"id": eid, "type": "libraryEntries",
                           "attributes": data}}
    # Copy the shared default headers: the previous code aliased
    # ``self.header`` and wrote the per-request Authorization token into
    # it, leaking (and persisting) the token on the shared dict.
    final_headers = dict(self.header)
    final_headers['Authorization'] = "Bearer {}".format(token)
    r = requests.patch(self.apiurl + "/library-entries/{}".format(eid),
                       json=final_dict, headers=final_headers)
    if r.status_code != 200:
        raise ConnectionError(r.text)
    return True
def function[update, parameter[self, eid, data, token]]: constant[ Update a given Library Entry. :param eid str: Entry ID :param data dict: Attributes :param token str: OAuth token :return: True or ServerError :rtype: Bool or Exception ] variable[final_dict] assign[=] dictionary[[<ast.Constant object at 0x7da1b0549450>], [<ast.Dict object at 0x7da1b054b4c0>]] variable[final_headers] assign[=] name[self].header call[name[final_headers]][constant[Authorization]] assign[=] call[constant[Bearer {}].format, parameter[name[token]]] variable[r] assign[=] call[name[requests].patch, parameter[binary_operation[name[self].apiurl + call[constant[/library-entries/{}].format, parameter[name[eid]]]]]] if compare[name[r].status_code not_equal[!=] constant[200]] begin[:] <ast.Raise object at 0x7da1b0549510> return[constant[True]]
keyword[def] identifier[update] ( identifier[self] , identifier[eid] , identifier[data] , identifier[token] ): literal[string] identifier[final_dict] ={ literal[string] :{ literal[string] : identifier[eid] , literal[string] : literal[string] , literal[string] : identifier[data] }} identifier[final_headers] = identifier[self] . identifier[header] identifier[final_headers] [ literal[string] ]= literal[string] . identifier[format] ( identifier[token] ) identifier[r] = identifier[requests] . identifier[patch] ( identifier[self] . identifier[apiurl] + literal[string] . identifier[format] ( identifier[eid] ), identifier[json] = identifier[final_dict] , identifier[headers] = identifier[final_headers] ) keyword[if] identifier[r] . identifier[status_code] != literal[int] : keyword[raise] identifier[ConnectionError] ( identifier[r] . identifier[text] ) keyword[return] keyword[True]
def update(self, eid, data, token): """ Update a given Library Entry. :param eid str: Entry ID :param data dict: Attributes :param token str: OAuth token :return: True or ServerError :rtype: Bool or Exception """ final_dict = {'data': {'id': eid, 'type': 'libraryEntries', 'attributes': data}} final_headers = self.header final_headers['Authorization'] = 'Bearer {}'.format(token) r = requests.patch(self.apiurl + '/library-entries/{}'.format(eid), json=final_dict, headers=final_headers) if r.status_code != 200: raise ConnectionError(r.text) # depends on [control=['if'], data=[]] return True
def compare_flat_dicts(i):
    """
    Input:  {
              dict1            - dictionary 1
              dict2            - dictionary 2
              (ignore_case)    - ignore case of letters
              (space_as_none)  - if 'yes', consider "" as None
              (keys_to_ignore) - list of keys to ignore (can be wildcards)
            }

    Output: {
              return  - return code =  0, if successful
                                    >  0, if error
              (error) - error text if return > 0

              equal   - if 'yes' dictionaries are equal
            }
    """
    d1 = i.get('dict1', {})
    d2 = i.get('dict2', {})

    ignore_case = (i.get('ignore_case', '') == 'yes')
    # Value used for absent keys; '' makes "" and missing compare equal.
    missing = '' if i.get('space_as_none', '') == 'yes' else None

    # Union of the two key sets, d1's keys first.
    keys = list(d1.keys())
    keys.extend(k for k in d2 if k not in keys)

    # Drop any key matching one of the ignore patterns (wildcards allowed).
    patterns = i.get('keys_to_ignore', [])
    if len(patterns) > 0:
        import fnmatch
        keys = [k for k in keys
                if not any(fnmatch.fnmatch(k, p) for p in patterns)]

    equal = 'yes'
    for k in keys:
        v1 = d1.get(k, missing)
        v2 = d2.get(k, missing)
        if ignore_case and type(v1) != int and type(v1) != float and type(v1) != bool:
            v1 = v1.lower()
            v2 = v2.lower()
        if v1 != v2:
            equal = 'no'
            break

    return {'return': 0, 'equal': equal}
def function[compare_flat_dicts, parameter[i]]: constant[ Input: { dict1 - dictionary 1 dict2 - dictionary 2 (ignore_case) - ignore case of letters (space_as_none) - if 'yes', consider "" as None (keys_to_ignore) - list of keys to ignore (can be wildcards) } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 equal - if 'yes' dictionaries are equal } ] variable[d1] assign[=] call[name[i].get, parameter[constant[dict1], dictionary[[], []]]] variable[d2] assign[=] call[name[i].get, parameter[constant[dict2], dictionary[[], []]]] variable[equal] assign[=] constant[yes] variable[ic] assign[=] constant[False] variable[x] assign[=] call[name[i].get, parameter[constant[ignore_case], constant[]]] if compare[name[x] equal[==] constant[yes]] begin[:] variable[ic] assign[=] constant[True] variable[san] assign[=] constant[None] variable[x] assign[=] call[name[i].get, parameter[constant[space_as_none], constant[]]] if compare[name[x] equal[==] constant[yes]] begin[:] variable[san] assign[=] constant[] variable[keys] assign[=] call[name[list], parameter[call[name[d1].keys, parameter[]]]] for taget[name[q]] in starred[name[d2]] begin[:] if compare[name[q] <ast.NotIn object at 0x7da2590d7190> name[keys]] begin[:] call[name[keys].append, parameter[name[q]]] variable[kti] assign[=] call[name[i].get, parameter[constant[keys_to_ignore], list[[]]]] if compare[call[name[len], parameter[name[kti]]] greater[>] constant[0]] begin[:] import module[fnmatch] variable[x] assign[=] list[[]] for taget[name[q]] in starred[name[keys]] begin[:] variable[skip] assign[=] constant[False] for taget[name[k]] in starred[name[kti]] begin[:] if call[name[fnmatch].fnmatch, parameter[name[q], name[k]]] begin[:] variable[skip] assign[=] constant[True] if <ast.UnaryOp object at 0x7da1b2273790> begin[:] call[name[x].append, parameter[name[q]]] variable[keys] assign[=] name[x] for taget[name[q]] in starred[name[keys]] begin[:] variable[v1] assign[=] 
call[name[d1].get, parameter[name[q], name[san]]] variable[v2] assign[=] call[name[d2].get, parameter[name[q], name[san]]] if <ast.BoolOp object at 0x7da1b2271270> begin[:] variable[v1] assign[=] call[name[v1].lower, parameter[]] variable[v2] assign[=] call[name[v2].lower, parameter[]] if compare[name[v1] not_equal[!=] name[v2]] begin[:] variable[equal] assign[=] constant[no] break return[dictionary[[<ast.Constant object at 0x7da1b2273af0>, <ast.Constant object at 0x7da1b2270820>], [<ast.Constant object at 0x7da1b2271990>, <ast.Name object at 0x7da1b2271720>]]]
keyword[def] identifier[compare_flat_dicts] ( identifier[i] ): literal[string] identifier[d1] = identifier[i] . identifier[get] ( literal[string] ,{}) identifier[d2] = identifier[i] . identifier[get] ( literal[string] ,{}) identifier[equal] = literal[string] identifier[ic] = keyword[False] identifier[x] = identifier[i] . identifier[get] ( literal[string] , literal[string] ) keyword[if] identifier[x] == literal[string] : identifier[ic] = keyword[True] identifier[san] = keyword[None] identifier[x] = identifier[i] . identifier[get] ( literal[string] , literal[string] ) keyword[if] identifier[x] == literal[string] : identifier[san] = literal[string] identifier[keys] = identifier[list] ( identifier[d1] . identifier[keys] ()) keyword[for] identifier[q] keyword[in] identifier[d2] : keyword[if] identifier[q] keyword[not] keyword[in] identifier[keys] : identifier[keys] . identifier[append] ( identifier[q] ) identifier[kti] = identifier[i] . identifier[get] ( literal[string] ,[]) keyword[if] identifier[len] ( identifier[kti] )> literal[int] : keyword[import] identifier[fnmatch] identifier[x] =[] keyword[for] identifier[q] keyword[in] identifier[keys] : identifier[skip] = keyword[False] keyword[for] identifier[k] keyword[in] identifier[kti] : keyword[if] identifier[fnmatch] . identifier[fnmatch] ( identifier[q] , identifier[k] ): identifier[skip] = keyword[True] keyword[if] keyword[not] identifier[skip] : identifier[x] . identifier[append] ( identifier[q] ) identifier[keys] = identifier[x] keyword[for] identifier[q] keyword[in] identifier[keys] : identifier[v1] = identifier[d1] . identifier[get] ( identifier[q] , identifier[san] ) identifier[v2] = identifier[d2] . 
identifier[get] ( identifier[q] , identifier[san] ) keyword[if] identifier[ic] keyword[and] identifier[type] ( identifier[v1] )!= identifier[int] keyword[and] identifier[type] ( identifier[v1] )!= identifier[float] keyword[and] identifier[type] ( identifier[v1] )!= identifier[bool] : identifier[v1] = identifier[v1] . identifier[lower] () identifier[v2] = identifier[v2] . identifier[lower] () keyword[if] identifier[v1] != identifier[v2] : identifier[equal] = literal[string] keyword[break] keyword[return] { literal[string] : literal[int] , literal[string] : identifier[equal] }
def compare_flat_dicts(i): """ Input: { dict1 - dictionary 1 dict2 - dictionary 2 (ignore_case) - ignore case of letters (space_as_none) - if 'yes', consider "" as None (keys_to_ignore) - list of keys to ignore (can be wildcards) } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 equal - if 'yes' dictionaries are equal } """ d1 = i.get('dict1', {}) d2 = i.get('dict2', {}) equal = 'yes' ic = False x = i.get('ignore_case', '') if x == 'yes': ic = True # depends on [control=['if'], data=[]] san = None x = i.get('space_as_none', '') if x == 'yes': san = '' # depends on [control=['if'], data=[]] # Create common set of keys keys = list(d1.keys()) for q in d2: if q not in keys: keys.append(q) # depends on [control=['if'], data=['q', 'keys']] # depends on [control=['for'], data=['q']] # If keys to ignore kti = i.get('keys_to_ignore', []) if len(kti) > 0: import fnmatch x = [] for q in keys: skip = False for k in kti: if fnmatch.fnmatch(q, k): skip = True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['k']] if not skip: x.append(q) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['q']] keys = x # depends on [control=['if'], data=[]] # Compare all keys for q in keys: v1 = d1.get(q, san) v2 = d2.get(q, san) if ic and type(v1) != int and (type(v1) != float) and (type(v1) != bool): v1 = v1.lower() v2 = v2.lower() # depends on [control=['if'], data=[]] if v1 != v2: equal = 'no' break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['q']] return {'return': 0, 'equal': equal}
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'text') and self.text is not None: _dict['text'] = self.text return _dict
def function[_to_dict, parameter[self]]: constant[Return a json dictionary representing this model.] variable[_dict] assign[=] dictionary[[], []] if <ast.BoolOp object at 0x7da1b2347070> begin[:] call[name[_dict]][constant[text]] assign[=] name[self].text return[name[_dict]]
keyword[def] identifier[_to_dict] ( identifier[self] ): literal[string] identifier[_dict] ={} keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[text] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[self] . identifier[text] keyword[return] identifier[_dict]
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'text') and self.text is not None: _dict['text'] = self.text # depends on [control=['if'], data=[]] return _dict
def wireshark(pktlist, wait=False, **kwargs):
    """
    Runs Wireshark on a list of packets.

    See :func:`tcpdump` for more parameter description.

    Note: this defaults to wait=False, to run Wireshark in the background.
    """
    # Same machinery as tcpdump(), just pointed at the Wireshark binary.
    prog_path = conf.prog.wireshark
    return tcpdump(pktlist, prog=prog_path, wait=wait, **kwargs)
def function[wireshark, parameter[pktlist, wait]]: constant[ Runs Wireshark on a list of packets. See :func:`tcpdump` for more parameter description. Note: this defaults to wait=False, to run Wireshark in the background. ] return[call[name[tcpdump], parameter[name[pktlist]]]]
keyword[def] identifier[wireshark] ( identifier[pktlist] , identifier[wait] = keyword[False] ,** identifier[kwargs] ): literal[string] keyword[return] identifier[tcpdump] ( identifier[pktlist] , identifier[prog] = identifier[conf] . identifier[prog] . identifier[wireshark] , identifier[wait] = identifier[wait] ,** identifier[kwargs] )
def wireshark(pktlist, wait=False, **kwargs): """ Runs Wireshark on a list of packets. See :func:`tcpdump` for more parameter description. Note: this defaults to wait=False, to run Wireshark in the background. """ return tcpdump(pktlist, prog=conf.prog.wireshark, wait=wait, **kwargs)
def create_issue_link(self, link_type, inwardissue, outwardissue, comment=None):
    """
    Create a link between two issues.

        Arguments:
            |  link_type (string)    |  The type of link                              |
            |  inwardissue (string)  |  The issue to link from                        |
            |  outwardissue (string) |  The issue to link to                          |
            |  comment (string)      |  (Optional) A comment to add when joining issues |

        Example:
            | *Keyword*         |  *Parameters*  |              |          |
            | connect to jira   |  asimmons      |  options= {'http://devjira01'} | |
            | ${issue}          |  create issue  |  ${issue_field_dict}           | True |
            | create issue link |  relates to    |  ${issue}    | PROJ-385 |
    """
    # The ``comment`` argument was previously accepted and documented but
    # never forwarded; pass it through when given. The jira client expects
    # a dict with a 'body' field, so wrap plain strings.
    extra = {}
    if comment is not None:
        extra['comment'] = {'body': comment} if isinstance(comment, str) else comment
    self.jira.create_issue_link(type=link_type,
                                inwardIssue=str(inwardissue),
                                outwardIssue=str(outwardissue),
                                **extra)
def function[create_issue_link, parameter[self, link_type, inwardissue, outwardissue, comment]]: constant[ Create a link between two issues. Arguments: | link_type (string) | The type of link | | inwardissue (string) | The issue to link from | | outwardissue (string) | The issue to link to | | comment (string) | (Optional) A comment to add when joining issues | Example: | *Keyword* | *Parameters* | | | | connect to jira | asimmons | options= {'http://devjira01'} | | | ${issue} | create issue | ${issue_field_dict} | True | | create issue link | relates to | ${issue} | PROJ-385 | ] call[name[self].jira.create_issue_link, parameter[]]
keyword[def] identifier[create_issue_link] ( identifier[self] , identifier[link_type] , identifier[inwardissue] , identifier[outwardissue] , identifier[comment] = keyword[None] ): literal[string] identifier[self] . identifier[jira] . identifier[create_issue_link] ( identifier[type] = identifier[link_type] , identifier[inwardIssue] = identifier[str] ( identifier[inwardissue] ), identifier[outwardIssue] = identifier[str] ( identifier[outwardissue] ))
def create_issue_link(self, link_type, inwardissue, outwardissue, comment=None): """ Create a link between two issues. Arguments: | link_type (string) | The type of link | | inwardissue (string) | The issue to link from | | outwardissue (string) | The issue to link to | | comment (string) | (Optional) A comment to add when joining issues | Example: | *Keyword* | *Parameters* | | | | connect to jira | asimmons | options= {'http://devjira01'} | | | ${issue} | create issue | ${issue_field_dict} | True | | create issue link | relates to | ${issue} | PROJ-385 | """ self.jira.create_issue_link(type=link_type, inwardIssue=str(inwardissue), outwardIssue=str(outwardissue))
def as_dict(self, use_preliminary=False): """Create a copy of the config in form of a dict :param bool use_preliminary: Whether to include the preliminary config :return: A dict with the copy of the config :rtype: dict """ config = dict() for key in self.config.keys: if use_preliminary and key in self.preliminary_config: value = self.preliminary_config[key] else: value = self.config.get_config_value(key) config[key] = value return config
def function[as_dict, parameter[self, use_preliminary]]: constant[Create a copy of the config in form of a dict :param bool use_preliminary: Whether to include the preliminary config :return: A dict with the copy of the config :rtype: dict ] variable[config] assign[=] call[name[dict], parameter[]] for taget[name[key]] in starred[name[self].config.keys] begin[:] if <ast.BoolOp object at 0x7da2041d9fc0> begin[:] variable[value] assign[=] call[name[self].preliminary_config][name[key]] call[name[config]][name[key]] assign[=] name[value] return[name[config]]
keyword[def] identifier[as_dict] ( identifier[self] , identifier[use_preliminary] = keyword[False] ): literal[string] identifier[config] = identifier[dict] () keyword[for] identifier[key] keyword[in] identifier[self] . identifier[config] . identifier[keys] : keyword[if] identifier[use_preliminary] keyword[and] identifier[key] keyword[in] identifier[self] . identifier[preliminary_config] : identifier[value] = identifier[self] . identifier[preliminary_config] [ identifier[key] ] keyword[else] : identifier[value] = identifier[self] . identifier[config] . identifier[get_config_value] ( identifier[key] ) identifier[config] [ identifier[key] ]= identifier[value] keyword[return] identifier[config]
def as_dict(self, use_preliminary=False): """Create a copy of the config in form of a dict :param bool use_preliminary: Whether to include the preliminary config :return: A dict with the copy of the config :rtype: dict """ config = dict() for key in self.config.keys: if use_preliminary and key in self.preliminary_config: value = self.preliminary_config[key] # depends on [control=['if'], data=[]] else: value = self.config.get_config_value(key) config[key] = value # depends on [control=['for'], data=['key']] return config
def get_ordered_list_type(meta_data, numId, ilvl): """ Return the list type. If numId or ilvl not in the numbering dict then default to returning decimal. This function only cares about ordered lists, unordered lists get dealt with elsewhere. """ # Early return if numId or ilvl are not valid numbering_dict = meta_data.numbering_dict if numId not in numbering_dict: return DEFAULT_LIST_NUMBERING_STYLE if ilvl not in numbering_dict[numId]: return DEFAULT_LIST_NUMBERING_STYLE return meta_data.numbering_dict[numId][ilvl]
def function[get_ordered_list_type, parameter[meta_data, numId, ilvl]]: constant[ Return the list type. If numId or ilvl not in the numbering dict then default to returning decimal. This function only cares about ordered lists, unordered lists get dealt with elsewhere. ] variable[numbering_dict] assign[=] name[meta_data].numbering_dict if compare[name[numId] <ast.NotIn object at 0x7da2590d7190> name[numbering_dict]] begin[:] return[name[DEFAULT_LIST_NUMBERING_STYLE]] if compare[name[ilvl] <ast.NotIn object at 0x7da2590d7190> call[name[numbering_dict]][name[numId]]] begin[:] return[name[DEFAULT_LIST_NUMBERING_STYLE]] return[call[call[name[meta_data].numbering_dict][name[numId]]][name[ilvl]]]
keyword[def] identifier[get_ordered_list_type] ( identifier[meta_data] , identifier[numId] , identifier[ilvl] ): literal[string] identifier[numbering_dict] = identifier[meta_data] . identifier[numbering_dict] keyword[if] identifier[numId] keyword[not] keyword[in] identifier[numbering_dict] : keyword[return] identifier[DEFAULT_LIST_NUMBERING_STYLE] keyword[if] identifier[ilvl] keyword[not] keyword[in] identifier[numbering_dict] [ identifier[numId] ]: keyword[return] identifier[DEFAULT_LIST_NUMBERING_STYLE] keyword[return] identifier[meta_data] . identifier[numbering_dict] [ identifier[numId] ][ identifier[ilvl] ]
def get_ordered_list_type(meta_data, numId, ilvl): """ Return the list type. If numId or ilvl not in the numbering dict then default to returning decimal. This function only cares about ordered lists, unordered lists get dealt with elsewhere. """ # Early return if numId or ilvl are not valid numbering_dict = meta_data.numbering_dict if numId not in numbering_dict: return DEFAULT_LIST_NUMBERING_STYLE # depends on [control=['if'], data=[]] if ilvl not in numbering_dict[numId]: return DEFAULT_LIST_NUMBERING_STYLE # depends on [control=['if'], data=[]] return meta_data.numbering_dict[numId][ilvl]
def notes(self): """Query for notes attached to this incident.""" endpoint = '/'.join((self.endpoint, self.id, 'notes')) return self.noteFactory.find( endpoint=endpoint, api_key=self.api_key, )
def function[notes, parameter[self]]: constant[Query for notes attached to this incident.] variable[endpoint] assign[=] call[constant[/].join, parameter[tuple[[<ast.Attribute object at 0x7da1b06fd120>, <ast.Attribute object at 0x7da1b06fe5f0>, <ast.Constant object at 0x7da1b06fc820>]]]] return[call[name[self].noteFactory.find, parameter[]]]
keyword[def] identifier[notes] ( identifier[self] ): literal[string] identifier[endpoint] = literal[string] . identifier[join] (( identifier[self] . identifier[endpoint] , identifier[self] . identifier[id] , literal[string] )) keyword[return] identifier[self] . identifier[noteFactory] . identifier[find] ( identifier[endpoint] = identifier[endpoint] , identifier[api_key] = identifier[self] . identifier[api_key] , )
def notes(self): """Query for notes attached to this incident.""" endpoint = '/'.join((self.endpoint, self.id, 'notes')) return self.noteFactory.find(endpoint=endpoint, api_key=self.api_key)
def score(self, Z): """Applies transforms to the data, and the score method of the final estimator. Valid only if the final estimator implements score.""" Zt = Z for name, transform in self.steps[:-1]: Zt = transform.transform(Zt) return self.steps[-1][-1].score(Zt)
def function[score, parameter[self, Z]]: constant[Applies transforms to the data, and the score method of the final estimator. Valid only if the final estimator implements score.] variable[Zt] assign[=] name[Z] for taget[tuple[[<ast.Name object at 0x7da18ede49a0>, <ast.Name object at 0x7da18ede5480>]]] in starred[call[name[self].steps][<ast.Slice object at 0x7da18ede6f80>]] begin[:] variable[Zt] assign[=] call[name[transform].transform, parameter[name[Zt]]] return[call[call[call[name[self].steps][<ast.UnaryOp object at 0x7da18dc05bd0>]][<ast.UnaryOp object at 0x7da18dc05e70>].score, parameter[name[Zt]]]]
keyword[def] identifier[score] ( identifier[self] , identifier[Z] ): literal[string] identifier[Zt] = identifier[Z] keyword[for] identifier[name] , identifier[transform] keyword[in] identifier[self] . identifier[steps] [:- literal[int] ]: identifier[Zt] = identifier[transform] . identifier[transform] ( identifier[Zt] ) keyword[return] identifier[self] . identifier[steps] [- literal[int] ][- literal[int] ]. identifier[score] ( identifier[Zt] )
def score(self, Z): """Applies transforms to the data, and the score method of the final estimator. Valid only if the final estimator implements score.""" Zt = Z for (name, transform) in self.steps[:-1]: Zt = transform.transform(Zt) # depends on [control=['for'], data=[]] return self.steps[-1][-1].score(Zt)
def _posix_split_name(self, name): """Split a name longer than 100 chars into a prefix and a name part. """ prefix = name[:LENGTH_PREFIX + 1] while prefix and prefix[-1] != "/": prefix = prefix[:-1] name = name[len(prefix):] prefix = prefix[:-1] if not prefix or len(name) > LENGTH_NAME: raise ValueError("name is too long") return prefix, name
def function[_posix_split_name, parameter[self, name]]: constant[Split a name longer than 100 chars into a prefix and a name part. ] variable[prefix] assign[=] call[name[name]][<ast.Slice object at 0x7da18bccafb0>] while <ast.BoolOp object at 0x7da18bccb1c0> begin[:] variable[prefix] assign[=] call[name[prefix]][<ast.Slice object at 0x7da18bcc9000>] variable[name] assign[=] call[name[name]][<ast.Slice object at 0x7da1b1ea1570>] variable[prefix] assign[=] call[name[prefix]][<ast.Slice object at 0x7da1b1ea16c0>] if <ast.BoolOp object at 0x7da1b1ea2da0> begin[:] <ast.Raise object at 0x7da1b1ea0b20> return[tuple[[<ast.Name object at 0x7da1b1ea3520>, <ast.Name object at 0x7da1b1ea12a0>]]]
keyword[def] identifier[_posix_split_name] ( identifier[self] , identifier[name] ): literal[string] identifier[prefix] = identifier[name] [: identifier[LENGTH_PREFIX] + literal[int] ] keyword[while] identifier[prefix] keyword[and] identifier[prefix] [- literal[int] ]!= literal[string] : identifier[prefix] = identifier[prefix] [:- literal[int] ] identifier[name] = identifier[name] [ identifier[len] ( identifier[prefix] ):] identifier[prefix] = identifier[prefix] [:- literal[int] ] keyword[if] keyword[not] identifier[prefix] keyword[or] identifier[len] ( identifier[name] )> identifier[LENGTH_NAME] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[return] identifier[prefix] , identifier[name]
def _posix_split_name(self, name): """Split a name longer than 100 chars into a prefix and a name part. """ prefix = name[:LENGTH_PREFIX + 1] while prefix and prefix[-1] != '/': prefix = prefix[:-1] # depends on [control=['while'], data=[]] name = name[len(prefix):] prefix = prefix[:-1] if not prefix or len(name) > LENGTH_NAME: raise ValueError('name is too long') # depends on [control=['if'], data=[]] return (prefix, name)
def fn_signature(callable, argument_transform=(lambda name: name), default_transform=(lambda name, value: "%s=%s" % (name, repr(value))), vararg_transform=(lambda name: "*" + name), kwargs_transform=(lambda name: "**" + name)): """Returns the signature of the provided callable as a tuple of strings.""" signature = [] fn = get_fn(callable) avail_ac = fn_available_argcount(fn) kwargs = fn_kwargs(fn) argnames = fn_argnames(fn) for name in stop_at(argnames, avail_ac): if name in kwargs: signature.append(default_transform(name, kwargs[name])) else: signature.append(argument_transform(name)) if fn_has_args(fn): if fn_has_kwargs(fn): signature.append(vararg_transform(argnames[-2])) signature.append(kwargs_transform(argnames[-1])) else: signature.append(vararg_transform(argnames[-1])) elif fn_has_kwargs(fn): signature.append(kwargs_transform(argnames[-1])) return signature
def function[fn_signature, parameter[callable, argument_transform, default_transform, vararg_transform, kwargs_transform]]: constant[Returns the signature of the provided callable as a tuple of strings.] variable[signature] assign[=] list[[]] variable[fn] assign[=] call[name[get_fn], parameter[name[callable]]] variable[avail_ac] assign[=] call[name[fn_available_argcount], parameter[name[fn]]] variable[kwargs] assign[=] call[name[fn_kwargs], parameter[name[fn]]] variable[argnames] assign[=] call[name[fn_argnames], parameter[name[fn]]] for taget[name[name]] in starred[call[name[stop_at], parameter[name[argnames], name[avail_ac]]]] begin[:] if compare[name[name] in name[kwargs]] begin[:] call[name[signature].append, parameter[call[name[default_transform], parameter[name[name], call[name[kwargs]][name[name]]]]]] if call[name[fn_has_args], parameter[name[fn]]] begin[:] if call[name[fn_has_kwargs], parameter[name[fn]]] begin[:] call[name[signature].append, parameter[call[name[vararg_transform], parameter[call[name[argnames]][<ast.UnaryOp object at 0x7da18f810fd0>]]]]] call[name[signature].append, parameter[call[name[kwargs_transform], parameter[call[name[argnames]][<ast.UnaryOp object at 0x7da18f812260>]]]]] return[name[signature]]
keyword[def] identifier[fn_signature] ( identifier[callable] , identifier[argument_transform] =( keyword[lambda] identifier[name] : identifier[name] ), identifier[default_transform] =( keyword[lambda] identifier[name] , identifier[value] : literal[string] % ( identifier[name] , identifier[repr] ( identifier[value] ))), identifier[vararg_transform] =( keyword[lambda] identifier[name] : literal[string] + identifier[name] ), identifier[kwargs_transform] =( keyword[lambda] identifier[name] : literal[string] + identifier[name] )): literal[string] identifier[signature] =[] identifier[fn] = identifier[get_fn] ( identifier[callable] ) identifier[avail_ac] = identifier[fn_available_argcount] ( identifier[fn] ) identifier[kwargs] = identifier[fn_kwargs] ( identifier[fn] ) identifier[argnames] = identifier[fn_argnames] ( identifier[fn] ) keyword[for] identifier[name] keyword[in] identifier[stop_at] ( identifier[argnames] , identifier[avail_ac] ): keyword[if] identifier[name] keyword[in] identifier[kwargs] : identifier[signature] . identifier[append] ( identifier[default_transform] ( identifier[name] , identifier[kwargs] [ identifier[name] ])) keyword[else] : identifier[signature] . identifier[append] ( identifier[argument_transform] ( identifier[name] )) keyword[if] identifier[fn_has_args] ( identifier[fn] ): keyword[if] identifier[fn_has_kwargs] ( identifier[fn] ): identifier[signature] . identifier[append] ( identifier[vararg_transform] ( identifier[argnames] [- literal[int] ])) identifier[signature] . identifier[append] ( identifier[kwargs_transform] ( identifier[argnames] [- literal[int] ])) keyword[else] : identifier[signature] . identifier[append] ( identifier[vararg_transform] ( identifier[argnames] [- literal[int] ])) keyword[elif] identifier[fn_has_kwargs] ( identifier[fn] ): identifier[signature] . identifier[append] ( identifier[kwargs_transform] ( identifier[argnames] [- literal[int] ])) keyword[return] identifier[signature]
def fn_signature(callable, argument_transform=lambda name: name, default_transform=lambda name, value: '%s=%s' % (name, repr(value)), vararg_transform=lambda name: '*' + name, kwargs_transform=lambda name: '**' + name): """Returns the signature of the provided callable as a tuple of strings.""" signature = [] fn = get_fn(callable) avail_ac = fn_available_argcount(fn) kwargs = fn_kwargs(fn) argnames = fn_argnames(fn) for name in stop_at(argnames, avail_ac): if name in kwargs: signature.append(default_transform(name, kwargs[name])) # depends on [control=['if'], data=['name', 'kwargs']] else: signature.append(argument_transform(name)) # depends on [control=['for'], data=['name']] if fn_has_args(fn): if fn_has_kwargs(fn): signature.append(vararg_transform(argnames[-2])) signature.append(kwargs_transform(argnames[-1])) # depends on [control=['if'], data=[]] else: signature.append(vararg_transform(argnames[-1])) # depends on [control=['if'], data=[]] elif fn_has_kwargs(fn): signature.append(kwargs_transform(argnames[-1])) # depends on [control=['if'], data=[]] return signature
def execute_async(self, table_name=None, table_mode='create', use_cache=True, priority='interactive', allow_large_results=False, dialect=None, billing_tier=None): """ Initiate the query and return a QueryJob. Args: table_name: the result table name as a string or TableName; if None (the default), then a temporary table will be used. table_mode: one of 'create', 'overwrite' or 'append'. If 'create' (the default), the request will fail if the table exists. use_cache: whether to use past query results or ignore cache. Has no effect if destination is specified (default True). priority:one of 'batch' or 'interactive' (default). 'interactive' jobs should be scheduled to run quickly but are subject to rate limits; 'batch' jobs could be delayed by as much as three hours but are not rate-limited. allow_large_results: whether to allow large results; i.e. compressed data over 100MB. This is slower and requires a table_name to be specified) (default False). dialect : {'legacy', 'standard'}, default 'legacy' 'legacy' : Use BigQuery's legacy SQL dialect. 'standard' : Use BigQuery's standard SQL (beta), which is compliant with the SQL 2011 standard. billing_tier: Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default. This can also be used to override your project-wide default billing tier on a per-query basis. Returns: A QueryJob. Raises: Exception if query could not be executed. 
""" batch = priority == 'low' append = table_mode == 'append' overwrite = table_mode == 'overwrite' if table_name is not None: table_name = _utils.parse_table_name(table_name, self._api.project_id) try: query_result = self._api.jobs_insert_query(self._sql, self._code, self._imports, table_name=table_name, append=append, overwrite=overwrite, use_cache=use_cache, batch=batch, allow_large_results=allow_large_results, table_definitions=self._external_tables, dialect=dialect, billing_tier=billing_tier) except Exception as e: raise e if 'jobReference' not in query_result: raise Exception('Unexpected response from server') job_id = query_result['jobReference']['jobId'] if not table_name: try: destination = query_result['configuration']['query']['destinationTable'] table_name = (destination['projectId'], destination['datasetId'], destination['tableId']) except KeyError: # The query was in error raise Exception(_utils.format_query_errors(query_result['status']['errors'])) return _query_job.QueryJob(job_id, table_name, self._sql, context=self._context)
def function[execute_async, parameter[self, table_name, table_mode, use_cache, priority, allow_large_results, dialect, billing_tier]]: constant[ Initiate the query and return a QueryJob. Args: table_name: the result table name as a string or TableName; if None (the default), then a temporary table will be used. table_mode: one of 'create', 'overwrite' or 'append'. If 'create' (the default), the request will fail if the table exists. use_cache: whether to use past query results or ignore cache. Has no effect if destination is specified (default True). priority:one of 'batch' or 'interactive' (default). 'interactive' jobs should be scheduled to run quickly but are subject to rate limits; 'batch' jobs could be delayed by as much as three hours but are not rate-limited. allow_large_results: whether to allow large results; i.e. compressed data over 100MB. This is slower and requires a table_name to be specified) (default False). dialect : {'legacy', 'standard'}, default 'legacy' 'legacy' : Use BigQuery's legacy SQL dialect. 'standard' : Use BigQuery's standard SQL (beta), which is compliant with the SQL 2011 standard. billing_tier: Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default. This can also be used to override your project-wide default billing tier on a per-query basis. Returns: A QueryJob. Raises: Exception if query could not be executed. 
] variable[batch] assign[=] compare[name[priority] equal[==] constant[low]] variable[append] assign[=] compare[name[table_mode] equal[==] constant[append]] variable[overwrite] assign[=] compare[name[table_mode] equal[==] constant[overwrite]] if compare[name[table_name] is_not constant[None]] begin[:] variable[table_name] assign[=] call[name[_utils].parse_table_name, parameter[name[table_name], name[self]._api.project_id]] <ast.Try object at 0x7da20c992a40> if compare[constant[jobReference] <ast.NotIn object at 0x7da2590d7190> name[query_result]] begin[:] <ast.Raise object at 0x7da20c9914e0> variable[job_id] assign[=] call[call[name[query_result]][constant[jobReference]]][constant[jobId]] if <ast.UnaryOp object at 0x7da20c990a90> begin[:] <ast.Try object at 0x7da20c991a80> return[call[name[_query_job].QueryJob, parameter[name[job_id], name[table_name], name[self]._sql]]]
keyword[def] identifier[execute_async] ( identifier[self] , identifier[table_name] = keyword[None] , identifier[table_mode] = literal[string] , identifier[use_cache] = keyword[True] , identifier[priority] = literal[string] , identifier[allow_large_results] = keyword[False] , identifier[dialect] = keyword[None] , identifier[billing_tier] = keyword[None] ): literal[string] identifier[batch] = identifier[priority] == literal[string] identifier[append] = identifier[table_mode] == literal[string] identifier[overwrite] = identifier[table_mode] == literal[string] keyword[if] identifier[table_name] keyword[is] keyword[not] keyword[None] : identifier[table_name] = identifier[_utils] . identifier[parse_table_name] ( identifier[table_name] , identifier[self] . identifier[_api] . identifier[project_id] ) keyword[try] : identifier[query_result] = identifier[self] . identifier[_api] . identifier[jobs_insert_query] ( identifier[self] . identifier[_sql] , identifier[self] . identifier[_code] , identifier[self] . identifier[_imports] , identifier[table_name] = identifier[table_name] , identifier[append] = identifier[append] , identifier[overwrite] = identifier[overwrite] , identifier[use_cache] = identifier[use_cache] , identifier[batch] = identifier[batch] , identifier[allow_large_results] = identifier[allow_large_results] , identifier[table_definitions] = identifier[self] . 
identifier[_external_tables] , identifier[dialect] = identifier[dialect] , identifier[billing_tier] = identifier[billing_tier] ) keyword[except] identifier[Exception] keyword[as] identifier[e] : keyword[raise] identifier[e] keyword[if] literal[string] keyword[not] keyword[in] identifier[query_result] : keyword[raise] identifier[Exception] ( literal[string] ) identifier[job_id] = identifier[query_result] [ literal[string] ][ literal[string] ] keyword[if] keyword[not] identifier[table_name] : keyword[try] : identifier[destination] = identifier[query_result] [ literal[string] ][ literal[string] ][ literal[string] ] identifier[table_name] =( identifier[destination] [ literal[string] ], identifier[destination] [ literal[string] ], identifier[destination] [ literal[string] ]) keyword[except] identifier[KeyError] : keyword[raise] identifier[Exception] ( identifier[_utils] . identifier[format_query_errors] ( identifier[query_result] [ literal[string] ][ literal[string] ])) keyword[return] identifier[_query_job] . identifier[QueryJob] ( identifier[job_id] , identifier[table_name] , identifier[self] . identifier[_sql] , identifier[context] = identifier[self] . identifier[_context] )
def execute_async(self, table_name=None, table_mode='create', use_cache=True, priority='interactive', allow_large_results=False, dialect=None, billing_tier=None): """ Initiate the query and return a QueryJob. Args: table_name: the result table name as a string or TableName; if None (the default), then a temporary table will be used. table_mode: one of 'create', 'overwrite' or 'append'. If 'create' (the default), the request will fail if the table exists. use_cache: whether to use past query results or ignore cache. Has no effect if destination is specified (default True). priority:one of 'batch' or 'interactive' (default). 'interactive' jobs should be scheduled to run quickly but are subject to rate limits; 'batch' jobs could be delayed by as much as three hours but are not rate-limited. allow_large_results: whether to allow large results; i.e. compressed data over 100MB. This is slower and requires a table_name to be specified) (default False). dialect : {'legacy', 'standard'}, default 'legacy' 'legacy' : Use BigQuery's legacy SQL dialect. 'standard' : Use BigQuery's standard SQL (beta), which is compliant with the SQL 2011 standard. billing_tier: Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default. This can also be used to override your project-wide default billing tier on a per-query basis. Returns: A QueryJob. Raises: Exception if query could not be executed. 
""" batch = priority == 'low' append = table_mode == 'append' overwrite = table_mode == 'overwrite' if table_name is not None: table_name = _utils.parse_table_name(table_name, self._api.project_id) # depends on [control=['if'], data=['table_name']] try: query_result = self._api.jobs_insert_query(self._sql, self._code, self._imports, table_name=table_name, append=append, overwrite=overwrite, use_cache=use_cache, batch=batch, allow_large_results=allow_large_results, table_definitions=self._external_tables, dialect=dialect, billing_tier=billing_tier) # depends on [control=['try'], data=[]] except Exception as e: raise e # depends on [control=['except'], data=['e']] if 'jobReference' not in query_result: raise Exception('Unexpected response from server') # depends on [control=['if'], data=[]] job_id = query_result['jobReference']['jobId'] if not table_name: try: destination = query_result['configuration']['query']['destinationTable'] table_name = (destination['projectId'], destination['datasetId'], destination['tableId']) # depends on [control=['try'], data=[]] except KeyError: # The query was in error raise Exception(_utils.format_query_errors(query_result['status']['errors'])) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] return _query_job.QueryJob(job_id, table_name, self._sql, context=self._context)
def set_cache(self, request, response): """ caches the response supresses and logs exceptions""" try: cache_key = self.cache_key(request) #presumably this is to deal with requests with attr functions that won't pickle if hasattr(response, 'render') and callable(response.render): response.add_post_render_callback(lambda r: cache.set(cache_key, (r, time.time(),), settings.BETTERCACHE_LOCAL_MAXAGE)) else: cache.set(cache_key, (response, time.time(),) , settings.BETTERCACHE_LOCAL_MAXAGE) except: logger.error("failed to cache to %s" %cache_key)
def function[set_cache, parameter[self, request, response]]: constant[ caches the response supresses and logs exceptions] <ast.Try object at 0x7da1b1435210>
keyword[def] identifier[set_cache] ( identifier[self] , identifier[request] , identifier[response] ): literal[string] keyword[try] : identifier[cache_key] = identifier[self] . identifier[cache_key] ( identifier[request] ) keyword[if] identifier[hasattr] ( identifier[response] , literal[string] ) keyword[and] identifier[callable] ( identifier[response] . identifier[render] ): identifier[response] . identifier[add_post_render_callback] ( keyword[lambda] identifier[r] : identifier[cache] . identifier[set] ( identifier[cache_key] ,( identifier[r] , identifier[time] . identifier[time] (),), identifier[settings] . identifier[BETTERCACHE_LOCAL_MAXAGE] )) keyword[else] : identifier[cache] . identifier[set] ( identifier[cache_key] ,( identifier[response] , identifier[time] . identifier[time] (),), identifier[settings] . identifier[BETTERCACHE_LOCAL_MAXAGE] ) keyword[except] : identifier[logger] . identifier[error] ( literal[string] % identifier[cache_key] )
def set_cache(self, request, response): """ caches the response supresses and logs exceptions""" try: cache_key = self.cache_key(request) #presumably this is to deal with requests with attr functions that won't pickle if hasattr(response, 'render') and callable(response.render): response.add_post_render_callback(lambda r: cache.set(cache_key, (r, time.time()), settings.BETTERCACHE_LOCAL_MAXAGE)) # depends on [control=['if'], data=[]] else: cache.set(cache_key, (response, time.time()), settings.BETTERCACHE_LOCAL_MAXAGE) # depends on [control=['try'], data=[]] except: logger.error('failed to cache to %s' % cache_key) # depends on [control=['except'], data=[]]
def _create_descriptor_from_property_definition(self, class_name, property_definition, class_name_to_definition): """Return a PropertyDescriptor corresponding to the given OrientDB property definition.""" name = property_definition['name'] type_id = property_definition['type'] linked_class = property_definition.get('linkedClass', None) linked_type = property_definition.get('linkedType', None) qualifier = None validate_supported_property_type_id(name, type_id) if type_id == PROPERTY_TYPE_LINK_ID: if class_name not in self._edge_class_names: raise AssertionError(u'Found a property of type Link on a non-edge class: ' u'{} {}'.format(name, class_name)) if name not in {EDGE_SOURCE_PROPERTY_NAME, EDGE_DESTINATION_PROPERTY_NAME}: raise AssertionError(u'Found a property of type Link with an unexpected name: ' u'{} {}'.format(name, class_name)) if linked_class is None: raise AssertionError(u'Property "{}" is declared with type Link but has no ' u'linked class: {}'.format(name, property_definition)) if linked_class not in self._vertex_class_names: is_linked_class_abstract = class_name_to_definition[linked_class]['abstract'] all_subclasses_are_vertices = True for subclass in self._subclass_sets[linked_class]: if subclass != linked_class and subclass not in self.vertex_class_names: all_subclasses_are_vertices = False break if not (is_linked_class_abstract and all_subclasses_are_vertices): raise AssertionError(u'Property "{}" is declared as a Link to class {}, but ' u'that class is neither a vertex nor is it an ' u'abstract class whose subclasses are all vertices!' .format(name, linked_class)) qualifier = linked_class elif type_id in COLLECTION_PROPERTY_TYPES: if linked_class is not None and linked_type is not None: raise AssertionError(u'Property "{}" unexpectedly has both a linked class and ' u'a linked type: {}'.format(name, property_definition)) elif linked_type is not None and linked_class is None: # No linked class, must be a linked native OrientDB type. 
validate_supported_property_type_id(name + ' inner type', linked_type) qualifier = linked_type elif linked_class is not None and linked_type is None: # No linked type, must be a linked non-graph user-defined type. if linked_class not in self._non_graph_class_names: raise AssertionError(u'Property "{}" is declared as the inner type of ' u'an embedded collection, but is not a non-graph class: ' u'{}'.format(name, linked_class)) qualifier = linked_class else: raise AssertionError(u'Property "{}" is an embedded collection but has ' u'neither a linked class nor a linked type: ' u'{}'.format(name, property_definition)) default_value = None default_value_string = property_definition.get('defaultValue', None) if default_value_string is not None: default_value = parse_default_property_value(name, type_id, default_value_string) descriptor = PropertyDescriptor(type_id=type_id, qualifier=qualifier, default=default_value) # Sanity-check the descriptor before returning it. _validate_collections_have_default_values(class_name, name, descriptor) return descriptor
def function[_create_descriptor_from_property_definition, parameter[self, class_name, property_definition, class_name_to_definition]]: constant[Return a PropertyDescriptor corresponding to the given OrientDB property definition.] variable[name] assign[=] call[name[property_definition]][constant[name]] variable[type_id] assign[=] call[name[property_definition]][constant[type]] variable[linked_class] assign[=] call[name[property_definition].get, parameter[constant[linkedClass], constant[None]]] variable[linked_type] assign[=] call[name[property_definition].get, parameter[constant[linkedType], constant[None]]] variable[qualifier] assign[=] constant[None] call[name[validate_supported_property_type_id], parameter[name[name], name[type_id]]] if compare[name[type_id] equal[==] name[PROPERTY_TYPE_LINK_ID]] begin[:] if compare[name[class_name] <ast.NotIn object at 0x7da2590d7190> name[self]._edge_class_names] begin[:] <ast.Raise object at 0x7da1b170f430> if compare[name[name] <ast.NotIn object at 0x7da2590d7190> <ast.Set object at 0x7da1b170c430>] begin[:] <ast.Raise object at 0x7da1b170ee90> if compare[name[linked_class] is constant[None]] begin[:] <ast.Raise object at 0x7da1b170ce20> if compare[name[linked_class] <ast.NotIn object at 0x7da2590d7190> name[self]._vertex_class_names] begin[:] variable[is_linked_class_abstract] assign[=] call[call[name[class_name_to_definition]][name[linked_class]]][constant[abstract]] variable[all_subclasses_are_vertices] assign[=] constant[True] for taget[name[subclass]] in starred[call[name[self]._subclass_sets][name[linked_class]]] begin[:] if <ast.BoolOp object at 0x7da1b16b6050> begin[:] variable[all_subclasses_are_vertices] assign[=] constant[False] break if <ast.UnaryOp object at 0x7da1b16b6020> begin[:] <ast.Raise object at 0x7da1b16b5f00> variable[qualifier] assign[=] name[linked_class] variable[default_value] assign[=] constant[None] variable[default_value_string] assign[=] call[name[property_definition].get, 
parameter[constant[defaultValue], constant[None]]] if compare[name[default_value_string] is_not constant[None]] begin[:] variable[default_value] assign[=] call[name[parse_default_property_value], parameter[name[name], name[type_id], name[default_value_string]]] variable[descriptor] assign[=] call[name[PropertyDescriptor], parameter[]] call[name[_validate_collections_have_default_values], parameter[name[class_name], name[name], name[descriptor]]] return[name[descriptor]]
keyword[def] identifier[_create_descriptor_from_property_definition] ( identifier[self] , identifier[class_name] , identifier[property_definition] , identifier[class_name_to_definition] ): literal[string] identifier[name] = identifier[property_definition] [ literal[string] ] identifier[type_id] = identifier[property_definition] [ literal[string] ] identifier[linked_class] = identifier[property_definition] . identifier[get] ( literal[string] , keyword[None] ) identifier[linked_type] = identifier[property_definition] . identifier[get] ( literal[string] , keyword[None] ) identifier[qualifier] = keyword[None] identifier[validate_supported_property_type_id] ( identifier[name] , identifier[type_id] ) keyword[if] identifier[type_id] == identifier[PROPERTY_TYPE_LINK_ID] : keyword[if] identifier[class_name] keyword[not] keyword[in] identifier[self] . identifier[_edge_class_names] : keyword[raise] identifier[AssertionError] ( literal[string] literal[string] . identifier[format] ( identifier[name] , identifier[class_name] )) keyword[if] identifier[name] keyword[not] keyword[in] { identifier[EDGE_SOURCE_PROPERTY_NAME] , identifier[EDGE_DESTINATION_PROPERTY_NAME] }: keyword[raise] identifier[AssertionError] ( literal[string] literal[string] . identifier[format] ( identifier[name] , identifier[class_name] )) keyword[if] identifier[linked_class] keyword[is] keyword[None] : keyword[raise] identifier[AssertionError] ( literal[string] literal[string] . identifier[format] ( identifier[name] , identifier[property_definition] )) keyword[if] identifier[linked_class] keyword[not] keyword[in] identifier[self] . identifier[_vertex_class_names] : identifier[is_linked_class_abstract] = identifier[class_name_to_definition] [ identifier[linked_class] ][ literal[string] ] identifier[all_subclasses_are_vertices] = keyword[True] keyword[for] identifier[subclass] keyword[in] identifier[self] . 
identifier[_subclass_sets] [ identifier[linked_class] ]: keyword[if] identifier[subclass] != identifier[linked_class] keyword[and] identifier[subclass] keyword[not] keyword[in] identifier[self] . identifier[vertex_class_names] : identifier[all_subclasses_are_vertices] = keyword[False] keyword[break] keyword[if] keyword[not] ( identifier[is_linked_class_abstract] keyword[and] identifier[all_subclasses_are_vertices] ): keyword[raise] identifier[AssertionError] ( literal[string] literal[string] literal[string] . identifier[format] ( identifier[name] , identifier[linked_class] )) identifier[qualifier] = identifier[linked_class] keyword[elif] identifier[type_id] keyword[in] identifier[COLLECTION_PROPERTY_TYPES] : keyword[if] identifier[linked_class] keyword[is] keyword[not] keyword[None] keyword[and] identifier[linked_type] keyword[is] keyword[not] keyword[None] : keyword[raise] identifier[AssertionError] ( literal[string] literal[string] . identifier[format] ( identifier[name] , identifier[property_definition] )) keyword[elif] identifier[linked_type] keyword[is] keyword[not] keyword[None] keyword[and] identifier[linked_class] keyword[is] keyword[None] : identifier[validate_supported_property_type_id] ( identifier[name] + literal[string] , identifier[linked_type] ) identifier[qualifier] = identifier[linked_type] keyword[elif] identifier[linked_class] keyword[is] keyword[not] keyword[None] keyword[and] identifier[linked_type] keyword[is] keyword[None] : keyword[if] identifier[linked_class] keyword[not] keyword[in] identifier[self] . identifier[_non_graph_class_names] : keyword[raise] identifier[AssertionError] ( literal[string] literal[string] literal[string] . identifier[format] ( identifier[name] , identifier[linked_class] )) identifier[qualifier] = identifier[linked_class] keyword[else] : keyword[raise] identifier[AssertionError] ( literal[string] literal[string] literal[string] . 
identifier[format] ( identifier[name] , identifier[property_definition] )) identifier[default_value] = keyword[None] identifier[default_value_string] = identifier[property_definition] . identifier[get] ( literal[string] , keyword[None] ) keyword[if] identifier[default_value_string] keyword[is] keyword[not] keyword[None] : identifier[default_value] = identifier[parse_default_property_value] ( identifier[name] , identifier[type_id] , identifier[default_value_string] ) identifier[descriptor] = identifier[PropertyDescriptor] ( identifier[type_id] = identifier[type_id] , identifier[qualifier] = identifier[qualifier] , identifier[default] = identifier[default_value] ) identifier[_validate_collections_have_default_values] ( identifier[class_name] , identifier[name] , identifier[descriptor] ) keyword[return] identifier[descriptor]
def _create_descriptor_from_property_definition(self, class_name, property_definition, class_name_to_definition): """Return a PropertyDescriptor corresponding to the given OrientDB property definition.""" name = property_definition['name'] type_id = property_definition['type'] linked_class = property_definition.get('linkedClass', None) linked_type = property_definition.get('linkedType', None) qualifier = None validate_supported_property_type_id(name, type_id) if type_id == PROPERTY_TYPE_LINK_ID: if class_name not in self._edge_class_names: raise AssertionError(u'Found a property of type Link on a non-edge class: {} {}'.format(name, class_name)) # depends on [control=['if'], data=['class_name']] if name not in {EDGE_SOURCE_PROPERTY_NAME, EDGE_DESTINATION_PROPERTY_NAME}: raise AssertionError(u'Found a property of type Link with an unexpected name: {} {}'.format(name, class_name)) # depends on [control=['if'], data=['name']] if linked_class is None: raise AssertionError(u'Property "{}" is declared with type Link but has no linked class: {}'.format(name, property_definition)) # depends on [control=['if'], data=[]] if linked_class not in self._vertex_class_names: is_linked_class_abstract = class_name_to_definition[linked_class]['abstract'] all_subclasses_are_vertices = True for subclass in self._subclass_sets[linked_class]: if subclass != linked_class and subclass not in self.vertex_class_names: all_subclasses_are_vertices = False break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['subclass']] if not (is_linked_class_abstract and all_subclasses_are_vertices): raise AssertionError(u'Property "{}" is declared as a Link to class {}, but that class is neither a vertex nor is it an abstract class whose subclasses are all vertices!'.format(name, linked_class)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['linked_class']] qualifier = linked_class # depends on [control=['if'], data=[]] elif type_id in 
COLLECTION_PROPERTY_TYPES: if linked_class is not None and linked_type is not None: raise AssertionError(u'Property "{}" unexpectedly has both a linked class and a linked type: {}'.format(name, property_definition)) # depends on [control=['if'], data=[]] elif linked_type is not None and linked_class is None: # No linked class, must be a linked native OrientDB type. validate_supported_property_type_id(name + ' inner type', linked_type) qualifier = linked_type # depends on [control=['if'], data=[]] elif linked_class is not None and linked_type is None: # No linked type, must be a linked non-graph user-defined type. if linked_class not in self._non_graph_class_names: raise AssertionError(u'Property "{}" is declared as the inner type of an embedded collection, but is not a non-graph class: {}'.format(name, linked_class)) # depends on [control=['if'], data=['linked_class']] qualifier = linked_class # depends on [control=['if'], data=[]] else: raise AssertionError(u'Property "{}" is an embedded collection but has neither a linked class nor a linked type: {}'.format(name, property_definition)) # depends on [control=['if'], data=[]] default_value = None default_value_string = property_definition.get('defaultValue', None) if default_value_string is not None: default_value = parse_default_property_value(name, type_id, default_value_string) # depends on [control=['if'], data=['default_value_string']] descriptor = PropertyDescriptor(type_id=type_id, qualifier=qualifier, default=default_value) # Sanity-check the descriptor before returning it. _validate_collections_have_default_values(class_name, name, descriptor) return descriptor
def is_retroflex(c,lang): """ Is the character a retroflex """ o=get_offset(c,lang) return (o>=RETROFLEX_RANGE[0] and o<=RETROFLEX_RANGE[1])
def function[is_retroflex, parameter[c, lang]]: constant[ Is the character a retroflex ] variable[o] assign[=] call[name[get_offset], parameter[name[c], name[lang]]] return[<ast.BoolOp object at 0x7da1b26aeef0>]
keyword[def] identifier[is_retroflex] ( identifier[c] , identifier[lang] ): literal[string] identifier[o] = identifier[get_offset] ( identifier[c] , identifier[lang] ) keyword[return] ( identifier[o] >= identifier[RETROFLEX_RANGE] [ literal[int] ] keyword[and] identifier[o] <= identifier[RETROFLEX_RANGE] [ literal[int] ])
def is_retroflex(c, lang): """ Is the character a retroflex """ o = get_offset(c, lang) return o >= RETROFLEX_RANGE[0] and o <= RETROFLEX_RANGE[1]
def query_all_issues(after): """Hits the github API for all closed issues after the given date, returns the data.""" page = count(1) data = [] while True: page_data = query_issues(next(page), after) if not page_data: break data.extend(page_data) return data
def function[query_all_issues, parameter[after]]: constant[Hits the github API for all closed issues after the given date, returns the data.] variable[page] assign[=] call[name[count], parameter[constant[1]]] variable[data] assign[=] list[[]] while constant[True] begin[:] variable[page_data] assign[=] call[name[query_issues], parameter[call[name[next], parameter[name[page]]], name[after]]] if <ast.UnaryOp object at 0x7da20e9552a0> begin[:] break call[name[data].extend, parameter[name[page_data]]] return[name[data]]
keyword[def] identifier[query_all_issues] ( identifier[after] ): literal[string] identifier[page] = identifier[count] ( literal[int] ) identifier[data] =[] keyword[while] keyword[True] : identifier[page_data] = identifier[query_issues] ( identifier[next] ( identifier[page] ), identifier[after] ) keyword[if] keyword[not] identifier[page_data] : keyword[break] identifier[data] . identifier[extend] ( identifier[page_data] ) keyword[return] identifier[data]
def query_all_issues(after): """Hits the github API for all closed issues after the given date, returns the data.""" page = count(1) data = [] while True: page_data = query_issues(next(page), after) if not page_data: break # depends on [control=['if'], data=[]] data.extend(page_data) # depends on [control=['while'], data=[]] return data
def round_uncertainty(unc, method="publication"):
    """
    Rounds an uncertainty *unc* following a specific *method* and returns a 2-tuple containing
    the significant digits as a string, and the decimal magnitude that is required to recover the
    uncertainty. *unc* might also be a numpy array. Rounding methods:

    - ``"pdg"``: Rounding rules as defined by the `PDG
      <http://pdg.lbl.gov/2011/reviews/rpp2011-rev-rpp-intro.pdf#page=13>`_.
    - ``"publication"``, ``"pub``: Like ``"pdg"`` with an extra significant digit for results that
      need to be combined later.
    - ``"onedigit"``, ``"one"``: Forces one single significant digit. This is useful when there are
      multiple uncertainties that vary by more than a factor 10 among themselves.

    Example:

    .. code-block:: python

        round_uncertainty(0.123, "pub")  # -> ("123", -3)
        round_uncertainty(0.123, "pdg")  # -> ("12", -2)
        round_uncertainty(0.123, "one")  # -> ("1", -1)

        round_uncertainty(0.456, "pub")  # -> ("46", -2)
        round_uncertainty(0.456, "pdg")  # -> ("5", -1)
        round_uncertainty(0.456, "one")  # -> ("5", -1)

        round_uncertainty(0.987, "pub")  # -> ("987", -3)
        round_uncertainty(0.987, "pdg")  # -> ("10", -1)
        round_uncertainty(0.987, "one")  # -> ("10", -1)

        a = np.array([0.123, 0.456, 0.987])
        round_uncertainty(a, "pub")  # -> (["123", "46", "987"], [-3, -2, -3])
    """
    # validate the method (accept both the short and the long spelling)
    meth = method.lower()
    if meth not in ("pub", "publication", "pdg", "one", "onedigit"):
        raise ValueError("unknown rounding method: {}".format(method))

    # split the uncertainty into significand and decimal magnitude
    sig, mag = split_value(unc)

    # infer the precision based on the method and get updated significand and magnitude
    if not is_numpy(unc):
        prec, sig, mag = _infer_precision(unc, sig, mag, meth)
        # scalar path: match_precision returns a str, so replace with str args
        replace_args = (".", "")
    else:
        prec = np.ones(unc.shape).astype(np.int)
        # iterate element-wise, writing precision/significand/magnitude back in place
        for p, u, s, m in np.nditer([prec, unc, sig, mag], op_flags=["readwrite"]):
            p[...], s[...], m[...] = _infer_precision(u, s, m, meth)
        # array path: match_precision yields bytes here, so replace with bytes args
        replace_args = (b".", b"")

    # determine the significant digits and the decimal magnitude that would reconstruct the value
    digits = match_precision(sig, 10.**(1 - prec)).replace(*replace_args)
    mag -= prec - 1

    return (digits, mag)
def function[round_uncertainty, parameter[unc, method]]: constant[ Rounds an uncertainty *unc* following a specific *method* and returns a 2-tuple containing the significant digits as a string, and the decimal magnitude that is required to recover the uncertainty. *unc* might also be a numpy array. Rounding methods: - ``"pdg"``: Rounding rules as defined by the `PDG <http://pdg.lbl.gov/2011/reviews/rpp2011-rev-rpp-intro.pdf#page=13>`_. - ``"publication"``, ``"pub``: Like ``"pdg"`` with an extra significant digit for results that need to be combined later. - ``"onedigit"``, ``"one"``: Forces one single significant digit. This is useful when there are multiple uncertainties that vary by more than a factor 10 among themselves. Example: .. code-block:: python round_uncertainty(0.123, "pub") # -> ("123", -3) round_uncertainty(0.123, "pdg") # -> ("12", -2) round_uncertainty(0.123, "one") # -> ("1", -1) round_uncertainty(0.456, "pub") # -> ("46", -2) round_uncertainty(0.456, "pdg") # -> ("5", -1) round_uncertainty(0.456, "one") # -> ("5", -1) round_uncertainty(0.987, "pub") # -> ("987", -3) round_uncertainty(0.987, "pdg") # -> ("10", -1) round_uncertainty(0.987, "one") # -> ("10", -1) a = np.array([0.123, 0.456, 0.987]) round_uncertainty(a, "pub") # -> (["123", "46", "987"], [-3, -2, -3]) ] variable[meth] assign[=] call[name[method].lower, parameter[]] if compare[name[meth] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da1b1f20a90>, <ast.Constant object at 0x7da1b1f21240>, <ast.Constant object at 0x7da1b1f20220>, <ast.Constant object at 0x7da1b1f21600>, <ast.Constant object at 0x7da1b1f20640>]]] begin[:] <ast.Raise object at 0x7da1b1f20850> <ast.Tuple object at 0x7da1b1f21000> assign[=] call[name[split_value], parameter[name[unc]]] if <ast.UnaryOp object at 0x7da1b1f20d60> begin[:] <ast.Tuple object at 0x7da1b1f214b0> assign[=] call[name[_infer_precision], parameter[name[unc], name[sig], name[mag], name[meth]]] variable[replace_args] assign[=] 
tuple[[<ast.Constant object at 0x7da1b1f20370>, <ast.Constant object at 0x7da1b1f20e50>]] variable[digits] assign[=] call[call[name[match_precision], parameter[name[sig], binary_operation[constant[10.0] ** binary_operation[constant[1] - name[prec]]]]].replace, parameter[<ast.Starred object at 0x7da1b1fa8040>]] <ast.AugAssign object at 0x7da1b1fa9e40> return[tuple[[<ast.Name object at 0x7da1b1fabc40>, <ast.Name object at 0x7da1b1ff8a60>]]]
keyword[def] identifier[round_uncertainty] ( identifier[unc] , identifier[method] = literal[string] ): literal[string] identifier[meth] = identifier[method] . identifier[lower] () keyword[if] identifier[meth] keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ): keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[method] )) identifier[sig] , identifier[mag] = identifier[split_value] ( identifier[unc] ) keyword[if] keyword[not] identifier[is_numpy] ( identifier[unc] ): identifier[prec] , identifier[sig] , identifier[mag] = identifier[_infer_precision] ( identifier[unc] , identifier[sig] , identifier[mag] , identifier[meth] ) identifier[replace_args] =( literal[string] , literal[string] ) keyword[else] : identifier[prec] = identifier[np] . identifier[ones] ( identifier[unc] . identifier[shape] ). identifier[astype] ( identifier[np] . identifier[int] ) keyword[for] identifier[p] , identifier[u] , identifier[s] , identifier[m] keyword[in] identifier[np] . identifier[nditer] ([ identifier[prec] , identifier[unc] , identifier[sig] , identifier[mag] ], identifier[op_flags] =[ literal[string] ]): identifier[p] [...], identifier[s] [...], identifier[m] [...]= identifier[_infer_precision] ( identifier[u] , identifier[s] , identifier[m] , identifier[meth] ) identifier[replace_args] =( literal[string] , literal[string] ) identifier[digits] = identifier[match_precision] ( identifier[sig] , literal[int] **( literal[int] - identifier[prec] )). identifier[replace] (* identifier[replace_args] ) identifier[mag] -= identifier[prec] - literal[int] keyword[return] ( identifier[digits] , identifier[mag] )
def round_uncertainty(unc, method='publication'): """ Rounds an uncertainty *unc* following a specific *method* and returns a 2-tuple containing the significant digits as a string, and the decimal magnitude that is required to recover the uncertainty. *unc* might also be a numpy array. Rounding methods: - ``"pdg"``: Rounding rules as defined by the `PDG <http://pdg.lbl.gov/2011/reviews/rpp2011-rev-rpp-intro.pdf#page=13>`_. - ``"publication"``, ``"pub``: Like ``"pdg"`` with an extra significant digit for results that need to be combined later. - ``"onedigit"``, ``"one"``: Forces one single significant digit. This is useful when there are multiple uncertainties that vary by more than a factor 10 among themselves. Example: .. code-block:: python round_uncertainty(0.123, "pub") # -> ("123", -3) round_uncertainty(0.123, "pdg") # -> ("12", -2) round_uncertainty(0.123, "one") # -> ("1", -1) round_uncertainty(0.456, "pub") # -> ("46", -2) round_uncertainty(0.456, "pdg") # -> ("5", -1) round_uncertainty(0.456, "one") # -> ("5", -1) round_uncertainty(0.987, "pub") # -> ("987", -3) round_uncertainty(0.987, "pdg") # -> ("10", -1) round_uncertainty(0.987, "one") # -> ("10", -1) a = np.array([0.123, 0.456, 0.987]) round_uncertainty(a, "pub") # -> (["123", "46", "987"], [-3, -2, -3]) """ # validate the method meth = method.lower() if meth not in ('pub', 'publication', 'pdg', 'one', 'onedigit'): raise ValueError('unknown rounding method: {}'.format(method)) # depends on [control=['if'], data=[]] # split the uncertainty (sig, mag) = split_value(unc) # infer the precision based on the method and get updated significand and magnitude if not is_numpy(unc): (prec, sig, mag) = _infer_precision(unc, sig, mag, meth) replace_args = ('.', '') # depends on [control=['if'], data=[]] else: prec = np.ones(unc.shape).astype(np.int) for (p, u, s, m) in np.nditer([prec, unc, sig, mag], op_flags=['readwrite']): (p[...], s[...], m[...]) = _infer_precision(u, s, m, meth) # depends on 
[control=['for'], data=[]] replace_args = (b'.', b'') # determine the significant digits and the decimal magnitude that would reconstruct the value digits = match_precision(sig, 10.0 ** (1 - prec)).replace(*replace_args) mag -= prec - 1 return (digits, mag)
def _refresh(self): """Background refreshing thread.""" while not self._stopevent.isSet(): line = self._serial.readline() #this is for python2/python3 compatibility. Is there a better way? try: line = line.encode().decode('utf-8') except AttributeError: line = line.decode('utf-8') if LaCrosseSensor.re_reading.match(line): sensor = LaCrosseSensor(line) self.sensors[sensor.sensorid] = sensor if self._callback: self._callback(sensor, self._callback_data) if sensor.sensorid in self._registry: for cbs in self._registry[sensor.sensorid]: cbs[0](sensor, cbs[1])
def function[_refresh, parameter[self]]: constant[Background refreshing thread.] while <ast.UnaryOp object at 0x7da207f01f60> begin[:] variable[line] assign[=] call[name[self]._serial.readline, parameter[]] <ast.Try object at 0x7da207f01630> if call[name[LaCrosseSensor].re_reading.match, parameter[name[line]]] begin[:] variable[sensor] assign[=] call[name[LaCrosseSensor], parameter[name[line]]] call[name[self].sensors][name[sensor].sensorid] assign[=] name[sensor] if name[self]._callback begin[:] call[name[self]._callback, parameter[name[sensor], name[self]._callback_data]] if compare[name[sensor].sensorid in name[self]._registry] begin[:] for taget[name[cbs]] in starred[call[name[self]._registry][name[sensor].sensorid]] begin[:] call[call[name[cbs]][constant[0]], parameter[name[sensor], call[name[cbs]][constant[1]]]]
keyword[def] identifier[_refresh] ( identifier[self] ): literal[string] keyword[while] keyword[not] identifier[self] . identifier[_stopevent] . identifier[isSet] (): identifier[line] = identifier[self] . identifier[_serial] . identifier[readline] () keyword[try] : identifier[line] = identifier[line] . identifier[encode] (). identifier[decode] ( literal[string] ) keyword[except] identifier[AttributeError] : identifier[line] = identifier[line] . identifier[decode] ( literal[string] ) keyword[if] identifier[LaCrosseSensor] . identifier[re_reading] . identifier[match] ( identifier[line] ): identifier[sensor] = identifier[LaCrosseSensor] ( identifier[line] ) identifier[self] . identifier[sensors] [ identifier[sensor] . identifier[sensorid] ]= identifier[sensor] keyword[if] identifier[self] . identifier[_callback] : identifier[self] . identifier[_callback] ( identifier[sensor] , identifier[self] . identifier[_callback_data] ) keyword[if] identifier[sensor] . identifier[sensorid] keyword[in] identifier[self] . identifier[_registry] : keyword[for] identifier[cbs] keyword[in] identifier[self] . identifier[_registry] [ identifier[sensor] . identifier[sensorid] ]: identifier[cbs] [ literal[int] ]( identifier[sensor] , identifier[cbs] [ literal[int] ])
def _refresh(self): """Background refreshing thread.""" while not self._stopevent.isSet(): line = self._serial.readline() #this is for python2/python3 compatibility. Is there a better way? try: line = line.encode().decode('utf-8') # depends on [control=['try'], data=[]] except AttributeError: line = line.decode('utf-8') # depends on [control=['except'], data=[]] if LaCrosseSensor.re_reading.match(line): sensor = LaCrosseSensor(line) self.sensors[sensor.sensorid] = sensor if self._callback: self._callback(sensor, self._callback_data) # depends on [control=['if'], data=[]] if sensor.sensorid in self._registry: for cbs in self._registry[sensor.sensorid]: cbs[0](sensor, cbs[1]) # depends on [control=['for'], data=['cbs']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
def ris(self): """Bibliographic entry in RIS (Research Information System Format) format. Returns ------- ris : str The RIS string representing an item. Raises ------ ValueError : If the item's aggregationType is not Journal. """ if self.aggregationType != 'Journal': raise ValueError('Only Journal articles supported.') template = u'''TY - JOUR TI - {title} JO - {journal} VL - {volume} DA - {date} SP - {pages} PY - {year} DO - {doi} UR - https://doi.org/{doi} ''' ris = template.format( title=self.title, journal=self.publicationName, volume=self.volume, date=self.coverDate, pages=self.pageRange, year=self.coverDate[0:4], doi=self.doi) for au in self.authors: ris += 'AU - {}\n'.format(au.indexed_name) if self.issueIdentifier is not None: ris += 'IS - {}\n'.format(self.issueIdentifier) ris += 'ER - \n\n' return ris
def function[ris, parameter[self]]: constant[Bibliographic entry in RIS (Research Information System Format) format. Returns ------- ris : str The RIS string representing an item. Raises ------ ValueError : If the item's aggregationType is not Journal. ] if compare[name[self].aggregationType not_equal[!=] constant[Journal]] begin[:] <ast.Raise object at 0x7da18ede4b50> variable[template] assign[=] constant[TY - JOUR TI - {title} JO - {journal} VL - {volume} DA - {date} SP - {pages} PY - {year} DO - {doi} UR - https://doi.org/{doi} ] variable[ris] assign[=] call[name[template].format, parameter[]] for taget[name[au]] in starred[name[self].authors] begin[:] <ast.AugAssign object at 0x7da18ede6b90> if compare[name[self].issueIdentifier is_not constant[None]] begin[:] <ast.AugAssign object at 0x7da18ede7f10> <ast.AugAssign object at 0x7da18ede4190> return[name[ris]]
keyword[def] identifier[ris] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[aggregationType] != literal[string] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[template] = literal[string] identifier[ris] = identifier[template] . identifier[format] ( identifier[title] = identifier[self] . identifier[title] , identifier[journal] = identifier[self] . identifier[publicationName] , identifier[volume] = identifier[self] . identifier[volume] , identifier[date] = identifier[self] . identifier[coverDate] , identifier[pages] = identifier[self] . identifier[pageRange] , identifier[year] = identifier[self] . identifier[coverDate] [ literal[int] : literal[int] ], identifier[doi] = identifier[self] . identifier[doi] ) keyword[for] identifier[au] keyword[in] identifier[self] . identifier[authors] : identifier[ris] += literal[string] . identifier[format] ( identifier[au] . identifier[indexed_name] ) keyword[if] identifier[self] . identifier[issueIdentifier] keyword[is] keyword[not] keyword[None] : identifier[ris] += literal[string] . identifier[format] ( identifier[self] . identifier[issueIdentifier] ) identifier[ris] += literal[string] keyword[return] identifier[ris]
def ris(self): """Bibliographic entry in RIS (Research Information System Format) format. Returns ------- ris : str The RIS string representing an item. Raises ------ ValueError : If the item's aggregationType is not Journal. """ if self.aggregationType != 'Journal': raise ValueError('Only Journal articles supported.') # depends on [control=['if'], data=[]] template = u'TY - JOUR\nTI - {title}\nJO - {journal}\nVL - {volume}\nDA - {date}\nSP - {pages}\nPY - {year}\nDO - {doi}\nUR - https://doi.org/{doi}\n' ris = template.format(title=self.title, journal=self.publicationName, volume=self.volume, date=self.coverDate, pages=self.pageRange, year=self.coverDate[0:4], doi=self.doi) for au in self.authors: ris += 'AU - {}\n'.format(au.indexed_name) # depends on [control=['for'], data=['au']] if self.issueIdentifier is not None: ris += 'IS - {}\n'.format(self.issueIdentifier) # depends on [control=['if'], data=[]] ris += 'ER - \n\n' return ris
def set_mac_addr_adv_interval(self, name, vrid, value=None, disable=False, default=False, run=True): """Set the mac_addr_adv_interval property of the vrrp Args: name (string): The interface to configure. vrid (integer): The vrid number for the vrrp to be managed. value (integer): mac-address advertisement-interval value to assign to the vrrp. disable (boolean): Unset mac-address advertisement-interval if True. default (boolean): Set mac-address advertisement-interval to default if True. run (boolean): Set to True to execute the command, False to return a string with the formatted command. Returns: If run is True, returns True if the command executed successfully, error if failure. If run is False, returns the formatted command string which can be passed to the node """ if not default and not disable: if not int(value) or int(value) < 1 or int(value) > 3600: raise ValueError("vrrp property 'mac_addr_adv_interval' must " "be in the range 1-3600") cmd = self.command_builder('vrrp %d mac-address advertisement-interval' % vrid, value=value, default=default, disable=disable) # Run the command if requested if run: result = self.configure_interface(name, cmd) # And verify the command succeeded if result is False: return self.error return result # Otherwise return the formatted command return cmd
def function[set_mac_addr_adv_interval, parameter[self, name, vrid, value, disable, default, run]]: constant[Set the mac_addr_adv_interval property of the vrrp Args: name (string): The interface to configure. vrid (integer): The vrid number for the vrrp to be managed. value (integer): mac-address advertisement-interval value to assign to the vrrp. disable (boolean): Unset mac-address advertisement-interval if True. default (boolean): Set mac-address advertisement-interval to default if True. run (boolean): Set to True to execute the command, False to return a string with the formatted command. Returns: If run is True, returns True if the command executed successfully, error if failure. If run is False, returns the formatted command string which can be passed to the node ] if <ast.BoolOp object at 0x7da1b23ef8e0> begin[:] if <ast.BoolOp object at 0x7da1b23edbd0> begin[:] <ast.Raise object at 0x7da1b23eec80> variable[cmd] assign[=] call[name[self].command_builder, parameter[binary_operation[constant[vrrp %d mac-address advertisement-interval] <ast.Mod object at 0x7da2590d6920> name[vrid]]]] if name[run] begin[:] variable[result] assign[=] call[name[self].configure_interface, parameter[name[name], name[cmd]]] if compare[name[result] is constant[False]] begin[:] return[name[self].error] return[name[result]] return[name[cmd]]
keyword[def] identifier[set_mac_addr_adv_interval] ( identifier[self] , identifier[name] , identifier[vrid] , identifier[value] = keyword[None] , identifier[disable] = keyword[False] , identifier[default] = keyword[False] , identifier[run] = keyword[True] ): literal[string] keyword[if] keyword[not] identifier[default] keyword[and] keyword[not] identifier[disable] : keyword[if] keyword[not] identifier[int] ( identifier[value] ) keyword[or] identifier[int] ( identifier[value] )< literal[int] keyword[or] identifier[int] ( identifier[value] )> literal[int] : keyword[raise] identifier[ValueError] ( literal[string] literal[string] ) identifier[cmd] = identifier[self] . identifier[command_builder] ( literal[string] % identifier[vrid] , identifier[value] = identifier[value] , identifier[default] = identifier[default] , identifier[disable] = identifier[disable] ) keyword[if] identifier[run] : identifier[result] = identifier[self] . identifier[configure_interface] ( identifier[name] , identifier[cmd] ) keyword[if] identifier[result] keyword[is] keyword[False] : keyword[return] identifier[self] . identifier[error] keyword[return] identifier[result] keyword[return] identifier[cmd]
def set_mac_addr_adv_interval(self, name, vrid, value=None, disable=False, default=False, run=True): """Set the mac_addr_adv_interval property of the vrrp Args: name (string): The interface to configure. vrid (integer): The vrid number for the vrrp to be managed. value (integer): mac-address advertisement-interval value to assign to the vrrp. disable (boolean): Unset mac-address advertisement-interval if True. default (boolean): Set mac-address advertisement-interval to default if True. run (boolean): Set to True to execute the command, False to return a string with the formatted command. Returns: If run is True, returns True if the command executed successfully, error if failure. If run is False, returns the formatted command string which can be passed to the node """ if not default and (not disable): if not int(value) or int(value) < 1 or int(value) > 3600: raise ValueError("vrrp property 'mac_addr_adv_interval' must be in the range 1-3600") # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] cmd = self.command_builder('vrrp %d mac-address advertisement-interval' % vrid, value=value, default=default, disable=disable) # Run the command if requested if run: result = self.configure_interface(name, cmd) # And verify the command succeeded if result is False: return self.error # depends on [control=['if'], data=[]] return result # depends on [control=['if'], data=[]] # Otherwise return the formatted command return cmd
def getStatus(self): """ RDY - Ready Bit. This bit provides the status of the RDY flag from the part. The status and function of this bit is the same as the RDY output pin. A number of events set the RDY bit high as indicated in Table XVIII in datasheet STDY - Steady Bit. This bit is updated when the filter writes a result to the Data Register. If the filter is in FASTStep mode (see Filter Register section) and responding to a step input, the STDY bit remains high as the initial conversion results become available. The RDY output and bit are set low on these initial conversions to indicate that a result is available. If the STDY is high, however, it indicates that the result being provided is not from a fully settled second-stage FIR filter. When the FIR filter has fully settled, the STDY bit will go low coincident with RDY. If the part is never placed into its FASTStep mode, the STDY bit will go low at the first Data Register read and it is not cleared by subsequent Data Register reads. A number of events set the STDY bit high as indicated in Table XVIII. STDY is set high along with RDY by all events in the table except a Data Register read. STBY - Standby Bit. This bit indicates whether the AD7730 is in its Standby Mode or normal mode of operation. The part can be placed in its standby mode using the STANDBY input pin or by writing 011 to the MD2 to MD0 bits of the Mode Register. The power-on/reset status of this bit is 0 assuming the STANDBY pin is high. NOREF - No Reference Bit. If the voltage between the REF IN(+) and REF IN(-) pins is below 0.3 V, or either of these inputs is open-circuit, the NOREF bit goes to 1. If NOREF is active on completion of a conversion, the Data Register is loaded with all 1s. 
If NOREF is active on completion of a calibration, updating of the calibration registers is inhibited.""" status = self.single_read(self.AD7730_STATUS_REG) bits_values = dict([('NOREF',status[0] & 0x10 == 0x10), ('STBY',status[0] & 0x20 == 0x20), ('STDY',status[0] & 0x40 == 0x40), ('RDY',status[0] & 0x80 == 0x80)]) return bits_values
def function[getStatus, parameter[self]]: constant[ RDY - Ready Bit. This bit provides the status of the RDY flag from the part. The status and function of this bit is the same as the RDY output pin. A number of events set the RDY bit high as indicated in Table XVIII in datasheet STDY - Steady Bit. This bit is updated when the filter writes a result to the Data Register. If the filter is in FASTStep mode (see Filter Register section) and responding to a step input, the STDY bit remains high as the initial conversion results become available. The RDY output and bit are set low on these initial conversions to indicate that a result is available. If the STDY is high, however, it indicates that the result being provided is not from a fully settled second-stage FIR filter. When the FIR filter has fully settled, the STDY bit will go low coincident with RDY. If the part is never placed into its FASTStep mode, the STDY bit will go low at the first Data Register read and it is not cleared by subsequent Data Register reads. A number of events set the STDY bit high as indicated in Table XVIII. STDY is set high along with RDY by all events in the table except a Data Register read. STBY - Standby Bit. This bit indicates whether the AD7730 is in its Standby Mode or normal mode of operation. The part can be placed in its standby mode using the STANDBY input pin or by writing 011 to the MD2 to MD0 bits of the Mode Register. The power-on/reset status of this bit is 0 assuming the STANDBY pin is high. NOREF - No Reference Bit. If the voltage between the REF IN(+) and REF IN(-) pins is below 0.3 V, or either of these inputs is open-circuit, the NOREF bit goes to 1. If NOREF is active on completion of a conversion, the Data Register is loaded with all 1s. If NOREF is active on completion of a calibration, updating of the calibration registers is inhibited.] 
variable[status] assign[=] call[name[self].single_read, parameter[name[self].AD7730_STATUS_REG]] variable[bits_values] assign[=] call[name[dict], parameter[list[[<ast.Tuple object at 0x7da18f811000>, <ast.Tuple object at 0x7da18f812b30>, <ast.Tuple object at 0x7da18f813340>, <ast.Tuple object at 0x7da18f811870>]]]] return[name[bits_values]]
keyword[def] identifier[getStatus] ( identifier[self] ): literal[string] identifier[status] = identifier[self] . identifier[single_read] ( identifier[self] . identifier[AD7730_STATUS_REG] ) identifier[bits_values] = identifier[dict] ([( literal[string] , identifier[status] [ literal[int] ]& literal[int] == literal[int] ), ( literal[string] , identifier[status] [ literal[int] ]& literal[int] == literal[int] ), ( literal[string] , identifier[status] [ literal[int] ]& literal[int] == literal[int] ), ( literal[string] , identifier[status] [ literal[int] ]& literal[int] == literal[int] )]) keyword[return] identifier[bits_values]
def getStatus(self): """ RDY - Ready Bit. This bit provides the status of the RDY flag from the part. The status and function of this bit is the same as the RDY output pin. A number of events set the RDY bit high as indicated in Table XVIII in datasheet STDY - Steady Bit. This bit is updated when the filter writes a result to the Data Register. If the filter is in FASTStep mode (see Filter Register section) and responding to a step input, the STDY bit remains high as the initial conversion results become available. The RDY output and bit are set low on these initial conversions to indicate that a result is available. If the STDY is high, however, it indicates that the result being provided is not from a fully settled second-stage FIR filter. When the FIR filter has fully settled, the STDY bit will go low coincident with RDY. If the part is never placed into its FASTStep mode, the STDY bit will go low at the first Data Register read and it is not cleared by subsequent Data Register reads. A number of events set the STDY bit high as indicated in Table XVIII. STDY is set high along with RDY by all events in the table except a Data Register read. STBY - Standby Bit. This bit indicates whether the AD7730 is in its Standby Mode or normal mode of operation. The part can be placed in its standby mode using the STANDBY input pin or by writing 011 to the MD2 to MD0 bits of the Mode Register. The power-on/reset status of this bit is 0 assuming the STANDBY pin is high. NOREF - No Reference Bit. If the voltage between the REF IN(+) and REF IN(-) pins is below 0.3 V, or either of these inputs is open-circuit, the NOREF bit goes to 1. If NOREF is active on completion of a conversion, the Data Register is loaded with all 1s. 
If NOREF is active on completion of a calibration, updating of the calibration registers is inhibited.""" status = self.single_read(self.AD7730_STATUS_REG) bits_values = dict([('NOREF', status[0] & 16 == 16), ('STBY', status[0] & 32 == 32), ('STDY', status[0] & 64 == 64), ('RDY', status[0] & 128 == 128)]) return bits_values
def write_pem(text, path, overwrite=True, pem_type=None): ''' Writes out a PEM string fixing any formatting or whitespace issues before writing. text: PEM string input to be written out. path: Path of the file to write the pem out to. overwrite: If True(default), write_pem will overwrite the entire pem file. Set False to preserve existing private keys and dh params that may exist in the pem file. pem_type: The PEM type to be saved, for example ``CERTIFICATE`` or ``PUBLIC KEY``. Adding this will allow the function to take input that may contain multiple pem types. CLI Example: .. code-block:: bash salt '*' x509.write_pem "-----BEGIN CERTIFICATE-----MIIGMzCCBBugA..." path=/etc/pki/mycert.crt ''' with salt.utils.files.set_umask(0o077): text = get_pem_entry(text, pem_type=pem_type) _dhparams = '' _private_key = '' if pem_type and pem_type == 'CERTIFICATE' and os.path.isfile(path) and not overwrite: _filecontents = _text_or_file(path) try: _dhparams = get_pem_entry(_filecontents, 'DH PARAMETERS') except salt.exceptions.SaltInvocationError as err: log.debug("Error when getting DH PARAMETERS: %s", err) log.trace(err, exc_info=err) try: _private_key = get_pem_entry(_filecontents, '(?:RSA )?PRIVATE KEY') except salt.exceptions.SaltInvocationError as err: log.debug("Error when getting PRIVATE KEY: %s", err) log.trace(err, exc_info=err) with salt.utils.files.fopen(path, 'w') as _fp: if pem_type and pem_type == 'CERTIFICATE' and _private_key: _fp.write(salt.utils.stringutils.to_str(_private_key)) _fp.write(salt.utils.stringutils.to_str(text)) if pem_type and pem_type == 'CERTIFICATE' and _dhparams: _fp.write(salt.utils.stringutils.to_str(_dhparams)) return 'PEM written to {0}'.format(path)
def function[write_pem, parameter[text, path, overwrite, pem_type]]: constant[ Writes out a PEM string fixing any formatting or whitespace issues before writing. text: PEM string input to be written out. path: Path of the file to write the pem out to. overwrite: If True(default), write_pem will overwrite the entire pem file. Set False to preserve existing private keys and dh params that may exist in the pem file. pem_type: The PEM type to be saved, for example ``CERTIFICATE`` or ``PUBLIC KEY``. Adding this will allow the function to take input that may contain multiple pem types. CLI Example: .. code-block:: bash salt '*' x509.write_pem "-----BEGIN CERTIFICATE-----MIIGMzCCBBugA..." path=/etc/pki/mycert.crt ] with call[name[salt].utils.files.set_umask, parameter[constant[63]]] begin[:] variable[text] assign[=] call[name[get_pem_entry], parameter[name[text]]] variable[_dhparams] assign[=] constant[] variable[_private_key] assign[=] constant[] if <ast.BoolOp object at 0x7da1b21f28f0> begin[:] variable[_filecontents] assign[=] call[name[_text_or_file], parameter[name[path]]] <ast.Try object at 0x7da1b21f1f90> <ast.Try object at 0x7da1b21f16c0> with call[name[salt].utils.files.fopen, parameter[name[path], constant[w]]] begin[:] if <ast.BoolOp object at 0x7da207f9a500> begin[:] call[name[_fp].write, parameter[call[name[salt].utils.stringutils.to_str, parameter[name[_private_key]]]]] call[name[_fp].write, parameter[call[name[salt].utils.stringutils.to_str, parameter[name[text]]]]] if <ast.BoolOp object at 0x7da207f987c0> begin[:] call[name[_fp].write, parameter[call[name[salt].utils.stringutils.to_str, parameter[name[_dhparams]]]]] return[call[constant[PEM written to {0}].format, parameter[name[path]]]]
keyword[def] identifier[write_pem] ( identifier[text] , identifier[path] , identifier[overwrite] = keyword[True] , identifier[pem_type] = keyword[None] ): literal[string] keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[set_umask] ( literal[int] ): identifier[text] = identifier[get_pem_entry] ( identifier[text] , identifier[pem_type] = identifier[pem_type] ) identifier[_dhparams] = literal[string] identifier[_private_key] = literal[string] keyword[if] identifier[pem_type] keyword[and] identifier[pem_type] == literal[string] keyword[and] identifier[os] . identifier[path] . identifier[isfile] ( identifier[path] ) keyword[and] keyword[not] identifier[overwrite] : identifier[_filecontents] = identifier[_text_or_file] ( identifier[path] ) keyword[try] : identifier[_dhparams] = identifier[get_pem_entry] ( identifier[_filecontents] , literal[string] ) keyword[except] identifier[salt] . identifier[exceptions] . identifier[SaltInvocationError] keyword[as] identifier[err] : identifier[log] . identifier[debug] ( literal[string] , identifier[err] ) identifier[log] . identifier[trace] ( identifier[err] , identifier[exc_info] = identifier[err] ) keyword[try] : identifier[_private_key] = identifier[get_pem_entry] ( identifier[_filecontents] , literal[string] ) keyword[except] identifier[salt] . identifier[exceptions] . identifier[SaltInvocationError] keyword[as] identifier[err] : identifier[log] . identifier[debug] ( literal[string] , identifier[err] ) identifier[log] . identifier[trace] ( identifier[err] , identifier[exc_info] = identifier[err] ) keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[path] , literal[string] ) keyword[as] identifier[_fp] : keyword[if] identifier[pem_type] keyword[and] identifier[pem_type] == literal[string] keyword[and] identifier[_private_key] : identifier[_fp] . identifier[write] ( identifier[salt] . identifier[utils] . identifier[stringutils] . 
identifier[to_str] ( identifier[_private_key] )) identifier[_fp] . identifier[write] ( identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_str] ( identifier[text] )) keyword[if] identifier[pem_type] keyword[and] identifier[pem_type] == literal[string] keyword[and] identifier[_dhparams] : identifier[_fp] . identifier[write] ( identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_str] ( identifier[_dhparams] )) keyword[return] literal[string] . identifier[format] ( identifier[path] )
def write_pem(text, path, overwrite=True, pem_type=None): """ Writes out a PEM string fixing any formatting or whitespace issues before writing. text: PEM string input to be written out. path: Path of the file to write the pem out to. overwrite: If True(default), write_pem will overwrite the entire pem file. Set False to preserve existing private keys and dh params that may exist in the pem file. pem_type: The PEM type to be saved, for example ``CERTIFICATE`` or ``PUBLIC KEY``. Adding this will allow the function to take input that may contain multiple pem types. CLI Example: .. code-block:: bash salt '*' x509.write_pem "-----BEGIN CERTIFICATE-----MIIGMzCCBBugA..." path=/etc/pki/mycert.crt """ with salt.utils.files.set_umask(63): text = get_pem_entry(text, pem_type=pem_type) _dhparams = '' _private_key = '' if pem_type and pem_type == 'CERTIFICATE' and os.path.isfile(path) and (not overwrite): _filecontents = _text_or_file(path) try: _dhparams = get_pem_entry(_filecontents, 'DH PARAMETERS') # depends on [control=['try'], data=[]] except salt.exceptions.SaltInvocationError as err: log.debug('Error when getting DH PARAMETERS: %s', err) log.trace(err, exc_info=err) # depends on [control=['except'], data=['err']] try: _private_key = get_pem_entry(_filecontents, '(?:RSA )?PRIVATE KEY') # depends on [control=['try'], data=[]] except salt.exceptions.SaltInvocationError as err: log.debug('Error when getting PRIVATE KEY: %s', err) log.trace(err, exc_info=err) # depends on [control=['except'], data=['err']] # depends on [control=['if'], data=[]] with salt.utils.files.fopen(path, 'w') as _fp: if pem_type and pem_type == 'CERTIFICATE' and _private_key: _fp.write(salt.utils.stringutils.to_str(_private_key)) # depends on [control=['if'], data=[]] _fp.write(salt.utils.stringutils.to_str(text)) if pem_type and pem_type == 'CERTIFICATE' and _dhparams: _fp.write(salt.utils.stringutils.to_str(_dhparams)) # depends on [control=['if'], data=[]] # depends on [control=['with'], 
data=['_fp']] # depends on [control=['with'], data=[]] return 'PEM written to {0}'.format(path)
def squared_error(y, y_pred): """Calculates the sum of the squared differences between target and prediction. Parameters: ----------- y : vector, shape (n_samples,) The target values. y_pred : vector, shape (n_samples,) The predicted values. Returns: -------- error : float number, the sum of the squared differences between target and prediction """ y, y_pred = convert_assert(y, y_pred) return np.sum((y - y_pred) ** 2)
def function[squared_error, parameter[y, y_pred]]: constant[Calculates the sum of the squared differences between target and prediction. Parameters: ----------- y : vector, shape (n_samples,) The target values. y_pred : vector, shape (n_samples,) The predicted values. Returns: -------- error : float number, the sum of the squared differences between target and prediction ] <ast.Tuple object at 0x7da1b1f7c130> assign[=] call[name[convert_assert], parameter[name[y], name[y_pred]]] return[call[name[np].sum, parameter[binary_operation[binary_operation[name[y] - name[y_pred]] ** constant[2]]]]]
keyword[def] identifier[squared_error] ( identifier[y] , identifier[y_pred] ): literal[string] identifier[y] , identifier[y_pred] = identifier[convert_assert] ( identifier[y] , identifier[y_pred] ) keyword[return] identifier[np] . identifier[sum] (( identifier[y] - identifier[y_pred] )** literal[int] )
def squared_error(y, y_pred): """Calculates the sum of the squared differences between target and prediction. Parameters: ----------- y : vector, shape (n_samples,) The target values. y_pred : vector, shape (n_samples,) The predicted values. Returns: -------- error : float number, the sum of the squared differences between target and prediction """ (y, y_pred) = convert_assert(y, y_pred) return np.sum((y - y_pred) ** 2)
def make_tarfile(output_filename, source_dir): ''' Tar a directory ''' with tarfile.open(output_filename, "w:gz") as tar: tar.add(source_dir, arcname=os.path.basename(source_dir))
def function[make_tarfile, parameter[output_filename, source_dir]]: constant[ Tar a directory ] with call[name[tarfile].open, parameter[name[output_filename], constant[w:gz]]] begin[:] call[name[tar].add, parameter[name[source_dir]]]
keyword[def] identifier[make_tarfile] ( identifier[output_filename] , identifier[source_dir] ): literal[string] keyword[with] identifier[tarfile] . identifier[open] ( identifier[output_filename] , literal[string] ) keyword[as] identifier[tar] : identifier[tar] . identifier[add] ( identifier[source_dir] , identifier[arcname] = identifier[os] . identifier[path] . identifier[basename] ( identifier[source_dir] ))
def make_tarfile(output_filename, source_dir): """ Tar a directory """ with tarfile.open(output_filename, 'w:gz') as tar: tar.add(source_dir, arcname=os.path.basename(source_dir)) # depends on [control=['with'], data=['tar']]
def compute_collection_measures(self, no_singletons=False): """ Computes summaries of measures using the discovered collections. :param no_singletons: if True, omits collections of length 1 from all measures and includes "no_singletons_" in the measure name. Adds the following measures to the self.measures dictionary, prefaced by COLLECTION_(similarity_measure)_(collection_type)_: - count: number of collections - size_mean: mean size of collections - size_max: size of largest collection - switch_count: number of changes between clusters """ prefix = "COLLECTION_" + self.current_similarity_measure + "_" + self.current_collection_type + "_" if no_singletons: prefix += "no_singletons_" if no_singletons: collection_sizes_temp = [x for x in self.collection_sizes if x != 1] else: #include singletons collection_sizes_temp = self.collection_sizes self.measures[prefix + 'count'] = len(collection_sizes_temp) self.measures[prefix + 'size_mean'] = get_mean(collection_sizes_temp) \ if self.measures[prefix + 'count'] > 0 else 0 self.measures[prefix + 'size_max'] = max(collection_sizes_temp) \ if len(collection_sizes_temp) > 0 else 0 self.measures[prefix + 'switch_count'] = self.measures[prefix + 'count'] - 1
def function[compute_collection_measures, parameter[self, no_singletons]]: constant[ Computes summaries of measures using the discovered collections. :param no_singletons: if True, omits collections of length 1 from all measures and includes "no_singletons_" in the measure name. Adds the following measures to the self.measures dictionary, prefaced by COLLECTION_(similarity_measure)_(collection_type)_: - count: number of collections - size_mean: mean size of collections - size_max: size of largest collection - switch_count: number of changes between clusters ] variable[prefix] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[constant[COLLECTION_] + name[self].current_similarity_measure] + constant[_]] + name[self].current_collection_type] + constant[_]] if name[no_singletons] begin[:] <ast.AugAssign object at 0x7da18f58dd50> if name[no_singletons] begin[:] variable[collection_sizes_temp] assign[=] <ast.ListComp object at 0x7da18f58e050> call[name[self].measures][binary_operation[name[prefix] + constant[count]]] assign[=] call[name[len], parameter[name[collection_sizes_temp]]] call[name[self].measures][binary_operation[name[prefix] + constant[size_mean]]] assign[=] <ast.IfExp object at 0x7da18f58d960> call[name[self].measures][binary_operation[name[prefix] + constant[size_max]]] assign[=] <ast.IfExp object at 0x7da18f58d990> call[name[self].measures][binary_operation[name[prefix] + constant[switch_count]]] assign[=] binary_operation[call[name[self].measures][binary_operation[name[prefix] + constant[count]]] - constant[1]]
keyword[def] identifier[compute_collection_measures] ( identifier[self] , identifier[no_singletons] = keyword[False] ): literal[string] identifier[prefix] = literal[string] + identifier[self] . identifier[current_similarity_measure] + literal[string] + identifier[self] . identifier[current_collection_type] + literal[string] keyword[if] identifier[no_singletons] : identifier[prefix] += literal[string] keyword[if] identifier[no_singletons] : identifier[collection_sizes_temp] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[self] . identifier[collection_sizes] keyword[if] identifier[x] != literal[int] ] keyword[else] : identifier[collection_sizes_temp] = identifier[self] . identifier[collection_sizes] identifier[self] . identifier[measures] [ identifier[prefix] + literal[string] ]= identifier[len] ( identifier[collection_sizes_temp] ) identifier[self] . identifier[measures] [ identifier[prefix] + literal[string] ]= identifier[get_mean] ( identifier[collection_sizes_temp] ) keyword[if] identifier[self] . identifier[measures] [ identifier[prefix] + literal[string] ]> literal[int] keyword[else] literal[int] identifier[self] . identifier[measures] [ identifier[prefix] + literal[string] ]= identifier[max] ( identifier[collection_sizes_temp] ) keyword[if] identifier[len] ( identifier[collection_sizes_temp] )> literal[int] keyword[else] literal[int] identifier[self] . identifier[measures] [ identifier[prefix] + literal[string] ]= identifier[self] . identifier[measures] [ identifier[prefix] + literal[string] ]- literal[int]
def compute_collection_measures(self, no_singletons=False): """ Computes summaries of measures using the discovered collections. :param no_singletons: if True, omits collections of length 1 from all measures and includes "no_singletons_" in the measure name. Adds the following measures to the self.measures dictionary, prefaced by COLLECTION_(similarity_measure)_(collection_type)_: - count: number of collections - size_mean: mean size of collections - size_max: size of largest collection - switch_count: number of changes between clusters """ prefix = 'COLLECTION_' + self.current_similarity_measure + '_' + self.current_collection_type + '_' if no_singletons: prefix += 'no_singletons_' # depends on [control=['if'], data=[]] if no_singletons: collection_sizes_temp = [x for x in self.collection_sizes if x != 1] # depends on [control=['if'], data=[]] else: #include singletons collection_sizes_temp = self.collection_sizes self.measures[prefix + 'count'] = len(collection_sizes_temp) self.measures[prefix + 'size_mean'] = get_mean(collection_sizes_temp) if self.measures[prefix + 'count'] > 0 else 0 self.measures[prefix + 'size_max'] = max(collection_sizes_temp) if len(collection_sizes_temp) > 0 else 0 self.measures[prefix + 'switch_count'] = self.measures[prefix + 'count'] - 1
async def start(self, remoteCaps, remotePort): """ Start the transport. """ if not self.__started: self.__started = True self.__state = 'connecting' self._remote_port = remotePort # configure logging if logger.isEnabledFor(logging.DEBUG): prefix = self.is_server and 'server ' or 'client ' self.__log_debug = lambda msg, *args: logger.debug(prefix + msg, *args) # initialise local channel ID counter # one side should be using even IDs, the other odd IDs if self.is_server: self._data_channel_id = 0 else: self._data_channel_id = 1 self.__transport._register_data_receiver(self) if not self.is_server: await self._init()
<ast.AsyncFunctionDef object at 0x7da2054a4100>
keyword[async] keyword[def] identifier[start] ( identifier[self] , identifier[remoteCaps] , identifier[remotePort] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[__started] : identifier[self] . identifier[__started] = keyword[True] identifier[self] . identifier[__state] = literal[string] identifier[self] . identifier[_remote_port] = identifier[remotePort] keyword[if] identifier[logger] . identifier[isEnabledFor] ( identifier[logging] . identifier[DEBUG] ): identifier[prefix] = identifier[self] . identifier[is_server] keyword[and] literal[string] keyword[or] literal[string] identifier[self] . identifier[__log_debug] = keyword[lambda] identifier[msg] ,* identifier[args] : identifier[logger] . identifier[debug] ( identifier[prefix] + identifier[msg] ,* identifier[args] ) keyword[if] identifier[self] . identifier[is_server] : identifier[self] . identifier[_data_channel_id] = literal[int] keyword[else] : identifier[self] . identifier[_data_channel_id] = literal[int] identifier[self] . identifier[__transport] . identifier[_register_data_receiver] ( identifier[self] ) keyword[if] keyword[not] identifier[self] . identifier[is_server] : keyword[await] identifier[self] . identifier[_init] ()
async def start(self, remoteCaps, remotePort): """ Start the transport. """ if not self.__started: self.__started = True self.__state = 'connecting' self._remote_port = remotePort # configure logging if logger.isEnabledFor(logging.DEBUG): prefix = self.is_server and 'server ' or 'client ' self.__log_debug = lambda msg, *args: logger.debug(prefix + msg, *args) # depends on [control=['if'], data=[]] # initialise local channel ID counter # one side should be using even IDs, the other odd IDs if self.is_server: self._data_channel_id = 0 # depends on [control=['if'], data=[]] else: self._data_channel_id = 1 self.__transport._register_data_receiver(self) if not self.is_server: await self._init() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
def satisfies(self, requirement, allow_prereleases=None): """Determine whether this package matches the requirement. :param requirement: The requirement to compare this Package against :type requirement: string or :class:`pkg_resources.Requirement` :param Optional[bool] allow_prereleases: Whether to allow prereleases to satisfy the `requirement`. :returns: True if the package matches the requirement, otherwise False """ requirement = maybe_requirement(requirement) link_name = safe_name(self.name).lower() if link_name != requirement.key: return False # NB: If we upgrade to setuptools>=34 the SpecifierSet used here (requirement.specifier) will # come from a non-vendored `packaging` package and pex's bootstrap code in `PEXBuilder` will # need an update. return requirement.specifier.contains(self.raw_version, prereleases=allow_prereleases)
def function[satisfies, parameter[self, requirement, allow_prereleases]]: constant[Determine whether this package matches the requirement. :param requirement: The requirement to compare this Package against :type requirement: string or :class:`pkg_resources.Requirement` :param Optional[bool] allow_prereleases: Whether to allow prereleases to satisfy the `requirement`. :returns: True if the package matches the requirement, otherwise False ] variable[requirement] assign[=] call[name[maybe_requirement], parameter[name[requirement]]] variable[link_name] assign[=] call[call[name[safe_name], parameter[name[self].name]].lower, parameter[]] if compare[name[link_name] not_equal[!=] name[requirement].key] begin[:] return[constant[False]] return[call[name[requirement].specifier.contains, parameter[name[self].raw_version]]]
keyword[def] identifier[satisfies] ( identifier[self] , identifier[requirement] , identifier[allow_prereleases] = keyword[None] ): literal[string] identifier[requirement] = identifier[maybe_requirement] ( identifier[requirement] ) identifier[link_name] = identifier[safe_name] ( identifier[self] . identifier[name] ). identifier[lower] () keyword[if] identifier[link_name] != identifier[requirement] . identifier[key] : keyword[return] keyword[False] keyword[return] identifier[requirement] . identifier[specifier] . identifier[contains] ( identifier[self] . identifier[raw_version] , identifier[prereleases] = identifier[allow_prereleases] )
def satisfies(self, requirement, allow_prereleases=None): """Determine whether this package matches the requirement. :param requirement: The requirement to compare this Package against :type requirement: string or :class:`pkg_resources.Requirement` :param Optional[bool] allow_prereleases: Whether to allow prereleases to satisfy the `requirement`. :returns: True if the package matches the requirement, otherwise False """ requirement = maybe_requirement(requirement) link_name = safe_name(self.name).lower() if link_name != requirement.key: return False # depends on [control=['if'], data=[]] # NB: If we upgrade to setuptools>=34 the SpecifierSet used here (requirement.specifier) will # come from a non-vendored `packaging` package and pex's bootstrap code in `PEXBuilder` will # need an update. return requirement.specifier.contains(self.raw_version, prereleases=allow_prereleases)
def get_hosts(self, path, start, length): """ Get hostnames where a particular block (determined by pos and blocksize) of a file is stored. Due to replication, a single block could be present on multiple hosts. :type path: str :param path: the path of the file :type start: int :param start: the start of the block :type length: int :param length: the length of the block :rtype: list :return: list of hosts that store the block """ _complain_ifclosed(self.closed) return self.fs.get_hosts(path, start, length)
def function[get_hosts, parameter[self, path, start, length]]: constant[ Get hostnames where a particular block (determined by pos and blocksize) of a file is stored. Due to replication, a single block could be present on multiple hosts. :type path: str :param path: the path of the file :type start: int :param start: the start of the block :type length: int :param length: the length of the block :rtype: list :return: list of hosts that store the block ] call[name[_complain_ifclosed], parameter[name[self].closed]] return[call[name[self].fs.get_hosts, parameter[name[path], name[start], name[length]]]]
keyword[def] identifier[get_hosts] ( identifier[self] , identifier[path] , identifier[start] , identifier[length] ): literal[string] identifier[_complain_ifclosed] ( identifier[self] . identifier[closed] ) keyword[return] identifier[self] . identifier[fs] . identifier[get_hosts] ( identifier[path] , identifier[start] , identifier[length] )
def get_hosts(self, path, start, length): """ Get hostnames where a particular block (determined by pos and blocksize) of a file is stored. Due to replication, a single block could be present on multiple hosts. :type path: str :param path: the path of the file :type start: int :param start: the start of the block :type length: int :param length: the length of the block :rtype: list :return: list of hosts that store the block """ _complain_ifclosed(self.closed) return self.fs.get_hosts(path, start, length)
def print(*a):
    """
    Drop-in replacement for the built-in ``print`` that returns what it
    was given instead of ``None``.

    :param a: the values to print
    :return: the single value when exactly one argument was given,
        otherwise the tuple of all arguments; implicitly ``None`` when
        the first print attempt failed but the retry succeeded
    """
    try:
        _print(*a)
        return a[0] if len(a) == 1 else a
    except Exception:
        # A bare ``except:`` would also swallow SystemExit and
        # KeyboardInterrupt; catch only Exception, retry the print once,
        # and let a second failure propagate to the caller.
        _print(*a)
def function[print, parameter[]]: constant[ print just one that returns what you give it instead of None ] <ast.Try object at 0x7da1b2879f90>
keyword[def] identifier[print] (* identifier[a] ): literal[string] keyword[try] : identifier[_print] (* identifier[a] ) keyword[return] identifier[a] [ literal[int] ] keyword[if] identifier[len] ( identifier[a] )== literal[int] keyword[else] identifier[a] keyword[except] : identifier[_print] (* identifier[a] )
def print(*a): """ print just one that returns what you give it instead of None """ try: _print(*a) return a[0] if len(a) == 1 else a # depends on [control=['try'], data=[]] except: _print(*a) # depends on [control=['except'], data=[]]
def _set_results_dir(self):
    """Create results directory if not exists.

    The directory is namespaced under ``self.results_dir`` by cloud
    provider and either the running instance id or the instance IP.
    Also builds timestamped ``.log`` / ``.results`` file paths and
    attaches a DEBUG-level file handler to the instance logger.
    """
    # Prefer the instance id for the path when one exists (assumes an
    # id is more stable than an IP -- NOTE(review): confirm with callers);
    # fall back to the instance IP otherwise.
    if self.running_instance_id:
        self.results_dir = os.path.join(
            self.results_dir,
            self.cloud,
            self.image_id,
            self.running_instance_id
        )
    else:
        self.results_dir = os.path.join(
            self.results_dir,
            self.cloud,
            self.instance_ip
        )

    try:
        os.makedirs(self.results_dir)
    except OSError as error:
        # Only treat the failure as fatal when the directory truly does
        # not exist; an "already exists" OSError is fine.
        if not os.path.isdir(self.results_dir):
            raise IpaCloudException(
                'Unable to create ipa results directory: %s' % error
            )

    # One shared timestamp so the .log and .results files pair up.
    self.time_stamp = datetime.now().strftime('%Y%m%d%H%M%S')
    self.log_file = ''.join(
        [self.results_dir, os.sep, self.time_stamp, '.log']
    )
    self.logger.debug('Created log file %s' % self.log_file)

    self.results_file = ''.join(
        [self.results_dir, os.sep, self.time_stamp, '.results']
    )
    self.logger.debug('Created results file %s' % self.results_file)

    # Add log file handler so subsequent log output is captured in the
    # per-run log file as well.
    file_handler = logging.FileHandler(self.log_file)
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(logging.Formatter('\n%(message)s\n'))
    self.logger.addHandler(file_handler)
def function[_set_results_dir, parameter[self]]: constant[Create results directory if not exists.] if name[self].running_instance_id begin[:] name[self].results_dir assign[=] call[name[os].path.join, parameter[name[self].results_dir, name[self].cloud, name[self].image_id, name[self].running_instance_id]] <ast.Try object at 0x7da1b1a126e0> name[self].time_stamp assign[=] call[call[name[datetime].now, parameter[]].strftime, parameter[constant[%Y%m%d%H%M%S]]] name[self].log_file assign[=] call[constant[].join, parameter[list[[<ast.Attribute object at 0x7da1b1a10ca0>, <ast.Attribute object at 0x7da1b1a13160>, <ast.Attribute object at 0x7da1b1a13130>, <ast.Constant object at 0x7da1b1a12020>]]]] call[name[self].logger.debug, parameter[binary_operation[constant[Created log file %s] <ast.Mod object at 0x7da2590d6920> name[self].log_file]]] name[self].results_file assign[=] call[constant[].join, parameter[list[[<ast.Attribute object at 0x7da1b1a10430>, <ast.Attribute object at 0x7da1b1a13940>, <ast.Attribute object at 0x7da1b1a103d0>, <ast.Constant object at 0x7da1b1a10190>]]]] call[name[self].logger.debug, parameter[binary_operation[constant[Created results file %s] <ast.Mod object at 0x7da2590d6920> name[self].results_file]]] variable[file_handler] assign[=] call[name[logging].FileHandler, parameter[name[self].log_file]] call[name[file_handler].setLevel, parameter[name[logging].DEBUG]] call[name[file_handler].setFormatter, parameter[call[name[logging].Formatter, parameter[constant[ %(message)s ]]]]] call[name[self].logger.addHandler, parameter[name[file_handler]]]
keyword[def] identifier[_set_results_dir] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[running_instance_id] : identifier[self] . identifier[results_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[results_dir] , identifier[self] . identifier[cloud] , identifier[self] . identifier[image_id] , identifier[self] . identifier[running_instance_id] ) keyword[else] : identifier[self] . identifier[results_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[results_dir] , identifier[self] . identifier[cloud] , identifier[self] . identifier[instance_ip] ) keyword[try] : identifier[os] . identifier[makedirs] ( identifier[self] . identifier[results_dir] ) keyword[except] identifier[OSError] keyword[as] identifier[error] : keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[self] . identifier[results_dir] ): keyword[raise] identifier[IpaCloudException] ( literal[string] % identifier[error] ) identifier[self] . identifier[time_stamp] = identifier[datetime] . identifier[now] (). identifier[strftime] ( literal[string] ) identifier[self] . identifier[log_file] = literal[string] . identifier[join] ( [ identifier[self] . identifier[results_dir] , identifier[os] . identifier[sep] , identifier[self] . identifier[time_stamp] , literal[string] ] ) identifier[self] . identifier[logger] . identifier[debug] ( literal[string] % identifier[self] . identifier[log_file] ) identifier[self] . identifier[results_file] = literal[string] . identifier[join] ( [ identifier[self] . identifier[results_dir] , identifier[os] . identifier[sep] , identifier[self] . identifier[time_stamp] , literal[string] ] ) identifier[self] . identifier[logger] . identifier[debug] ( literal[string] % identifier[self] . identifier[results_file] ) identifier[file_handler] = identifier[logging] . identifier[FileHandler] ( identifier[self] . 
identifier[log_file] ) identifier[file_handler] . identifier[setLevel] ( identifier[logging] . identifier[DEBUG] ) identifier[file_handler] . identifier[setFormatter] ( identifier[logging] . identifier[Formatter] ( literal[string] )) identifier[self] . identifier[logger] . identifier[addHandler] ( identifier[file_handler] )
def _set_results_dir(self): """Create results directory if not exists.""" if self.running_instance_id: self.results_dir = os.path.join(self.results_dir, self.cloud, self.image_id, self.running_instance_id) # depends on [control=['if'], data=[]] else: self.results_dir = os.path.join(self.results_dir, self.cloud, self.instance_ip) try: os.makedirs(self.results_dir) # depends on [control=['try'], data=[]] except OSError as error: if not os.path.isdir(self.results_dir): raise IpaCloudException('Unable to create ipa results directory: %s' % error) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['error']] self.time_stamp = datetime.now().strftime('%Y%m%d%H%M%S') self.log_file = ''.join([self.results_dir, os.sep, self.time_stamp, '.log']) self.logger.debug('Created log file %s' % self.log_file) self.results_file = ''.join([self.results_dir, os.sep, self.time_stamp, '.results']) self.logger.debug('Created results file %s' % self.results_file) # Add log file handler file_handler = logging.FileHandler(self.log_file) file_handler.setLevel(logging.DEBUG) file_handler.setFormatter(logging.Formatter('\n%(message)s\n')) self.logger.addHandler(file_handler)
def local_constraints(self):
    """
    Return the constraints defined in the local AVM.

    Each constraint is a ``(feature-path, value)`` pair.  Values that do
    not behave like AVMs (no ``supertypes`` attribute, etc.) are recorded
    as-is.
    """
    constraints = []
    for feature, value in self._avm.items():
        try:
            if value.supertypes and not value._avm:
                # A typed value with no sub-AVM is itself the constraint.
                constraints.append((feature, value))
            else:
                # Otherwise flatten its sub-features into dotted paths.
                constraints.extend(
                    ('{}.{}'.format(feature, subfeature), subvalue)
                    for subfeature, subvalue in value.features()
                )
        except AttributeError:
            # Not an AVM-like object; keep the raw value.
            constraints.append((feature, value))
    return constraints
def function[local_constraints, parameter[self]]: constant[ Return the constraints defined in the local AVM. ] variable[cs] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da1b06cbcd0>, <ast.Name object at 0x7da1b06ca770>]]] in starred[call[name[self]._avm.items, parameter[]]] begin[:] <ast.Try object at 0x7da1b06c93f0> return[name[cs]]
keyword[def] identifier[local_constraints] ( identifier[self] ): literal[string] identifier[cs] =[] keyword[for] identifier[feat] , identifier[val] keyword[in] identifier[self] . identifier[_avm] . identifier[items] (): keyword[try] : keyword[if] identifier[val] . identifier[supertypes] keyword[and] keyword[not] identifier[val] . identifier[_avm] : identifier[cs] . identifier[append] (( identifier[feat] , identifier[val] )) keyword[else] : keyword[for] identifier[subfeat] , identifier[subval] keyword[in] identifier[val] . identifier[features] (): identifier[cs] . identifier[append] (( literal[string] . identifier[format] ( identifier[feat] , identifier[subfeat] ), identifier[subval] )) keyword[except] identifier[AttributeError] : identifier[cs] . identifier[append] (( identifier[feat] , identifier[val] )) keyword[return] identifier[cs]
def local_constraints(self): """ Return the constraints defined in the local AVM. """ cs = [] for (feat, val) in self._avm.items(): try: if val.supertypes and (not val._avm): cs.append((feat, val)) # depends on [control=['if'], data=[]] else: for (subfeat, subval) in val.features(): cs.append(('{}.{}'.format(feat, subfeat), subval)) # depends on [control=['for'], data=[]] # depends on [control=['try'], data=[]] except AttributeError: cs.append((feat, val)) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]] return cs
def expand_config(d, dirs): """ Expand configuration XDG variables, environmental variables, and tildes. Parameters ---------- d : dict config information dirs : appdirs.AppDirs XDG application mapping Notes ----- *Environmentable variables* are expanded via :py:func:`os.path.expandvars`. So ``${PWD}`` would be replaced by the current PWD in the shell, ``${USER}`` would be the user running the app. *XDG variables* are expanded via :py:meth:`str.format`. These do not have a dollar sign. They are: - ``{user_cache_dir}`` - ``{user_config_dir}`` - ``{user_data_dir}`` - ``{user_log_dir}`` - ``{site_config_dir}`` - ``{site_data_dir}`` See Also -------- os.path.expanduser, os.path.expandvars : Standard library functions for expanding variables. Same concept, used inside. """ context = { 'user_cache_dir': dirs.user_cache_dir, 'user_config_dir': dirs.user_config_dir, 'user_data_dir': dirs.user_data_dir, 'user_log_dir': dirs.user_log_dir, 'site_config_dir': dirs.site_config_dir, 'site_data_dir': dirs.site_data_dir, } for k, v in d.items(): if isinstance(v, dict): expand_config(v, dirs) if isinstance(v, string_types): d[k] = os.path.expanduser(os.path.expandvars(d[k])) d[k] = d[k].format(**context)
def function[expand_config, parameter[d, dirs]]: constant[ Expand configuration XDG variables, environmental variables, and tildes. Parameters ---------- d : dict config information dirs : appdirs.AppDirs XDG application mapping Notes ----- *Environmentable variables* are expanded via :py:func:`os.path.expandvars`. So ``${PWD}`` would be replaced by the current PWD in the shell, ``${USER}`` would be the user running the app. *XDG variables* are expanded via :py:meth:`str.format`. These do not have a dollar sign. They are: - ``{user_cache_dir}`` - ``{user_config_dir}`` - ``{user_data_dir}`` - ``{user_log_dir}`` - ``{site_config_dir}`` - ``{site_data_dir}`` See Also -------- os.path.expanduser, os.path.expandvars : Standard library functions for expanding variables. Same concept, used inside. ] variable[context] assign[=] dictionary[[<ast.Constant object at 0x7da1b19b5540>, <ast.Constant object at 0x7da1b19b6080>, <ast.Constant object at 0x7da1b19b6e00>, <ast.Constant object at 0x7da1b19b4bb0>, <ast.Constant object at 0x7da1b19b7d60>, <ast.Constant object at 0x7da1b19b40d0>], [<ast.Attribute object at 0x7da1b19b6020>, <ast.Attribute object at 0x7da1b19b5bd0>, <ast.Attribute object at 0x7da1b19b5840>, <ast.Attribute object at 0x7da1b19b74f0>, <ast.Attribute object at 0x7da1b19b5030>, <ast.Attribute object at 0x7da1b19b70d0>]] for taget[tuple[[<ast.Name object at 0x7da1b19b5a80>, <ast.Name object at 0x7da1b19b7ee0>]]] in starred[call[name[d].items, parameter[]]] begin[:] if call[name[isinstance], parameter[name[v], name[dict]]] begin[:] call[name[expand_config], parameter[name[v], name[dirs]]] if call[name[isinstance], parameter[name[v], name[string_types]]] begin[:] call[name[d]][name[k]] assign[=] call[name[os].path.expanduser, parameter[call[name[os].path.expandvars, parameter[call[name[d]][name[k]]]]]] call[name[d]][name[k]] assign[=] call[call[name[d]][name[k]].format, parameter[]]
keyword[def] identifier[expand_config] ( identifier[d] , identifier[dirs] ): literal[string] identifier[context] ={ literal[string] : identifier[dirs] . identifier[user_cache_dir] , literal[string] : identifier[dirs] . identifier[user_config_dir] , literal[string] : identifier[dirs] . identifier[user_data_dir] , literal[string] : identifier[dirs] . identifier[user_log_dir] , literal[string] : identifier[dirs] . identifier[site_config_dir] , literal[string] : identifier[dirs] . identifier[site_data_dir] , } keyword[for] identifier[k] , identifier[v] keyword[in] identifier[d] . identifier[items] (): keyword[if] identifier[isinstance] ( identifier[v] , identifier[dict] ): identifier[expand_config] ( identifier[v] , identifier[dirs] ) keyword[if] identifier[isinstance] ( identifier[v] , identifier[string_types] ): identifier[d] [ identifier[k] ]= identifier[os] . identifier[path] . identifier[expanduser] ( identifier[os] . identifier[path] . identifier[expandvars] ( identifier[d] [ identifier[k] ])) identifier[d] [ identifier[k] ]= identifier[d] [ identifier[k] ]. identifier[format] (** identifier[context] )
def expand_config(d, dirs): """ Expand configuration XDG variables, environmental variables, and tildes. Parameters ---------- d : dict config information dirs : appdirs.AppDirs XDG application mapping Notes ----- *Environmentable variables* are expanded via :py:func:`os.path.expandvars`. So ``${PWD}`` would be replaced by the current PWD in the shell, ``${USER}`` would be the user running the app. *XDG variables* are expanded via :py:meth:`str.format`. These do not have a dollar sign. They are: - ``{user_cache_dir}`` - ``{user_config_dir}`` - ``{user_data_dir}`` - ``{user_log_dir}`` - ``{site_config_dir}`` - ``{site_data_dir}`` See Also -------- os.path.expanduser, os.path.expandvars : Standard library functions for expanding variables. Same concept, used inside. """ context = {'user_cache_dir': dirs.user_cache_dir, 'user_config_dir': dirs.user_config_dir, 'user_data_dir': dirs.user_data_dir, 'user_log_dir': dirs.user_log_dir, 'site_config_dir': dirs.site_config_dir, 'site_data_dir': dirs.site_data_dir} for (k, v) in d.items(): if isinstance(v, dict): expand_config(v, dirs) # depends on [control=['if'], data=[]] if isinstance(v, string_types): d[k] = os.path.expanduser(os.path.expandvars(d[k])) d[k] = d[k].format(**context) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
def _set_switchport(self, v, load=False):
  """
  Setter method for switchport, mapped from YANG variable /interface/ethernet/switchport (container)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_switchport is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_switchport() directly.

  YANG Description: The L2 switching characteristics of an interface.
  """
  # NOTE(review): pyangbind-style generated setter; edits here are
  # normally overwritten on regeneration from the YANG model.
  if hasattr(v, "_utype"):
    # Unwrap YANG union values to their concrete underlying type first.
    v = v._utype(v)
  try:
    # Wrap the value so type/constraint violations surface immediately.
    t = YANGDynClass(v,base=switchport.switchport, is_container='container', presence=False, yang_name="switchport", rest_name="switchport", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the switching characteristics of the Layer2 \ninterface', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_MODE_SWITCHPORT_CONFIG', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
  except (TypeError, ValueError):
    # Re-raise as ValueError with a structured payload that describes
    # the expected YANG container type for diagnostics.
    raise ValueError({
      'error-string': """switchport must be of a type compatible with container""",
      'defined-type': "container",
      'generated-type': """YANGDynClass(base=switchport.switchport, is_container='container', presence=False, yang_name="switchport", rest_name="switchport", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the switching characteristics of the Layer2 \ninterface', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_MODE_SWITCHPORT_CONFIG', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
    })

  self.__switchport = t
  if hasattr(self, '_set'):
    self._set()
def function[_set_switchport, parameter[self, v, load]]: constant[ Setter method for switchport, mapped from YANG variable /interface/ethernet/switchport (container) If this variable is read-only (config: false) in the source YANG file, then _set_switchport is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_switchport() directly. YANG Description: The L2 switching characteristics of an interface. ] if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:] variable[v] assign[=] call[name[v]._utype, parameter[name[v]]] <ast.Try object at 0x7da18dc9b070> name[self].__switchport assign[=] name[t] if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:] call[name[self]._set, parameter[]]
keyword[def] identifier[_set_switchport] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ): literal[string] keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ): identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] ) keyword[try] : identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[switchport] . identifier[switchport] , identifier[is_container] = literal[string] , identifier[presence] = keyword[False] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : keyword[None] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] ) keyword[except] ( identifier[TypeError] , identifier[ValueError] ): keyword[raise] identifier[ValueError] ({ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , }) identifier[self] . identifier[__switchport] = identifier[t] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ): identifier[self] . identifier[_set] ()
def _set_switchport(self, v, load=False): """ Setter method for switchport, mapped from YANG variable /interface/ethernet/switchport (container) If this variable is read-only (config: false) in the source YANG file, then _set_switchport is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_switchport() directly. YANG Description: The L2 switching characteristics of an interface. """ if hasattr(v, '_utype'): v = v._utype(v) # depends on [control=['if'], data=[]] try: t = YANGDynClass(v, base=switchport.switchport, is_container='container', presence=False, yang_name='switchport', rest_name='switchport', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the switching characteristics of the Layer2 \ninterface', u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_MODE_SWITCHPORT_CONFIG', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True) # depends on [control=['try'], data=[]] except (TypeError, ValueError): raise ValueError({'error-string': 'switchport must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=switchport.switchport, is_container=\'container\', presence=False, yang_name="switchport", rest_name="switchport", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Set the switching characteristics of the Layer2 \ninterface\', u\'sort-priority\': u\'RUNNCFG_INTERFACE_LEVEL_MODE_SWITCHPORT_CONFIG\', u\'cli-incomplete-no\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-interface\', defining_module=\'brocade-interface\', yang_type=\'container\', is_config=True)'}) # depends on [control=['except'], data=[]] self.__switchport = t if hasattr(self, '_set'): self._set() # depends 
on [control=['if'], data=[]]
def course_menu(course, template_helper):
    """ Displays the link to the scoreboards on the course page, if the plugin is activated for this course """
    scoreboards = course.get_descriptor().get('scoreboard', [])
    # An empty-list descriptor entry means the plugin is disabled here.
    # (Kept as an equality test, not truthiness, to preserve behavior for
    # non-list descriptor values.)
    if scoreboards == []:
        return None
    renderer = template_helper.get_custom_renderer('frontend/plugins/scoreboard', layout=False)
    return str(renderer.course_menu(course))
def function[course_menu, parameter[course, template_helper]]: constant[ Displays the link to the scoreboards on the course page, if the plugin is activated for this course ] variable[scoreboards] assign[=] call[call[name[course].get_descriptor, parameter[]].get, parameter[constant[scoreboard], list[[]]]] if compare[name[scoreboards] not_equal[!=] list[[]]] begin[:] return[call[name[str], parameter[call[call[name[template_helper].get_custom_renderer, parameter[constant[frontend/plugins/scoreboard]]].course_menu, parameter[name[course]]]]]]
keyword[def] identifier[course_menu] ( identifier[course] , identifier[template_helper] ): literal[string] identifier[scoreboards] = identifier[course] . identifier[get_descriptor] (). identifier[get] ( literal[string] ,[]) keyword[if] identifier[scoreboards] !=[]: keyword[return] identifier[str] ( identifier[template_helper] . identifier[get_custom_renderer] ( literal[string] , identifier[layout] = keyword[False] ). identifier[course_menu] ( identifier[course] )) keyword[else] : keyword[return] keyword[None]
def course_menu(course, template_helper): """ Displays the link to the scoreboards on the course page, if the plugin is activated for this course """ scoreboards = course.get_descriptor().get('scoreboard', []) if scoreboards != []: return str(template_helper.get_custom_renderer('frontend/plugins/scoreboard', layout=False).course_menu(course)) # depends on [control=['if'], data=[]] else: return None
def get_mtime(fname):
    """
    Find the time this file was last modified.

    :param fname: File name
    :return: The last time the file was modified, in nanoseconds.
    """
    try:
        return os.stat(fname).st_mtime_ns
    except OSError:
        # The file might be right in the middle of being written, so give
        # the writer a moment and retry exactly once; a second failure
        # propagates to the caller.
        time.sleep(1)
        return os.stat(fname).st_mtime_ns
def function[get_mtime, parameter[fname]]: constant[ Find the time this file was last modified. :param fname: File name :return: The last time the file was modified. ] <ast.Try object at 0x7da1b021d000> return[name[mtime]]
keyword[def] identifier[get_mtime] ( identifier[fname] ): literal[string] keyword[try] : identifier[mtime] = identifier[os] . identifier[stat] ( identifier[fname] ). identifier[st_mtime_ns] keyword[except] identifier[OSError] : identifier[time] . identifier[sleep] ( literal[int] ) identifier[mtime] = identifier[os] . identifier[stat] ( identifier[fname] ). identifier[st_mtime_ns] keyword[return] identifier[mtime]
def get_mtime(fname): """ Find the time this file was last modified. :param fname: File name :return: The last time the file was modified. """ try: mtime = os.stat(fname).st_mtime_ns # depends on [control=['try'], data=[]] except OSError: # The file might be right in the middle of being written # so sleep time.sleep(1) mtime = os.stat(fname).st_mtime_ns # depends on [control=['except'], data=[]] return mtime
def all(self, workflow_id):
    """
    Get information about subscribers who were removed from an Automation
    workflow.

    :param workflow_id: The unique id for the Automation workflow.
    :type workflow_id: :py:class:`str`
    """
    # Remember the workflow for later calls on this endpoint object.
    self.workflow_id = workflow_id
    url = self._build_path(workflow_id, 'removed-subscribers')
    return self._mc_client._get(url=url)
def function[all, parameter[self, workflow_id]]: constant[ Get information about subscribers who were removed from an Automation workflow. :param workflow_id: The unique id for the Automation workflow. :type workflow_id: :py:class:`str` ] name[self].workflow_id assign[=] name[workflow_id] return[call[name[self]._mc_client._get, parameter[]]]
keyword[def] identifier[all] ( identifier[self] , identifier[workflow_id] ): literal[string] identifier[self] . identifier[workflow_id] = identifier[workflow_id] keyword[return] identifier[self] . identifier[_mc_client] . identifier[_get] ( identifier[url] = identifier[self] . identifier[_build_path] ( identifier[workflow_id] , literal[string] ))
def all(self, workflow_id): """ Get information about subscribers who were removed from an Automation workflow. :param workflow_id: The unique id for the Automation workflow. :type workflow_id: :py:class:`str` """ self.workflow_id = workflow_id return self._mc_client._get(url=self._build_path(workflow_id, 'removed-subscribers'))
def add_transform_chain(self, tc):
    """Insert the GLSL snippets of a transform chain."""
    # Emit one vertex-shader snippet per GPU transform in the chain.
    for transform in tc.gpu_transforms:
        if isinstance(transform, Clip):
            # Clipping itself happens in the fragment shader; here we only
            # forward the position through the varying.
            self.insert_vert('v_temp_pos_tr = temp_pos_tr;')
        else:
            self.insert_vert(transform.glsl('temp_pos_tr'))
    # If the chain clips, add the fragment-side clipping snippet.
    clip = tc.get('Clip')
    if clip:
        self.insert_frag(clip.glsl('v_temp_pos_tr'), 'before_transforms')
def function[add_transform_chain, parameter[self, tc]]: constant[Insert the GLSL snippets of a transform chain.] for taget[name[t]] in starred[name[tc].gpu_transforms] begin[:] if call[name[isinstance], parameter[name[t], name[Clip]]] begin[:] call[name[self].insert_vert, parameter[constant[v_temp_pos_tr = temp_pos_tr;]]] continue call[name[self].insert_vert, parameter[call[name[t].glsl, parameter[constant[temp_pos_tr]]]]] variable[clip] assign[=] call[name[tc].get, parameter[constant[Clip]]] if name[clip] begin[:] call[name[self].insert_frag, parameter[call[name[clip].glsl, parameter[constant[v_temp_pos_tr]]], constant[before_transforms]]]
keyword[def] identifier[add_transform_chain] ( identifier[self] , identifier[tc] ): literal[string] keyword[for] identifier[t] keyword[in] identifier[tc] . identifier[gpu_transforms] : keyword[if] identifier[isinstance] ( identifier[t] , identifier[Clip] ): identifier[self] . identifier[insert_vert] ( literal[string] ) keyword[continue] identifier[self] . identifier[insert_vert] ( identifier[t] . identifier[glsl] ( literal[string] )) identifier[clip] = identifier[tc] . identifier[get] ( literal[string] ) keyword[if] identifier[clip] : identifier[self] . identifier[insert_frag] ( identifier[clip] . identifier[glsl] ( literal[string] ), literal[string] )
def add_transform_chain(self, tc): """Insert the GLSL snippets of a transform chain.""" # Generate the transforms snippet. for t in tc.gpu_transforms: if isinstance(t, Clip): # Set the varying value in the vertex shader. self.insert_vert('v_temp_pos_tr = temp_pos_tr;') continue # depends on [control=['if'], data=[]] self.insert_vert(t.glsl('temp_pos_tr')) # depends on [control=['for'], data=['t']] # Clipping. clip = tc.get('Clip') if clip: self.insert_frag(clip.glsl('v_temp_pos_tr'), 'before_transforms') # depends on [control=['if'], data=[]]
def wait_until_finished(self):
    """Blocking method to wait until the driver finished its execution."""
    try:
        # Join blocks until the worker thread exits on its own.
        self.thread.join()
    except KeyboardInterrupt:
        # Ctrl-C while waiting: deactivate the driver and wake its queue
        # so it can shut down cleanly instead of crashing.
        self._logger.debug('Keyboard interrupt detected, stopping driver.')
        self._active = False
        self._wake_queue()
def function[wait_until_finished, parameter[self]]: constant[Blocking method to wait until the driver finished its execution.] <ast.Try object at 0x7da204620e50>
keyword[def] identifier[wait_until_finished] ( identifier[self] ): literal[string] keyword[try] : identifier[self] . identifier[thread] . identifier[join] () keyword[except] identifier[KeyboardInterrupt] : identifier[self] . identifier[_logger] . identifier[debug] ( literal[string] ) identifier[self] . identifier[_active] = keyword[False] identifier[self] . identifier[_wake_queue] ()
def wait_until_finished(self): """Blocking method to wait until the driver finished its execution.""" try: self.thread.join() # depends on [control=['try'], data=[]] except KeyboardInterrupt: self._logger.debug('Keyboard interrupt detected, stopping driver.') self._active = False self._wake_queue() # depends on [control=['except'], data=[]]
def parse_year_days(year_info):
    """Parse year days from a year info.

    Each of the 12 months contributes 29 days plus one extra day when its
    bit in ``year_info`` is set; the leap month's days are added on top.
    """
    leap_month, leap_days = _parse_leap(year_info)
    return leap_days + sum(
        29 + ((year_info >> (16 - month)) % 2)
        for month in range(1, 13)
    )
def function[parse_year_days, parameter[year_info]]: constant[Parse year days from a year info. ] <ast.Tuple object at 0x7da1b0881480> assign[=] call[name[_parse_leap], parameter[name[year_info]]] variable[res] assign[=] name[leap_days] for taget[name[month]] in starred[call[name[range], parameter[constant[1], constant[13]]]] begin[:] <ast.AugAssign object at 0x7da1b07334c0> return[name[res]]
keyword[def] identifier[parse_year_days] ( identifier[year_info] ): literal[string] identifier[leap_month] , identifier[leap_days] = identifier[_parse_leap] ( identifier[year_info] ) identifier[res] = identifier[leap_days] keyword[for] identifier[month] keyword[in] identifier[range] ( literal[int] , literal[int] ): identifier[res] +=( identifier[year_info] >>( literal[int] - identifier[month] ))% literal[int] + literal[int] keyword[return] identifier[res]
def parse_year_days(year_info): """Parse year days from a year info. """ (leap_month, leap_days) = _parse_leap(year_info) res = leap_days for month in range(1, 13): res += (year_info >> 16 - month) % 2 + 29 # depends on [control=['for'], data=['month']] return res
def __refillOTPKs(self):
    """
    If the amount of available OTPKs fell under the minimum, refills the
    OTPKs up to the maximum limit again.
    """
    available = len(self.__otpks)
    if available < self.__min_num_otpks:
        # Top the pool back up to the configured maximum.
        self.__generateOTPKs(self.__max_num_otpks - available)
def function[__refillOTPKs, parameter[self]]: constant[ If the amount of available OTPKs fell under the minimum, refills the OTPKs up to the maximum limit again. ] variable[remainingOTPKs] assign[=] call[name[len], parameter[name[self].__otpks]] if compare[name[remainingOTPKs] less[<] name[self].__min_num_otpks] begin[:] call[name[self].__generateOTPKs, parameter[binary_operation[name[self].__max_num_otpks - name[remainingOTPKs]]]]
keyword[def] identifier[__refillOTPKs] ( identifier[self] ): literal[string] identifier[remainingOTPKs] = identifier[len] ( identifier[self] . identifier[__otpks] ) keyword[if] identifier[remainingOTPKs] < identifier[self] . identifier[__min_num_otpks] : identifier[self] . identifier[__generateOTPKs] ( identifier[self] . identifier[__max_num_otpks] - identifier[remainingOTPKs] )
def __refillOTPKs(self): """ If the amount of available OTPKs fell under the minimum, refills the OTPKs up to the maximum limit again. """ remainingOTPKs = len(self.__otpks) if remainingOTPKs < self.__min_num_otpks: self.__generateOTPKs(self.__max_num_otpks - remainingOTPKs) # depends on [control=['if'], data=['remainingOTPKs']]
def get_credentials(): """ Get the credentials to use. We try application credentials first, followed by user credentials. The path to the application credentials can be overridden by pointing the GOOGLE_APPLICATION_CREDENTIALS environment variable to some file; the path to the user credentials can be overridden by pointing the CLOUDSDK_CONFIG environment variable to some directory (after which we will look for the file $CLOUDSDK_CONFIG/gcloud/credentials). Unless you have specific reasons for overriding these the defaults should suffice. """ try: credentials, _ = google.auth.default() credentials = google.auth.credentials.with_scopes_if_required(credentials, CREDENTIAL_SCOPES) return credentials except Exception as e: # Try load user creds from file cred_file = get_config_dir() + '/credentials' if os.path.exists(cred_file): with open(cred_file) as f: creds = json.loads(f.read()) # Use the first gcloud one we find for entry in creds['data']: if entry['key']['type'] == 'google-cloud-sdk': creds = oauth2client.client.OAuth2Credentials.from_json(json.dumps(entry['credential'])) return _convert_oauth2client_creds(creds) if type(e) == google.auth.exceptions.DefaultCredentialsError: # If we are in Datalab container, change the message to be about signing in. if _in_datalab_docker(): raise Exception('No application credentials found. Perhaps you should sign in.') raise e
def function[get_credentials, parameter[]]: constant[ Get the credentials to use. We try application credentials first, followed by user credentials. The path to the application credentials can be overridden by pointing the GOOGLE_APPLICATION_CREDENTIALS environment variable to some file; the path to the user credentials can be overridden by pointing the CLOUDSDK_CONFIG environment variable to some directory (after which we will look for the file $CLOUDSDK_CONFIG/gcloud/credentials). Unless you have specific reasons for overriding these the defaults should suffice. ] <ast.Try object at 0x7da1b2345d50>
keyword[def] identifier[get_credentials] (): literal[string] keyword[try] : identifier[credentials] , identifier[_] = identifier[google] . identifier[auth] . identifier[default] () identifier[credentials] = identifier[google] . identifier[auth] . identifier[credentials] . identifier[with_scopes_if_required] ( identifier[credentials] , identifier[CREDENTIAL_SCOPES] ) keyword[return] identifier[credentials] keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[cred_file] = identifier[get_config_dir] ()+ literal[string] keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[cred_file] ): keyword[with] identifier[open] ( identifier[cred_file] ) keyword[as] identifier[f] : identifier[creds] = identifier[json] . identifier[loads] ( identifier[f] . identifier[read] ()) keyword[for] identifier[entry] keyword[in] identifier[creds] [ literal[string] ]: keyword[if] identifier[entry] [ literal[string] ][ literal[string] ]== literal[string] : identifier[creds] = identifier[oauth2client] . identifier[client] . identifier[OAuth2Credentials] . identifier[from_json] ( identifier[json] . identifier[dumps] ( identifier[entry] [ literal[string] ])) keyword[return] identifier[_convert_oauth2client_creds] ( identifier[creds] ) keyword[if] identifier[type] ( identifier[e] )== identifier[google] . identifier[auth] . identifier[exceptions] . identifier[DefaultCredentialsError] : keyword[if] identifier[_in_datalab_docker] (): keyword[raise] identifier[Exception] ( literal[string] ) keyword[raise] identifier[e]
def get_credentials(): """ Get the credentials to use. We try application credentials first, followed by user credentials. The path to the application credentials can be overridden by pointing the GOOGLE_APPLICATION_CREDENTIALS environment variable to some file; the path to the user credentials can be overridden by pointing the CLOUDSDK_CONFIG environment variable to some directory (after which we will look for the file $CLOUDSDK_CONFIG/gcloud/credentials). Unless you have specific reasons for overriding these the defaults should suffice. """ try: (credentials, _) = google.auth.default() credentials = google.auth.credentials.with_scopes_if_required(credentials, CREDENTIAL_SCOPES) return credentials # depends on [control=['try'], data=[]] except Exception as e: # Try load user creds from file cred_file = get_config_dir() + '/credentials' if os.path.exists(cred_file): with open(cred_file) as f: creds = json.loads(f.read()) # depends on [control=['with'], data=['f']] # Use the first gcloud one we find for entry in creds['data']: if entry['key']['type'] == 'google-cloud-sdk': creds = oauth2client.client.OAuth2Credentials.from_json(json.dumps(entry['credential'])) return _convert_oauth2client_creds(creds) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['entry']] # depends on [control=['if'], data=[]] if type(e) == google.auth.exceptions.DefaultCredentialsError: # If we are in Datalab container, change the message to be about signing in. if _in_datalab_docker(): raise Exception('No application credentials found. Perhaps you should sign in.') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] raise e # depends on [control=['except'], data=['e']]
def setup_datafind_runtime_cache_multi_calls_perifo(cp, scienceSegs, outputDir, tags=None): """ This function uses the glue.datafind library to obtain the location of all the frame files that will be needed to cover the analysis of the data given in scienceSegs. This function will not check if the returned frames cover the whole time requested, such sanity checks are done in the pycbc.workflow.setup_datafind_workflow entry function. As opposed to setup_datafind_runtime_single_call_perifo this call will one call to the datafind server for every science segment. This function will return a list of output files that correspond to the cache .lcf files that are produced, which list the locations of all frame files. This will cause problems with pegasus, which expects to know about all input files (ie. the frame files themselves.) Parameters ----------- cp : ConfigParser.ConfigParser instance This contains a representation of the information stored within the workflow configuration files scienceSegs : Dictionary of ifo keyed glue.segment.segmentlist instances This contains the times that the workflow is expected to analyse. outputDir : path All output files written by datafind processes will be written to this directory. tags : list of strings, optional (default=None) Use this to specify tags. This can be used if this module is being called more than once to give call specific configuration (by setting options in [workflow-datafind-${TAG}] rather than [workflow-datafind]). This is also used to tag the Files returned by the class to uniqueify the Files and uniqueify the actual filename. FIXME: Filenames may not be unique with current codes! Returns -------- datafindcaches : list of glue.lal.Cache instances The glue.lal.Cache representations of the various calls to the datafind server and the returned frame files. datafindOuts : pycbc.workflow.core.FileList List of all the datafind output files for use later in the pipeline. 
""" if tags is None: tags = [] # First job is to do setup for the datafind jobs # First get the server name logging.info("Setting up connection to datafind server.") connection = setup_datafind_server_connection(cp, tags=tags) # Now ready to loop over the input segments datafindouts = [] datafindcaches = [] logging.info("Querying datafind server for all science segments.") for ifo, scienceSegsIfo in scienceSegs.items(): observatory = ifo[0].upper() frameType = cp.get_opt_tags("workflow-datafind", "datafind-%s-frame-type" % (ifo.lower()), tags) for seg in scienceSegsIfo: msg = "Finding data between %d and %d " %(seg[0],seg[1]) msg += "for ifo %s" %(ifo) logging.debug(msg) # WARNING: For now the workflow will expect times to be in integer seconds startTime = int(seg[0]) endTime = int(seg[1]) # Sometimes the connection can drop, so try a backup here try: cache, cache_file = run_datafind_instance(cp, outputDir, connection, observatory, frameType, startTime, endTime, ifo, tags=tags) except: connection = setup_datafind_server_connection(cp, tags=tags) cache, cache_file = run_datafind_instance(cp, outputDir, connection, observatory, frameType, startTime, endTime, ifo, tags=tags) datafindouts.append(cache_file) datafindcaches.append(cache) return datafindcaches, datafindouts
def function[setup_datafind_runtime_cache_multi_calls_perifo, parameter[cp, scienceSegs, outputDir, tags]]: constant[ This function uses the glue.datafind library to obtain the location of all the frame files that will be needed to cover the analysis of the data given in scienceSegs. This function will not check if the returned frames cover the whole time requested, such sanity checks are done in the pycbc.workflow.setup_datafind_workflow entry function. As opposed to setup_datafind_runtime_single_call_perifo this call will one call to the datafind server for every science segment. This function will return a list of output files that correspond to the cache .lcf files that are produced, which list the locations of all frame files. This will cause problems with pegasus, which expects to know about all input files (ie. the frame files themselves.) Parameters ----------- cp : ConfigParser.ConfigParser instance This contains a representation of the information stored within the workflow configuration files scienceSegs : Dictionary of ifo keyed glue.segment.segmentlist instances This contains the times that the workflow is expected to analyse. outputDir : path All output files written by datafind processes will be written to this directory. tags : list of strings, optional (default=None) Use this to specify tags. This can be used if this module is being called more than once to give call specific configuration (by setting options in [workflow-datafind-${TAG}] rather than [workflow-datafind]). This is also used to tag the Files returned by the class to uniqueify the Files and uniqueify the actual filename. FIXME: Filenames may not be unique with current codes! Returns -------- datafindcaches : list of glue.lal.Cache instances The glue.lal.Cache representations of the various calls to the datafind server and the returned frame files. datafindOuts : pycbc.workflow.core.FileList List of all the datafind output files for use later in the pipeline. 
] if compare[name[tags] is constant[None]] begin[:] variable[tags] assign[=] list[[]] call[name[logging].info, parameter[constant[Setting up connection to datafind server.]]] variable[connection] assign[=] call[name[setup_datafind_server_connection], parameter[name[cp]]] variable[datafindouts] assign[=] list[[]] variable[datafindcaches] assign[=] list[[]] call[name[logging].info, parameter[constant[Querying datafind server for all science segments.]]] for taget[tuple[[<ast.Name object at 0x7da20c6aa530>, <ast.Name object at 0x7da20c6abe20>]]] in starred[call[name[scienceSegs].items, parameter[]]] begin[:] variable[observatory] assign[=] call[call[name[ifo]][constant[0]].upper, parameter[]] variable[frameType] assign[=] call[name[cp].get_opt_tags, parameter[constant[workflow-datafind], binary_operation[constant[datafind-%s-frame-type] <ast.Mod object at 0x7da2590d6920> call[name[ifo].lower, parameter[]]], name[tags]]] for taget[name[seg]] in starred[name[scienceSegsIfo]] begin[:] variable[msg] assign[=] binary_operation[constant[Finding data between %d and %d ] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da20c6ab460>, <ast.Subscript object at 0x7da20c6ab310>]]] <ast.AugAssign object at 0x7da20c6a8550> call[name[logging].debug, parameter[name[msg]]] variable[startTime] assign[=] call[name[int], parameter[call[name[seg]][constant[0]]]] variable[endTime] assign[=] call[name[int], parameter[call[name[seg]][constant[1]]]] <ast.Try object at 0x7da20c6a9000> call[name[datafindouts].append, parameter[name[cache_file]]] call[name[datafindcaches].append, parameter[name[cache]]] return[tuple[[<ast.Name object at 0x7da2054a49d0>, <ast.Name object at 0x7da2054a6230>]]]
keyword[def] identifier[setup_datafind_runtime_cache_multi_calls_perifo] ( identifier[cp] , identifier[scienceSegs] , identifier[outputDir] , identifier[tags] = keyword[None] ): literal[string] keyword[if] identifier[tags] keyword[is] keyword[None] : identifier[tags] =[] identifier[logging] . identifier[info] ( literal[string] ) identifier[connection] = identifier[setup_datafind_server_connection] ( identifier[cp] , identifier[tags] = identifier[tags] ) identifier[datafindouts] =[] identifier[datafindcaches] =[] identifier[logging] . identifier[info] ( literal[string] ) keyword[for] identifier[ifo] , identifier[scienceSegsIfo] keyword[in] identifier[scienceSegs] . identifier[items] (): identifier[observatory] = identifier[ifo] [ literal[int] ]. identifier[upper] () identifier[frameType] = identifier[cp] . identifier[get_opt_tags] ( literal[string] , literal[string] %( identifier[ifo] . identifier[lower] ()), identifier[tags] ) keyword[for] identifier[seg] keyword[in] identifier[scienceSegsIfo] : identifier[msg] = literal[string] %( identifier[seg] [ literal[int] ], identifier[seg] [ literal[int] ]) identifier[msg] += literal[string] %( identifier[ifo] ) identifier[logging] . 
identifier[debug] ( identifier[msg] ) identifier[startTime] = identifier[int] ( identifier[seg] [ literal[int] ]) identifier[endTime] = identifier[int] ( identifier[seg] [ literal[int] ]) keyword[try] : identifier[cache] , identifier[cache_file] = identifier[run_datafind_instance] ( identifier[cp] , identifier[outputDir] , identifier[connection] , identifier[observatory] , identifier[frameType] , identifier[startTime] , identifier[endTime] , identifier[ifo] , identifier[tags] = identifier[tags] ) keyword[except] : identifier[connection] = identifier[setup_datafind_server_connection] ( identifier[cp] , identifier[tags] = identifier[tags] ) identifier[cache] , identifier[cache_file] = identifier[run_datafind_instance] ( identifier[cp] , identifier[outputDir] , identifier[connection] , identifier[observatory] , identifier[frameType] , identifier[startTime] , identifier[endTime] , identifier[ifo] , identifier[tags] = identifier[tags] ) identifier[datafindouts] . identifier[append] ( identifier[cache_file] ) identifier[datafindcaches] . identifier[append] ( identifier[cache] ) keyword[return] identifier[datafindcaches] , identifier[datafindouts]
def setup_datafind_runtime_cache_multi_calls_perifo(cp, scienceSegs, outputDir, tags=None): """ This function uses the glue.datafind library to obtain the location of all the frame files that will be needed to cover the analysis of the data given in scienceSegs. This function will not check if the returned frames cover the whole time requested, such sanity checks are done in the pycbc.workflow.setup_datafind_workflow entry function. As opposed to setup_datafind_runtime_single_call_perifo this call will one call to the datafind server for every science segment. This function will return a list of output files that correspond to the cache .lcf files that are produced, which list the locations of all frame files. This will cause problems with pegasus, which expects to know about all input files (ie. the frame files themselves.) Parameters ----------- cp : ConfigParser.ConfigParser instance This contains a representation of the information stored within the workflow configuration files scienceSegs : Dictionary of ifo keyed glue.segment.segmentlist instances This contains the times that the workflow is expected to analyse. outputDir : path All output files written by datafind processes will be written to this directory. tags : list of strings, optional (default=None) Use this to specify tags. This can be used if this module is being called more than once to give call specific configuration (by setting options in [workflow-datafind-${TAG}] rather than [workflow-datafind]). This is also used to tag the Files returned by the class to uniqueify the Files and uniqueify the actual filename. FIXME: Filenames may not be unique with current codes! Returns -------- datafindcaches : list of glue.lal.Cache instances The glue.lal.Cache representations of the various calls to the datafind server and the returned frame files. datafindOuts : pycbc.workflow.core.FileList List of all the datafind output files for use later in the pipeline. 
""" if tags is None: tags = [] # depends on [control=['if'], data=['tags']] # First job is to do setup for the datafind jobs # First get the server name logging.info('Setting up connection to datafind server.') connection = setup_datafind_server_connection(cp, tags=tags) # Now ready to loop over the input segments datafindouts = [] datafindcaches = [] logging.info('Querying datafind server for all science segments.') for (ifo, scienceSegsIfo) in scienceSegs.items(): observatory = ifo[0].upper() frameType = cp.get_opt_tags('workflow-datafind', 'datafind-%s-frame-type' % ifo.lower(), tags) for seg in scienceSegsIfo: msg = 'Finding data between %d and %d ' % (seg[0], seg[1]) msg += 'for ifo %s' % ifo logging.debug(msg) # WARNING: For now the workflow will expect times to be in integer seconds startTime = int(seg[0]) endTime = int(seg[1]) # Sometimes the connection can drop, so try a backup here try: (cache, cache_file) = run_datafind_instance(cp, outputDir, connection, observatory, frameType, startTime, endTime, ifo, tags=tags) # depends on [control=['try'], data=[]] except: connection = setup_datafind_server_connection(cp, tags=tags) (cache, cache_file) = run_datafind_instance(cp, outputDir, connection, observatory, frameType, startTime, endTime, ifo, tags=tags) # depends on [control=['except'], data=[]] datafindouts.append(cache_file) datafindcaches.append(cache) # depends on [control=['for'], data=['seg']] # depends on [control=['for'], data=[]] return (datafindcaches, datafindouts)
def response(self, msgtype, msgid, error, result): """Handle an incoming response.""" self._proxy.response(msgid, error, result)
def function[response, parameter[self, msgtype, msgid, error, result]]: constant[Handle an incoming response.] call[name[self]._proxy.response, parameter[name[msgid], name[error], name[result]]]
keyword[def] identifier[response] ( identifier[self] , identifier[msgtype] , identifier[msgid] , identifier[error] , identifier[result] ): literal[string] identifier[self] . identifier[_proxy] . identifier[response] ( identifier[msgid] , identifier[error] , identifier[result] )
def response(self, msgtype, msgid, error, result): """Handle an incoming response.""" self._proxy.response(msgid, error, result)
def discoverPoints(bacnetapp, address, devID): """ Discover the BACnet points in a BACnet device. :param bacnetApp: The app itself so we can call read :param address: address of the device as a string (ex. '2:5') :param devID: device ID of the bacnet device as a string (ex. '1001') :returns: a tuple with deviceName, pss, objList, df * *deviceName* : name of the device * *pss* : protocole service supported * *objList* : list of bacnet object (ex. analogInput, 1) * *df* : is a dataFrame containing pointType, pointAddress, pointName, description presentValue and units If pandas can't be found, df will be a simple array """ pss = bacnetapp.read( "{} device {} protocolServicesSupported".format(address, devID) ) deviceName = bacnetapp.read("{} device {} objectName".format(address, devID)) # print('Device {}- building points list'.format(deviceName)) objList = bacnetapp.read("{} device {] objectList".format(address, devID)) newLine = [] result = [] points = [] for pointType, pointAddr in objList: if "binary" in pointType: # BI/BO/BV newLine = [pointType, pointAddr] infos = bacnetapp.readMultiple( "{} {} {} objectName description presentValue inactiveText activeText".format( address, pointType, pointAddr ) ) newLine.extend(infos[:-2]) newLine.extend([infos[-2:]]) newPoint = BooleanPoint( pointType=newLine[0], pointAddress=newLine[1], pointName=newLine[2], description=newLine[3], presentValue=newLine[4], units_state=newLine[5], ) elif "multiState" in pointType: # MI/MV/MO newLine = [pointType, pointAddr] newLine.extend( bacnetapp.readMultiple( "{} {} {} objectName description presentValue stateText".format( address, pointType, pointAddr ) ) ) newPoint = EnumPoint( pointType=newLine[0], pointAddress=newLine[1], pointName=newLine[2], description=newLine[3], presentValue=newLine[4], units_state=newLine[5], ) elif "analog" in pointType: # AI/AO/AV newLine = [pointType, pointAddr] newLine.extend( bacnetapp.readMultiple( "{} {} {} objectName description presentValue 
units".format( address, pointType, pointAddr ) ) ) newPoint = NumericPoint( pointType=newLine[0], pointAddress=newLine[1], pointName=newLine[2], description=newLine[3], presentValue=newLine[4], units_state=newLine[5], ) else: continue # skip result.append(newLine) points.append(newPoint) if _PANDA: df = pd.DataFrame( result, columns=[ "pointType", "pointAddress", "pointName", "description", "presentValue", "units_state", ], ).set_index(["pointName"]) else: df = result # print('Ready!') return (deviceName, pss, objList, df, points)
def function[discoverPoints, parameter[bacnetapp, address, devID]]: constant[ Discover the BACnet points in a BACnet device. :param bacnetApp: The app itself so we can call read :param address: address of the device as a string (ex. '2:5') :param devID: device ID of the bacnet device as a string (ex. '1001') :returns: a tuple with deviceName, pss, objList, df * *deviceName* : name of the device * *pss* : protocole service supported * *objList* : list of bacnet object (ex. analogInput, 1) * *df* : is a dataFrame containing pointType, pointAddress, pointName, description presentValue and units If pandas can't be found, df will be a simple array ] variable[pss] assign[=] call[name[bacnetapp].read, parameter[call[constant[{} device {} protocolServicesSupported].format, parameter[name[address], name[devID]]]]] variable[deviceName] assign[=] call[name[bacnetapp].read, parameter[call[constant[{} device {} objectName].format, parameter[name[address], name[devID]]]]] variable[objList] assign[=] call[name[bacnetapp].read, parameter[call[constant[{} device {] objectList].format, parameter[name[address], name[devID]]]]] variable[newLine] assign[=] list[[]] variable[result] assign[=] list[[]] variable[points] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da1b040a020>, <ast.Name object at 0x7da1b040a980>]]] in starred[name[objList]] begin[:] if compare[constant[binary] in name[pointType]] begin[:] variable[newLine] assign[=] list[[<ast.Name object at 0x7da1b040a920>, <ast.Name object at 0x7da1b04085e0>]] variable[infos] assign[=] call[name[bacnetapp].readMultiple, parameter[call[constant[{} {} {} objectName description presentValue inactiveText activeText].format, parameter[name[address], name[pointType], name[pointAddr]]]]] call[name[newLine].extend, parameter[call[name[infos]][<ast.Slice object at 0x7da1b040a500>]]] call[name[newLine].extend, parameter[list[[<ast.Subscript object at 0x7da1b045cbb0>]]]] variable[newPoint] assign[=] call[name[BooleanPoint], 
parameter[]] call[name[result].append, parameter[name[newLine]]] call[name[points].append, parameter[name[newPoint]]] if name[_PANDA] begin[:] variable[df] assign[=] call[call[name[pd].DataFrame, parameter[name[result]]].set_index, parameter[list[[<ast.Constant object at 0x7da1b047b130>]]]] return[tuple[[<ast.Name object at 0x7da1b047afe0>, <ast.Name object at 0x7da1b047afb0>, <ast.Name object at 0x7da1b047af80>, <ast.Name object at 0x7da1b047af50>, <ast.Name object at 0x7da1b047af20>]]]
keyword[def] identifier[discoverPoints] ( identifier[bacnetapp] , identifier[address] , identifier[devID] ): literal[string] identifier[pss] = identifier[bacnetapp] . identifier[read] ( literal[string] . identifier[format] ( identifier[address] , identifier[devID] ) ) identifier[deviceName] = identifier[bacnetapp] . identifier[read] ( literal[string] . identifier[format] ( identifier[address] , identifier[devID] )) identifier[objList] = identifier[bacnetapp] . identifier[read] ( literal[string] . identifier[format] ( identifier[address] , identifier[devID] )) identifier[newLine] =[] identifier[result] =[] identifier[points] =[] keyword[for] identifier[pointType] , identifier[pointAddr] keyword[in] identifier[objList] : keyword[if] literal[string] keyword[in] identifier[pointType] : identifier[newLine] =[ identifier[pointType] , identifier[pointAddr] ] identifier[infos] = identifier[bacnetapp] . identifier[readMultiple] ( literal[string] . identifier[format] ( identifier[address] , identifier[pointType] , identifier[pointAddr] ) ) identifier[newLine] . identifier[extend] ( identifier[infos] [:- literal[int] ]) identifier[newLine] . identifier[extend] ([ identifier[infos] [- literal[int] :]]) identifier[newPoint] = identifier[BooleanPoint] ( identifier[pointType] = identifier[newLine] [ literal[int] ], identifier[pointAddress] = identifier[newLine] [ literal[int] ], identifier[pointName] = identifier[newLine] [ literal[int] ], identifier[description] = identifier[newLine] [ literal[int] ], identifier[presentValue] = identifier[newLine] [ literal[int] ], identifier[units_state] = identifier[newLine] [ literal[int] ], ) keyword[elif] literal[string] keyword[in] identifier[pointType] : identifier[newLine] =[ identifier[pointType] , identifier[pointAddr] ] identifier[newLine] . identifier[extend] ( identifier[bacnetapp] . identifier[readMultiple] ( literal[string] . 
identifier[format] ( identifier[address] , identifier[pointType] , identifier[pointAddr] ) ) ) identifier[newPoint] = identifier[EnumPoint] ( identifier[pointType] = identifier[newLine] [ literal[int] ], identifier[pointAddress] = identifier[newLine] [ literal[int] ], identifier[pointName] = identifier[newLine] [ literal[int] ], identifier[description] = identifier[newLine] [ literal[int] ], identifier[presentValue] = identifier[newLine] [ literal[int] ], identifier[units_state] = identifier[newLine] [ literal[int] ], ) keyword[elif] literal[string] keyword[in] identifier[pointType] : identifier[newLine] =[ identifier[pointType] , identifier[pointAddr] ] identifier[newLine] . identifier[extend] ( identifier[bacnetapp] . identifier[readMultiple] ( literal[string] . identifier[format] ( identifier[address] , identifier[pointType] , identifier[pointAddr] ) ) ) identifier[newPoint] = identifier[NumericPoint] ( identifier[pointType] = identifier[newLine] [ literal[int] ], identifier[pointAddress] = identifier[newLine] [ literal[int] ], identifier[pointName] = identifier[newLine] [ literal[int] ], identifier[description] = identifier[newLine] [ literal[int] ], identifier[presentValue] = identifier[newLine] [ literal[int] ], identifier[units_state] = identifier[newLine] [ literal[int] ], ) keyword[else] : keyword[continue] identifier[result] . identifier[append] ( identifier[newLine] ) identifier[points] . identifier[append] ( identifier[newPoint] ) keyword[if] identifier[_PANDA] : identifier[df] = identifier[pd] . identifier[DataFrame] ( identifier[result] , identifier[columns] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , ], ). identifier[set_index] ([ literal[string] ]) keyword[else] : identifier[df] = identifier[result] keyword[return] ( identifier[deviceName] , identifier[pss] , identifier[objList] , identifier[df] , identifier[points] )
def discoverPoints(bacnetapp, address, devID): """ Discover the BACnet points in a BACnet device. :param bacnetApp: The app itself so we can call read :param address: address of the device as a string (ex. '2:5') :param devID: device ID of the bacnet device as a string (ex. '1001') :returns: a tuple with deviceName, pss, objList, df * *deviceName* : name of the device * *pss* : protocole service supported * *objList* : list of bacnet object (ex. analogInput, 1) * *df* : is a dataFrame containing pointType, pointAddress, pointName, description presentValue and units If pandas can't be found, df will be a simple array """ pss = bacnetapp.read('{} device {} protocolServicesSupported'.format(address, devID)) deviceName = bacnetapp.read('{} device {} objectName'.format(address, devID)) # print('Device {}- building points list'.format(deviceName)) objList = bacnetapp.read('{} device {] objectList'.format(address, devID)) newLine = [] result = [] points = [] for (pointType, pointAddr) in objList: if 'binary' in pointType: # BI/BO/BV newLine = [pointType, pointAddr] infos = bacnetapp.readMultiple('{} {} {} objectName description presentValue inactiveText activeText'.format(address, pointType, pointAddr)) newLine.extend(infos[:-2]) newLine.extend([infos[-2:]]) newPoint = BooleanPoint(pointType=newLine[0], pointAddress=newLine[1], pointName=newLine[2], description=newLine[3], presentValue=newLine[4], units_state=newLine[5]) # depends on [control=['if'], data=['pointType']] elif 'multiState' in pointType: # MI/MV/MO newLine = [pointType, pointAddr] newLine.extend(bacnetapp.readMultiple('{} {} {} objectName description presentValue stateText'.format(address, pointType, pointAddr))) newPoint = EnumPoint(pointType=newLine[0], pointAddress=newLine[1], pointName=newLine[2], description=newLine[3], presentValue=newLine[4], units_state=newLine[5]) # depends on [control=['if'], data=['pointType']] elif 'analog' in pointType: # AI/AO/AV newLine = [pointType, pointAddr] 
newLine.extend(bacnetapp.readMultiple('{} {} {} objectName description presentValue units'.format(address, pointType, pointAddr))) newPoint = NumericPoint(pointType=newLine[0], pointAddress=newLine[1], pointName=newLine[2], description=newLine[3], presentValue=newLine[4], units_state=newLine[5]) # depends on [control=['if'], data=['pointType']] else: continue # skip result.append(newLine) points.append(newPoint) # depends on [control=['for'], data=[]] if _PANDA: df = pd.DataFrame(result, columns=['pointType', 'pointAddress', 'pointName', 'description', 'presentValue', 'units_state']).set_index(['pointName']) # depends on [control=['if'], data=[]] else: df = result # print('Ready!') return (deviceName, pss, objList, df, points)
def QA_data_tick_resample_1min(tick, type_='1min', if_drop=True):
    """
    Resample tick data into minute bars.

    1. Only intended for resampling ticks to 1-minute bars.
    2. Only tested against TDX (tongdaxin) 1-minute data, with which the
       output agrees.
    3. Verified to match the data returned by
       QA.QA_fetch_get_stock_transaction; other kinds of data are untested.

    demo:
    df = QA.QA_fetch_get_stock_transaction(package='tdx', code='000001',
                                           start='2018-08-01 09:25:00',
                                           end='2018-08-03 15:00:00')
    df_min = QA_data_tick_resample_1min(df)

    :param tick: tick DataFrame; the code below reads columns price / vol /
                 code / date and slices by a time-like index
                 (assumed datetime index -- TODO confirm with callers)
    :param type_: pandas resample rule (default '1min')
    :param if_drop: when True, drop rows containing NaN before returning
    :return: DataFrame of 1-minute bars indexed by ['datetime', 'code']
    """
    # Per-tick turnover; aggregated per bar as 'amount' below.
    tick = tick.assign(amount=tick.price * tick.vol)
    resx = pd.DataFrame()
    _dates = set(tick.date)

    # Process one trading day at a time so the session-boundary fixes below
    # apply per day.
    for date in sorted(list(_dates)):
        _data = tick.loc[tick.date == date]
        # morning min bar
        # NOTE(review): 'base' and 'loffset' are deprecated in modern pandas;
        # confirm the pinned pandas version still supports them.
        _data1 = _data[time(9, 25):time(11, 30)].resample(
            type_,
            closed='left',
            base=30,
            loffset=type_
        ).apply(
            {
                'price': 'ohlc',
                'vol': 'sum',
                'code': 'last',
                'amount': 'sum'
            }
        )
        # Flatten the ('price', 'open'/'high'/...) MultiIndex columns.
        _data1.columns = _data1.columns.droplevel(0)
        # do fix on the first and last bar
        # Some stocks lack call-auction data on some days, e.g. 002468 on
        # 2017-06-05, so only fold the auction bar in when it exists.
        if len(_data.loc[time(9, 25):time(9, 25)]) > 0:
            _data1.loc[time(9, 31):time(9, 31), 'open'] = _data1.loc[time(9, 26):time(9, 26), 'open'].values
            _data1.loc[time(9, 31):time(9, 31), 'high'] = _data1.loc[time(9, 26):time(9, 31), 'high'].max()
            _data1.loc[time(9, 31):time(9, 31), 'low'] = _data1.loc[time(9, 26):time(9, 31), 'low'].min()
            _data1.loc[time(9, 31):time(9, 31), 'vol'] = _data1.loc[time(9, 26):time(9, 31), 'vol'].sum()
            _data1.loc[time(9, 31):time(9, 31), 'amount'] = _data1.loc[time(9, 26):time(9, 31), 'amount'].sum()
        # TDX tick data sometimes has an 11:30 record and sometimes not;
        # merge it into the 11:30 bar only when present.
        if len(_data.loc[time(11, 30):time(11, 30)]) > 0:
            _data1.loc[time(11, 30):time(11, 30), 'high'] = _data1.loc[time(11, 30):time(11, 31), 'high'].max()
            _data1.loc[time(11, 30):time(11, 30), 'low'] = _data1.loc[time(11, 30):time(11, 31), 'low'].min()
            _data1.loc[time(11, 30):time(11, 30), 'close'] = _data1.loc[time(11, 31):time(11, 31), 'close'].values
            _data1.loc[time(11, 30):time(11, 30), 'vol'] = _data1.loc[time(11, 30):time(11, 31), 'vol'].sum()
            _data1.loc[time(11, 30):time(11, 30), 'amount'] = _data1.loc[time(11, 30):time(11, 31), 'amount'].sum()
        # Keep only the regular morning session bars.
        _data1 = _data1.loc[time(9, 31):time(11, 30)]
        # afternoon min bar
        _data2 = _data[time(13, 0):time(15, 0)].resample(
            type_,
            closed='left',
            base=30,
            loffset=type_
        ).apply(
            {
                'price': 'ohlc',
                'vol': 'sum',
                'code': 'last',
                'amount': 'sum'
            }
        )
        _data2.columns = _data2.columns.droplevel(0)
        # From 2018-08-20, Shanghai-listed stocks (code starting with '6')
        # introduced a 3-minute closing call auction; before that date the
        # close bar must be patched from the post-15:00 records.
        if (pd.Timestamp(date) < pd.Timestamp('2018-08-20')) and (tick.code.iloc[0][0] == '6'):
            # Guard against tick data missing the 13:00 value.
            if len(_data.loc[time(13, 0):time(13, 0)]) > 0:
                _data2.loc[time(15, 0):time(15, 0), 'high'] = _data2.loc[time(15, 0):time(15, 1), 'high'].max()
                _data2.loc[time(15, 0):time(15, 0), 'low'] = _data2.loc[time(15, 0):time(15, 1), 'low'].min()
                _data2.loc[time(15, 0):time(15, 0), 'close'] = _data2.loc[time(15, 1):time(15, 1), 'close'].values
        else:
            # Guard against tick data missing the 15:00 value.
            if len(_data.loc[time(13, 0):time(13, 0)]) > 0:
                _data2.loc[time(15, 0):time(15, 0)] = _data2.loc[time(15, 1):time(15, 1)].values
        # Keep only the regular afternoon session bars.
        _data2 = _data2.loc[time(13, 1):time(15, 0)]
        resx = resx.append(_data1).append(_data2)
    # TDX volumes are in lots (100 shares); convert to shares.
    resx['vol'] = resx['vol'] * 100.0
    resx['volume'] = resx['vol']
    resx['type'] = '1min'
    if if_drop:
        resx = resx.dropna()
    return resx.reset_index().drop_duplicates().set_index(['datetime', 'code'])
def function[QA_data_tick_resample_1min, parameter[tick, type_, if_drop]]: constant[ tick 采样为 分钟数据 1. 仅使用将 tick 采样为 1 分钟数据 2. 仅测试过,与通达信 1 分钟数据达成一致 3. 经测试,可以匹配 QA.QA_fetch_get_stock_transaction 得到的数据,其他类型数据未测试 demo: df = QA.QA_fetch_get_stock_transaction(package='tdx', code='000001', start='2018-08-01 09:25:00', end='2018-08-03 15:00:00') df_min = QA_data_tick_resample_1min(df) ] variable[tick] assign[=] call[name[tick].assign, parameter[]] variable[resx] assign[=] call[name[pd].DataFrame, parameter[]] variable[_dates] assign[=] call[name[set], parameter[name[tick].date]] for taget[name[date]] in starred[call[name[sorted], parameter[call[name[list], parameter[name[_dates]]]]]] begin[:] variable[_data] assign[=] call[name[tick].loc][compare[name[tick].date equal[==] name[date]]] variable[_data1] assign[=] call[call[call[name[_data]][<ast.Slice object at 0x7da1b1faf520>].resample, parameter[name[type_]]].apply, parameter[dictionary[[<ast.Constant object at 0x7da1b1fae140>, <ast.Constant object at 0x7da1b1fae170>, <ast.Constant object at 0x7da1b1fae1a0>, <ast.Constant object at 0x7da1b1fae1d0>], [<ast.Constant object at 0x7da1b1fae200>, <ast.Constant object at 0x7da1b1fae230>, <ast.Constant object at 0x7da1b1fae260>, <ast.Constant object at 0x7da1b1fae290>]]]] name[_data1].columns assign[=] call[name[_data1].columns.droplevel, parameter[constant[0]]] if compare[call[name[len], parameter[call[name[_data].loc][<ast.Slice object at 0x7da1b1fae590>]]] greater[>] constant[0]] begin[:] call[name[_data1].loc][tuple[[<ast.Slice object at 0x7da1b1fae890>, <ast.Constant object at 0x7da1b1faea40>]]] assign[=] call[name[_data1].loc][tuple[[<ast.Slice object at 0x7da1b1faeb60>, <ast.Constant object at 0x7da1b1faf010>]]].values call[name[_data1].loc][tuple[[<ast.Slice object at 0x7da1b1faeef0>, <ast.Constant object at 0x7da1b1faed40>]]] assign[=] call[call[name[_data1].loc][tuple[[<ast.Slice object at 0x7da1b1fabf40>, <ast.Constant object at 0x7da1b1fabd90>]]].max, parameter[]] 
call[name[_data1].loc][tuple[[<ast.Slice object at 0x7da1b1fabc70>, <ast.Constant object at 0x7da1b1fabac0>]]] assign[=] call[call[name[_data1].loc][tuple[[<ast.Slice object at 0x7da1b1fab970>, <ast.Constant object at 0x7da1b2045030>]]].min, parameter[]] call[name[_data1].loc][tuple[[<ast.Slice object at 0x7da1b2044430>, <ast.Constant object at 0x7da1b2045c60>]]] assign[=] call[call[name[_data1].loc][tuple[[<ast.Slice object at 0x7da1b2044790>, <ast.Constant object at 0x7da1b1fab700>]]].sum, parameter[]] call[name[_data1].loc][tuple[[<ast.Slice object at 0x7da1b1fab5e0>, <ast.Constant object at 0x7da1b1fab430>]]] assign[=] call[call[name[_data1].loc][tuple[[<ast.Slice object at 0x7da1b1fab2e0>, <ast.Constant object at 0x7da1b1fab130>]]].sum, parameter[]] if compare[call[name[len], parameter[call[name[_data].loc][<ast.Slice object at 0x7da1b1fa9e70>]]] greater[>] constant[0]] begin[:] call[name[_data1].loc][tuple[[<ast.Slice object at 0x7da1b1fa9b70>, <ast.Constant object at 0x7da1b1fa99c0>]]] assign[=] call[call[name[_data1].loc][tuple[[<ast.Slice object at 0x7da1b1fa9870>, <ast.Constant object at 0x7da1b1fa96c0>]]].max, parameter[]] call[name[_data1].loc][tuple[[<ast.Slice object at 0x7da1b1fa95a0>, <ast.Constant object at 0x7da1b1fa93f0>]]] assign[=] call[call[name[_data1].loc][tuple[[<ast.Slice object at 0x7da1b1fa92a0>, <ast.Constant object at 0x7da1b1fa90f0>]]].min, parameter[]] call[name[_data1].loc][tuple[[<ast.Slice object at 0x7da1b1fa8fd0>, <ast.Constant object at 0x7da1b1fa8e20>]]] assign[=] call[name[_data1].loc][tuple[[<ast.Slice object at 0x7da1b1fa8d00>, <ast.Constant object at 0x7da1b1fa8b50>]]].values call[name[_data1].loc][tuple[[<ast.Slice object at 0x7da1b1fa8a30>, <ast.Constant object at 0x7da1b1fa8880>]]] assign[=] call[call[name[_data1].loc][tuple[[<ast.Slice object at 0x7da1b1fa8730>, <ast.Constant object at 0x7da1b1fa8580>]]].sum, parameter[]] call[name[_data1].loc][tuple[[<ast.Slice object at 0x7da1b1fa8460>, <ast.Constant object at 
0x7da1b1fa82b0>]]] assign[=] call[call[name[_data1].loc][tuple[[<ast.Slice object at 0x7da1b1fa8160>, <ast.Constant object at 0x7da1b1f44580>]]].sum, parameter[]] variable[_data1] assign[=] call[name[_data1].loc][<ast.Slice object at 0x7da1b1f458a0>] variable[_data2] assign[=] call[call[call[name[_data]][<ast.Slice object at 0x7da1b1f46710>].resample, parameter[name[type_]]].apply, parameter[dictionary[[<ast.Constant object at 0x7da1b1f44a90>, <ast.Constant object at 0x7da1b1f44df0>, <ast.Constant object at 0x7da1b1f445e0>, <ast.Constant object at 0x7da1b1f47df0>], [<ast.Constant object at 0x7da1b1f45810>, <ast.Constant object at 0x7da1b1f47820>, <ast.Constant object at 0x7da1b1f47d00>, <ast.Constant object at 0x7da1b1f47b80>]]]] name[_data2].columns assign[=] call[name[_data2].columns.droplevel, parameter[constant[0]]] if <ast.BoolOp object at 0x7da1b1f45900> begin[:] if compare[call[name[len], parameter[call[name[_data].loc][<ast.Slice object at 0x7da1b1f453c0>]]] greater[>] constant[0]] begin[:] call[name[_data2].loc][tuple[[<ast.Slice object at 0x7da1b1f45d80>, <ast.Constant object at 0x7da1b1f883a0>]]] assign[=] call[call[name[_data2].loc][tuple[[<ast.Slice object at 0x7da1b1f884f0>, <ast.Constant object at 0x7da1b1f88790>]]].max, parameter[]] call[name[_data2].loc][tuple[[<ast.Slice object at 0x7da1b1f88a60>, <ast.Constant object at 0x7da1b1f88910>]]] assign[=] call[call[name[_data2].loc][tuple[[<ast.Slice object at 0x7da1b1f89510>, <ast.Constant object at 0x7da1b1f88ee0>]]].min, parameter[]] call[name[_data2].loc][tuple[[<ast.Slice object at 0x7da1b1f89090>, <ast.Constant object at 0x7da1b1f893c0>]]] assign[=] call[name[_data2].loc][tuple[[<ast.Slice object at 0x7da1b1f88d30>, <ast.Constant object at 0x7da1b1f8be20>]]].values variable[_data2] assign[=] call[name[_data2].loc][<ast.Slice object at 0x7da1b1f8a980>] variable[resx] assign[=] call[call[name[resx].append, parameter[name[_data1]]].append, parameter[name[_data2]]] call[name[resx]][constant[vol]] 
assign[=] binary_operation[call[name[resx]][constant[vol]] * constant[100.0]] call[name[resx]][constant[volume]] assign[=] call[name[resx]][constant[vol]] call[name[resx]][constant[type]] assign[=] constant[1min] if name[if_drop] begin[:] variable[resx] assign[=] call[name[resx].dropna, parameter[]] return[call[call[call[name[resx].reset_index, parameter[]].drop_duplicates, parameter[]].set_index, parameter[list[[<ast.Constant object at 0x7da1b1f89e10>, <ast.Constant object at 0x7da1b1f88df0>]]]]]
keyword[def] identifier[QA_data_tick_resample_1min] ( identifier[tick] , identifier[type_] = literal[string] , identifier[if_drop] = keyword[True] ): literal[string] identifier[tick] = identifier[tick] . identifier[assign] ( identifier[amount] = identifier[tick] . identifier[price] * identifier[tick] . identifier[vol] ) identifier[resx] = identifier[pd] . identifier[DataFrame] () identifier[_dates] = identifier[set] ( identifier[tick] . identifier[date] ) keyword[for] identifier[date] keyword[in] identifier[sorted] ( identifier[list] ( identifier[_dates] )): identifier[_data] = identifier[tick] . identifier[loc] [ identifier[tick] . identifier[date] == identifier[date] ] identifier[_data1] = identifier[_data] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] )]. identifier[resample] ( identifier[type_] , identifier[closed] = literal[string] , identifier[base] = literal[int] , identifier[loffset] = identifier[type_] ). identifier[apply] ( { literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] } ) identifier[_data1] . identifier[columns] = identifier[_data1] . identifier[columns] . identifier[droplevel] ( literal[int] ) keyword[if] identifier[len] ( identifier[_data] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] )])> literal[int] : identifier[_data1] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] ), literal[string] ]= identifier[_data1] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] ), literal[string] ]. identifier[values] identifier[_data1] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] ), literal[string] ]= identifier[_data1] . 
identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] ), literal[string] ]. identifier[max] () identifier[_data1] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] ), literal[string] ]= identifier[_data1] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] ), literal[string] ]. identifier[min] () identifier[_data1] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] ), literal[string] ]= identifier[_data1] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] ), literal[string] ]. identifier[sum] () identifier[_data1] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] ), literal[string] ]= identifier[_data1] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] ), literal[string] ]. identifier[sum] () keyword[if] identifier[len] ( identifier[_data] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] )])> literal[int] : identifier[_data1] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] ), literal[string] ]= identifier[_data1] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] ), literal[string] ]. identifier[max] () identifier[_data1] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] ), literal[string] ]= identifier[_data1] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] ), literal[string] ]. 
identifier[min] () identifier[_data1] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] ), literal[string] ]= identifier[_data1] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] ), literal[string] ]. identifier[values] identifier[_data1] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] ), literal[string] ]= identifier[_data1] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] ), literal[string] ]. identifier[sum] () identifier[_data1] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] ), literal[string] ]= identifier[_data1] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] ), literal[string] ]. identifier[sum] () identifier[_data1] = identifier[_data1] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] )] identifier[_data2] = identifier[_data] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] )]. identifier[resample] ( identifier[type_] , identifier[closed] = literal[string] , identifier[base] = literal[int] , identifier[loffset] = identifier[type_] ). identifier[apply] ( { literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] } ) identifier[_data2] . identifier[columns] = identifier[_data2] . identifier[columns] . identifier[droplevel] ( literal[int] ) keyword[if] ( identifier[pd] . identifier[Timestamp] ( identifier[date] )< identifier[pd] . identifier[Timestamp] ( literal[string] )) keyword[and] ( identifier[tick] . identifier[code] . 
identifier[iloc] [ literal[int] ][ literal[int] ]== literal[string] ): keyword[if] identifier[len] ( identifier[_data] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] )])> literal[int] : identifier[_data2] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] ), literal[string] ]= identifier[_data2] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] ), literal[string] ]. identifier[max] () identifier[_data2] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] ), literal[string] ]= identifier[_data2] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] ), literal[string] ]. identifier[min] () identifier[_data2] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] ), literal[string] ]= identifier[_data2] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] ), literal[string] ]. identifier[values] keyword[else] : keyword[if] identifier[len] ( identifier[_data] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] )])> literal[int] : identifier[_data2] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] )]= identifier[_data2] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] )]. identifier[values] identifier[_data2] = identifier[_data2] . identifier[loc] [ identifier[time] ( literal[int] , literal[int] ): identifier[time] ( literal[int] , literal[int] )] identifier[resx] = identifier[resx] . 
identifier[append] ( identifier[_data1] ). identifier[append] ( identifier[_data2] ) identifier[resx] [ literal[string] ]= identifier[resx] [ literal[string] ]* literal[int] identifier[resx] [ literal[string] ]= identifier[resx] [ literal[string] ] identifier[resx] [ literal[string] ]= literal[string] keyword[if] identifier[if_drop] : identifier[resx] = identifier[resx] . identifier[dropna] () keyword[return] identifier[resx] . identifier[reset_index] (). identifier[drop_duplicates] (). identifier[set_index] ([ literal[string] , literal[string] ])
def QA_data_tick_resample_1min(tick, type_='1min', if_drop=True): """ tick 采样为 分钟数据 1. 仅使用将 tick 采样为 1 分钟数据 2. 仅测试过,与通达信 1 分钟数据达成一致 3. 经测试,可以匹配 QA.QA_fetch_get_stock_transaction 得到的数据,其他类型数据未测试 demo: df = QA.QA_fetch_get_stock_transaction(package='tdx', code='000001', start='2018-08-01 09:25:00', end='2018-08-03 15:00:00') df_min = QA_data_tick_resample_1min(df) """ tick = tick.assign(amount=tick.price * tick.vol) resx = pd.DataFrame() _dates = set(tick.date) for date in sorted(list(_dates)): _data = tick.loc[tick.date == date] # morning min bar _data1 = _data[time(9, 25):time(11, 30)].resample(type_, closed='left', base=30, loffset=type_).apply({'price': 'ohlc', 'vol': 'sum', 'code': 'last', 'amount': 'sum'}) _data1.columns = _data1.columns.droplevel(0) # do fix on the first and last bar # 某些股票某些日期没有集合竞价信息,譬如 002468 在 2017 年 6 月 5 日的数据 if len(_data.loc[time(9, 25):time(9, 25)]) > 0: _data1.loc[time(9, 31):time(9, 31), 'open'] = _data1.loc[time(9, 26):time(9, 26), 'open'].values _data1.loc[time(9, 31):time(9, 31), 'high'] = _data1.loc[time(9, 26):time(9, 31), 'high'].max() _data1.loc[time(9, 31):time(9, 31), 'low'] = _data1.loc[time(9, 26):time(9, 31), 'low'].min() _data1.loc[time(9, 31):time(9, 31), 'vol'] = _data1.loc[time(9, 26):time(9, 31), 'vol'].sum() _data1.loc[time(9, 31):time(9, 31), 'amount'] = _data1.loc[time(9, 26):time(9, 31), 'amount'].sum() # depends on [control=['if'], data=[]] # 通达信分笔数据有的有 11:30 数据,有的没有 if len(_data.loc[time(11, 30):time(11, 30)]) > 0: _data1.loc[time(11, 30):time(11, 30), 'high'] = _data1.loc[time(11, 30):time(11, 31), 'high'].max() _data1.loc[time(11, 30):time(11, 30), 'low'] = _data1.loc[time(11, 30):time(11, 31), 'low'].min() _data1.loc[time(11, 30):time(11, 30), 'close'] = _data1.loc[time(11, 31):time(11, 31), 'close'].values _data1.loc[time(11, 30):time(11, 30), 'vol'] = _data1.loc[time(11, 30):time(11, 31), 'vol'].sum() _data1.loc[time(11, 30):time(11, 30), 'amount'] = _data1.loc[time(11, 30):time(11, 31), 'amount'].sum() # 
depends on [control=['if'], data=[]] _data1 = _data1.loc[time(9, 31):time(11, 30)] # afternoon min bar _data2 = _data[time(13, 0):time(15, 0)].resample(type_, closed='left', base=30, loffset=type_).apply({'price': 'ohlc', 'vol': 'sum', 'code': 'last', 'amount': 'sum'}) _data2.columns = _data2.columns.droplevel(0) # 沪市股票在 2018-08-20 起,尾盘 3 分钟集合竞价 if pd.Timestamp(date) < pd.Timestamp('2018-08-20') and tick.code.iloc[0][0] == '6': # 避免出现 tick 数据没有 1:00 的值 if len(_data.loc[time(13, 0):time(13, 0)]) > 0: _data2.loc[time(15, 0):time(15, 0), 'high'] = _data2.loc[time(15, 0):time(15, 1), 'high'].max() _data2.loc[time(15, 0):time(15, 0), 'low'] = _data2.loc[time(15, 0):time(15, 1), 'low'].min() _data2.loc[time(15, 0):time(15, 0), 'close'] = _data2.loc[time(15, 1):time(15, 1), 'close'].values # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # 避免出现 tick 数据没有 15:00 的值 elif len(_data.loc[time(13, 0):time(13, 0)]) > 0: _data2.loc[time(15, 0):time(15, 0)] = _data2.loc[time(15, 1):time(15, 1)].values # depends on [control=['if'], data=[]] _data2 = _data2.loc[time(13, 1):time(15, 0)] resx = resx.append(_data1).append(_data2) # depends on [control=['for'], data=['date']] resx['vol'] = resx['vol'] * 100.0 resx['volume'] = resx['vol'] resx['type'] = '1min' if if_drop: resx = resx.dropna() # depends on [control=['if'], data=[]] return resx.reset_index().drop_duplicates().set_index(['datetime', 'code'])
def encodedFileID(self, jobStoreFileID):
    """
    Map a job store file ID to a unique file name inside the cache folder.

    Job store IDs are essentially URLs/paths to files and therefore cannot
    be used as file names directly.  URL-safe base64 is used because it is
    both filesystem-safe and reversible.

    :param jobStoreFileID: string representing a job store file ID
    :return: outCachedFile: A path to the hashed file in localCacheDir
    :rtype: str
    """
    encodedName = base64.urlsafe_b64encode(jobStoreFileID.encode('utf-8')).decode('utf-8')
    return os.path.join(self.localCacheDir, encodedName)
def function[encodedFileID, parameter[self, jobStoreFileID]]: constant[ Uses a url safe base64 encoding to encode the jobStoreFileID into a unique identifier to use as filename within the cache folder. jobstore IDs are essentially urls/paths to files and thus cannot be used as is. Base64 encoding is used since it is reversible. :param jobStoreFileID: string representing a job store file ID :return: outCachedFile: A path to the hashed file in localCacheDir :rtype: str ] variable[base64Text] assign[=] call[call[name[base64].urlsafe_b64encode, parameter[call[name[jobStoreFileID].encode, parameter[constant[utf-8]]]]].decode, parameter[constant[utf-8]]] variable[outCachedFile] assign[=] call[name[os].path.join, parameter[name[self].localCacheDir, name[base64Text]]] return[name[outCachedFile]]
keyword[def] identifier[encodedFileID] ( identifier[self] , identifier[jobStoreFileID] ): literal[string] identifier[base64Text] = identifier[base64] . identifier[urlsafe_b64encode] ( identifier[jobStoreFileID] . identifier[encode] ( literal[string] )). identifier[decode] ( literal[string] ) identifier[outCachedFile] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[localCacheDir] , identifier[base64Text] ) keyword[return] identifier[outCachedFile]
def encodedFileID(self, jobStoreFileID): """ Uses a url safe base64 encoding to encode the jobStoreFileID into a unique identifier to use as filename within the cache folder. jobstore IDs are essentially urls/paths to files and thus cannot be used as is. Base64 encoding is used since it is reversible. :param jobStoreFileID: string representing a job store file ID :return: outCachedFile: A path to the hashed file in localCacheDir :rtype: str """ base64Text = base64.urlsafe_b64encode(jobStoreFileID.encode('utf-8')).decode('utf-8') outCachedFile = os.path.join(self.localCacheDir, base64Text) return outCachedFile
def _aes_encrypt(data, algorithm, key):
    """Encrypt *data* with AES using the parameters in *algorithm*.

    Only CBC mode (``algorithm['subtype'] == 'cbc'``) is supported.  When
    the algorithm dict supplies a non-empty ``'iv'`` it is used directly
    and NOT prepended to the ciphertext; otherwise a random IV of
    ``algorithm['iv_size']`` bytes is generated and prepended.  Input is
    padded PKCS#7-style up to the IV/block size before encryption.

    NOTE(review): padding via ``chr`` assumes *data* is a (Python 2 style)
    byte string -- confirm callers never pass ``bytes`` on Python 3.
    """
    subtype = algorithm['subtype']
    if subtype != 'cbc':
        raise Exception('AES subtype not supported: %s' % subtype)
    mode = AES.MODE_CBC
    block_size = algorithm['iv_size']
    supplied_iv = algorithm.get('iv')
    if supplied_iv:
        if len(supplied_iv) != algorithm['iv_size']:
            raise Exception('Invalid IV size')
        iv_value = supplied_iv
        prepend_iv = False
    else:
        iv_value = get_random_bytes(block_size)
        prepend_iv = True
    pad_len = block_size - (len(data) % block_size)
    padded = data + chr(pad_len) * pad_len
    ciphertext = AES.new(key, mode, iv_value).encrypt(padded)
    return iv_value + ciphertext if prepend_iv else ciphertext
def function[_aes_encrypt, parameter[data, algorithm, key]]: constant[AES encrypt] if compare[call[name[algorithm]][constant[subtype]] equal[==] constant[cbc]] begin[:] variable[mode] assign[=] name[AES].MODE_CBC variable[iv_size] assign[=] call[name[algorithm]][constant[iv_size]] variable[block_size] assign[=] name[iv_size] variable[include_iv] assign[=] constant[True] if <ast.BoolOp object at 0x7da2054a58d0> begin[:] if compare[call[name[len], parameter[call[name[algorithm]][constant[iv]]]] not_equal[!=] call[name[algorithm]][constant[iv_size]]] begin[:] <ast.Raise object at 0x7da2054a51e0> variable[iv_value] assign[=] call[name[algorithm]][constant[iv]] variable[include_iv] assign[=] constant[False] variable[numpad] assign[=] binary_operation[name[block_size] - binary_operation[call[name[len], parameter[name[data]]] <ast.Mod object at 0x7da2590d6920> name[block_size]]] variable[data] assign[=] binary_operation[name[data] + binary_operation[name[numpad] * call[name[chr], parameter[name[numpad]]]]] variable[enc] assign[=] call[call[name[AES].new, parameter[name[key], name[mode], name[iv_value]]].encrypt, parameter[name[data]]] if name[include_iv] begin[:] variable[enc] assign[=] binary_operation[name[iv_value] + name[enc]] return[name[enc]]
keyword[def] identifier[_aes_encrypt] ( identifier[data] , identifier[algorithm] , identifier[key] ): literal[string] keyword[if] identifier[algorithm] [ literal[string] ]== literal[string] : identifier[mode] = identifier[AES] . identifier[MODE_CBC] keyword[else] : keyword[raise] identifier[Exception] ( literal[string] % identifier[algorithm] [ literal[string] ]) identifier[iv_size] = identifier[algorithm] [ literal[string] ] identifier[block_size] = identifier[iv_size] identifier[include_iv] = keyword[True] keyword[if] literal[string] keyword[in] identifier[algorithm] keyword[and] identifier[algorithm] [ literal[string] ]: keyword[if] identifier[len] ( identifier[algorithm] [ literal[string] ])!= identifier[algorithm] [ literal[string] ]: keyword[raise] identifier[Exception] ( literal[string] ) identifier[iv_value] = identifier[algorithm] [ literal[string] ] identifier[include_iv] = keyword[False] keyword[else] : identifier[iv_value] = identifier[get_random_bytes] ( identifier[iv_size] ) identifier[numpad] = identifier[block_size] -( identifier[len] ( identifier[data] )% identifier[block_size] ) identifier[data] = identifier[data] + identifier[numpad] * identifier[chr] ( identifier[numpad] ) identifier[enc] = identifier[AES] . identifier[new] ( identifier[key] , identifier[mode] , identifier[iv_value] ). identifier[encrypt] ( identifier[data] ) keyword[if] identifier[include_iv] : identifier[enc] = identifier[iv_value] + identifier[enc] keyword[return] identifier[enc]
def _aes_encrypt(data, algorithm, key): """AES encrypt""" if algorithm['subtype'] == 'cbc': mode = AES.MODE_CBC # depends on [control=['if'], data=[]] else: raise Exception('AES subtype not supported: %s' % algorithm['subtype']) iv_size = algorithm['iv_size'] block_size = iv_size include_iv = True if 'iv' in algorithm and algorithm['iv']: if len(algorithm['iv']) != algorithm['iv_size']: raise Exception('Invalid IV size') # depends on [control=['if'], data=[]] iv_value = algorithm['iv'] include_iv = False # depends on [control=['if'], data=[]] else: iv_value = get_random_bytes(iv_size) numpad = block_size - len(data) % block_size data = data + numpad * chr(numpad) enc = AES.new(key, mode, iv_value).encrypt(data) if include_iv: enc = iv_value + enc # depends on [control=['if'], data=[]] return enc
def concatenate(self, others, axis=0):
    """Join this array with a sequence of others along an existing axis.

    Parameters
    ----------
    others : sequence of array_like
        Arrays with the same shape as this one, except in the dimension
        corresponding to `axis` (the first, by default).
    axis : int, optional
        The axis along which the arrays will be joined.  Default is 0.

    Returns
    -------
    res : ndarray
        The concatenated array.

    Examples
    --------
    >>> import allel
    >>> h = allel.HaplotypeArray([[0, 0, 0, 1],
    ...                           [0, 1, 1, 1],
    ...                           [0, 2, -1, -1]], dtype='i1')
    >>> h.concatenate([h], axis=0)
    <HaplotypeArray shape=(6, 4) dtype=int8>
    0 0 0 1
    0 1 1 1
    0 2 . .
    0 0 0 1
    0 1 1 1
    0 2 . .
    >>> h.concatenate([h], axis=1)
    <HaplotypeArray shape=(3, 8) dtype=int8>
    0 0 0 1 0 0 0 1
    0 1 1 1 0 1 1 1
    0 2 . . 0 2 . .

    """
    # Delegate to the shared helper, preserving this array's concrete class
    # and using the plain numpy concatenation primitive.
    out_cls = type(self)
    return concatenate_haplotype_array(
        self,
        others,
        axis=axis,
        cls=out_cls,
        concatenate=np.concatenate,
    )
def function[concatenate, parameter[self, others, axis]]: constant[Join a sequence of arrays along an existing axis. Parameters ---------- others : sequence of array_like The arrays must have the same shape, except in the dimension corresponding to `axis` (the first, by default). axis : int, optional The axis along which the arrays will be joined. Default is 0. Returns ------- res : ndarray The concatenated array. Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> h.concatenate([h], axis=0) <HaplotypeArray shape=(6, 4) dtype=int8> 0 0 0 1 0 1 1 1 0 2 . . 0 0 0 1 0 1 1 1 0 2 . . >>> h.concatenate([h], axis=1) <HaplotypeArray shape=(3, 8) dtype=int8> 0 0 0 1 0 0 0 1 0 1 1 1 0 1 1 1 0 2 . . 0 2 . . ] return[call[name[concatenate_haplotype_array], parameter[name[self], name[others]]]]
keyword[def] identifier[concatenate] ( identifier[self] , identifier[others] , identifier[axis] = literal[int] ): literal[string] keyword[return] identifier[concatenate_haplotype_array] ( identifier[self] , identifier[others] , identifier[axis] = identifier[axis] , identifier[cls] = identifier[type] ( identifier[self] ), identifier[concatenate] = identifier[np] . identifier[concatenate] )
def concatenate(self, others, axis=0): """Join a sequence of arrays along an existing axis. Parameters ---------- others : sequence of array_like The arrays must have the same shape, except in the dimension corresponding to `axis` (the first, by default). axis : int, optional The axis along which the arrays will be joined. Default is 0. Returns ------- res : ndarray The concatenated array. Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> h.concatenate([h], axis=0) <HaplotypeArray shape=(6, 4) dtype=int8> 0 0 0 1 0 1 1 1 0 2 . . 0 0 0 1 0 1 1 1 0 2 . . >>> h.concatenate([h], axis=1) <HaplotypeArray shape=(3, 8) dtype=int8> 0 0 0 1 0 0 0 1 0 1 1 1 0 1 1 1 0 2 . . 0 2 . . """ return concatenate_haplotype_array(self, others, axis=axis, cls=type(self), concatenate=np.concatenate)
def assign_vertexid(self):
    """1. collect the Vertex objects that are actually referenced by blocks
    2. sort them according to (x, y, z)
    3. assign each one a sequence number in ``index``
    4. store the sorted list as ``self.valid_vertices``
    """
    # Gather each referenced vertex once, keyed by its unique name.
    self.valid_vertices = []
    seen_names = set()
    for block in self.blocks.values():
        for vname in block.vnames:
            vertex = self.vertices[vname]
            if vertex.name in seen_names:
                continue
            seen_names.add(vertex.name)
            self.valid_vertices.append(vertex)

    # Sort (Vertex ordering is by coordinates) and number sequentially.
    self.valid_vertices = sorted(self.valid_vertices)
    for seq, vertex in enumerate(self.valid_vertices):
        vertex.index = seq
def function[assign_vertexid, parameter[self]]: constant[1. create list of Vertex which are referred by blocks only. 2. sort vertex according to (x, y, z) 3. assign sequence number for each Vertex 4. sorted list is saved as self.valid_vertices ] variable[validvnames] assign[=] call[name[set], parameter[]] name[self].valid_vertices assign[=] list[[]] for taget[name[b]] in starred[call[name[self].blocks.values, parameter[]]] begin[:] for taget[name[n]] in starred[name[b].vnames] begin[:] variable[v] assign[=] call[name[self].vertices][name[n]] if compare[name[v].name <ast.NotIn object at 0x7da2590d7190> name[validvnames]] begin[:] call[name[validvnames].update, parameter[list[[<ast.Attribute object at 0x7da18ede5de0>]]]] call[name[self].valid_vertices.append, parameter[name[v]]] name[self].valid_vertices assign[=] call[name[sorted], parameter[name[self].valid_vertices]] for taget[tuple[[<ast.Name object at 0x7da20c6e49a0>, <ast.Name object at 0x7da20c6e7970>]]] in starred[call[name[enumerate], parameter[name[self].valid_vertices]]] begin[:] name[v].index assign[=] name[i]
keyword[def] identifier[assign_vertexid] ( identifier[self] ): literal[string] identifier[validvnames] = identifier[set] () identifier[self] . identifier[valid_vertices] =[] keyword[for] identifier[b] keyword[in] identifier[self] . identifier[blocks] . identifier[values] (): keyword[for] identifier[n] keyword[in] identifier[b] . identifier[vnames] : identifier[v] = identifier[self] . identifier[vertices] [ identifier[n] ] keyword[if] identifier[v] . identifier[name] keyword[not] keyword[in] identifier[validvnames] : identifier[validvnames] . identifier[update] ([ identifier[v] . identifier[name] ]) identifier[self] . identifier[valid_vertices] . identifier[append] ( identifier[v] ) identifier[self] . identifier[valid_vertices] = identifier[sorted] ( identifier[self] . identifier[valid_vertices] ) keyword[for] identifier[i] , identifier[v] keyword[in] identifier[enumerate] ( identifier[self] . identifier[valid_vertices] ): identifier[v] . identifier[index] = identifier[i]
def assign_vertexid(self): """1. create list of Vertex which are referred by blocks only. 2. sort vertex according to (x, y, z) 3. assign sequence number for each Vertex 4. sorted list is saved as self.valid_vertices """ # gather 'uniq' names which are refferred by blocks validvnames = set() self.valid_vertices = [] for b in self.blocks.values(): for n in b.vnames: v = self.vertices[n] if v.name not in validvnames: validvnames.update([v.name]) self.valid_vertices.append(v) # depends on [control=['if'], data=['validvnames']] # depends on [control=['for'], data=['n']] # depends on [control=['for'], data=['b']] self.valid_vertices = sorted(self.valid_vertices) for (i, v) in enumerate(self.valid_vertices): v.index = i # depends on [control=['for'], data=[]]
def rlevinson(a, efinal):
    """computes the autocorrelation coefficients, R based
    on the prediction polynomial A and the final prediction error Efinal,
    using the stepdown algorithm. Works for real or complex data.

    :param a: prediction polynomial coefficients; ``a[0]`` must be unity
        and A should be a minimum phase polynomial.
    :param efinal: final prediction error of order ``len(a) - 1``.
    :return:
        * R, the autocorrelation
        * U  prediction coefficient
        * kr reflection coefficients
        * e  errors

    :returns: (P+1) by (P+1) upper triangular matrix, U,
        that holds the i'th order prediction polynomials
        Ai, i=1:P, where P is the order of the input
        polynomial, A.

             [ 1  a1(1)*  a2(2)* ..... aP(P)  * ]
             [ 0  1       a2(1)* ..... aP(P-1)* ]
        U =  [ .................................]
             [ 0  0       0      ..... 1        ]

        from which the i'th order prediction polynomial can be extracted
        using Ai=U(i+1:-1:1,i+1)'. The first row of U contains the
        conjugates of the reflection coefficients, and the K's may be
        extracted using, K=conj(U(1,2:end)).

    .. todo:: remove the conjugate when data is real data, clean up the code
       test and doc.
    """
    a = numpy.array(a)
    realdata = numpy.isrealobj(a)

    assert a[0] == 1, 'First coefficient of the prediction polynomial must be unity'

    p = len(a)
    if p < 2:
        raise ValueError('Polynomial should have at least two coefficients')

    # U holds the prediction polynomials of orders 1..p in its columns;
    # a complex dtype is only needed for complex input.
    if realdata:
        U = numpy.zeros((p, p))
    else:
        U = numpy.zeros((p, p), dtype=complex)
    # Prediction coefficients of order p (reversed and conjugated).
    U[:, p - 1] = numpy.conj(a[-1::-1])

    p = p - 1
    e = numpy.zeros(p)

    # Initialize the step down: the last error is the given final error.
    e[-1] = efinal  # prediction error of order p

    # Step down: recover the lower-order polynomials and their errors.
    for k in range(p - 1, 0, -1):
        a, e[k - 1] = levdown(a, e[k])
        U[:, k] = numpy.concatenate((numpy.conj(a[-1::-1].transpose()),
                                     [0] * (p - k)))

    # Zeroth-order error; because a[1]=1 (true polynomial).
    e0 = e[0] / (1. - abs(a[1] ** 2))
    U[0, 0] = 1  # prediction coefficient of zeroth order
    kr = numpy.conj(U[0, 1:])  # the reflection coefficients
    kr = kr.transpose()  # to make it into a column vector

    # Once we have the matrix U and the prediction error at various orders,
    # we can use this information to find the autocorrelation coefficients.
    R = numpy.zeros(1, dtype=complex)  # initialize recursion
    R0 = e0  # to take care of the zero indexing problem
    R[0] = -numpy.conj(U[0, 1]) * R0  # R[1] = -a1[1]*R[0]

    # Actual recursion for the remaining lags.
    for k in range(1, p):
        r = -sum(numpy.conj(U[k - 1::-1, k]) * R[-1::-1]) - kr[k] * e[k - 1]
        R = numpy.insert(R, len(R), r)

    # Include R(0) at the front and return.
    R = numpy.insert(R, 0, e0)
    return R, U, kr, e
def function[rlevinson, parameter[a, efinal]]: constant[computes the autocorrelation coefficients, R based on the prediction polynomial A and the final prediction error Efinal, using the stepdown algorithm. Works for real or complex data :param a: :param efinal: :return: * R, the autocorrelation * U prediction coefficient * kr reflection coefficients * e errors A should be a minimum phase polynomial and A(1) is assumed to be unity. :returns: (P+1) by (P+1) upper triangular matrix, U, that holds the i'th order prediction polynomials Ai, i=1:P, where P is the order of the input polynomial, A. [ 1 a1(1)* a2(2)* ..... aP(P) * ] [ 0 1 a2(1)* ..... aP(P-1)* ] U = [ .................................] [ 0 0 0 ..... 1 ] from which the i'th order prediction polynomial can be extracted using Ai=U(i+1:-1:1,i+1)'. The first row of U contains the conjugates of the reflection coefficients, and the K's may be extracted using, K=conj(U(1,2:end)). .. todo:: remove the conjugate when data is real data, clean up the code test and doc. 
] variable[a] assign[=] call[name[numpy].array, parameter[name[a]]] variable[realdata] assign[=] call[name[numpy].isrealobj, parameter[name[a]]] assert[compare[call[name[a]][constant[0]] equal[==] constant[1]]] variable[p] assign[=] call[name[len], parameter[name[a]]] if compare[name[p] less[<] constant[2]] begin[:] <ast.Raise object at 0x7da1b010b070> if compare[name[realdata] equal[==] constant[True]] begin[:] variable[U] assign[=] call[name[numpy].zeros, parameter[tuple[[<ast.Name object at 0x7da1b0109c00>, <ast.Name object at 0x7da1b01081c0>]]]] call[name[U]][tuple[[<ast.Slice object at 0x7da1b010be20>, <ast.BinOp object at 0x7da1b010ba60>]]] assign[=] call[name[numpy].conj, parameter[call[name[a]][<ast.Slice object at 0x7da1b0109ae0>]]] variable[p] assign[=] binary_operation[name[p] - constant[1]] variable[e] assign[=] call[name[numpy].zeros, parameter[name[p]]] call[name[e]][<ast.UnaryOp object at 0x7da1b010b250>] assign[=] name[efinal] for taget[name[k]] in starred[call[name[range], parameter[binary_operation[name[p] - constant[1]], constant[0], <ast.UnaryOp object at 0x7da1b010aad0>]]] begin[:] <ast.List object at 0x7da1b010bc40> assign[=] call[name[levdown], parameter[name[a], call[name[e]][name[k]]]] call[name[U]][tuple[[<ast.Slice object at 0x7da1b01099f0>, <ast.Name object at 0x7da1b010b3d0>]]] assign[=] call[name[numpy].concatenate, parameter[tuple[[<ast.Call object at 0x7da1b0108220>, <ast.BinOp object at 0x7da1b010b850>]]]] variable[e0] assign[=] binary_operation[call[name[e]][constant[0]] / binary_operation[constant[1.0] - call[name[abs], parameter[binary_operation[call[name[a]][constant[1]] ** constant[2]]]]]] call[name[U]][tuple[[<ast.Constant object at 0x7da1b010b160>, <ast.Constant object at 0x7da1b0108a30>]]] assign[=] constant[1] variable[kr] assign[=] call[name[numpy].conj, parameter[call[name[U]][tuple[[<ast.Constant object at 0x7da1b0108d30>, <ast.Slice object at 0x7da1b0109930>]]]]] variable[kr] assign[=] call[name[kr].transpose, 
parameter[]] variable[R] assign[=] call[name[numpy].zeros, parameter[constant[1]]] variable[k] assign[=] constant[1] variable[R0] assign[=] name[e0] call[name[R]][constant[0]] assign[=] binary_operation[<ast.UnaryOp object at 0x7da1b0108cd0> * name[R0]] for taget[name[k]] in starred[call[name[range], parameter[constant[1], name[p]]]] begin[:] variable[r] assign[=] binary_operation[<ast.UnaryOp object at 0x7da1b0108850> - binary_operation[call[name[kr]][name[k]] * call[name[e]][binary_operation[name[k] - constant[1]]]]] variable[R] assign[=] call[name[numpy].insert, parameter[name[R], call[name[len], parameter[name[R]]], name[r]]] variable[R] assign[=] call[name[numpy].insert, parameter[name[R], constant[0], name[e0]]] return[tuple[[<ast.Name object at 0x7da1b021cc70>, <ast.Name object at 0x7da1b021ed10>, <ast.Name object at 0x7da1b021d8d0>, <ast.Name object at 0x7da1b021c790>]]]
keyword[def] identifier[rlevinson] ( identifier[a] , identifier[efinal] ): literal[string] identifier[a] = identifier[numpy] . identifier[array] ( identifier[a] ) identifier[realdata] = identifier[numpy] . identifier[isrealobj] ( identifier[a] ) keyword[assert] identifier[a] [ literal[int] ]== literal[int] , literal[string] identifier[p] = identifier[len] ( identifier[a] ) keyword[if] identifier[p] < literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[realdata] == keyword[True] : identifier[U] = identifier[numpy] . identifier[zeros] (( identifier[p] , identifier[p] )) keyword[else] : identifier[U] = identifier[numpy] . identifier[zeros] (( identifier[p] , identifier[p] ), identifier[dtype] = identifier[complex] ) identifier[U] [:, identifier[p] - literal[int] ]= identifier[numpy] . identifier[conj] ( identifier[a] [- literal[int] ::- literal[int] ]) identifier[p] = identifier[p] - literal[int] identifier[e] = identifier[numpy] . identifier[zeros] ( identifier[p] ) identifier[e] [- literal[int] ]= identifier[efinal] keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[p] - literal[int] , literal[int] ,- literal[int] ): [ identifier[a] , identifier[e] [ identifier[k] - literal[int] ]]= identifier[levdown] ( identifier[a] , identifier[e] [ identifier[k] ]) identifier[U] [:, identifier[k] ]= identifier[numpy] . identifier[concatenate] (( identifier[numpy] . identifier[conj] ( identifier[a] [- literal[int] ::- literal[int] ]. identifier[transpose] ()), [ literal[int] ]*( identifier[p] - identifier[k] ))) identifier[e0] = identifier[e] [ literal[int] ]/( literal[int] - identifier[abs] ( identifier[a] [ literal[int] ]** literal[int] )) identifier[U] [ literal[int] , literal[int] ]= literal[int] identifier[kr] = identifier[numpy] . identifier[conj] ( identifier[U] [ literal[int] , literal[int] :]) identifier[kr] = identifier[kr] . identifier[transpose] () identifier[R] = identifier[numpy] . 
identifier[zeros] ( literal[int] , identifier[dtype] = identifier[complex] ) identifier[k] = literal[int] identifier[R0] = identifier[e0] identifier[R] [ literal[int] ]=- identifier[numpy] . identifier[conj] ( identifier[U] [ literal[int] , literal[int] ])* identifier[R0] keyword[for] identifier[k] keyword[in] identifier[range] ( literal[int] , identifier[p] ): identifier[r] =- identifier[sum] ( identifier[numpy] . identifier[conj] ( identifier[U] [ identifier[k] - literal[int] ::- literal[int] , identifier[k] ])* identifier[R] [- literal[int] ::- literal[int] ])- identifier[kr] [ identifier[k] ]* identifier[e] [ identifier[k] - literal[int] ] identifier[R] = identifier[numpy] . identifier[insert] ( identifier[R] , identifier[len] ( identifier[R] ), identifier[r] ) identifier[R] = identifier[numpy] . identifier[insert] ( identifier[R] , literal[int] , identifier[e0] ) keyword[return] identifier[R] , identifier[U] , identifier[kr] , identifier[e]
def rlevinson(a, efinal): """computes the autocorrelation coefficients, R based on the prediction polynomial A and the final prediction error Efinal, using the stepdown algorithm. Works for real or complex data :param a: :param efinal: :return: * R, the autocorrelation * U prediction coefficient * kr reflection coefficients * e errors A should be a minimum phase polynomial and A(1) is assumed to be unity. :returns: (P+1) by (P+1) upper triangular matrix, U, that holds the i'th order prediction polynomials Ai, i=1:P, where P is the order of the input polynomial, A. [ 1 a1(1)* a2(2)* ..... aP(P) * ] [ 0 1 a2(1)* ..... aP(P-1)* ] U = [ .................................] [ 0 0 0 ..... 1 ] from which the i'th order prediction polynomial can be extracted using Ai=U(i+1:-1:1,i+1)'. The first row of U contains the conjugates of the reflection coefficients, and the K's may be extracted using, K=conj(U(1,2:end)). .. todo:: remove the conjugate when data is real data, clean up the code test and doc. 
""" a = numpy.array(a) realdata = numpy.isrealobj(a) assert a[0] == 1, 'First coefficient of the prediction polynomial must be unity' p = len(a) if p < 2: raise ValueError('Polynomial should have at least two coefficients') # depends on [control=['if'], data=[]] if realdata == True: U = numpy.zeros((p, p)) # This matrix will have the prediction # depends on [control=['if'], data=[]] else: # polynomials of orders 1:p U = numpy.zeros((p, p), dtype=complex) U[:, p - 1] = numpy.conj(a[-1::-1]) # Prediction coefficients of order p p = p - 1 e = numpy.zeros(p) # First we find the prediction coefficients of smaller orders and form the # Matrix U # Initialize the step down e[-1] = efinal # Prediction error of order p # Step down for k in range(p - 1, 0, -1): [a, e[k - 1]] = levdown(a, e[k]) U[:, k] = numpy.concatenate((numpy.conj(a[-1::-1].transpose()), [0] * (p - k))) # depends on [control=['for'], data=['k']] e0 = e[0] / (1.0 - abs(a[1] ** 2)) #% Because a[1]=1 (true polynomial) U[0, 0] = 1 #% Prediction coefficient of zeroth order kr = numpy.conj(U[0, 1:]) #% The reflection coefficients kr = kr.transpose() #% To make it into a column vector # % Once we have the matrix U and the prediction error at various orders, we can # % use this information to find the autocorrelation coefficients. R = numpy.zeros(1, dtype=complex) #% Initialize recursion k = 1 R0 = e0 # To take care of the zero indexing problem R[0] = -numpy.conj(U[0, 1]) * R0 # R[1]=-a1[1]*R[0] # Actual recursion for k in range(1, p): r = -sum(numpy.conj(U[k - 1::-1, k]) * R[-1::-1]) - kr[k] * e[k - 1] R = numpy.insert(R, len(R), r) # depends on [control=['for'], data=['k']] # Include R(0) and make it a column vector. Note the dot transpose #R = [R0 R].'; R = numpy.insert(R, 0, e0) return (R, U, kr, e)
def scan(self, ids=range(254)):
    """Return the ids within *ids* that answer a ping.

    Pings all ids within the specified list; by default it probes the
    full id range and so finds all the motors connected to the bus.

    :param ids: iterable of candidate motor ids (defaults to 0..253)
    :return: list of responding ids, in probe order
    """
    # ``motor_id`` instead of ``id`` avoids shadowing the builtin.
    return [motor_id for motor_id in ids if self.ping(motor_id)]
def function[scan, parameter[self, ids]]: constant[ Pings all ids within the specified list, by default it finds all the motors connected to the bus. ] return[<ast.ListComp object at 0x7da1b15ce1d0>]
keyword[def] identifier[scan] ( identifier[self] , identifier[ids] = identifier[range] ( literal[int] )): literal[string] keyword[return] [ identifier[id] keyword[for] identifier[id] keyword[in] identifier[ids] keyword[if] identifier[self] . identifier[ping] ( identifier[id] )]
def scan(self, ids=range(254)): """ Pings all ids within the specified list, by default it finds all the motors connected to the bus. """ return [id for id in ids if self.ping(id)]
def compute_num_true_positives(ref_freqs, est_freqs, window=0.5, chroma=False):
    """Compute the number of true positives in an estimate given a reference.
    A frequency is correct if it is within a quartertone of the
    correct frequency.

    Parameters
    ----------
    ref_freqs : list of np.ndarray
        reference frequencies (MIDI)
    est_freqs : list of np.ndarray
        estimated frequencies (MIDI)
    window : float
        Window size, in semitones
    chroma : bool
        If True, computes distances modulo n.
        If True, ``ref_freqs`` and ``est_freqs`` should be wrapped modulo n.

    Returns
    -------
    true_positives : np.ndarray
        Array the same length as ref_freqs containing the number of true
        positives.

    """
    n_frames = len(ref_freqs)
    true_positives = np.zeros((n_frames, ))

    # The matching strategy does not change per frame, so pick it once
    # instead of re-testing ``chroma`` on every iteration.
    if chroma:
        # match chroma-wrapped frequency events
        def _match(ref_frame, est_frame):
            return util.match_events(
                ref_frame, est_frame, window,
                distance=util._outer_distance_mod_n)
    else:
        # match frequency events within tolerance window in semitones
        def _match(ref_frame, est_frame):
            return util.match_events(ref_frame, est_frame, window)

    for i, (ref_frame, est_frame) in enumerate(zip(ref_freqs, est_freqs)):
        true_positives[i] = len(_match(ref_frame, est_frame))

    return true_positives
def function[compute_num_true_positives, parameter[ref_freqs, est_freqs, window, chroma]]: constant[Compute the number of true positives in an estimate given a reference. A frequency is correct if it is within a quartertone of the correct frequency. Parameters ---------- ref_freqs : list of np.ndarray reference frequencies (MIDI) est_freqs : list of np.ndarray estimated frequencies (MIDI) window : float Window size, in semitones chroma : bool If True, computes distances modulo n. If True, ``ref_freqs`` and ``est_freqs`` should be wrapped modulo n. Returns ------- true_positives : np.ndarray Array the same length as ref_freqs containing the number of true positives. ] variable[n_frames] assign[=] call[name[len], parameter[name[ref_freqs]]] variable[true_positives] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da20c6c6e90>]]]] for taget[tuple[[<ast.Name object at 0x7da1b0fce6e0>, <ast.Tuple object at 0x7da1b0fcea40>]]] in starred[call[name[enumerate], parameter[call[name[zip], parameter[name[ref_freqs], name[est_freqs]]]]]] begin[:] if name[chroma] begin[:] variable[matching] assign[=] call[name[util].match_events, parameter[name[ref_frame], name[est_frame], name[window]]] call[name[true_positives]][name[i]] assign[=] call[name[len], parameter[name[matching]]] return[name[true_positives]]
keyword[def] identifier[compute_num_true_positives] ( identifier[ref_freqs] , identifier[est_freqs] , identifier[window] = literal[int] , identifier[chroma] = keyword[False] ): literal[string] identifier[n_frames] = identifier[len] ( identifier[ref_freqs] ) identifier[true_positives] = identifier[np] . identifier[zeros] (( identifier[n_frames] ,)) keyword[for] identifier[i] ,( identifier[ref_frame] , identifier[est_frame] ) keyword[in] identifier[enumerate] ( identifier[zip] ( identifier[ref_freqs] , identifier[est_freqs] )): keyword[if] identifier[chroma] : identifier[matching] = identifier[util] . identifier[match_events] ( identifier[ref_frame] , identifier[est_frame] , identifier[window] , identifier[distance] = identifier[util] . identifier[_outer_distance_mod_n] ) keyword[else] : identifier[matching] = identifier[util] . identifier[match_events] ( identifier[ref_frame] , identifier[est_frame] , identifier[window] ) identifier[true_positives] [ identifier[i] ]= identifier[len] ( identifier[matching] ) keyword[return] identifier[true_positives]
def compute_num_true_positives(ref_freqs, est_freqs, window=0.5, chroma=False): """Compute the number of true positives in an estimate given a reference. A frequency is correct if it is within a quartertone of the correct frequency. Parameters ---------- ref_freqs : list of np.ndarray reference frequencies (MIDI) est_freqs : list of np.ndarray estimated frequencies (MIDI) window : float Window size, in semitones chroma : bool If True, computes distances modulo n. If True, ``ref_freqs`` and ``est_freqs`` should be wrapped modulo n. Returns ------- true_positives : np.ndarray Array the same length as ref_freqs containing the number of true positives. """ n_frames = len(ref_freqs) true_positives = np.zeros((n_frames,)) for (i, (ref_frame, est_frame)) in enumerate(zip(ref_freqs, est_freqs)): if chroma: # match chroma-wrapped frequency events matching = util.match_events(ref_frame, est_frame, window, distance=util._outer_distance_mod_n) # depends on [control=['if'], data=[]] else: # match frequency events within tolerance window in semitones matching = util.match_events(ref_frame, est_frame, window) true_positives[i] = len(matching) # depends on [control=['for'], data=[]] return true_positives
def channel_n(self):
    """Calculate the minimum number of channels based on the maximum
    possible channel width and the maximum length of the channels.

    Round up to the next even number (the factor of 2 shows up twice in
    the equation). The channel width must be greater than the hydraulic
    width that ensures baffle overlap. Based on the equation for the
    flocculator volume
    volume = ([max_L*channel_n] - entrancetank_L)*max_W * downstream_H

    :returns: number of channels
    :rtype: float * dimensionless
    """
    # Hydraulic width floor: at least W_min_HS_ratio, scaled up when the
    # maximum width allows a larger ratio.
    width_ratio = (self.max_W / self.W_min_HS_ratio).to(u.dimensionless)
    min_hydraulic_W = np.amax(np.array([1, width_ratio])) * self.W_min_HS_ratio
    # Total channel length required to hold the flocculator volume,
    # including the entrance tank.
    total_L = self.vol / (min_hydraulic_W * self.downstream_H) + self.ent_tank_L
    # Round up to an even channel count.
    return 2 * np.ceil((total_L / (2 * self.max_L)).to(u.dimensionless))
def function[channel_n, parameter[self]]: constant[Calculate the minimum number of channels based on the maximum possible channel width and the maximum length of the channels. Round up to the next even number (factor of 2 shows up twice in equation) The channel width must be greater than the hydraulic width that ensure baffle overlap. Based on the equation for the flocculator volume volume = ([max_L*channel_n] - entrancetank_L)*max_W * downstream_H :returns: number of channels :rtype: float * dimensionless ] variable[min_hydraulic_W] assign[=] binary_operation[call[name[np].amax, parameter[call[name[np].array, parameter[list[[<ast.Constant object at 0x7da1b06bd1e0>, <ast.Call object at 0x7da1b06bf580>]]]]]] * name[self].W_min_HS_ratio] return[binary_operation[constant[2] * call[name[np].ceil, parameter[call[binary_operation[binary_operation[binary_operation[name[self].vol / binary_operation[name[min_hydraulic_W] * name[self].downstream_H]] + name[self].ent_tank_L] / binary_operation[constant[2] * name[self].max_L]].to, parameter[name[u].dimensionless]]]]]]
keyword[def] identifier[channel_n] ( identifier[self] ): literal[string] identifier[min_hydraulic_W] = identifier[np] . identifier[amax] ( identifier[np] . identifier[array] ([ literal[int] ,( identifier[self] . identifier[max_W] / identifier[self] . identifier[W_min_HS_ratio] ). identifier[to] ( identifier[u] . identifier[dimensionless] )]))* identifier[self] . identifier[W_min_HS_ratio] keyword[return] literal[int] * identifier[np] . identifier[ceil] ((( identifier[self] . identifier[vol] /( identifier[min_hydraulic_W] * identifier[self] . identifier[downstream_H] )+ identifier[self] . identifier[ent_tank_L] )/( literal[int] * identifier[self] . identifier[max_L] )). identifier[to] ( identifier[u] . identifier[dimensionless] ))
def channel_n(self): """Calculate the minimum number of channels based on the maximum possible channel width and the maximum length of the channels. Round up to the next even number (factor of 2 shows up twice in equation) The channel width must be greater than the hydraulic width that ensure baffle overlap. Based on the equation for the flocculator volume volume = ([max_L*channel_n] - entrancetank_L)*max_W * downstream_H :returns: number of channels :rtype: float * dimensionless """ min_hydraulic_W = np.amax(np.array([1, (self.max_W / self.W_min_HS_ratio).to(u.dimensionless)])) * self.W_min_HS_ratio return 2 * np.ceil(((self.vol / (min_hydraulic_W * self.downstream_H) + self.ent_tank_L) / (2 * self.max_L)).to(u.dimensionless))
def save_image(image, image_path='_temp.png'):
    """Save a image.

    Parameters
    -----------
    image : numpy array
        [w, h, c]
    image_path : str
        path

    """
    # NOTE(review): the broad ``except Exception`` deliberately treats any
    # write failure as "probably greyscale" and retries with only the first
    # channel; failures unrelated to the channel count will surface from the
    # second imwrite call instead.
    try:  # RGB / multi-channel: write the array as-is
        imageio.imwrite(image_path, image)
    except Exception:  # Greyscale fallback: write only channel 0
        imageio.imwrite(image_path, image[:, :, 0])
def function[save_image, parameter[image, image_path]]: constant[Save a image. Parameters ----------- image : numpy array [w, h, c] image_path : str path ] <ast.Try object at 0x7da20c795900>
keyword[def] identifier[save_image] ( identifier[image] , identifier[image_path] = literal[string] ): literal[string] keyword[try] : identifier[imageio] . identifier[imwrite] ( identifier[image_path] , identifier[image] ) keyword[except] identifier[Exception] : identifier[imageio] . identifier[imwrite] ( identifier[image_path] , identifier[image] [:,:, literal[int] ])
def save_image(image, image_path='_temp.png'): """Save a image. Parameters ----------- image : numpy array [w, h, c] image_path : str path """ try: # RGB imageio.imwrite(image_path, image) # depends on [control=['try'], data=[]] except Exception: # Greyscale imageio.imwrite(image_path, image[:, :, 0]) # depends on [control=['except'], data=[]]
def copy(self, selection, smart_selection_adaption=True):
    """ Copy all selected items to the clipboard using smart selection adaptation by default

    :param selection: the current selection
    :param bool smart_selection_adaption: flag to enable smart selection adaptation mode
    :return:
    """
    # Debug-time type guard; note ``assert`` is stripped under ``python -O``.
    assert isinstance(selection, Selection)
    # Delegates the actual duplication of core and model objects to the
    # private helper (name-mangled within the enclosing class).
    self.__create_core_and_model_object_copies(selection, smart_selection_adaption)
def function[copy, parameter[self, selection, smart_selection_adaption]]: constant[ Copy all selected items to the clipboard using smart selection adaptation by default :param selection: the current selection :param bool smart_selection_adaption: flag to enable smart selection adaptation mode :return: ] assert[call[name[isinstance], parameter[name[selection], name[Selection]]]] call[name[self].__create_core_and_model_object_copies, parameter[name[selection], name[smart_selection_adaption]]]
keyword[def] identifier[copy] ( identifier[self] , identifier[selection] , identifier[smart_selection_adaption] = keyword[True] ): literal[string] keyword[assert] identifier[isinstance] ( identifier[selection] , identifier[Selection] ) identifier[self] . identifier[__create_core_and_model_object_copies] ( identifier[selection] , identifier[smart_selection_adaption] )
def copy(self, selection, smart_selection_adaption=True): """ Copy all selected items to the clipboard using smart selection adaptation by default :param selection: the current selection :param bool smart_selection_adaption: flag to enable smart selection adaptation mode :return: """ assert isinstance(selection, Selection) self.__create_core_and_model_object_copies(selection, smart_selection_adaption)
def cli_auth(context):
    """
    Authenticates and then outputs the resulting information.

    See :py:mod:`swiftly.cli.auth` for context usage information.

    See :py:class:`CLIAuth` for more information.
    """
    # (label, client attribute, optional value formatter), in display order.
    # Replaces sixteen near-identical getattr/append stanzas.
    fields = (
        ('Auth Cache', 'auth_cache_path', None),
        ('Auth URL', 'auth_url', None),
        ('Auth User', 'auth_user', None),
        ('Auth Key', 'auth_key', None),
        ('Auth Tenant', 'auth_tenant', None),
        ('Auth Methods', 'auth_methods', None),
        ('Direct Storage Path', 'storage_path', None),
        ('Direct CDN Path', 'cdn_path', None),
        ('Local Path', 'local_path', None),
        ('Regions', 'regions', lambda v: ' '.join(v)),
        ('Default Region', 'default_region', None),
        ('Selected Region', 'region', None),
        ('SNet', 'snet', None),
        ('Storage URL', 'storage_url', None),
        ('CDN URL', 'cdn_url', None),
        ('Auth Token', 'auth_token', None),
    )
    with context.io_manager.with_stdout() as fp:
        with context.client_manager.with_client() as client:
            info = []
            client.auth()
            for label, attr, formatter in fields:
                # Only truthy attributes are reported, matching the
                # original per-attribute ``if getattr(...)`` checks.
                value = getattr(client, attr, None)
                if value:
                    info.append(
                        (label, formatter(value) if formatter else value))
            if not info:
                info.append((
                    'No auth information available',
                    'Maybe no credentials were provided?'))
            # Left-align labels to the width of the longest one.
            fmt = '%%-%ds %%s\n' % (max(len(t) for t, v in info) + 1)
            for t, v in info:
                fp.write(fmt % (t + ':', v))
            fp.flush()
def function[cli_auth, parameter[context]]: constant[ Authenticates and then outputs the resulting information. See :py:mod:`swiftly.cli.auth` for context usage information. See :py:class:`CLIAuth` for more information. ] with call[name[context].io_manager.with_stdout, parameter[]] begin[:] with call[name[context].client_manager.with_client, parameter[]] begin[:] variable[info] assign[=] list[[]] call[name[client].auth, parameter[]] if call[name[getattr], parameter[name[client], constant[auth_cache_path], constant[None]]] begin[:] call[name[info].append, parameter[tuple[[<ast.Constant object at 0x7da18f810e50>, <ast.Attribute object at 0x7da18f813010>]]]] if call[name[getattr], parameter[name[client], constant[auth_url], constant[None]]] begin[:] call[name[info].append, parameter[tuple[[<ast.Constant object at 0x7da18f8138e0>, <ast.Attribute object at 0x7da18f813d00>]]]] if call[name[getattr], parameter[name[client], constant[auth_user], constant[None]]] begin[:] call[name[info].append, parameter[tuple[[<ast.Constant object at 0x7da1b01e0d90>, <ast.Attribute object at 0x7da1b01e3700>]]]] if call[name[getattr], parameter[name[client], constant[auth_key], constant[None]]] begin[:] call[name[info].append, parameter[tuple[[<ast.Constant object at 0x7da1b01e2980>, <ast.Attribute object at 0x7da1b01e2fe0>]]]] if call[name[getattr], parameter[name[client], constant[auth_tenant], constant[None]]] begin[:] call[name[info].append, parameter[tuple[[<ast.Constant object at 0x7da1b01e2170>, <ast.Attribute object at 0x7da1b01e0b50>]]]] if call[name[getattr], parameter[name[client], constant[auth_methods], constant[None]]] begin[:] call[name[info].append, parameter[tuple[[<ast.Constant object at 0x7da1b01e33a0>, <ast.Attribute object at 0x7da1b01e0e50>]]]] if call[name[getattr], parameter[name[client], constant[storage_path], constant[None]]] begin[:] call[name[info].append, parameter[tuple[[<ast.Constant object at 0x7da1b01e3040>, <ast.Attribute object at 0x7da1b01e2530>]]]] if 
call[name[getattr], parameter[name[client], constant[cdn_path], constant[None]]] begin[:] call[name[info].append, parameter[tuple[[<ast.Constant object at 0x7da1b01e0850>, <ast.Attribute object at 0x7da1b01e18d0>]]]] if call[name[getattr], parameter[name[client], constant[local_path], constant[None]]] begin[:] call[name[info].append, parameter[tuple[[<ast.Constant object at 0x7da1b01e0d60>, <ast.Attribute object at 0x7da1b01e3250>]]]] if call[name[getattr], parameter[name[client], constant[regions], constant[None]]] begin[:] call[name[info].append, parameter[tuple[[<ast.Constant object at 0x7da2041da0e0>, <ast.Call object at 0x7da2041db8e0>]]]] if call[name[getattr], parameter[name[client], constant[default_region], constant[None]]] begin[:] call[name[info].append, parameter[tuple[[<ast.Constant object at 0x7da2041d84c0>, <ast.Attribute object at 0x7da2041db040>]]]] if call[name[getattr], parameter[name[client], constant[region], constant[None]]] begin[:] call[name[info].append, parameter[tuple[[<ast.Constant object at 0x7da2041da650>, <ast.Attribute object at 0x7da2041d8100>]]]] if call[name[getattr], parameter[name[client], constant[snet], constant[None]]] begin[:] call[name[info].append, parameter[tuple[[<ast.Constant object at 0x7da2041dbaf0>, <ast.Attribute object at 0x7da2041d93f0>]]]] if call[name[getattr], parameter[name[client], constant[storage_url], constant[None]]] begin[:] call[name[info].append, parameter[tuple[[<ast.Constant object at 0x7da1b26aeb60>, <ast.Attribute object at 0x7da1b26ad750>]]]] if call[name[getattr], parameter[name[client], constant[cdn_url], constant[None]]] begin[:] call[name[info].append, parameter[tuple[[<ast.Constant object at 0x7da1b26afa60>, <ast.Attribute object at 0x7da1b26ad120>]]]] if call[name[getattr], parameter[name[client], constant[auth_token], constant[None]]] begin[:] call[name[info].append, parameter[tuple[[<ast.Constant object at 0x7da1b26ac850>, <ast.Attribute object at 0x7da1b26ad9f0>]]]] if <ast.UnaryOp object 
at 0x7da1b26af4f0> begin[:] call[name[info].append, parameter[tuple[[<ast.Constant object at 0x7da1b26afbb0>, <ast.Constant object at 0x7da1b26ad390>]]]] variable[fmt] assign[=] binary_operation[constant[%%-%ds %%s ] <ast.Mod object at 0x7da2590d6920> binary_operation[call[name[max], parameter[<ast.GeneratorExp object at 0x7da1b26ac670>]] + constant[1]]] for taget[tuple[[<ast.Name object at 0x7da1b26aefe0>, <ast.Name object at 0x7da1b26af640>]]] in starred[name[info]] begin[:] call[name[fp].write, parameter[binary_operation[name[fmt] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.BinOp object at 0x7da1b26ad510>, <ast.Name object at 0x7da20c795f30>]]]]] call[name[fp].flush, parameter[]]
keyword[def] identifier[cli_auth] ( identifier[context] ): literal[string] keyword[with] identifier[context] . identifier[io_manager] . identifier[with_stdout] () keyword[as] identifier[fp] : keyword[with] identifier[context] . identifier[client_manager] . identifier[with_client] () keyword[as] identifier[client] : identifier[info] =[] identifier[client] . identifier[auth] () keyword[if] identifier[getattr] ( identifier[client] , literal[string] , keyword[None] ): identifier[info] . identifier[append] (( literal[string] , identifier[client] . identifier[auth_cache_path] )) keyword[if] identifier[getattr] ( identifier[client] , literal[string] , keyword[None] ): identifier[info] . identifier[append] (( literal[string] , identifier[client] . identifier[auth_url] )) keyword[if] identifier[getattr] ( identifier[client] , literal[string] , keyword[None] ): identifier[info] . identifier[append] (( literal[string] , identifier[client] . identifier[auth_user] )) keyword[if] identifier[getattr] ( identifier[client] , literal[string] , keyword[None] ): identifier[info] . identifier[append] (( literal[string] , identifier[client] . identifier[auth_key] )) keyword[if] identifier[getattr] ( identifier[client] , literal[string] , keyword[None] ): identifier[info] . identifier[append] (( literal[string] , identifier[client] . identifier[auth_tenant] )) keyword[if] identifier[getattr] ( identifier[client] , literal[string] , keyword[None] ): identifier[info] . identifier[append] (( literal[string] , identifier[client] . identifier[auth_methods] )) keyword[if] identifier[getattr] ( identifier[client] , literal[string] , keyword[None] ): identifier[info] . identifier[append] (( literal[string] , identifier[client] . identifier[storage_path] )) keyword[if] identifier[getattr] ( identifier[client] , literal[string] , keyword[None] ): identifier[info] . identifier[append] (( literal[string] , identifier[client] . 
identifier[cdn_path] )) keyword[if] identifier[getattr] ( identifier[client] , literal[string] , keyword[None] ): identifier[info] . identifier[append] (( literal[string] , identifier[client] . identifier[local_path] )) keyword[if] identifier[getattr] ( identifier[client] , literal[string] , keyword[None] ): identifier[info] . identifier[append] (( literal[string] , literal[string] . identifier[join] ( identifier[client] . identifier[regions] ))) keyword[if] identifier[getattr] ( identifier[client] , literal[string] , keyword[None] ): identifier[info] . identifier[append] (( literal[string] , identifier[client] . identifier[default_region] )) keyword[if] identifier[getattr] ( identifier[client] , literal[string] , keyword[None] ): identifier[info] . identifier[append] (( literal[string] , identifier[client] . identifier[region] )) keyword[if] identifier[getattr] ( identifier[client] , literal[string] , keyword[None] ): identifier[info] . identifier[append] (( literal[string] , identifier[client] . identifier[snet] )) keyword[if] identifier[getattr] ( identifier[client] , literal[string] , keyword[None] ): identifier[info] . identifier[append] (( literal[string] , identifier[client] . identifier[storage_url] )) keyword[if] identifier[getattr] ( identifier[client] , literal[string] , keyword[None] ): identifier[info] . identifier[append] (( literal[string] , identifier[client] . identifier[cdn_url] )) keyword[if] identifier[getattr] ( identifier[client] , literal[string] , keyword[None] ): identifier[info] . identifier[append] (( literal[string] , identifier[client] . identifier[auth_token] )) keyword[if] keyword[not] identifier[info] : identifier[info] . 
identifier[append] (( literal[string] , literal[string] )) identifier[fmt] = literal[string] %( identifier[max] ( identifier[len] ( identifier[t] ) keyword[for] identifier[t] , identifier[v] keyword[in] identifier[info] )+ literal[int] ) keyword[for] identifier[t] , identifier[v] keyword[in] identifier[info] : identifier[fp] . identifier[write] ( identifier[fmt] %( identifier[t] + literal[string] , identifier[v] )) identifier[fp] . identifier[flush] ()
def cli_auth(context): """ Authenticates and then outputs the resulting information. See :py:mod:`swiftly.cli.auth` for context usage information. See :py:class:`CLIAuth` for more information. """ with context.io_manager.with_stdout() as fp: with context.client_manager.with_client() as client: info = [] client.auth() if getattr(client, 'auth_cache_path', None): info.append(('Auth Cache', client.auth_cache_path)) # depends on [control=['if'], data=[]] if getattr(client, 'auth_url', None): info.append(('Auth URL', client.auth_url)) # depends on [control=['if'], data=[]] if getattr(client, 'auth_user', None): info.append(('Auth User', client.auth_user)) # depends on [control=['if'], data=[]] if getattr(client, 'auth_key', None): info.append(('Auth Key', client.auth_key)) # depends on [control=['if'], data=[]] if getattr(client, 'auth_tenant', None): info.append(('Auth Tenant', client.auth_tenant)) # depends on [control=['if'], data=[]] if getattr(client, 'auth_methods', None): info.append(('Auth Methods', client.auth_methods)) # depends on [control=['if'], data=[]] if getattr(client, 'storage_path', None): info.append(('Direct Storage Path', client.storage_path)) # depends on [control=['if'], data=[]] if getattr(client, 'cdn_path', None): info.append(('Direct CDN Path', client.cdn_path)) # depends on [control=['if'], data=[]] if getattr(client, 'local_path', None): info.append(('Local Path', client.local_path)) # depends on [control=['if'], data=[]] if getattr(client, 'regions', None): info.append(('Regions', ' '.join(client.regions))) # depends on [control=['if'], data=[]] if getattr(client, 'default_region', None): info.append(('Default Region', client.default_region)) # depends on [control=['if'], data=[]] if getattr(client, 'region', None): info.append(('Selected Region', client.region)) # depends on [control=['if'], data=[]] if getattr(client, 'snet', None): info.append(('SNet', client.snet)) # depends on [control=['if'], data=[]] if getattr(client, 
'storage_url', None): info.append(('Storage URL', client.storage_url)) # depends on [control=['if'], data=[]] if getattr(client, 'cdn_url', None): info.append(('CDN URL', client.cdn_url)) # depends on [control=['if'], data=[]] if getattr(client, 'auth_token', None): info.append(('Auth Token', client.auth_token)) # depends on [control=['if'], data=[]] if not info: info.append(('No auth information available', 'Maybe no credentials were provided?')) # depends on [control=['if'], data=[]] fmt = '%%-%ds %%s\n' % (max((len(t) for (t, v) in info)) + 1) for (t, v) in info: fp.write(fmt % (t + ':', v)) # depends on [control=['for'], data=[]] fp.flush() # depends on [control=['with'], data=['client']] # depends on [control=['with'], data=['fp']]